code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
import os
import sys
import functools
import operator
import weakref
import inspect
# Python 2/3 interpreter flags used for feature guards throughout this
# module.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

if PY3:
    # On Python 3 the only text string type is str.
    string_types = str,
else:
    # On Python 2 both str and unicode derive from basestring.
    string_types = basestring,
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Works on both Python 2 and 3 by constructing an intermediate class
    named ``NewBase`` through the metaclass, rather than using either
    version's incompatible metaclass declaration syntax.
    """
    name, namespace = "NewBase", {}
    return meta(name, bases, namespace)
class _ObjectProxyMethods(object):
    """Holder for special properties that are copied verbatim into every
    proxy class by ``_ObjectProxyMetaType``."""

    # We use properties to override the values of __module__ and
    # __doc__. If we add these in ObjectProxy, the derived class
    # __dict__ will still be setup to have string variants of these
    # attributes and the rules of descriptors means that they appear to
    # take precedence over the properties in the base class. To avoid
    # that, we copy the properties into the derived class type itself
    # via a meta class. In that way the properties will always take
    # precedence.

    @property
    def __module__(self):
        return self.__wrapped__.__module__

    @__module__.setter
    def __module__(self, value):
        self.__wrapped__.__module__ = value

    @property
    def __doc__(self):
        return self.__wrapped__.__doc__

    @__doc__.setter
    def __doc__(self, value):
        self.__wrapped__.__doc__ = value

    # We similarly use a property for __dict__. We need __dict__ to be
    # explicit to ensure that vars() works as expected.

    @property
    def __dict__(self):
        return self.__wrapped__.__dict__

    # Need to also propagate the special __weakref__ attribute for case
    # where decorating classes which will define this. If do not define
    # it and use a function like inspect.getmembers() on a decorator
    # class it will fail. This can't be in the derived classes.

    @property
    def __weakref__(self):
        return self.__wrapped__.__weakref__
class _ObjectProxyMetaType(type):
    """Metaclass which injects the shared proxy properties into every
    class it constructs.

    Copying the properties from ``_ObjectProxyMethods`` into each class
    dictionary guarantees they always win over same-named attributes set
    during construction of a derived class, without duplicating their
    implementation in every subclass.
    """

    def __new__(mcls, typename, parents, namespace):
        namespace.update(vars(_ObjectProxyMethods))
        return type.__new__(mcls, typename, parents, namespace)
class ObjectProxy(with_metaclass(_ObjectProxyMetaType)):
    """Transparent proxy which delegates attribute access, comparisons,
    arithmetic, conversions and container protocol methods to the object
    stored in ``__wrapped__``.

    Proxy-private attributes on subclasses must use a ``_self_`` prefix
    so that ``__setattr__()``/``__delattr__()`` store them on the proxy
    itself rather than forwarding them to the wrapped object.
    """

    __slots__ = '__wrapped__'

    def __init__(self, wrapped):
        object.__setattr__(self, '__wrapped__', wrapped)

        # Python 3.2+ has the __qualname__ attribute, but it does not
        # allow it to be overridden using a property and it must instead
        # be an actual string object instead.

        try:
            object.__setattr__(self, '__qualname__', wrapped.__qualname__)
        except AttributeError:
            pass

    @property
    def __name__(self):
        return self.__wrapped__.__name__

    @__name__.setter
    def __name__(self, value):
        self.__wrapped__.__name__ = value

    @property
    def __class__(self):
        return self.__wrapped__.__class__

    @__class__.setter
    def __class__(self, value):
        self.__wrapped__.__class__ = value

    @property
    def __annotations__(self):
        # BUG FIX: this previously read the misspelled attribute
        # '__anotations__', which meant the property always raised
        # AttributeError even for wrapped functions that did have
        # annotations.
        return self.__wrapped__.__annotations__

    @__annotations__.setter
    def __annotations__(self, value):
        self.__wrapped__.__annotations__ = value

    def __dir__(self):
        return dir(self.__wrapped__)

    def __str__(self):
        return str(self.__wrapped__)

    if PY3:
        def __bytes__(self):
            return bytes(self.__wrapped__)

    def __repr__(self):
        # Deliberately not delegated so that proxies remain
        # distinguishable from the wrapped object when debugging.
        return '<%s at 0x%x for %s at 0x%x>' % (
                type(self).__name__, id(self),
                type(self.__wrapped__).__name__,
                id(self.__wrapped__))

    def __reversed__(self):
        return reversed(self.__wrapped__)

    if PY3:
        def __round__(self):
            return round(self.__wrapped__)

    def __lt__(self, other):
        return self.__wrapped__ < other

    def __le__(self, other):
        return self.__wrapped__ <= other

    def __eq__(self, other):
        return self.__wrapped__ == other

    def __ne__(self, other):
        return self.__wrapped__ != other

    def __gt__(self, other):
        return self.__wrapped__ > other

    def __ge__(self, other):
        return self.__wrapped__ >= other

    def __hash__(self):
        return hash(self.__wrapped__)

    def __nonzero__(self):
        # Python 2 truth-value protocol.
        return bool(self.__wrapped__)

    def __bool__(self):
        return bool(self.__wrapped__)

    def __setattr__(self, name, value):
        # Attributes with the '_self_' prefix belong to the proxy
        # itself; everything else is forwarded to the wrapped object
        # unless it is a proxy-level slot/property.
        if name.startswith('_self_'):
            object.__setattr__(self, name, value)

        elif name == '__wrapped__':
            object.__setattr__(self, name, value)
            # __qualname__ must track the new wrapped object; clear any
            # stale copy before attempting to re-cache it.
            try:
                object.__delattr__(self, '__qualname__')
            except AttributeError:
                pass
            try:
                object.__setattr__(self, '__qualname__', value.__qualname__)
            except AttributeError:
                pass

        elif name == '__qualname__':
            # Stored on both wrapped object and proxy, since the proxy
            # cannot expose __qualname__ via a property.
            setattr(self.__wrapped__, name, value)
            object.__setattr__(self, name, value)

        elif hasattr(type(self), name):
            object.__setattr__(self, name, value)

        else:
            setattr(self.__wrapped__, name, value)

    def __getattr__(self, name):
        # If we are being asked to lookup '__wrapped__' then the
        # '__init__()' method cannot have been called.

        if name == '__wrapped__':
            raise ValueError('wrapper has not been initialised')

        return getattr(self.__wrapped__, name)

    def __delattr__(self, name):
        if name.startswith('_self_'):
            object.__delattr__(self, name)

        elif name == '__wrapped__':
            raise TypeError('__wrapped__ must be an object')

        elif name == '__qualname__':
            object.__delattr__(self, name)
            delattr(self.__wrapped__, name)

        elif hasattr(type(self), name):
            object.__delattr__(self, name)

        else:
            delattr(self.__wrapped__, name)

    def __add__(self, other):
        return self.__wrapped__ + other

    def __sub__(self, other):
        return self.__wrapped__ - other

    def __mul__(self, other):
        return self.__wrapped__ * other

    def __div__(self, other):
        # Python 2 classic division; operator.div does not exist on
        # Python 3, but this method is never invoked there.
        return operator.div(self.__wrapped__, other)

    def __truediv__(self, other):
        return operator.truediv(self.__wrapped__, other)

    def __floordiv__(self, other):
        return self.__wrapped__ // other

    def __mod__(self, other):
        return self.__wrapped__ % other

    def __divmod__(self, other):
        return divmod(self.__wrapped__, other)

    def __pow__(self, other, *args):
        return pow(self.__wrapped__, other, *args)

    def __lshift__(self, other):
        return self.__wrapped__ << other

    def __rshift__(self, other):
        return self.__wrapped__ >> other

    def __and__(self, other):
        return self.__wrapped__ & other

    def __xor__(self, other):
        return self.__wrapped__ ^ other

    def __or__(self, other):
        return self.__wrapped__ | other

    def __radd__(self, other):
        return other + self.__wrapped__

    def __rsub__(self, other):
        return other - self.__wrapped__

    def __rmul__(self, other):
        return other * self.__wrapped__

    def __rdiv__(self, other):
        return operator.div(other, self.__wrapped__)

    def __rtruediv__(self, other):
        return operator.truediv(other, self.__wrapped__)

    def __rfloordiv__(self, other):
        return other // self.__wrapped__

    def __rmod__(self, other):
        return other % self.__wrapped__

    def __rdivmod__(self, other):
        return divmod(other, self.__wrapped__)

    def __rpow__(self, other, *args):
        return pow(other, self.__wrapped__, *args)

    def __rlshift__(self, other):
        return other << self.__wrapped__

    def __rrshift__(self, other):
        return other >> self.__wrapped__

    def __rand__(self, other):
        return other & self.__wrapped__

    def __rxor__(self, other):
        return other ^ self.__wrapped__

    def __ror__(self, other):
        return other | self.__wrapped__

    # In-place operators rebind __wrapped__ (via __setattr__ above) so
    # the proxy continues to wrap the result, and return self as the
    # in-place protocol requires.

    def __iadd__(self, other):
        self.__wrapped__ += other
        return self

    def __isub__(self, other):
        self.__wrapped__ -= other
        return self

    def __imul__(self, other):
        self.__wrapped__ *= other
        return self

    def __idiv__(self, other):
        self.__wrapped__ = operator.idiv(self.__wrapped__, other)
        return self

    def __itruediv__(self, other):
        self.__wrapped__ = operator.itruediv(self.__wrapped__, other)
        return self

    def __ifloordiv__(self, other):
        self.__wrapped__ //= other
        return self

    def __imod__(self, other):
        self.__wrapped__ %= other
        return self

    def __ipow__(self, other):
        self.__wrapped__ **= other
        return self

    def __ilshift__(self, other):
        self.__wrapped__ <<= other
        return self

    def __irshift__(self, other):
        self.__wrapped__ >>= other
        return self

    def __iand__(self, other):
        self.__wrapped__ &= other
        return self

    def __ixor__(self, other):
        self.__wrapped__ ^= other
        return self

    def __ior__(self, other):
        self.__wrapped__ |= other
        return self

    def __neg__(self):
        return -self.__wrapped__

    def __pos__(self):
        return +self.__wrapped__

    def __abs__(self):
        return abs(self.__wrapped__)

    def __invert__(self):
        return ~self.__wrapped__

    def __int__(self):
        return int(self.__wrapped__)

    def __long__(self):
        # Python 2 only; long() does not exist on Python 3.
        return long(self.__wrapped__)

    def __float__(self):
        return float(self.__wrapped__)

    def __oct__(self):
        return oct(self.__wrapped__)

    def __hex__(self):
        return hex(self.__wrapped__)

    def __index__(self):
        return operator.index(self.__wrapped__)

    def __len__(self):
        return len(self.__wrapped__)

    def __contains__(self, value):
        return value in self.__wrapped__

    def __getitem__(self, key):
        return self.__wrapped__[key]

    def __setitem__(self, key, value):
        self.__wrapped__[key] = value

    def __delitem__(self, key):
        del self.__wrapped__[key]

    # Slice methods are Python 2 only; Python 3 routes slicing through
    # __getitem__ and friends.

    def __getslice__(self, i, j):
        return self.__wrapped__[i:j]

    def __setslice__(self, i, j, value):
        self.__wrapped__[i:j] = value

    def __delslice__(self, i, j):
        del self.__wrapped__[i:j]

    def __enter__(self):
        return self.__wrapped__.__enter__()

    def __exit__(self, *args, **kwargs):
        return self.__wrapped__.__exit__(*args, **kwargs)

    def __iter__(self):
        return iter(self.__wrapped__)
# ObjectProxy variant which additionally forwards calls to the wrapped
# callable. (A class docstring would be discarded by the metaclass's
# __doc__ property, so documented here instead.)
class CallableObjectProxy(ObjectProxy):

    def __call__(self, *args, **kwargs):
        return self.__wrapped__(*args, **kwargs)
# Common base for FunctionWrapper and BoundFunctionWrapper. Stores the
# user supplied wrapper function plus binding state in proxy-private
# '_self_' slots and implements the descriptor protocol so that wrapped
# methods rebind correctly.
class _FunctionWrapperBase(ObjectProxy):

    __slots__ = ('_self_instance', '_self_wrapper', '_self_enabled',
            '_self_binding', '_self_parent')

    def __init__(self, wrapped, instance, wrapper, enabled=None,
            binding='function', parent=None):
        super(_FunctionWrapperBase, self).__init__(wrapped)

        # All state is stored via object.__setattr__ directly to bypass
        # the proxy's attribute forwarding in ObjectProxy.__setattr__.
        object.__setattr__(self, '_self_instance', instance)
        object.__setattr__(self, '_self_wrapper', wrapper)
        object.__setattr__(self, '_self_enabled', enabled)
        object.__setattr__(self, '_self_binding', binding)
        object.__setattr__(self, '_self_parent', parent)

    def __get__(self, instance, owner):
        # This method is actually doing double duty for both unbound and
        # bound derived wrapper classes. It should possibly be broken up
        # and the distinct functionality moved into the derived classes.
        # Can't do that straight away due to some legacy code which is
        # relying on it being here in this base class.
        #
        # The distinguishing attribute which determines whether we are
        # being called in an unbound or bound wrapper is the parent
        # attribute. If binding has never occurred, then the parent will
        # be None.
        #
        # First therefore, is if we are called in an unbound wrapper. In
        # this case we perform the binding.
        #
        # We have one special case to worry about here. This is where we
        # are decorating a nested class. In this case the wrapped class
        # would not have a __get__() method to call. In that case we
        # simply return self.
        #
        # Note that we otherwise still do binding even if instance is
        # None and accessing an unbound instance method from a class.
        # This is because we need to be able to later detect that
        # specific case as we will need to extract the instance from the
        # first argument of those passed in.

        if self._self_parent is None:
            if not inspect.isclass(self.__wrapped__):
                descriptor = self.__wrapped__.__get__(instance, owner)

                return self.__bound_function_wrapper__(descriptor, instance,
                        self._self_wrapper, self._self_enabled,
                        self._self_binding, self)

            return self

        # Now we have the case of binding occurring a second time on what
        # was already a bound function. In this case we would usually
        # return ourselves again. This mirrors what Python does.
        #
        # The special case this time is where we were originally bound
        # with an instance of None and we were likely an instance
        # method. In that case we rebind against the original wrapped
        # function from the parent again.

        if self._self_instance is None and self._self_binding == 'function':
            descriptor = self._self_parent.__wrapped__.__get__(
                    instance, owner)

            return self._self_parent.__bound_function_wrapper__(
                    descriptor, instance, self._self_wrapper,
                    self._self_enabled, self._self_binding,
                    self._self_parent)

        return self

    def __call__(self, *args, **kwargs):
        # If enabled has been specified, then evaluate it at this point
        # and if the wrapper is not to be executed, then simply return
        # the bound function rather than a bound wrapper for the bound
        # function. When evaluating enabled, if it is callable we call
        # it, otherwise we evaluate it as a boolean.

        if self._self_enabled is not None:
            if callable(self._self_enabled):
                if not self._self_enabled():
                    return self.__wrapped__(*args, **kwargs)
            elif not self._self_enabled:
                return self.__wrapped__(*args, **kwargs)

        # This can occur where initial function wrapper was applied to
        # a function that was already bound to an instance. In that case
        # we want to extract the instance from the function and use it.

        if self._self_binding == 'function':
            if self._self_instance is None:
                instance = getattr(self.__wrapped__, '__self__', None)
                if instance is not None:
                    return self._self_wrapper(self.__wrapped__, instance,
                            args, kwargs)

        # This is generally invoked when the wrapped function is being
        # called as a normal function and is not bound to a class as an
        # instance method. This is also invoked in the case where the
        # wrapped function was a method, but this wrapper was in turn
        # wrapped using the staticmethod decorator.

        return self._self_wrapper(self.__wrapped__, self._self_instance,
                args, kwargs)
# Wrapper produced when a FunctionWrapper around a method is bound to an
# instance or class via the descriptor protocol.
class BoundFunctionWrapper(_FunctionWrapperBase):

    def __call__(self, *args, **kwargs):
        # If enabled has been specified, then evaluate it at this point
        # and if the wrapper is not to be executed, then simply return
        # the bound function rather than a bound wrapper for the bound
        # function. When evaluating enabled, if it is callable we call
        # it, otherwise we evaluate it as a boolean.

        if self._self_enabled is not None:
            if callable(self._self_enabled):
                if not self._self_enabled():
                    return self.__wrapped__(*args, **kwargs)
            elif not self._self_enabled:
                return self.__wrapped__(*args, **kwargs)

        # We need to do things different depending on whether we are
        # likely wrapping an instance method vs a static method or class
        # method.

        if self._self_binding == 'function':
            if self._self_instance is None:
                # This situation can occur where someone is calling the
                # instancemethod via the class type and passing the instance
                # as the first argument. We need to shift the args before
                # making the call to the wrapper and effectively bind the
                # instance to the wrapped function using a partial so the
                # wrapper doesn't see anything as being different.

                if not args:
                    raise TypeError('missing 1 required positional argument')

                instance, args = args[0], args[1:]
                wrapped = functools.partial(self.__wrapped__, instance)
                return self._self_wrapper(wrapped, instance, args, kwargs)

            return self._self_wrapper(self.__wrapped__, self._self_instance,
                    args, kwargs)

        else:
            # As in this case we would be dealing with a classmethod or
            # staticmethod, then _self_instance will only tell us whether
            # when calling the classmethod or staticmethod they did it via an
            # instance of the class it is bound to and not the case where
            # done by the class type itself. We thus ignore _self_instance
            # and use the __self__ attribute of the bound function instead.
            # For a classmethod, this means instance will be the class type
            # and for a staticmethod it will be None. This is probably the
            # more useful thing we can pass through even though we lose
            # knowledge of whether they were called on the instance vs the
            # class type, as it reflects what they have available in the
            # decorated function.

            instance = getattr(self.__wrapped__, '__self__', None)

            return self._self_wrapper(self.__wrapped__, instance, args,
                    kwargs)
# The public entry-point wrapper class. Detects at construction time how
# the wrapped object was likely bound (function, classmethod or
# staticmethod) so that the wrapper callback later receives a sensible
# instance argument.
class FunctionWrapper(_FunctionWrapperBase):

    __bound_function_wrapper__ = BoundFunctionWrapper

    def __init__(self, wrapped, wrapper, enabled=None):
        # What it is we are wrapping here could be anything. We need to
        # try and detect specific cases though. In particular, we need
        # to detect when we are given something that is a method of a
        # class. Further, we need to know when it is likely an instance
        # method, as opposed to a class or static method. This can
        # become problematic though as there isn't strictly a fool proof
        # method of knowing.
        #
        # The situations we could encounter when wrapping a method are:
        #
        # 1. The wrapper is being applied as part of a decorator which
        # is a part of the class definition. In this case what we are
        # given is the raw unbound function, classmethod or staticmethod
        # wrapper objects.
        #
        # The problem here is that we will not know we are being applied
        # in the context of the class being set up. This becomes
        # important later for the case of an instance method, because in
        # that case we just see it as a raw function and can't
        # distinguish it from wrapping a normal function outside of
        # a class context.
        #
        # 2. The wrapper is being applied when performing monkey
        # patching of the class type afterwards and the method to be
        # wrapped was retrieved direct from the __dict__ of the class
        # type. This is effectively the same as (1) above.
        #
        # 3. The wrapper is being applied when performing monkey
        # patching of the class type afterwards and the method to be
        # wrapped was retrieved from the class type. In this case
        # binding will have been performed where the instance against
        # which the method is bound will be None at that point.
        #
        # This case is a problem because we can no longer tell if the
        # method was a static method, plus if using Python3, we cannot
        # tell if it was an instance method as the concept of an
        # unbound method no longer exists.
        #
        # 4. The wrapper is being applied when performing monkey
        # patching of an instance of a class. In this case binding will
        # have been performed where the instance was not None.
        #
        # This case is a problem because we can no longer tell if the
        # method was a static method.
        #
        # Overall, the best we can do is look at the original type of the
        # object which was wrapped prior to any binding being done and
        # see if it is an instance of classmethod or staticmethod. In
        # the case where other decorators are between us and them, if
        # they do not propagate the __class__ attribute so that the
        # isinstance() checks works, then likely this will do the wrong
        # thing where classmethod and staticmethod are used.
        #
        # Since it is likely to be very rare that anyone even puts
        # decorators around classmethod and staticmethod, likelihood of
        # that being an issue is very small, so we accept it and suggest
        # that those other decorators be fixed. It is also only an issue
        # if a decorator wants to actually do things with the arguments.
        #
        # As to not being able to identify static methods properly, we
        # just hope that that isn't something people are going to want
        # to wrap, or if they do suggest they do it the correct way by
        # ensuring that it is decorated in the class definition itself,
        # or patch it in the __dict__ of the class type.
        #
        # So to get the best outcome we can, whenever we aren't sure what
        # it is, we label it as a 'function'. If it was already bound and
        # that is rebound later, we assume that it will be an instance
        # method and try and cope with the possibility that the 'self'
        # argument is being passed as an explicit argument and shuffle
        # the arguments around to extract 'self' for use as the instance.

        if isinstance(wrapped, classmethod):
            binding = 'classmethod'

        elif isinstance(wrapped, staticmethod):
            binding = 'staticmethod'

        elif hasattr(wrapped, '__self__'):
            if inspect.isclass(wrapped.__self__):
                binding = 'classmethod'
            else:
                binding = 'function'

        else:
            binding = 'function'

        super(FunctionWrapper, self).__init__(wrapped, None, wrapper,
                enabled, binding)
# Prefer the optimised C implementations of the proxy/wrapper types when
# the extension module is available and has not been explicitly disabled
# through the WRAPT_DISABLE_EXTENSIONS environment variable. Fall back
# silently to the pure Python implementations above.
try:
    if not os.environ.get('WRAPT_DISABLE_EXTENSIONS'):
        from ._wrappers import (ObjectProxy, CallableObjectProxy,
            FunctionWrapper, BoundFunctionWrapper, _FunctionWrapperBase)
except ImportError:
    pass
# Helper functions for applying wrappers to existing functions.
def resolve_path(module, name):
    """Resolve the dotted attribute path ``name`` relative to ``module``.

    ``module`` may be a module object or an importable module name.
    Returns ``(parent, attribute, original)`` where ``original`` is the
    resolved object and ``parent`` is the object holding ``attribute``,
    suitable for later patching via setattr().
    """
    if isinstance(module, string_types):
        __import__(module)
        module = sys.modules[module]

    parent = module

    path = name.split('.')
    attribute = path[0]

    original = getattr(parent, attribute)
    for attribute in path[1:]:
        parent = original

        # We can't just always use getattr() because in doing
        # that on a class it will cause binding to occur which
        # will complicate things later and cause some things not
        # to work. For the case of a class we therefore access
        # the __dict__ directly. To cope though with the wrong
        # class being given to us, or a method being moved into
        # a base class, we need to walk the class hierarchy to
        # work out exactly which __dict__ the method was defined
        # in, as accessing it from __dict__ will fail if it was
        # not actually on the class given. Fallback to using
        # getattr() if we can't find it. If it truly doesn't
        # exist, then that will fail.

        if inspect.isclass(original):
            for cls in inspect.getmro(original):
                if attribute in vars(cls):
                    original = vars(cls)[attribute]
                    break
            else:
                original = getattr(original, attribute)

        else:
            original = getattr(original, attribute)

    return (parent, attribute, original)
def apply_patch(parent, attribute, replacement):
    """Replace ``attribute`` on ``parent`` with ``replacement``."""
    setattr(parent, attribute, replacement)
def wrap_object(module, name, factory, args=(), kwargs=None):
    """Wrap the object at dotted path ``name`` within ``module``.

    The replacement is built by calling ``factory(original, *args,
    **kwargs)`` and is patched in place of the original. Returns the
    wrapper that was installed.

    BUG FIX: ``kwargs`` previously defaulted to a shared mutable dict
    (``{}``); use None as the sentinel instead so call sites can never
    observe mutations leaking between calls.
    """
    if kwargs is None:
        kwargs = {}
    (parent, attribute, original) = resolve_path(module, name)
    wrapper = factory(original, *args, **kwargs)
    apply_patch(parent, attribute, wrapper)
    return wrapper
# Function for applying a proxy object to an attribute of a class
# instance. The wrapper works by defining an attribute of the same name
# on the class which is a descriptor and which intercepts access to the
# instance attribute. Note that this cannot be used on attributes which
# are themselves defined by a property object.
class AttributeWrapper(object):
    """Data descriptor that lazily wraps an instance attribute.

    Installed on a class, it intercepts reads of ``attribute`` and
    returns ``factory(value, *args, **kwargs)`` built from the raw value
    kept in the instance ``__dict__``. Writes and deletes operate
    directly on the raw stored value.
    """

    def __init__(self, attribute, factory, args, kwargs):
        self.attribute = attribute
        self.factory = factory
        self.args = args
        self.kwargs = kwargs

    def __get__(self, instance, owner):
        # Fetch the raw value and hand back a freshly built wrapper on
        # every access.
        raw = instance.__dict__[self.attribute]
        return self.factory(raw, *self.args, **self.kwargs)

    def __set__(self, instance, value):
        # Store the unwrapped value; wrapping happens only on read.
        instance.__dict__[self.attribute] = value

    def __delete__(self, instance):
        del instance.__dict__[self.attribute]
def wrap_object_attribute(module, name, factory, args=(), kwargs=None):
    """Apply a proxy to an attribute of instances of a class.

    ``name`` is a dotted path whose final component is the attribute and
    whose prefix resolves to the owning class. An AttributeWrapper
    descriptor of the same name is installed on the class to intercept
    instance attribute access. Note this cannot be used on attributes
    which are themselves defined by a property object.

    BUG FIX: ``kwargs`` previously defaulted to a shared mutable dict
    (``{}``); use None as the sentinel instead.
    """
    if kwargs is None:
        kwargs = {}
    path, attribute = name.rsplit('.', 1)
    parent = resolve_path(module, path)[2]
    wrapper = AttributeWrapper(attribute, factory, args, kwargs)
    apply_patch(parent, attribute, wrapper)
    return wrapper
# Functions for creating a simple decorator using a FunctionWrapper,
# plus short cut functions for applying wrappers to functions. These are
# for use when doing monkey patching. For a more featured way of
# creating decorators see the decorator decorator instead.
def function_wrapper(wrapper):
    """Create a simple decorator from ``wrapper`` using FunctionWrapper.

    Intended for monkey patching; for a more featured way of creating
    decorators see the decorator decorator instead.
    """
    def _wrapper(wrapped, instance, args, kwargs):
        # 'wrapped' here is the decorator itself being applied; the
        # function actually being decorated arrives as args[0].
        target_wrapped = args[0]
        # Rebind the user's wrapper against the instance/class the
        # decorator was accessed through, if any.
        if instance is None:
            target_wrapper = wrapper
        elif inspect.isclass(instance):
            target_wrapper = wrapper.__get__(None, instance)
        else:
            target_wrapper = wrapper.__get__(instance, type(instance))
        return FunctionWrapper(target_wrapped, target_wrapper)
    return FunctionWrapper(wrapper, _wrapper)
def wrap_function_wrapper(module, name, wrapper):
    """Shortcut: patch the function at ``module``/``name`` with a
    FunctionWrapper around ``wrapper``."""
    return wrap_object(module, name, FunctionWrapper, (wrapper,))
def patch_function_wrapper(module, name):
    """Decorator form of wrap_function_wrapper(); the decorated function
    becomes the wrapper applied to ``module``/``name``."""
    def _wrapper(wrapper):
        return wrap_object(module, name, FunctionWrapper, (wrapper,))
    return _wrapper
def transient_function_wrapper(module, name):
    """Apply a wrapper to ``module``/``name`` only for the duration of
    calls made through the decorated function, restoring the original
    afterwards."""
    def _decorator(wrapper):
        def _wrapper(wrapped, instance, args, kwargs):
            # The function actually being decorated arrives as args[0];
            # rebind the user's wrapper against the instance/class the
            # decorator was accessed through, if any.
            target_wrapped = args[0]
            if instance is None:
                target_wrapper = wrapper
            elif inspect.isclass(instance):
                target_wrapper = wrapper.__get__(None, instance)
            else:
                target_wrapper = wrapper.__get__(instance, type(instance))
            def _execute(wrapped, instance, args, kwargs):
                # Patch on entry, call through, and always restore the
                # original even if the call raises.
                (parent, attribute, original) = resolve_path(module, name)
                replacement = FunctionWrapper(original, target_wrapper)
                setattr(parent, attribute, replacement)
                try:
                    return wrapped(*args, **kwargs)
                finally:
                    setattr(parent, attribute, original)
            return FunctionWrapper(target_wrapped, _execute)
        return FunctionWrapper(wrapper, _wrapper)
    return _decorator
# A weak function proxy. This will work on instance methods, class
# methods, static methods and regular functions. Special treatment is
# needed for the method types because the bound method is effectively a
# transient object and applying a weak reference to one will immediately
# result in it being destroyed and the weakref callback called. The weak
# reference is therefore applied to the instance the method is bound to
# and the original function. The function is then rebound at the point
# of a call via the weak function proxy.
def _weak_function_proxy_callback(ref, proxy, callback):
if proxy._self_expired:
return
proxy._self_expired = True
# This could raise an exception. We let it propagate back and let
# the weakref.proxy() deal with it, at which point it generally
# prints out a short error message direct to stderr and keeps going.
if callback is not None:
callback(proxy)
# A weak function proxy supporting instance methods, class methods,
# static methods and plain functions. Bound methods are transient
# objects, so the weak references are taken on the instance and the
# underlying function separately and the method is rebound at call time.
class WeakFunctionProxy(ObjectProxy):

    __slots__ = ('_self_expired', '_self_instance')

    def __init__(self, wrapped, callback=None):
        # We need to determine if the wrapped function is actually a
        # bound method. In the case of a bound method, we need to keep a
        # reference to the original unbound function and the instance.
        # This is necessary because if we hold a reference to the bound
        # function, it will be the only reference and given it is a
        # temporary object, it will almost immediately expire and
        # the weakref callback triggered. So what is done is that we
        # hold a reference to the instance and unbound function and
        # when called bind the function to the instance once again and
        # then call it. Note that we avoid using a nested function for
        # the callback here so as not to cause any odd reference cycles.

        _callback = callback and functools.partial(
                _weak_function_proxy_callback, proxy=self,
                callback=callback)

        self._self_expired = False

        if isinstance(wrapped, _FunctionWrapperBase):
            self._self_instance = weakref.ref(wrapped._self_instance,
                    _callback)

            # For an already-bound wrapper, weakly reference the parent
            # (unbound) wrapper so rebinding at call time is possible.
            if wrapped._self_parent is not None:
                super(WeakFunctionProxy, self).__init__(
                        weakref.proxy(wrapped._self_parent, _callback))

            else:
                super(WeakFunctionProxy, self).__init__(
                        weakref.proxy(wrapped, _callback))

            return

        try:
            self._self_instance = weakref.ref(wrapped.__self__, _callback)

            super(WeakFunctionProxy, self).__init__(
                    weakref.proxy(wrapped.__func__, _callback))

        except AttributeError:
            # Not a bound method; weakly reference the callable itself.
            self._self_instance = None

            super(WeakFunctionProxy, self).__init__(
                    weakref.proxy(wrapped, _callback))

    def __call__(self, *args, **kwargs):
        # We perform a boolean check here on the instance and wrapped
        # function as that will trigger the reference error prior to
        # calling if the reference had expired.

        instance = self._self_instance and self._self_instance()
        function = self.__wrapped__ and self.__wrapped__

        # If the wrapped function was originally a bound function, for
        # which we retained a reference to the instance and the unbound
        # function we need to rebind the function and then call it. If
        # not just called the wrapped function.

        if instance is None:
            return self.__wrapped__(*args, **kwargs)

        return function.__get__(instance, type(instance))(*args, **kwargs)
import logging
import os
import re
import socket
import threading
from zuqa.utils import compat, starmatch_to_regex
from zuqa.utils.logging import get_logger
from zuqa.utils.threading import IntervalTimer, ThreadManager
__all__ = ("setup_logging", "Config")
logger = get_logger("zuqa.conf")
class ConfigurationError(ValueError):
    """Raised when a configuration value is missing or fails validation.

    The offending configuration key is stored on ``field_name`` so
    callers can map an error back to the field that produced it.
    """

    def __init__(self, msg, field_name):
        self.field_name = field_name
        # BUG FIX: the original called super(ValueError, self).__init__,
        # which starts the MRO lookup *after* ValueError and so skips
        # it; naming this class itself runs the full chain of base
        # initialisers as intended.
        super(ConfigurationError, self).__init__(msg)
class _ConfigValue(object):
    """Descriptor declaring one configuration field on a _ConfigBase
    subclass.

    Values are coerced via ``type``, run through optional ``validators``
    and stored in the owning instance's ``_values`` dict under
    ``dict_key``. ``env_key`` defaults to ``ZUQA_<dict_key>``.
    """

    def __init__(self, dict_key, env_key=None, type=compat.text_type, validators=None, default=None, required=False):
        self.type = type
        self.dict_key = dict_key
        self.validators = validators
        self.default = default
        self.required = required
        if env_key is None:
            env_key = "ZUQA_" + dict_key
        self.env_key = env_key

    def __get__(self, instance, owner):
        # Class-level access (instance None) yields the default.
        if instance:
            return instance._values.get(self.dict_key, self.default)
        else:
            return self.default

    def __set__(self, instance, value):
        value = self._validate(instance, value)
        instance._values[self.dict_key] = value

    def _validate(self, instance, value):
        # Required check, then validators, then type coercion; any
        # failure raises ConfigurationError tagged with the field key.
        if value is None and self.required:
            raise ConfigurationError(
                "Configuration error: value for {} is required.".format(self.dict_key), self.dict_key
            )
        if self.validators and value is not None:
            for validator in self.validators:
                value = validator(value, self.dict_key)
        if self.type and value is not None:
            try:
                value = self.type(value)
            except ValueError as e:
                raise ConfigurationError("{}: {}".format(self.dict_key, compat.text_type(e)), self.dict_key)
        # A successful validation clears any previously recorded error
        # for this field.
        instance._errors.pop(self.dict_key, None)
        return value
class _ListConfigValue(_ConfigValue):
    """Configuration field holding a list; string input is split on
    ``list_separator`` and each item coerced via ``type``."""

    def __init__(self, dict_key, list_separator=",", **kwargs):
        self.list_separator = list_separator
        super(_ListConfigValue, self).__init__(dict_key, **kwargs)

    def __set__(self, instance, value):
        if isinstance(value, compat.string_types):
            value = value.split(self.list_separator)
        elif value is not None:
            value = list(value)
        if value:
            value = [self.type(item) for item in value]
        instance._values[self.dict_key] = value
class _DictConfigValue(_ConfigValue):
    """Configuration field holding a dict; string input of the form
    ``"k=v,k2=v2"`` is parsed using ``item_separator`` and
    ``keyval_separator``, with values coerced via ``type``."""

    def __init__(self, dict_key, item_separator=",", keyval_separator="=", **kwargs):
        self.item_separator = item_separator
        self.keyval_separator = keyval_separator
        super(_DictConfigValue, self).__init__(dict_key, **kwargs)

    def __set__(self, instance, value):
        if isinstance(value, compat.string_types):
            items = (item.split(self.keyval_separator) for item in value.split(self.item_separator))
            value = {key.strip(): self.type(val.strip()) for key, val in items}
        elif not isinstance(value, dict):
            # TODO: better error handling
            value = None
        instance._values[self.dict_key] = value
class _BoolConfigValue(_ConfigValue):
    """Configuration field holding a boolean; the strings ``true_string``
    and ``false_string`` (case-insensitive) map to True/False, anything
    else is coerced with bool()."""

    def __init__(self, dict_key, true_string="true", false_string="false", **kwargs):
        self.true_string = true_string
        self.false_string = false_string
        super(_BoolConfigValue, self).__init__(dict_key, **kwargs)

    def __set__(self, instance, value):
        if isinstance(value, compat.string_types):
            if value.lower() == self.true_string:
                value = True
            elif value.lower() == self.false_string:
                value = False
        instance._values[self.dict_key] = bool(value)
class RegexValidator(object):
    """Validator accepting values matching ``regex``; ``verbose_pattern``
    is shown in the error message instead of the raw regex if given."""

    def __init__(self, regex, verbose_pattern=None):
        self.regex = regex
        self.verbose_pattern = verbose_pattern or regex

    def __call__(self, value, field_name):
        value = compat.text_type(value)
        match = re.match(self.regex, value)
        if match:
            return value
        raise ConfigurationError("{} does not match pattern {}".format(value, self.verbose_pattern), field_name)
class UnitValidator(object):
    """Validator parsing values of the form ``<number><unit>`` and
    normalising to a base unit via ``unit_multipliers``.

    ``regex`` must capture two groups (number, unit); matching is
    case-insensitive, but ``unit_multipliers`` keys must match the
    case-folded form the regex can produce.
    """

    def __init__(self, regex, verbose_pattern, unit_multipliers):
        self.regex = regex
        self.verbose_pattern = verbose_pattern
        self.unit_multipliers = unit_multipliers

    def __call__(self, value, field_name):
        value = compat.text_type(value)
        match = re.match(self.regex, value, re.IGNORECASE)
        if not match:
            raise ConfigurationError("{} does not match pattern {}".format(value, self.verbose_pattern), field_name)
        val, unit = match.groups()
        try:
            val = int(val) * self.unit_multipliers[unit]
        except KeyError:
            raise ConfigurationError("{} is not a supported unit".format(unit), field_name)
        return val
# Shared validators for duration ("30s") and size ("10mb") style values;
# both normalise to the base unit (milliseconds and bytes respectively).
duration_validator = UnitValidator(r"^((?:-)?\d+)(ms|s|m)$", r"\d+(ms|s|m)", {"ms": 1, "s": 1000, "m": 60000})
size_validator = UnitValidator(
    r"^(\d+)(b|kb|mb|gb)$", r"\d+(b|KB|MB|GB)", {"b": 1, "kb": 1024, "mb": 1024 * 1024, "gb": 1024 * 1024 * 1024}
)
class ExcludeRangeValidator(object):
    """Validator rejecting values inside the closed interval
    [range_start, range_end]; anything outside passes through."""

    def __init__(self, range_start, range_end, range_desc):
        self.range_start = range_start
        self.range_end = range_end
        self.range_desc = range_desc

    def __call__(self, value, field_name):
        inside = self.range_start <= value <= self.range_end
        if not inside:
            return value
        # range_desc may interpolate the bounds, e.g. "{range_start}-{range_end}".
        description = self.range_desc.format(
            **{"range_start": self.range_start, "range_end": self.range_end}
        )
        raise ConfigurationError(
            "{} cannot be in range: {}".format(value, description),
            field_name,
        )
class FileIsReadableValidator(object):
    """Validator ensuring the value is a path to an existing, readable
    regular file; returns the normalised path."""

    def __call__(self, value, field_name):
        value = os.path.normpath(value)
        # Each check raises on its own so the error message pinpoints
        # exactly what is wrong with the path.
        if not os.path.exists(value):
            raise ConfigurationError("{} does not exist".format(value), field_name)
        if not os.path.isfile(value):
            raise ConfigurationError("{} is not a file".format(value), field_name)
        if not os.access(value, os.R_OK):
            raise ConfigurationError("{} is not readable".format(value), field_name)
        return value
class _ConfigBase(object):
    """
    Base class for config objects.

    Subclasses declare options as class-level ``_ConfigValue`` descriptors;
    ``update()`` resolves each option from the environment, an inline dict,
    or a config dict, in that order of precedence.
    """

    _NO_VALUE = object()  # sentinel object

    def __init__(self, config_dict=None, env_dict=None, inline_dict=None):
        """
        :param config_dict: values keyed by each option's dict_key
        :param env_dict: environment variables (defaults to ``os.environ``)
        :param inline_dict: values keyed by field (attribute) name
        """
        self._values = {}
        self._errors = {}
        self.update(config_dict, env_dict, inline_dict)

    def update(self, config_dict=None, env_dict=None, inline_dict=None):
        if config_dict is None:
            config_dict = {}
        if env_dict is None:
            env_dict = os.environ
        if inline_dict is None:
            inline_dict = {}
        for field, config_value in self.__class__.__dict__.items():
            # only class attributes that are _ConfigValue descriptors are options
            if not isinstance(config_value, _ConfigValue):
                continue
            new_value = self._NO_VALUE
            # first check environment
            if config_value.env_key and config_value.env_key in env_dict:
                new_value = env_dict[config_value.env_key]
            # check the inline config
            elif field in inline_dict:
                new_value = inline_dict[field]
            # finally, check config dictionary
            elif config_value.dict_key in config_dict:
                new_value = config_dict[config_value.dict_key]
            # only set if new_value changed. We'll fall back to the field default if not.
            if new_value is not self._NO_VALUE:
                try:
                    setattr(self, field, new_value)
                except ConfigurationError as e:
                    # collect validation errors instead of raising, so callers
                    # can inspect all failures at once via ``errors``
                    self._errors[e.field_name] = str(e)

    @property
    def values(self):
        # raw dict of resolved option values, keyed by dict_key
        return self._values

    @values.setter
    def values(self, values):
        self._values = values

    @property
    def errors(self):
        # field_name -> message for every validation failure seen so far
        return self._errors
class Config(_ConfigBase):
    """All supported agent configuration options, with defaults and validators."""

    # --- service identity ---
    service_name = _ConfigValue("SERVICE_NAME", validators=[RegexValidator("^[a-zA-Z0-9 _-]+$")], required=True)
    service_node_name = _ConfigValue("SERVICE_NODE_NAME", default=None)
    environment = _ConfigValue("ENVIRONMENT", default=None)
    secret_token = _ConfigValue("SECRET_TOKEN")
    api_key = _ConfigValue("API_KEY")
    debug = _BoolConfigValue("DEBUG", default=False)

    # --- server connection ---
    server_url = _ConfigValue("SERVER_URL", default="http://localhost:32140", required=True)
    server_cert = _ConfigValue("SERVER_CERT", default=None, required=False, validators=[FileIsReadableValidator()])
    verify_server_cert = _BoolConfigValue("VERIFY_SERVER_CERT", default=True)

    # --- stack frame / exception filtering ---
    include_paths = _ListConfigValue("INCLUDE_PATHS")
    exclude_paths = _ListConfigValue("EXCLUDE_PATHS", default=compat.get_default_library_patters())
    filter_exception_types = _ListConfigValue("FILTER_EXCEPTION_TYPES")
    server_timeout = _ConfigValue(
        "SERVER_TIMEOUT",
        type=float,
        validators=[
            # NOTE(review): a suffix-less number is multiplied by 1000 while
            # "s" maps to 1 -- confirm the intended unit for bare values
            UnitValidator(r"^((?:-)?\d+)(ms|s|m)?$", r"\d+(ms|s|m)", {"ms": 0.001, "s": 1, "m": 60, None: 1000})
        ],
        default=5,
    )
    # NOTE(review): default is evaluated once at import time, not per instance
    hostname = _ConfigValue("HOSTNAME", default=socket.gethostname())
    auto_log_stacks = _BoolConfigValue("AUTO_LOG_STACKS", default=True)
    transport_class = _ConfigValue("TRANSPORT_CLASS", default="zuqa.transport.http.Transport", required=True)

    # --- event processing pipeline ---
    processors = _ListConfigValue(
        "PROCESSORS",
        default=[
            "zuqa.processors.sanitize_stacktrace_locals",
            "zuqa.processors.sanitize_http_request_cookies",
            "zuqa.processors.sanitize_http_response_cookies",
            "zuqa.processors.sanitize_http_headers",
            "zuqa.processors.sanitize_http_wsgi_env",
            "zuqa.processors.sanitize_http_request_querystring",
            "zuqa.processors.sanitize_http_request_body",
        ],
    )

    # --- metrics ---
    metrics_sets = _ListConfigValue(
        "METRICS_SETS",
        default=[
            "zuqa.metrics.sets.cpu.CPUMetricSet",
            "zuqa.metrics.sets.transactions.TransactionsMetricSet",
        ],
    )
    metrics_interval = _ConfigValue(
        "METRICS_INTERVAL",
        type=int,
        # intervals of 1-99 ms are rejected by the range validator
        validators=[duration_validator, ExcludeRangeValidator(1, 99, "{range_start} - {range_end} ms")],
        default=100,
    )
    breakdown_metrics = _BoolConfigValue("BREAKDOWN_METRICS", default=True)
    disable_metrics = _ListConfigValue("DISABLE_METRICS", type=starmatch_to_regex, default=[])
    central_config = _BoolConfigValue("CENTRAL_CONFIG", default=True)

    # --- transport batching ---
    api_request_size = _ConfigValue("API_REQUEST_SIZE", type=int, validators=[size_validator], default=768 * 1024)
    api_request_time = _ConfigValue("API_REQUEST_TIME", type=int, validators=[duration_validator], default=1 * 1000)

    # --- sampling and limits ---
    transaction_sample_rate = _ConfigValue("TRANSACTION_SAMPLE_RATE", type=float, default=1.0)
    transaction_max_spans = _ConfigValue("TRANSACTION_MAX_SPANS", type=int, default=500)
    stack_trace_limit = _ConfigValue("STACK_TRACE_LIMIT", type=int, default=500)
    span_frames_min_duration = _ConfigValue(
        "SPAN_FRAMES_MIN_DURATION",
        default=5,
        validators=[
            UnitValidator(r"^((?:-)?\d+)(ms|s|m)?$", r"\d+(ms|s|m)", {"ms": 1, "s": 1000, "m": 60000, None: 1})
        ],
        type=int,
    )

    # --- local variable / source line capture ---
    collect_local_variables = _ConfigValue("COLLECT_LOCAL_VARIABLES", default="errors")
    source_lines_error_app_frames = _ConfigValue("SOURCE_LINES_ERROR_APP_FRAMES", type=int, default=5)
    source_lines_error_library_frames = _ConfigValue("SOURCE_LINES_ERROR_LIBRARY_FRAMES", type=int, default=5)
    source_lines_span_app_frames = _ConfigValue("SOURCE_LINES_SPAN_APP_FRAMES", type=int, default=0)
    source_lines_span_library_frames = _ConfigValue("SOURCE_LINES_SPAN_LIBRARY_FRAMES", type=int, default=0)
    local_var_max_length = _ConfigValue("LOCAL_VAR_MAX_LENGTH", type=int, default=200)
    local_var_list_max_length = _ConfigValue("LOCAL_VAR_LIST_MAX_LENGTH", type=int, default=10)
    local_var_dict_max_length = _ConfigValue("LOCAL_VAR_DICT_MAX_LENGTH", type=int, default=10)
    capture_body = _ConfigValue(
        "CAPTURE_BODY",
        default="off",
        # maps legacy values ("errors"/"transactions") to their current names
        validators=[lambda val, _: {"errors": "error", "transactions": "transaction"}.get(val, val)],
    )

    # --- framework integration toggles ---
    async_mode = _BoolConfigValue("ASYNC_MODE", default=True)
    instrument_django_middleware = _BoolConfigValue("INSTRUMENT_DJANGO_MIDDLEWARE", default=True)
    autoinsert_django_middleware = _BoolConfigValue("AUTOINSERT_DJANGO_MIDDLEWARE", default=True)
    transactions_ignore_patterns = _ListConfigValue("TRANSACTIONS_IGNORE_PATTERNS", default=[])
    service_version = _ConfigValue("SERVICE_VERSION")
    framework_name = _ConfigValue("FRAMEWORK_NAME", default=None)
    framework_version = _ConfigValue("FRAMEWORK_VERSION", default=None)
    global_labels = _DictConfigValue("GLOBAL_LABELS", default=None)
    disable_send = _BoolConfigValue("DISABLE_SEND", default=False)
    enabled = _BoolConfigValue("ENABLED", default=True)
    recording = _BoolConfigValue("RECORDING", default=True)
    instrument = _BoolConfigValue("INSTRUMENT", default=True)
    enable_distributed_tracing = _BoolConfigValue("ENABLE_DISTRIBUTED_TRACING", default=True)
    capture_headers = _BoolConfigValue("CAPTURE_HEADERS", default=True)
    django_transaction_name_from_route = _BoolConfigValue("DJANGO_TRANSACTION_NAME_FROM_ROUTE", default=False)
    disable_log_record_factory = _BoolConfigValue("DISABLE_LOG_RECORD_FACTORY", default=False)
    use_elastic_traceparent_header = _BoolConfigValue("USE_ELASTIC_TRACEPARENT_HEADER", default=True)

    @property
    def is_recording(self):
        """Recording only happens while the agent is enabled at all."""
        if not self.enabled:
            return False
        else:
            return self.recording
class VersionedConfig(ThreadManager):
    """
    A thin layer around Config that provides versioning
    """

    __slots__ = (
        "_config",
        "_version",
        "_first_config",
        "_first_version",
        "_lock",
        "transport",
        "_update_thread",
        "pid",
    )

    def __init__(self, config_object, version, transport=None):
        """
        Create a new VersionedConfig with an initial Config object
        :param config_object: the initial Config object
        :param version: a version identifier for the configuration
        :param transport: optional transport used to fetch remote config updates
        """
        self._config = self._first_config = config_object
        self._version = self._first_version = version
        self.transport = transport
        self._lock = threading.Lock()
        self._update_thread = None
        super(VersionedConfig, self).__init__()

    def update(self, version, **config):
        """
        Update the configuration version
        :param version: version identifier for the new configuration
        :param config: a key/value map of new configuration
        :return: configuration errors, if any
        """
        new_config = Config()
        new_config.values = self._config.values.copy()

        # pass an empty env dict to ensure the environment doesn't get precedence
        new_config.update(inline_dict=config, env_dict={})
        if not new_config.errors:
            with self._lock:
                self._version = version
                self._config = new_config
        else:
            # the current config is kept; callers get the validation errors back
            return new_config.errors

    def reset(self):
        """
        Reset state to the original configuration
        """
        with self._lock:
            self._version = self._first_version
            self._config = self._first_config

    @property
    def changed(self):
        # True once a config update has replaced the original config object
        return self._config != self._first_config

    def __getattr__(self, item):
        # delegate all unknown attribute reads to the wrapped Config
        return getattr(self._config, item)

    def __setattr__(self, name, value):
        # attributes in __slots__ live on this wrapper itself; everything
        # else is forwarded to the wrapped Config
        if name not in self.__slots__:
            setattr(self._config, name, value)
        else:
            super(VersionedConfig, self).__setattr__(name, value)

    @property
    def config_version(self):
        return self._version

    def update_config(self):
        """
        Poll the transport for a new remote configuration and apply it.

        :return: seconds until the next poll, as reported by the transport
                 (None when no transport is configured)
        """
        if not self.transport:
            logger.warning("No transport set for config updates, skipping")
            return
        logger.debug("Checking for new config...")
        keys = {"service": {"name": self.service_name}}
        if self.environment:
            keys["service"]["environment"] = self.environment
        new_version, new_config, next_run = self.transport.get_config(self.config_version, keys)
        if new_version and new_config:
            errors = self.update(new_version, **new_config)
            if errors:
                logger.error("Error applying new configuration: %s", repr(errors))
            else:
                logger.info(
                    "Applied new configuration: %s",
                    "; ".join(
                        "%s=%s" % (compat.text_type(k), compat.text_type(v)) for k, v in compat.iteritems(new_config)
                    ),
                )
        elif new_version == self.config_version:
            logger.debug("Remote config unchanged")
        elif not new_config and self.changed:
            # remote config was removed; fall back to the locally supplied one
            logger.debug("Remote config disappeared, resetting to original")
            self.reset()

        return next_run

    def start_thread(self, pid=None):
        # the timer re-evaluates its interval from update_config's return value
        self._update_thread = IntervalTimer(
            self.update_config, 1, "eapm conf updater", daemon=True, evaluate_function_interval=True
        )
        self._update_thread.start()
        super(VersionedConfig, self).start_thread(pid=pid)

    def stop_thread(self):
        if self._update_thread:
            self._update_thread.cancel()
            self._update_thread = None
def setup_logging(handler, exclude=("gunicorn", "south", "zuqa.errors")):
    """
    Configures logging to pipe to ZUQA.

    - ``exclude`` is a list of loggers that shouldn't go to ZUQA.

    For a typical Python install:

    >>> from zuqa.handlers.logging import LoggingHandler
    >>> client = ZUQA(...)
    >>> setup_logging(LoggingHandler(client))

    Within Django:

    >>> from zuqa.contrib.django.handlers import LoggingHandler
    >>> setup_logging(LoggingHandler())

    Returns a boolean based on if logging was configured or not.
    """
    root_logger = logging.getLogger()
    # don't install a second handler of the same class on the root logger
    installed_types = [type(existing) for existing in root_logger.handlers]
    if handler.__class__ in installed_types:
        return False
    root_logger.addHandler(handler)
    return True
import threading
import time
from collections import defaultdict
from zuqa.conf import constants
from zuqa.utils import compat
from zuqa.utils.logging import get_logger
from zuqa.utils.module_import import import_string
from zuqa.utils.threading import IntervalTimer, ThreadManager
logger = get_logger("zuqa.metrics")

# upper bound on distinct (name, labels) metric objects per MetricsSet; once
# reached, MetricsSet._metric hands out no-op metrics instead of new ones
DISTINCT_LABEL_LIMIT = 1000
class MetricsRegistry(ThreadManager):
    """Owns all metric sets and drives their periodic collection."""

    def __init__(self, client, tags=None):
        """
        Creates a new metric registry

        :param client: client instance
        :param tags: optional flat key/value map of tags
        """
        self.client = client
        self._metricsets = {}
        self._tags = tags or {}
        self._collect_timer = None
        self.collect_actively = False  # for transaction specific metrics
        self.last_transaction_name = None  # for transaction specific metrics
        self.transaction_metrics_data = []  # for transaction specific metrics
        super(MetricsRegistry, self).__init__()

    def register(self, class_path):
        """
        Register a new metric set
        :param class_path: a string with the import path of the metricset class
        """
        if class_path in self._metricsets:
            return
        else:
            try:
                class_obj = import_string(class_path)
                self._metricsets[class_path] = class_obj(self)
            except ImportError as e:
                # e.g. platform-specific metric sets raise ImportError when
                # unavailable; registration is best-effort
                logger.warning("Could not register %s metricset: %s", class_path, compat.text_type(e))

    def get_metricset(self, class_path):
        """Return the metric set registered for ``class_path`` or raise MetricSetNotFound."""
        try:
            return self._metricsets[class_path]
        except KeyError:
            raise MetricSetNotFound(class_path)

    def collect(self):
        """
        Collect metrics from all registered metric sets and queues them for sending
        :return:
        """
        # data is only gathered while collect_actively is set; once it is
        # cleared, anything accumulated so far is flushed in a single queue call
        if self.collect_actively:
            if self.client.config.is_recording:
                logger.debug("Collecting metrics")

                for _, metricset in compat.iteritems(self._metricsets):
                    for data in metricset.collect():
                        self.transaction_metrics_data.append(data)
        elif len(self.transaction_metrics_data) > 0:
            self.client.queue(constants.TRANSACTION_METRICSET, {
                "url": self.last_transaction_name,
                "metricsets": self.transaction_metrics_data
            }, flush=True)
            self.transaction_metrics_data = []

    def start_thread(self, pid=None):
        super(MetricsRegistry, self).start_thread(pid=pid)
        # a falsy metrics_interval disables periodic collection entirely
        if self.client.config.metrics_interval:
            self._collect_timer = IntervalTimer(
                self.collect, self.collect_interval, name="eapm metrics collect timer", daemon=True
            )
            logger.debug("Starting metrics collect timer")
            self._collect_timer.start()

    def stop_thread(self):
        if self._collect_timer and self._collect_timer.is_alive():
            logger.debug("Cancelling collect timer")
            self._collect_timer.cancel()
            self._collect_timer = None

    @property
    def collect_interval(self):
        # configured in milliseconds; the timer works in seconds
        return self.client.config.metrics_interval / 1000.0

    @property
    def ignore_patterns(self):
        return self.client.config.disable_metrics or []
class MetricsSet(object):
    """
    Base class for a group of related metrics.

    Provides factories for Counter/Gauge/Timer objects keyed by (name, labels),
    and a ``collect()`` generator that serializes current values.
    """

    def __init__(self, registry):
        self._lock = threading.Lock()
        self._counters = {}
        self._gauges = {}
        self._timers = {}
        self._registry = registry
        self._label_limit_logged = False  # warn only once when the limit is hit

    def counter(self, name, reset_on_collect=False, **labels):
        """
        Returns an existing or creates and returns a new counter
        :param name: name of the counter
        :param reset_on_collect: indicate if the counter should be reset to 0 when collecting
        :param labels: a flat key/value map of labels
        :return: the counter object
        """
        return self._metric(self._counters, Counter, name, reset_on_collect, labels)

    def gauge(self, name, reset_on_collect=False, **labels):
        """
        Returns an existing or creates and returns a new gauge
        :param name: name of the gauge
        :param reset_on_collect: indicate if the gauge should be reset to 0 when collecting
        :param labels: a flat key/value map of labels
        :return: the gauge object
        """
        return self._metric(self._gauges, Gauge, name, reset_on_collect, labels)

    def timer(self, name, reset_on_collect=False, **labels):
        """
        Returns an existing or creates and returns a new timer
        :param name: name of the timer
        :param reset_on_collect: indicate if the timer should be reset to 0 when collecting
        :param labels: a flat key/value map of labels
        :return: the timer object
        """
        return self._metric(self._timers, Timer, name, reset_on_collect, labels)

    def _metric(self, container, metric_class, name, reset_on_collect, labels):
        """
        Returns an existing or creates and returns a metric
        :param container: the container for the metric
        :param metric_class: the class of the metric
        :param name: name of the metric
        :param reset_on_collect: indicate if the metric should be reset to 0 when collecting
        :param labels: a flat key/value map of labels
        :return: the metric object
        """

        labels = self._labels_to_key(labels)
        key = (name, labels)
        with self._lock:
            if key not in container:
                # ignored metric names and metrics beyond the cardinality
                # limit are replaced by the shared no-op metric
                if any(pattern.match(name) for pattern in self._registry.ignore_patterns):
                    metric = noop_metric
                elif len(self._gauges) + len(self._counters) + len(self._timers) >= DISTINCT_LABEL_LIMIT:
                    if not self._label_limit_logged:
                        self._label_limit_logged = True
                        logger.warning(
                            "The limit of %d metricsets has been reached, no new metricsets will be created."
                            % DISTINCT_LABEL_LIMIT
                        )
                    metric = noop_metric
                else:
                    metric = metric_class(name, reset_on_collect=reset_on_collect)
                container[key] = metric
            return container[key]

    def collect(self):
        """
        Collects all metrics attached to this metricset, and returns it as a generator
        with one or more elements. More than one element is returned if labels are used.

        The format of the return value should be

            {
                "samples": {"metric.name": {"value": some_float}, ...},
                "timestamp": unix epoch in microsecond precision
            }
        """
        self.before_collect()
        timestamp = int(time.time() * 1000000)
        samples = defaultdict(dict)
        if self._counters:
            # iterate over a copy of the dict to avoid threading issues, see #717
            for (name, labels), c in compat.iteritems(self._counters.copy()):
                if c is not noop_metric:
                    val = c.val
                    # zero-valued reset_on_collect metrics are omitted
                    if val or not c.reset_on_collect:
                        samples[labels].update({name: {"value": val}})
                    if c.reset_on_collect:
                        c.reset()
        if self._gauges:
            for (name, labels), g in compat.iteritems(self._gauges.copy()):
                if g is not noop_metric:
                    val = g.val
                    if val or not g.reset_on_collect:
                        samples[labels].update({name: {"value": val}})
                    if g.reset_on_collect:
                        g.reset()
        if self._timers:
            for (name, labels), t in compat.iteritems(self._timers.copy()):
                if t is not noop_metric:
                    val, count = t.val
                    if val or not t.reset_on_collect:
                        # timers are reported as duration sum (microseconds)
                        # plus an event count
                        samples[labels].update({name + ".sum.us": {"value": int(val * 1000000)}})
                        samples[labels].update({name + ".count": {"value": count}})
                    if t.reset_on_collect:
                        t.reset()
        if samples:
            for labels, sample in compat.iteritems(samples):
                result = {"samples": sample, "timestamp": timestamp}
                if labels:
                    result["tags"] = {k: v for k, v in labels}
                yield self.before_yield(result)

    def before_collect(self):
        """
        A method that is called right before collection. Can be used to gather metrics.
        :return:
        """
        pass

    def before_yield(self, data):
        """Hook for subclasses to post-process each sample dict before yielding it."""
        return data

    def _labels_to_key(self, labels):
        # labels become part of a dict key, so they must be hashable:
        # convert to a sorted tuple of (key, text) pairs
        return tuple((k, compat.text_type(v)) for k, v in sorted(compat.iteritems(labels)))
class SpanBoundMetricSet(MetricsSet):
    """Metric set that promotes span/transaction tags into dedicated fields."""

    def before_yield(self, data):
        tags = data.get("tags", None)
        if tags:
            span_type = tags.pop("span.type", None)
            span_subtype = tags.pop("span.subtype", "")
            if span_type or span_subtype:
                data["span"] = {"type": span_type, "subtype": span_subtype}
            txn_name = tags.pop("transaction.name", None)
            txn_type = tags.pop("transaction.type", None)
            if txn_name or txn_type:
                data["transaction"] = {"name": txn_name, "type": txn_type}
        return data
class Counter(object):
    """A thread-safe counter metric supporting increment, decrement and reset."""

    __slots__ = ("name", "_lock", "_initial_value", "_val", "reset_on_collect")

    def __init__(self, name, initial_value=0, reset_on_collect=False):
        """
        Creates a new counter
        :param name: name of the counter
        :param initial_value: initial value of the counter, defaults to 0
        :param reset_on_collect: whether collection should reset the counter
        """
        self.name = name
        self._lock = threading.Lock()
        self._initial_value = initial_value
        self._val = initial_value
        self.reset_on_collect = reset_on_collect

    def inc(self, delta=1):
        """
        Increments the counter. If no delta is provided, it is incremented by one
        :param delta: the amount to increment the counter by
        :returns the counter itself
        """
        with self._lock:
            self._val = self._val + delta
        return self

    def dec(self, delta=1):
        """
        Decrements the counter. If no delta is provided, it is decremented by one
        :param delta: the amount to decrement the counter by
        :returns the counter itself
        """
        with self._lock:
            self._val = self._val - delta
        return self

    def reset(self):
        """
        Reset the counter to the initial value
        :returns the counter itself
        """
        with self._lock:
            self._val = self._initial_value
        return self

    @property
    def val(self):
        """Returns the current value of the counter"""
        return self._val
class Gauge(object):
    """A point-in-time metric holding a single externally assigned value."""

    __slots__ = ("name", "_val", "reset_on_collect")

    def __init__(self, name, reset_on_collect=False):
        """
        Creates a new gauge
        :param name: label of the gauge
        :param reset_on_collect: whether collection should reset the gauge
        """
        self.name = name
        self._val = None
        self.reset_on_collect = reset_on_collect

    def _get_val(self):
        return self._val

    def _set_val(self, value):
        self._val = value

    val = property(_get_val, _set_val)

    def reset(self):
        # NOTE: reset leaves the gauge at 0, not at its initial value of None
        self._val = 0
class Timer(object):
    """Thread-safe accumulator of a duration total plus an event count."""

    __slots__ = ("name", "_val", "_count", "_lock", "reset_on_collect")

    def __init__(self, name=None, reset_on_collect=False):
        self.name = name
        self._val = 0
        self._count = 0
        self._lock = threading.Lock()
        self.reset_on_collect = reset_on_collect

    def update(self, duration, count=1):
        """Add ``duration`` to the running total and bump the event count."""
        with self._lock:
            self._val = self._val + duration
            self._count = self._count + count

    def reset(self):
        """Zero both the accumulated duration and the event count."""
        with self._lock:
            self._val = 0
            self._count = 0

    @property
    def val(self):
        """Return a ``(total_duration, count)`` tuple, read atomically."""
        with self._lock:
            return self._val, self._count
class NoopMetric(object):
    """
    A no-op metric that implements the "interface" of both Counter and Gauge.

    Note that even when using a no-op metric, the value itself will still be
    calculated by the caller; only the bookkeeping is skipped.
    """

    def __init__(self, label, initial_value=0):
        return

    @property
    def val(self):
        """Always None: a no-op metric never stores a value."""
        return None

    @val.setter
    def val(self, value):
        return

    def inc(self, delta=1):
        return

    def dec(self, delta=1):
        # default changed from -1 to 1 for consistency with Counter.dec();
        # the argument is ignored either way
        return

    def update(self, duration, count=1):
        return

    def reset(self):
        return
# shared singleton handed out for ignored metrics and past the label limit
noop_metric = NoopMetric("noop")
class MetricSetNotFound(LookupError):
    """Raised when a metric set class path has not been registered."""

    def __init__(self, class_path):
        message = "%s metric set not found" % class_path
        super(MetricSetNotFound, self).__init__(message)
import os
import re
import resource
import threading
from zuqa.metrics.base_metrics import MetricsSet
# proc(5) files used as data sources
SYS_STATS = "/proc/stat"
MEM_STATS = "/proc/meminfo"
PROC_STATS = "/proc/self/stat"

# field names of the aggregate "cpu" line in /proc/stat, in file order
CPU_FIELDS = ("user", "nice", "system", "idle", "iowait", "irq", "softirq", "steal", "guest", "guest_nice")
# the /proc/meminfo rows this metric set reads
MEM_FIELDS = ("MemTotal", "MemAvailable", "MemFree", "Buffers", "Cached")

whitespace_re = re.compile(r"\s+")

# fail at import time on non-Linux platforms; MetricsRegistry.register
# catches ImportError and treats the set as unavailable
if not os.path.exists(SYS_STATS):
    raise ImportError("This metric set is only available on Linux")
class CPUMetricSet(MetricsSet):
    """Collects system and process CPU/memory metrics from Linux /proc files."""

    def __init__(self, registry, sys_stats_file=SYS_STATS, process_stats_file=PROC_STATS, memory_stats_file=MEM_STATS):
        self.page_size = resource.getpagesize()
        self.previous = {}
        self._read_data_lock = threading.Lock()
        self.sys_stats_file = sys_stats_file
        self.process_stats_file = process_stats_file
        self.memory_stats_file = memory_stats_file
        self._sys_clock_ticks = os.sysconf("SC_CLK_TCK")
        # take an initial snapshot so the first collection can compute deltas
        with self._read_data_lock:
            self.previous.update(self.read_process_stats())
            self.previous.update(self.read_system_stats())
        super(CPUMetricSet, self).__init__(registry)

    def before_collect(self):
        # CPU ratios are the delta between the previous snapshot and the
        # current one, normalized by total CPU time over the interval
        new = self.read_process_stats()
        new.update(self.read_system_stats())
        with self._read_data_lock:
            prev = self.previous
            delta = {k: new[k] - prev[k] for k in new.keys()}
            try:
                cpu_usage_ratio = delta["cpu_usage"] / delta["cpu_total"]
            except ZeroDivisionError:
                cpu_usage_ratio = 0
            self.gauge("system.cpu.total.norm.pct").val = cpu_usage_ratio
            # MemAvailable not present in linux before kernel 3.14
            # fallback to MemFree + Buffers + Cache if not present - see #500
            if "MemAvailable" in new:
                mem_free = new["MemAvailable"]
            else:
                mem_free = sum(new.get(mem_field, 0) for mem_field in ("MemFree", "Buffers", "Cached"))
            self.gauge("system.memory.actual.free").val = mem_free
            self.gauge("system.memory.total").val = new["MemTotal"]

            try:
                cpu_process_percent = delta["proc_total_time"] / delta["cpu_total"]
            except ZeroDivisionError:
                cpu_process_percent = 0

            self.gauge("system.process.cpu.total.norm.pct").val = cpu_process_percent
            self.gauge("system.process.memory.size").val = new["vsize"]
            # rss from /proc/self/stat is reported in pages; convert to bytes
            self.gauge("system.process.memory.rss.bytes").val = new["rss"] * self.page_size
            self.previous = new

    def read_system_stats(self):
        """Parse /proc/stat and /proc/meminfo into a flat stats dict."""
        stats = {}
        with open(self.sys_stats_file, "r") as pidfile:
            for line in pidfile:
                if line.startswith("cpu "):
                    fields = whitespace_re.split(line)[1:-1]
                    num_fields = len(fields)

                    # Not all fields are available on all platforms (e.g. RHEL 6 does not provide steal, guest, and
                    # guest_nice. If a field is missing, we default to 0
                    f = {field: int(fields[i]) if i < num_fields else 0 for i, field in enumerate(CPU_FIELDS)}
                    stats["cpu_total"] = float(
                        f["user"]
                        + f["nice"]
                        + f["system"]
                        + f["idle"]
                        + f["iowait"]
                        + f["irq"]
                        + f["softirq"]
                        + f["steal"]
                    )
                    # usage is total minus idle and iowait time
                    stats["cpu_usage"] = stats["cpu_total"] - (f["idle"] + f["iowait"])
                    break
        with open(self.memory_stats_file, "r") as memfile:
            for line in memfile:
                metric_name = line.split(":")[0]
                if metric_name in MEM_FIELDS:
                    # /proc/meminfo reports values in kB; convert to bytes
                    value_in_bytes = int(whitespace_re.split(line)[1]) * 1024
                    stats[metric_name] = value_in_bytes
        return stats

    def read_process_stats(self):
        """Parse /proc/self/stat for this process' CPU times and memory usage."""
        stats = {}
        with open(self.process_stats_file, "r") as pidfile:
            data = pidfile.readline().split(" ")
            # zero-based indices 13/14/22/23 are utime/stime/vsize/rss per proc(5)
            stats["utime"] = int(data[13])
            stats["stime"] = int(data[14])
            stats["proc_total_time"] = stats["utime"] + stats["stime"]
            stats["vsize"] = int(data[22])
            stats["rss"] = int(data[23])
        return stats
This repository is automatically updated from https://github.com/zurb/bower-foundation
=============
`Foundation`_
=============
.. _Foundation: http://foundation.zurb.com
Foundation is the most advanced responsive front-end framework in the world. You can quickly prototype and build sites or apps that work on any kind of device with Foundation, which includes layout constructs (like a fully responsive grid), elements and best practices.
To get started, check out http://foundation.zurb.com/docs
Installation
============
To get going with the Foundation Python module, you can install it from the `PyPi package`_:
.. _PyPi package: https://pypi.python.org/pypi/zurb-foundation
.. sourcecode:: sh
pip install zurb-foundation
Documentation
=============
Foundation documentation pages are available at http://foundation.zurb.com/docs
Python package
==============
After installation you can use the *pkg_resources* module to access assets:
.. sourcecode:: python
import pkg_resources
as_string = pkg_resources.resource_string("zurb_foundation", "js/vendor/custom.modernizr.js")
full_path_to_file = pkg_resources.resource_filename("zurb_foundation", "js/vendor/custom.modernizr.js")
file_like = pkg_resources.resource_stream("zurb_foundation", "js/vendor/custom.modernizr.js")
Package consists of: *js*, compiled *css* and *scss* files.
| zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/README.rst | README.rst |
;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.abide = {
name : 'abide',
version : '5.5.3',
    settings : {
      live_validate : true, // validate the form as you go
      validate_on_blur : true, // validate whenever you focus/blur on an input field
      // validate_on: 'tab', // tab (when user tabs between fields), change (input changes), manual (call custom events)
      focus_on_invalid : true, // automatically bring the focus to an invalid input field
      error_labels : true, // labels with a for="inputId" will receive an `error` class
      error_class : 'error', // labels with a for="inputId" will receive an `error` class
      // the amount of time Abide will take before it validates the form (in ms).
      // smaller time will result in faster validation
      timeout : 1000,
      patterns : {
        alpha : /^[a-zA-Z]+$/,
        alpha_numeric : /^[a-zA-Z0-9]+$/,
        integer : /^[-+]?\d+$/,
        // decimals may use either '.' or ',' as the separator
        number : /^[-+]?\d*(?:[\.\,]\d+)?$/,

        // amex, visa, diners
        card : /^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\d{3})\d{11})$/,
        // 3 or 4 digit card security code
        cvv : /^([0-9]){3,4}$/,

        // http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#valid-e-mail-address
        email : /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+$/,

        // http://blogs.lse.ac.uk/lti/2008/04/23/a-regular-expression-to-match-any-url/
        url: /^(https?|ftp|file|ssh):\/\/([-;:&=\+\$,\w]+@{1})?([-A-Za-z0-9\.]+)+:?(\d+)?((\/[-\+~%\/\.\w]+)?\??([-\+=&;%@\.\w]+)?#?([\w]+)?)?/,
        // abc.de
        domain : /^([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,8}$/,

        // ISO 8601 date-time, e.g. 2015-04-23T13:30:00Z or with a +/-HH:00 offset
        datetime : /^([0-2][0-9]{3})\-([0-1][0-9])\-([0-3][0-9])T([0-5][0-9])\:([0-5][0-9])\:([0-5][0-9])(Z|([\-\+]([0-1][0-9])\:00))$/,
        // YYYY-MM-DD
        date : /(?:19|20)[0-9]{2}-(?:(?:0[1-9]|1[0-2])-(?:0[1-9]|1[0-9]|2[0-9])|(?:(?!02)(?:0[1-9]|1[0-2])-(?:30))|(?:(?:0[13578]|1[02])-31))$/,
        // HH:MM:SS
        time : /^(0[0-9]|1[0-9]|2[0-3])(:[0-5][0-9]){2}$/,
        // YYYY/MM/DD or YYYY-MM-DD
        dateISO : /^\d{4}[\/\-]\d{1,2}[\/\-]\d{1,2}$/,
        // MM/DD/YYYY
        month_day_year : /^(0[1-9]|1[012])[- \/.](0[1-9]|[12][0-9]|3[01])[- \/.]\d{4}$/,
        // DD/MM/YYYY
        day_month_year : /^(0[1-9]|[12][0-9]|3[01])[- \/.](0[1-9]|1[012])[- \/.]\d{4}$/,

        // #FFF or #FFFFFF
        color : /^#?([a-fA-F0-9]{6}|[a-fA-F0-9]{3})$/
      },
      validators : {
        // valid when this element's value equals the value of the element
        // whose id is named in the data-equalto attribute
        equalTo : function (el, required, parent) {
          var from  = document.getElementById(el.getAttribute(this.add_namespace('data-equalto'))).value,
              to    = el.value,
              valid = (from === to);

          return valid;
        }
      }
    },
    // handle of the debounce setTimeout used by events()' validate helper
    timer : null,

    init : function (scope, method, options) {
      // defer to the shared Foundation bindings helper for setup
      this.bindings(method, options);
    },
    // Wires up form-level (submit/validate/reset) and field-level
    // (blur/change/keydown/focus) event handlers for a form.
    events : function (scope) {
      var self = this,
          form = self.S(scope).attr('novalidate', 'novalidate'),
          settings = form.data(this.attr_name(true) + '-init') || {};

      this.invalid_attr = this.add_namespace('data-invalid');

      // Debounced validation: wait settings.timeout ms after the last
      // trigger before actually validating the field.
      function validate(originalSelf, e) {
        clearTimeout(self.timer);
        self.timer = setTimeout(function () {
          self.validate([originalSelf], e);
        }.bind(originalSelf), settings.timeout);
      }

      form
        .off('.abide')
        .on('submit.fndtn.abide', function (e) {
          var is_ajax = /ajax/i.test(self.S(this).attr(self.attr_name()));
          return self.validate(self.S(this).find('input, textarea, select').not(":hidden, [data-abide-ignore]").get(), e, is_ajax);
        })
        .on('validate.fndtn.abide', function (e) {
          if (settings.validate_on === 'manual') {
            self.validate([e.target], e);
          }
        })
        .on('reset', function (e) {
          return self.reset($(this), e);
        })
        .find('input, textarea, select').not(":hidden, [data-abide-ignore]")
          .off('.abide')
          .on('blur.fndtn.abide change.fndtn.abide', function (e) {
            var id = this.getAttribute('id'),
                eqTo = form.find('[data-equalto="'+ id +'"]');
            // old settings fallback
            // will be deprecated with F6 release
            if (settings.validate_on_blur && settings.validate_on_blur === true) {
              validate(this, e);
            }
            // checks if there is an equalTo equivalent related by id
            if(typeof eqTo.get(0) !== "undefined" && eqTo.val().length){
              validate(eqTo.get(0),e);
            }
            // new settings combining validate options into one setting
            if (settings.validate_on === 'change') {
              validate(this, e);
            }
          })
          .on('keydown.fndtn.abide', function (e) {
            var id = this.getAttribute('id'),
                eqTo = form.find('[data-equalto="'+ id +'"]');
            // old settings fallback
            // will be deprecated with F6 release
            if (settings.live_validate && settings.live_validate === true && e.which != 9) {
              validate(this, e);
            }
            // checks if there is an equalTo equivalent related by id
            if(typeof eqTo.get(0) !== "undefined" && eqTo.val().length){
              validate(eqTo.get(0),e);
            }
            // new settings combining validate options into one setting
            if (settings.validate_on === 'tab' && e.which === 9) {
              validate(this, e);
            }
            else if (settings.validate_on === 'change') {
              validate(this, e);
            }
          })
          .on('focus', function (e) {
            // on mobile browsers, scroll the focused field into view
            if (navigator.userAgent.match(/iPad|iPhone|Android|BlackBerry|Windows Phone|webOS/i)) {
              $('html, body').animate({
                scrollTop: $(e.target).offset().top
              }, 100);
            }
          });
    },
    // Clears all invalid markers, error classes and field values on the form.
    reset : function (form, e) {
      var self = this;
      form.removeAttr(self.invalid_attr);

      $('[' + self.invalid_attr + ']', form).removeAttr(self.invalid_attr);
      $('.' + self.settings.error_class, form).not('small').removeClass(self.settings.error_class);
      $(':input', form).not(':button, :submit, :reset, :hidden, [data-abide-ignore]').val('').removeAttr(self.invalid_attr);
    },
    // Validates the given elements; on submit/ajax, focuses the first invalid
    // field, marks the form and fires invalid/valid.fndtn.abide events.
    // Returns false to block submission when invalid (or always for ajax).
    validate : function (els, e, is_ajax) {
      var validations = this.parse_patterns(els),
          validation_count = validations.length,
          form = this.S(els[0]).closest('form'),
          submit_event = /submit/.test(e.type);

      // Has to count up to make sure the focus gets applied to the top error
      for (var i = 0; i < validation_count; i++) {
        if (!validations[i] && (submit_event || is_ajax)) {
          if (this.settings.focus_on_invalid) {
            els[i].focus();
          }
          form.trigger('invalid.fndtn.abide');
          this.S(els[i]).closest('form').attr(this.invalid_attr, '');
          return false;
        }
      }

      if (submit_event || is_ajax) {
        form.trigger('valid.fndtn.abide');
      }

      form.removeAttr(this.invalid_attr);

      // ajax forms never submit natively; the 'valid' event signals success
      if (is_ajax) {
        return false;
      }

      return true;
    },
parse_patterns : function (els) {
var i = els.length,
el_patterns = [];
while (i--) {
el_patterns.push(this.pattern(els[i]));
}
return this.check_validation_and_apply_styles(el_patterns);
},
pattern : function (el) {
var type = el.getAttribute('type'),
required = typeof el.getAttribute('required') === 'string';
var pattern = el.getAttribute('pattern') || '';
if (this.settings.patterns.hasOwnProperty(pattern) && pattern.length > 0) {
return [el, this.settings.patterns[pattern], required];
} else if (pattern.length > 0) {
return [el, new RegExp(pattern), required];
}
if (this.settings.patterns.hasOwnProperty(type)) {
return [el, this.settings.patterns[type], required];
}
pattern = /.*/;
return [el, pattern, required];
},
// TODO: Break this up into smaller methods, getting hard to read.
check_validation_and_apply_styles : function (el_patterns) {
var i = el_patterns.length,
validations = [];
if (i == 0) {
return validations;
}
var form = this.S(el_patterns[0][0]).closest('[data-' + this.attr_name(true) + ']'),
settings = form.data(this.attr_name(true) + '-init') || {};
while (i--) {
var el = el_patterns[i][0],
required = el_patterns[i][2],
value = el.value.trim(),
direct_parent = this.S(el).parent(),
validator = el.getAttribute(this.add_namespace('data-abide-validator')),
is_radio = el.type === 'radio',
is_checkbox = el.type === 'checkbox',
label = this.S('label[for="' + el.getAttribute('id') + '"]'),
valid_length = (required) ? (el.value.length > 0) : true,
el_validations = [];
var parent, valid;
// support old way to do equalTo validations
if (el.getAttribute(this.add_namespace('data-equalto'))) { validator = 'equalTo' }
if (!direct_parent.is('label')) {
parent = direct_parent;
} else {
parent = direct_parent.parent();
}
if (is_radio && required) {
el_validations.push(this.valid_radio(el, required));
} else if (is_checkbox && required) {
el_validations.push(this.valid_checkbox(el, required));
} else if (validator) {
// Validate using each of the specified (space-delimited) validators.
var validators = validator.split(' ');
var last_valid = true, all_valid = true;
for (var iv = 0; iv < validators.length; iv++) {
valid = this.settings.validators[validators[iv]].apply(this, [el, required, parent])
el_validations.push(valid);
all_valid = valid && last_valid;
last_valid = valid;
}
if (all_valid) {
this.S(el).removeAttr(this.invalid_attr);
parent.removeClass('error');
if (label.length > 0 && this.settings.error_labels) {
label.removeClass(this.settings.error_class).removeAttr('role');
}
$(el).triggerHandler('valid');
} else {
this.S(el).attr(this.invalid_attr, '');
parent.addClass('error');
if (label.length > 0 && this.settings.error_labels) {
label.addClass(this.settings.error_class).attr('role', 'alert');
}
$(el).triggerHandler('invalid');
}
} else {
if (el_patterns[i][1].test(value) && valid_length ||
!required && el.value.length < 1 || $(el).attr('disabled')) {
el_validations.push(true);
} else {
el_validations.push(false);
}
el_validations = [el_validations.every(function (valid) {return valid;})];
if (el_validations[0]) {
this.S(el).removeAttr(this.invalid_attr);
el.setAttribute('aria-invalid', 'false');
el.removeAttribute('aria-describedby');
parent.removeClass(this.settings.error_class);
if (label.length > 0 && this.settings.error_labels) {
label.removeClass(this.settings.error_class).removeAttr('role');
}
$(el).triggerHandler('valid');
} else {
this.S(el).attr(this.invalid_attr, '');
el.setAttribute('aria-invalid', 'true');
// Try to find the error associated with the input
var errorElem = parent.find('small.' + this.settings.error_class, 'span.' + this.settings.error_class);
var errorID = errorElem.length > 0 ? errorElem[0].id : '';
if (errorID.length > 0) {
el.setAttribute('aria-describedby', errorID);
}
// el.setAttribute('aria-describedby', $(el).find('.error')[0].id);
parent.addClass(this.settings.error_class);
if (label.length > 0 && this.settings.error_labels) {
label.addClass(this.settings.error_class).attr('role', 'alert');
}
$(el).triggerHandler('invalid');
}
}
validations = validations.concat(el_validations);
}
return validations;
},
valid_checkbox : function (el, required) {
var el = this.S(el),
valid = (el.is(':checked') || !required || el.get(0).getAttribute('disabled'));
if (valid) {
el.removeAttr(this.invalid_attr).parent().removeClass(this.settings.error_class);
$(el).triggerHandler('valid');
} else {
el.attr(this.invalid_attr, '').parent().addClass(this.settings.error_class);
$(el).triggerHandler('invalid');
}
return valid;
},
    valid_radio : function (el, required) {
      // Validate a required radio group: the group is valid when any member
      // is checked (or disabled).
      // NOTE(review): the disabled/checked interplay below is order-dependent
      // — a disabled member sets valid=true, but a *later* unchecked member
      // resets it to false. This looks accidental; confirm before relying on
      // the exact disabled semantics.
      var name = el.getAttribute('name'),
          group = this.S(el).closest('[data-' + this.attr_name(true) + ']').find("[name='" + name + "']"),
          count = group.length,
          valid = false,
          disabled = false;
      // First pass: derive the group's shared validity.
      for (var i=0; i < count; i++) {
        if( group[i].getAttribute('disabled') ){
          disabled=true;
          valid=true;
        } else {
          if (group[i].checked){
            valid = true;
          } else {
            if( disabled ){
              valid = false;
            }
          }
        }
      }
      // Second pass: apply the shared result to every member of the group.
      for (var i = 0; i < count; i++) {
        if (valid) {
          this.S(group[i]).removeAttr(this.invalid_attr).parent().removeClass(this.settings.error_class);
          $(group[i]).triggerHandler('valid');
        } else {
          this.S(group[i]).attr(this.invalid_attr, '').parent().addClass(this.settings.error_class);
          $(group[i]).triggerHandler('invalid');
        }
      }
      return valid;
    },
valid_equal : function (el, required, parent) {
var from = document.getElementById(el.getAttribute(this.add_namespace('data-equalto'))).value,
to = el.value,
valid = (from === to);
if (valid) {
this.S(el).removeAttr(this.invalid_attr);
parent.removeClass(this.settings.error_class);
if (label.length > 0 && settings.error_labels) {
label.removeClass(this.settings.error_class);
}
} else {
this.S(el).attr(this.invalid_attr, '');
parent.addClass(this.settings.error_class);
if (label.length > 0 && settings.error_labels) {
label.addClass(this.settings.error_class);
}
}
return valid;
},
valid_oneof : function (el, required, parent, doNotValidateOthers) {
var el = this.S(el),
others = this.S('[' + this.add_namespace('data-oneof') + ']'),
valid = others.filter(':checked').length > 0;
if (valid) {
el.removeAttr(this.invalid_attr).parent().removeClass(this.settings.error_class);
} else {
el.attr(this.invalid_attr, '').parent().addClass(this.settings.error_class);
}
if (!doNotValidateOthers) {
var _this = this;
others.each(function () {
_this.valid_oneof.call(_this, this, null, null, true);
});
}
return valid;
},
reflow : function(scope, options) {
var self = this,
form = self.S('[' + this.attr_name() + ']').attr('novalidate', 'novalidate');
self.S(form).each(function (idx, el) {
self.events(el);
});
}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.abide.js | foundation.abide.js |
;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.clearing = {
name : 'clearing',
version : '5.5.3',
settings : {
templates : {
viewing : '<a href="#" class="clearing-close">×</a>' +
'<div class="visible-img" style="display: none"><div class="clearing-touch-label"></div><img src="data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D" alt="" />' +
'<p class="clearing-caption"></p><a href="#" class="clearing-main-prev"><span></span></a>' +
'<a href="#" class="clearing-main-next"><span></span></a></div>' +
'<img class="clearing-preload-next" style="display: none" src="data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D" alt="" />' +
'<img class="clearing-preload-prev" style="display: none" src="data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D" alt="" />'
},
// comma delimited list of selectors that, on click, will close clearing,
// add 'div.clearing-blackout, div.visible-img' to close on background click
close_selectors : '.clearing-close, div.clearing-blackout',
// Default to the entire li element.
open_selectors : '',
// Image will be skipped in carousel.
skip_selector : '',
touch_label : '',
// event initializer and locks
init : false,
locked : false
},
init : function (scope, method, options) {
var self = this;
Foundation.inherit(this, 'throttle image_loaded');
this.bindings(method, options);
if (self.S(this.scope).is('[' + this.attr_name() + ']')) {
this.assemble(self.S('li', this.scope));
} else {
self.S('[' + this.attr_name() + ']', this.scope).each(function () {
self.assemble(self.S('li', this));
});
}
},
    events : function (scope) {
      // Wire up all clearing interactions: thumbnail clicks open the viewer,
      // paddle clicks and arrow keys navigate, close selectors dismiss, and
      // window resizes recenter the visible image.
      var self = this,
          S = self.S,
          $scroll_container = $('.scroll-container');
      // If the page scrolls inside a container, delegate events there instead.
      if ($scroll_container.length > 0) {
        this.scope = $scroll_container;
      }
      S(this.scope)
        .off('.clearing')
        .on('click.fndtn.clearing', 'ul[' + this.attr_name() + '] li ' + this.settings.open_selectors,
          function (e, current, target) {
            var current = current || S(this),
                target = target || current,
                next = current.next('li'),
                settings = current.closest('[' + self.attr_name() + ']').data(self.attr_name(true) + '-init'),
                image = S(e.target);
            e.preventDefault();
            // Lazily initialize galleries that were added after page load.
            if (!settings) {
              self.init();
              settings = current.closest('[' + self.attr_name() + ']').data(self.attr_name(true) + '-init');
            }
            // if clearing is open and the current image is
            // clicked, go to the next image in sequence
            if (target.hasClass('visible') &&
              current[0] === target[0] &&
              next.length > 0 && self.is_open(current)) {
              target = next;
              image = S('img', target);
            }
            // set current and target to the clicked li if not otherwise defined.
            self.open(image, current, target);
            self.update_paddles(target);
          })
        .on('click.fndtn.clearing', '.clearing-main-next',
          function (e) { self.nav(e, 'next') })
        .on('click.fndtn.clearing', '.clearing-main-prev',
          function (e) { self.nav(e, 'prev') })
        .on('click.fndtn.clearing', this.settings.close_selectors,
          function (e) { Foundation.libs.clearing.close(e, this) });
      $(document).on('keydown.fndtn.clearing',
        function (e) { self.keydown(e) });
      S(window).off('.clearing').on('resize.fndtn.clearing',
        function () { self.resize() });
      this.swipe_events(scope);
    },
    swipe_events : function (scope) {
      // Touch navigation: track horizontal swipes over the visible image and
      // translate them into prev/next moves (mostly-vertical moves are
      // treated as scrolling and left alone).
      var self = this,
          S = self.S;
      S(this.scope)
        .on('touchstart.fndtn.clearing', '.visible-img', function (e) {
          if (!e.touches) { e = e.originalEvent; }
          // Record where/when the gesture started so move events can compute
          // the horizontal delta.
          var data = {
            start_page_x : e.touches[0].pageX,
            start_page_y : e.touches[0].pageY,
            start_time : (new Date()).getTime(),
            delta_x : 0,
            is_scrolling : undefined
          };
          S(this).data('swipe-transition', data);
          e.stopPropagation();
        })
        .on('touchmove.fndtn.clearing', '.visible-img', function (e) {
          if (!e.touches) {
            e = e.originalEvent;
          }
          // Ignore pinch/zoom events
          if (e.touches.length > 1 || e.scale && e.scale !== 1) {
            return;
          }
          var data = S(this).data('swipe-transition');
          if (typeof data === 'undefined') {
            data = {};
          }
          data.delta_x = e.touches[0].pageX - data.start_page_x;
          if (Foundation.rtl) {
            data.delta_x = -data.delta_x;
          }
          // The first significant move decides: mostly-vertical = scrolling.
          if (typeof data.is_scrolling === 'undefined') {
            data.is_scrolling = !!( data.is_scrolling || Math.abs(data.delta_x) < Math.abs(e.touches[0].pageY - data.start_page_y) );
          }
          // Navigate once per gesture (data.active latches until touchend).
          if (!data.is_scrolling && !data.active) {
            e.preventDefault();
            var direction = (data.delta_x < 0) ? 'next' : 'prev';
            data.active = true;
            self.nav(e, direction);
          }
        })
        .on('touchend.fndtn.clearing', '.visible-img', function (e) {
          S(this).data('swipe-transition', {});
          e.stopPropagation();
        });
    },
    assemble : function ($li) {
      // Wrap the thumbnail <ul> in the viewer markup (close link, visible
      // image pane, preload slots) and a .carousel container. No-op when the
      // list has already been assembled.
      var $el = $li.parent();
      if ($el.parent().hasClass('carousel')) {
        return;
      }
      // Detach the list via a placeholder so it can be re-inserted wrapped.
      $el.after('<div id="foundationClearingHolder"></div>');
      var grid = $el.detach(),
          grid_outerHTML = '';
      if (grid[0] == null) {
        return;
      } else {
        grid_outerHTML = grid[0].outerHTML;
      }
      var holder = this.S('#foundationClearingHolder'),
          settings = $el.data(this.attr_name(true) + '-init'),
          data = {
            grid : '<div class="carousel">' + grid_outerHTML + '</div>',
            viewing : settings.templates.viewing
          },
          wrapper = '<div class="clearing-assembled"><div>' + data.viewing +
            data.grid + '</div></div>',
          touch_label = this.settings.touch_label;
      // On touch devices, inject the configured swipe-hint label text.
      if (Modernizr.touch) {
        wrapper = $(wrapper).find('.clearing-touch-label').html(touch_label).end();
      }
      holder.after(wrapper).remove();
    },
    open : function ($image, current, target) {
      // Open the viewer on `target`'s image: swap in the full-size source,
      // poll until it has loaded, then black out the page, center the image
      // and slide the carousel to the target thumbnail.
      var self = this,
          body = $(document.body),
          root = target.closest('.clearing-assembled'),
          container = self.S('div', root).first(),
          visible_image = self.S('.visible-img', container),
          image = self.S('img', visible_image).not($image),
          label = self.S('.clearing-touch-label', container),
          error = false,
          loaded = {};
      // Event to disable scrolling on touch devices when Clearing is activated
      $('body').on('touchmove', function (e) {
        e.preventDefault();
      });
      image.error(function () {
        error = true;
      });
      // Poll every 100ms until the image has real dimensions (outerWidth > 1)
      // or errored, then reveal it via cb().
      function startLoad() {
        setTimeout(function () {
          this.image_loaded(image, function () {
            if (image.outerWidth() === 1 && !error) {
              startLoad.call(this);
            } else {
              cb.call(this, image);
            }
          }.bind(this));
        }.bind(this), 100);
      }
      // Reveal the loaded image and activate the blackout/carousel chrome.
      function cb (image) {
        var $image = $(image);
        $image.css('visibility', 'visible');
        $image.trigger('imageVisible');
        // toggle the gallery
        body.css('overflow', 'hidden');
        root.addClass('clearing-blackout');
        container.addClass('clearing-container');
        visible_image.show();
        this.fix_height(target)
          .caption(self.S('.clearing-caption', visible_image), self.S('img', target))
          .center_and_label(image, label)
          .shift(current, target, function () {
            target.closest('li').siblings().removeClass('visible');
            target.closest('li').addClass('visible');
          });
        visible_image.trigger('opened.fndtn.clearing')
      }
      // Ignore opens while a slide animation holds the lock.
      if (!this.locked()) {
        visible_image.trigger('open.fndtn.clearing');
        // set the image to the selected thumbnail
        loaded = this.load($image);
        if (loaded.interchange) {
          image
            .attr('data-interchange', loaded.interchange)
            .foundation('interchange', 'reflow');
        } else {
          image
            .attr('src', loaded.src)
            .attr('data-interchange', '');
        }
        image.css('visibility', 'hidden');
        startLoad.call(this);
      }
    },
    close : function (e, el) {
      // Close the viewer when a close selector (or the blackout itself) is
      // the direct click target; restores page scrolling and tears down the
      // blackout/container chrome.
      e.preventDefault();
      var root = (function (target) {
            if (/blackout/.test(target.selector)) {
              return target;
            } else {
              return target.closest('.clearing-blackout');
            }
          }($(el))),
          body = $(document.body), container, visible_image;
      if (el === e.target && root) {
        body.css('overflow', '');
        container = $('div', root).first();
        visible_image = $('.visible-img', container);
        visible_image.trigger('close.fndtn.clearing');
        // Reset carousel position tracking for the next open.
        this.settings.prev_index = 0;
        $('ul[' + this.attr_name() + ']', root)
          .attr('style', '').closest('.clearing-blackout')
          .removeClass('clearing-blackout');
        container.removeClass('clearing-container');
        visible_image.hide();
        visible_image.trigger('closed.fndtn.clearing');
      }
      // Event to re-enable scrolling on touch devices
      $('body').off('touchmove');
      return false;
    },
is_open : function (current) {
return current.parent().prop('style').length > 0;
},
keydown : function (e) {
var clearing = $('.clearing-blackout ul[' + this.attr_name() + ']'),
NEXT_KEY = this.rtl ? 37 : 39,
PREV_KEY = this.rtl ? 39 : 37,
ESC_KEY = 27;
if (e.which === NEXT_KEY) {
this.go(clearing, 'next');
}
if (e.which === PREV_KEY) {
this.go(clearing, 'prev');
}
if (e.which === ESC_KEY) {
this.S('a.clearing-close').trigger('click.fndtn.clearing');
}
},
nav : function (e, direction) {
var clearing = $('ul[' + this.attr_name() + ']', '.clearing-blackout');
e.preventDefault();
this.go(clearing, direction);
},
resize : function () {
var image = $('img', '.clearing-blackout .visible-img'),
label = $('.clearing-touch-label', '.clearing-blackout');
if (image.length) {
this.center_and_label(image, label);
image.trigger('resized.fndtn.clearing')
}
},
// visual adjustments
fix_height : function (target) {
var lis = target.parent().children(),
self = this;
lis.each(function () {
var li = self.S(this),
image = li.find('img');
if (li.height() > image.outerHeight()) {
li.addClass('fix-height');
}
})
.closest('ul')
.width(lis.length * 100 + '%');
return this;
},
update_paddles : function (target) {
target = target.closest('li');
var visible_image = target
.closest('.carousel')
.siblings('.visible-img');
if (target.next().length > 0) {
this.S('.clearing-main-next', visible_image).removeClass('disabled');
} else {
this.S('.clearing-main-next', visible_image).addClass('disabled');
}
if (target.prev().length > 0) {
this.S('.clearing-main-prev', visible_image).removeClass('disabled');
} else {
this.S('.clearing-main-prev', visible_image).addClass('disabled');
}
},
    center_and_label : function (target, label) {
      // Position the touch-hint label horizontally centered and just above
      // the visible image. LTR centers via a negative left margin; RTL
      // mirrors the math on the right edge.
      if (!this.rtl && label.length > 0) {
        label.css({
          marginLeft : -(label.outerWidth() / 2),
          marginTop : -(target.outerHeight() / 2)-label.outerHeight()-10
        });
      } else {
        label.css({
          marginRight : -(label.outerWidth() / 2),
          marginTop : -(target.outerHeight() / 2)-label.outerHeight()-10,
          left: 'auto',
          right: '50%'
        });
      }
      // Chainable (used from open()'s reveal sequence).
      return this;
    },
// image loading and preloading
load : function ($image) {
var href,
interchange,
closest_a;
if ($image[0].nodeName === 'A') {
href = $image.attr('href');
interchange = $image.data('clearing-interchange');
} else {
closest_a = $image.closest('a');
href = closest_a.attr('href');
interchange = closest_a.data('clearing-interchange');
}
this.preload($image);
return {
'src': href ? href : $image.attr('src'),
'interchange': href ? interchange : $image.data('clearing-interchange')
}
},
preload : function ($image) {
this
.img($image.closest('li').next(), 'next')
.img($image.closest('li').prev(), 'prev');
},
    img : function (img, sibling_type) {
      // Point the hidden .clearing-preload-(next|prev) image at the given
      // sibling slide's source so the browser fetches it ahead of time.
      if (img.length) {
        var preload_img = $('.clearing-preload-' + sibling_type),
            new_a = this.S('a', img),
            src,
            interchange,
            image;
        // Prefer the anchor's href (full-size image) over the thumbnail src.
        if (new_a.length) {
          src = new_a.attr('href');
          interchange = new_a.data('clearing-interchange');
        } else {
          image = this.S('img', img);
          src = image.attr('src');
          interchange = image.data('clearing-interchange');
        }
        if (interchange) {
          preload_img.attr('data-interchange', interchange);
        } else {
          preload_img.attr('src', src);
          preload_img.attr('data-interchange', '');
        }
      }
      // Chainable: preload() calls this twice in a row.
      return this;
    },
// image caption
caption : function (container, $image) {
var caption = $image.attr('data-caption');
if (caption) {
var containerPlain = container.get(0);
containerPlain.innerHTML = caption;
container.show();
} else {
container
.text('')
.hide();
}
return this;
},
    // directional methods
    go : function ($ul, direction) {
      // Move the selection one slide in `direction` (honoring skip_selector)
      // by re-triggering the thumbnail click handler on the new target.
      var current = this.S('.visible', $ul),
          target = current[direction]();
      // Check for skip selector.
      if (this.settings.skip_selector && target.find(this.settings.skip_selector).length != 0) {
        target = target[direction]();
      }
      if (target.length) {
        this.S('img', target)
          .trigger('click.fndtn.clearing', [current, target])
          .trigger('change.fndtn.clearing');
      }
    },
shift : function (current, target, callback) {
var clearing = target.parent(),
old_index = this.settings.prev_index || target.index(),
direction = this.direction(clearing, current, target),
dir = this.rtl ? 'right' : 'left',
left = parseInt(clearing.css('left'), 10),
width = target.outerWidth(),
skip_shift;
var dir_obj = {};
// we use jQuery animate instead of CSS transitions because we
// need a callback to unlock the next animation
// needs support for RTL **
if (target.index() !== old_index && !/skip/.test(direction)) {
if (/left/.test(direction)) {
this.lock();
dir_obj[dir] = left + width;
clearing.animate(dir_obj, 300, this.unlock());
} else if (/right/.test(direction)) {
this.lock();
dir_obj[dir] = left - width;
clearing.animate(dir_obj, 300, this.unlock());
}
} else if (/skip/.test(direction)) {
// the target image is not adjacent to the current image, so
// do we scroll right or not
skip_shift = target.index() - this.settings.up_count;
this.lock();
if (skip_shift > 0) {
dir_obj[dir] = -(skip_shift * width);
clearing.animate(dir_obj, 300, this.unlock());
} else {
dir_obj[dir] = 0;
clearing.animate(dir_obj, 300, this.unlock());
}
}
callback();
},
    direction : function ($el, current, target) {
      // Decide how shift() should animate: 'left'/'right' for moves adjacent
      // to the previous position, 'skip' for jumps, false when no sliding is
      // needed. Also records how many thumbnails fit across the container
      // (up_count) and remembers the new index for the next call.
      var lis = this.S('li', $el),
          // Thumbnail footprint: measured width plus a 25% spacing allowance.
          li_width = lis.outerWidth() + (lis.outerWidth() / 4),
          up_count = Math.floor(this.S('.clearing-container').outerWidth() / li_width) - 1,
          target_index = lis.index(target),
          response;
      this.settings.up_count = up_count;
      if (this.adjacent(this.settings.prev_index, target_index)) {
        if ((target_index > up_count) && target_index > this.settings.prev_index) {
          response = 'right';
        } else if ((target_index > up_count - 1) && target_index <= this.settings.prev_index) {
          response = 'left';
        } else {
          response = false;
        }
      } else {
        response = 'skip';
      }
      this.settings.prev_index = target_index;
      return response;
    },
adjacent : function (current_index, target_index) {
for (var i = target_index + 1; i >= target_index - 1; i--) {
if (i === current_index) {
return true;
}
}
return false;
},
    // lock management
    lock : function () {
      // Block further navigation while a slide animation is in flight.
      this.settings.locked = true;
    },
    unlock : function () {
      this.settings.locked = false;
    },
    locked : function () {
      return this.settings.locked;
    },
    off : function () {
      // Unbind all clearing event handlers from the scope and window.
      this.S(this.scope).off('.fndtn.clearing');
      this.S(window).off('.fndtn.clearing');
    },
    reflow : function () {
      // Re-run init to pick up galleries added after the initial load.
      this.init();
    }
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.clearing.js | foundation.clearing.js |
;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.topbar = {
name : 'topbar',
version : '5.5.3',
settings : {
index : 0,
start_offset : 0,
sticky_class : 'sticky',
custom_back_text : true,
back_text : 'Back',
mobile_show_parent_link : true,
is_hover : true,
scrolltop : true, // jump to top when sticky nav menu toggle is clicked
sticky_on : 'all',
dropdown_autoclose: true
},
    init : function (section, method, options) {
      // Set up each top bar in scope: record its height and scroll offset for
      // sticky handling, build the generated mobile sub-menu markup, and pad
      // the body when the bar is fixed.
      Foundation.inherit(this, 'add_custom_rule register_media throttle');
      var self = this;
      self.register_media('topbar', 'foundation-mq-topbar');
      this.bindings(method, options);
      self.S('[' + this.attr_name() + ']', this.scope).each(function () {
        var topbar = $(this),
            settings = topbar.data(self.attr_name(true) + '-init'),
            section = self.S('section, .top-bar-section', this);
        // Drill-down depth counter for the mobile menu.
        topbar.data('index', 0);
        var topbarContainer = topbar.parent();
        if (topbarContainer.hasClass('fixed') || self.is_sticky(topbar, topbarContainer, settings) ) {
          self.settings.sticky_class = settings.sticky_class;
          self.settings.sticky_topbar = topbar;
          topbar.data('height', topbarContainer.outerHeight());
          topbar.data('stickyoffset', topbarContainer.offset().top);
        } else {
          topbar.data('height', topbar.outerHeight());
        }
        if (!settings.assembled) {
          self.assemble(topbar);
        }
        // Hover-driven dropdowns carry the not-click marker class.
        if (settings.is_hover) {
          self.S('.has-dropdown', topbar).addClass('not-click');
        } else {
          self.S('.has-dropdown', topbar).removeClass('not-click');
        }
        // Pad body when sticky (scrolled) or fixed.
        self.add_custom_rule('.f-topbar-fixed { padding-top: ' + topbar.data('height') + 'px }');
        if (topbarContainer.hasClass('fixed')) {
          self.S('body').addClass('f-topbar-fixed');
        }
      });
    },
    is_sticky : function (topbar, topbarContainer, settings) {
      // Whether the bar should behave as sticky right now: the container must
      // carry the sticky class, and the active media query must be listed in
      // settings.sticky_on ('all' or any of small/medium/large).
      var sticky = topbarContainer.hasClass(settings.sticky_class);
      var smallMatch = matchMedia(Foundation.media_queries.small).matches;
      var medMatch = matchMedia(Foundation.media_queries.medium).matches;
      var lrgMatch = matchMedia(Foundation.media_queries.large).matches;
      if (sticky && settings.sticky_on === 'all') {
        return true;
      }
      // NOTE(review): this.small()/medium()/large() evaluate the same media
      // queries as the *Match vars above, so each branch checks the query
      // twice — looks redundant but is preserved as-is.
      if (sticky && this.small() && settings.sticky_on.indexOf('small') !== -1) {
        if (smallMatch && !medMatch && !lrgMatch) { return true; }
      }
      if (sticky && this.medium() && settings.sticky_on.indexOf('medium') !== -1) {
        if (smallMatch && medMatch && !lrgMatch) { return true; }
      }
      if (sticky && this.large() && settings.sticky_on.indexOf('large') !== -1) {
        if (smallMatch && medMatch && lrgMatch) { return true; }
      }
      return false;
    },
    toggle : function (toggleEl) {
      // Expand/collapse the mobile menu and juggle the fixed/sticky classes
      // between the bar and its container so it stays usable (optionally
      // scrolling back to the top when collapsing a fixed bar).
      var self = this,
          topbar;
      if (toggleEl) {
        topbar = self.S(toggleEl).closest('[' + this.attr_name() + ']');
      } else {
        topbar = self.S('[' + this.attr_name() + ']');
      }
      var settings = topbar.data(this.attr_name(true) + '-init');
      var section = self.S('section, .top-bar-section', topbar);
      if (self.breakpoint()) {
        // Reset any drilled-down sub-menu state before toggling.
        if (!self.rtl) {
          section.css({left : '0%'});
          $('>.name', section).css({left : '100%'});
        } else {
          section.css({right : '0%'});
          $('>.name', section).css({right : '100%'});
        }
        self.S('li.moved', section).removeClass('moved');
        topbar.data('index', 0);
        topbar
          .toggleClass('expanded')
          .css('height', '');
      }
      if (settings.scrolltop) {
        if (!topbar.hasClass('expanded')) {
          if (topbar.hasClass('fixed')) {
            topbar.parent().addClass('fixed');
            topbar.removeClass('fixed');
            self.S('body').addClass('f-topbar-fixed');
          }
        } else if (topbar.parent().hasClass('fixed')) {
          if (settings.scrolltop) {
            topbar.parent().removeClass('fixed');
            topbar.addClass('fixed');
            self.S('body').removeClass('f-topbar-fixed');
            window.scrollTo(0, 0);
          } else {
            topbar.parent().removeClass('expanded');
          }
        }
      } else {
        if (self.is_sticky(topbar, topbar.parent(), settings)) {
          topbar.parent().addClass('fixed');
        }
        if (topbar.parent().hasClass('fixed')) {
          if (!topbar.hasClass('expanded')) {
            topbar.removeClass('fixed');
            topbar.parent().removeClass('expanded');
            self.update_sticky_positioning();
          } else {
            topbar.addClass('fixed');
            topbar.parent().addClass('expanded');
            self.S('body').addClass('f-topbar-fixed');
          }
        }
      }
    },
timer : null,
    events : function (bar) {
      // Bind all top bar behavior: menu toggling, dropdown open/close on
      // click (or hover), mobile drill-down/back navigation, outside-click
      // dismissal, focus-driven dropdowns and window-resize handling.
      var self = this,
          S = this.S;
      S(this.scope)
        .off('.topbar')
        .on('click.fndtn.topbar', '[' + this.attr_name() + '] .toggle-topbar', function (e) {
          e.preventDefault();
          self.toggle(this);
        })
        .on('click.fndtn.topbar contextmenu.fndtn.topbar', '.top-bar .top-bar-section li a[href^="#"],[' + this.attr_name() + '] .top-bar-section li a[href^="#"]', function (e) {
          var li = $(this).closest('li'),
              topbar = li.closest('[' + self.attr_name() + ']'),
              settings = topbar.data(self.attr_name(true) + '-init');
          // Optionally close a hover-opened dropdown after following a link.
          if (settings.dropdown_autoclose && settings.is_hover) {
            var hoverLi = $(this).closest('.hover');
            hoverLi.removeClass('hover');
          }
          // On small screens, collapse the menu after choosing a leaf link.
          if (self.breakpoint() && !li.hasClass('back') && !li.hasClass('has-dropdown')) {
            self.toggle();
          }
        })
        .on('click.fndtn.topbar', '[' + this.attr_name() + '] li.has-dropdown', function (e) {
          var li = S(this),
              target = S(e.target),
              topbar = li.closest('[' + self.attr_name() + ']'),
              settings = topbar.data(self.attr_name(true) + '-init');
          if (target.data('revealId')) {
            self.toggle();
            return;
          }
          // Click-managed dropdowns apply only on desktop widths and when
          // hover mode is off (touch devices always use click).
          if (self.breakpoint()) {
            return;
          }
          if (settings.is_hover && !Modernizr.touch) {
            return;
          }
          e.stopImmediatePropagation();
          if (li.hasClass('hover')) {
            li
              .removeClass('hover')
              .find('li')
              .removeClass('hover');
            li.parents('li.hover')
              .removeClass('hover');
          } else {
            li.addClass('hover');
            $(li).siblings().removeClass('hover');
            if (target[0].nodeName === 'A' && target.parent().hasClass('has-dropdown')) {
              e.preventDefault();
            }
          }
        })
        .on('click.fndtn.topbar', '[' + this.attr_name() + '] .has-dropdown>a', function (e) {
          // Mobile drill-down: slide the section over one level and grow the
          // bar to fit the revealed sub-menu.
          if (self.breakpoint()) {
            e.preventDefault();
            var $this = S(this),
                topbar = $this.closest('[' + self.attr_name() + ']'),
                section = topbar.find('section, .top-bar-section'),
                dropdownHeight = $this.next('.dropdown').outerHeight(),
                $selectedLi = $this.closest('li');
            topbar.data('index', topbar.data('index') + 1);
            $selectedLi.addClass('moved');
            if (!self.rtl) {
              section.css({left : -(100 * topbar.data('index')) + '%'});
              section.find('>.name').css({left : 100 * topbar.data('index') + '%'});
            } else {
              section.css({right : -(100 * topbar.data('index')) + '%'});
              section.find('>.name').css({right : 100 * topbar.data('index') + '%'});
            }
            topbar.css('height', $this.siblings('ul').outerHeight(true) + topbar.data('height'));
          }
        });
      S(window).off('.topbar').on('resize.fndtn.topbar', self.throttle(function () {
        self.resize.call(self);
      }, 50)).trigger('resize.fndtn.topbar').load(function () {
        // Ensure that the offset is calculated after all of the pages resources have loaded
        S(this).trigger('resize.fndtn.topbar');
      });
      // Clicks outside any hovered item close every open dropdown.
      S('body').off('.topbar').on('click.fndtn.topbar', function (e) {
        var parent = S(e.target).closest('li').closest('li.hover');
        if (parent.length > 0) {
          return;
        }
        S('[' + self.attr_name() + '] li.hover').removeClass('hover');
      });
      // Go up a level on Click
      S(this.scope).on('click.fndtn.topbar', '[' + this.attr_name() + '] .has-dropdown .back', function (e) {
        e.preventDefault();
        var $this = S(this),
            topbar = $this.closest('[' + self.attr_name() + ']'),
            section = topbar.find('section, .top-bar-section'),
            settings = topbar.data(self.attr_name(true) + '-init'),
            $movedLi = $this.closest('li.moved'),
            $previousLevelUl = $movedLi.parent();
        topbar.data('index', topbar.data('index') - 1);
        if (!self.rtl) {
          section.css({left : -(100 * topbar.data('index')) + '%'});
          section.find('>.name').css({left : 100 * topbar.data('index') + '%'});
        } else {
          section.css({right : -(100 * topbar.data('index')) + '%'});
          section.find('>.name').css({right : 100 * topbar.data('index') + '%'});
        }
        if (topbar.data('index') === 0) {
          topbar.css('height', '');
        } else {
          topbar.css('height', $previousLevelUl.outerHeight(true) + topbar.data('height'));
        }
        // Drop the marker after the 300ms slide transition finishes.
        setTimeout(function () {
          $movedLi.removeClass('moved');
        }, 300);
      });
      // Show dropdown menus when their items are focused
      S(this.scope).find('.dropdown a')
        .focus(function () {
          $(this).parents('.has-dropdown').addClass('hover');
        })
        .blur(function () {
          $(this).parents('.has-dropdown').removeClass('hover');
        });
    },
    resize : function () {
      // On viewport changes: collapse an expanded mobile menu when returning
      // to desktop widths and recompute each sticky bar's scroll offset.
      var self = this;
      self.S('[' + this.attr_name() + ']').each(function () {
        var topbar = self.S(this),
            settings = topbar.data(self.attr_name(true) + '-init');
        var stickyContainer = topbar.parent('.' + self.settings.sticky_class);
        var stickyOffset;
        if (!self.breakpoint()) {
          var doToggle = topbar.hasClass('expanded');
          topbar
            .css('height', '')
            .removeClass('expanded')
            .find('li')
            .removeClass('hover');
          if (doToggle) {
            self.toggle(topbar);
          }
        }
        if (self.is_sticky(topbar, stickyContainer, settings)) {
          if (stickyContainer.hasClass('fixed')) {
            // Remove the fixed to allow for correct calculation of the offset.
            stickyContainer.removeClass('fixed');
            stickyOffset = stickyContainer.offset().top;
            if (self.S(document.body).hasClass('f-topbar-fixed')) {
              stickyOffset -= topbar.data('height');
            }
            topbar.data('stickyoffset', stickyOffset);
            stickyContainer.addClass('fixed');
          } else {
            stickyOffset = stickyContainer.offset().top;
            topbar.data('stickyoffset', stickyOffset);
          }
        }
      });
    },
    breakpoint : function () {
      // True when the viewport is narrower than the top bar breakpoint,
      // i.e. the bar should be in collapsed/mobile mode.
      return !matchMedia(Foundation.media_queries['topbar']).matches;
    },
    small : function () {
      return matchMedia(Foundation.media_queries['small']).matches;
    },
    medium : function () {
      return matchMedia(Foundation.media_queries['medium']).matches;
    },
    large : function () {
      return matchMedia(Foundation.media_queries['large']).matches;
    },
    assemble : function (topbar) {
      // Build the generated mobile markup: a "back" title row (and optionally
      // a link to the parent page) at the top of every dropdown.
      var self = this,
          settings = topbar.data(this.attr_name(true) + '-init'),
          section = self.S('section, .top-bar-section', topbar);
      // Pull element out of the DOM for manipulation
      section.detach();
      self.S('.has-dropdown>a', section).each(function () {
        var $link = self.S(this),
            $dropdown = $link.siblings('.dropdown'),
            url = $link.attr('href'),
            $titleLi;
        if (!$dropdown.find('.title.back').length) {
          if (settings.mobile_show_parent_link == true && url) {
            $titleLi = $('<li class="title back js-generated"><h5><a href="javascript:void(0)"></a></h5></li><li class="parent-link hide-for-medium-up"><a class="parent-link js-generated" href="' + url + '">' + $link.html() +'</a></li>');
          } else {
            $titleLi = $('<li class="title back js-generated"><h5><a href="javascript:void(0)"></a></h5>');
          }
          // Copy link to subnav
          if (settings.custom_back_text == true) {
            $('h5>a', $titleLi).html(settings.back_text);
          } else {
            $('h5>a', $titleLi).html('&laquo; ' + $link.html());
          }
          $dropdown.prepend($titleLi);
        }
      });
      // Put element back in the DOM
      section.appendTo(topbar);
      // check for sticky
      this.sticky();
      this.assembled(topbar);
    },
    assembled : function (topbar) {
      // Mark the bar as assembled so init() doesn't rebuild its markup.
      topbar.data(this.attr_name(true), $.extend({}, topbar.data(this.attr_name(true)), {assembled : true}));
    },
height : function (ul) {
var total = 0,
self = this;
$('> li', ul).each(function () {
total += self.S(this).outerHeight(true);
});
return total;
},
sticky : function () {
var self = this;
this.S(window).on('scroll', function () {
self.update_sticky_positioning();
});
},
    update_sticky_positioning : function () {
      // Fix/unfix the sticky bar based on the current scroll position versus
      // its recorded offset (plus the configured start_offset).
      var klass = '.' + this.settings.sticky_class,
          $window = this.S(window),
          self = this;
      if (self.settings.sticky_topbar && self.is_sticky(this.settings.sticky_topbar,this.settings.sticky_topbar.parent(), this.settings)) {
        var distance = this.settings.sticky_topbar.data('stickyoffset') + this.settings.start_offset;
        // An expanded mobile menu keeps whatever positioning it already has.
        if (!self.S(klass).hasClass('expanded')) {
          if ($window.scrollTop() > (distance)) {
            if (!self.S(klass).hasClass('fixed')) {
              self.S(klass).addClass('fixed');
              self.S('body').addClass('f-topbar-fixed');
            }
          } else if ($window.scrollTop() <= distance) {
            if (self.S(klass).hasClass('fixed')) {
              self.S(klass).removeClass('fixed');
              self.S('body').removeClass('f-topbar-fixed');
            }
          }
        }
      }
    },
    off : function () {
      // Unbind every topbar-namespaced event from the scope and the window.
      this.S(this.scope).off('.fndtn.topbar');
      this.S(window).off('.fndtn.topbar');
    },
    // Intentional no-op: topbar re-lays itself out via its own handlers.
    reflow : function () {}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.topbar.js | foundation.topbar.js |
(function ($, window, document, undefined) {
'use strict';
var header_helpers = function (class_array) {
var head = $('head');
head.prepend($.map(class_array, function (class_name) {
if (head.has('.' + class_name).length === 0) {
return '<meta class="' + class_name + '" />';
}
}));
};
header_helpers([
'foundation-mq-small',
'foundation-mq-small-only',
'foundation-mq-medium',
'foundation-mq-medium-only',
'foundation-mq-large',
'foundation-mq-large-only',
'foundation-mq-xlarge',
'foundation-mq-xlarge-only',
'foundation-mq-xxlarge',
'foundation-data-attribute-namespace']);
// Enable FastClick if present
$(function () {
if (typeof FastClick !== 'undefined') {
// Don't attach to body if undefined
if (typeof document.body !== 'undefined') {
FastClick.attach(document.body);
}
}
});
// private Fast Selector wrapper,
// returns jQuery object. Only use where
// getElementById is not available.
var S = function (selector, context) {
if (typeof selector === 'string') {
if (context) {
var cont;
if (context.jquery) {
cont = context[0];
if (!cont) {
return context;
}
} else {
cont = context;
}
return $(cont.querySelectorAll(selector));
}
return $(document.querySelectorAll(selector));
}
return $(selector, context);
};
// Namespace functions.
var attr_name = function (init) {
var arr = [];
if (!init) {
arr.push('data');
}
if (this.namespace.length > 0) {
arr.push(this.namespace);
}
arr.push(this.name);
return arr.join('-');
};
var add_namespace = function (str) {
var parts = str.split('-'),
i = parts.length,
arr = [];
while (i--) {
if (i !== 0) {
arr.push(parts[i]);
} else {
if (this.namespace.length > 0) {
arr.push(this.namespace, parts[i]);
} else {
arr.push(parts[i]);
}
}
}
return arr.reverse().join('-');
};
// Event binding and data-options updating.
var bindings = function (method, options) {
var self = this,
bind = function(){
var $this = S(this),
should_bind_events = !$this.data(self.attr_name(true) + '-init');
$this.data(self.attr_name(true) + '-init', $.extend({}, self.settings, (options || method), self.data_options($this)));
if (should_bind_events) {
self.events(this);
}
};
if (S(this.scope).is('[' + this.attr_name() +']')) {
bind.call(this.scope);
} else {
S('[' + this.attr_name() +']', this.scope).each(bind);
}
// # Patch to fix #5043 to move this *after* the if/else clause in order for Backbone and similar frameworks to have improved control over event binding and data-options updating.
if (typeof method === 'string') {
return this[method].call(this, options);
}
};
var single_image_loaded = function (image, callback) {
function loaded () {
callback(image[0]);
}
function bindLoad () {
this.one('load', loaded);
if (/MSIE (\d+\.\d+);/.test(navigator.userAgent)) {
var src = this.attr( 'src' ),
param = src.match( /\?/ ) ? '&' : '?';
param += 'random=' + (new Date()).getTime();
this.attr('src', src + param);
}
}
if (!image.attr('src')) {
loaded();
return;
}
if (image[0].complete || image[0].readyState === 4) {
loaded();
} else {
bindLoad.call(image);
}
};
/*! matchMedia() polyfill - Test a CSS media type/query in JS. Authors & copyright (c) 2012: Scott Jehl, Paul Irish, Nicholas Zakas, David Knight. Dual MIT/BSD license */
window.matchMedia || (window.matchMedia = function() {
"use strict";
// For browsers that support matchMedium api such as IE 9 and webkit
var styleMedia = (window.styleMedia || window.media);
// For those that don't support matchMedium
if (!styleMedia) {
var style = document.createElement('style'),
script = document.getElementsByTagName('script')[0],
info = null;
style.type = 'text/css';
style.id = 'matchmediajs-test';
script.parentNode.insertBefore(style, script);
// 'style.currentStyle' is used by IE <= 8 and 'window.getComputedStyle' for all other browsers
info = ('getComputedStyle' in window) && window.getComputedStyle(style, null) || style.currentStyle;
styleMedia = {
matchMedium: function(media) {
var text = '@media ' + media + '{ #matchmediajs-test { width: 1px; } }';
// 'style.styleSheet' is used by IE <= 8 and 'style.textContent' for all other browsers
if (style.styleSheet) {
style.styleSheet.cssText = text;
} else {
style.textContent = text;
}
// Test if media query is true or false
return info.width === '1px';
}
};
}
return function(media) {
return {
matches: styleMedia.matchMedium(media || 'all'),
media: media || 'all'
};
};
}());
/*
* jquery.requestAnimationFrame
* https://github.com/gnarf37/jquery-requestAnimationFrame
* Requires jQuery 1.8+
*
* Copyright (c) 2012 Corey Frang
* Licensed under the MIT license.
*/
(function(jQuery) {
// requestAnimationFrame polyfill adapted from Erik Möller
// fixes from Paul Irish and Tino Zijdel
// http://paulirish.com/2011/requestanimationframe-for-smart-animating/
// http://my.opera.com/emoller/blog/2011/12/20/requestanimationframe-for-smart-er-animating
var animating,
lastTime = 0,
vendors = ['webkit', 'moz'],
requestAnimationFrame = window.requestAnimationFrame,
cancelAnimationFrame = window.cancelAnimationFrame,
jqueryFxAvailable = 'undefined' !== typeof jQuery.fx;
for (; lastTime < vendors.length && !requestAnimationFrame; lastTime++) {
requestAnimationFrame = window[ vendors[lastTime] + 'RequestAnimationFrame' ];
cancelAnimationFrame = cancelAnimationFrame ||
window[ vendors[lastTime] + 'CancelAnimationFrame' ] ||
window[ vendors[lastTime] + 'CancelRequestAnimationFrame' ];
}
function raf() {
if (animating) {
requestAnimationFrame(raf);
if (jqueryFxAvailable) {
jQuery.fx.tick();
}
}
}
if (requestAnimationFrame) {
// use rAF
window.requestAnimationFrame = requestAnimationFrame;
window.cancelAnimationFrame = cancelAnimationFrame;
if (jqueryFxAvailable) {
jQuery.fx.timer = function (timer) {
if (timer() && jQuery.timers.push(timer) && !animating) {
animating = true;
raf();
}
};
jQuery.fx.stop = function () {
animating = false;
};
}
} else {
// polyfill
window.requestAnimationFrame = function (callback) {
var currTime = new Date().getTime(),
timeToCall = Math.max(0, 16 - (currTime - lastTime)),
id = window.setTimeout(function () {
callback(currTime + timeToCall);
}, timeToCall);
lastTime = currTime + timeToCall;
return id;
};
window.cancelAnimationFrame = function (id) {
clearTimeout(id);
};
}
}( $ ));
function removeQuotes (string) {
if (typeof string === 'string' || string instanceof String) {
string = string.replace(/^['\\/"]+|(;\s?})+|['\\/"]+$/g, '');
}
return string;
}
function MediaQuery(selector) {
this.selector = selector;
this.query = '';
}
MediaQuery.prototype.toString = function () {
return this.query || (this.query = S(this.selector).css('font-family').replace(/^[\/\\'"]+|(;\s?})+|[\/\\'"]+$/g, ''));
};
  window.Foundation = {
    name : 'Foundation',
    version : '5.5.3',
    // Named media queries, resolved lazily from the marker <meta> tags' CSS.
    media_queries : {
      'small' : new MediaQuery('.foundation-mq-small'),
      'small-only' : new MediaQuery('.foundation-mq-small-only'),
      'medium' : new MediaQuery('.foundation-mq-medium'),
      'medium-only' : new MediaQuery('.foundation-mq-medium-only'),
      'large' : new MediaQuery('.foundation-mq-large'),
      'large-only' : new MediaQuery('.foundation-mq-large-only'),
      'xlarge' : new MediaQuery('.foundation-mq-xlarge'),
      'xlarge-only' : new MediaQuery('.foundation-mq-xlarge-only'),
      'xxlarge' : new MediaQuery('.foundation-mq-xxlarge')
    },
    // Dedicated <style> element; receives rules via utils.add_custom_rule().
    stylesheet : $('<style></style>').appendTo('head')[0].sheet,
    // Global configuration shared by every Foundation library.
    global : {
      namespace : undefined
    },
    init : function (scope, libraries, method, options, response) {
      // Bootstrap Foundation inside `scope`. `libraries` may name a single
      // library to initialize; otherwise every registered library is started.
      var args = [scope, method, options, response],
          responses = [];
      // check RTL
      this.rtl = /rtl/i.test(S('html').attr('dir'));
      // set foundation global scope
      this.scope = scope || this.scope;
      this.set_namespace();
      if (libraries && typeof libraries === 'string' && !/reflow/i.test(libraries)) {
        if (this.libs.hasOwnProperty(libraries)) {
          responses.push(this.init_lib(libraries, args));
        }
      } else {
        for (var lib in this.libs) {
          // NOTE(review): this branch forwards `libraries` (not `args`) to
          // init_lib - confirm that per-library option maps ride in there.
          responses.push(this.init_lib(lib, libraries));
        }
      }
      // Once all assets are loaded, nudge every responsive plugin to lay out.
      S(window).load(function () {
        S(window)
          .trigger('resize.fndtn.clearing')
          .trigger('resize.fndtn.dropdown')
          .trigger('resize.fndtn.equalizer')
          .trigger('resize.fndtn.interchange')
          .trigger('resize.fndtn.joyride')
          .trigger('resize.fndtn.magellan')
          .trigger('resize.fndtn.topbar')
          .trigger('resize.fndtn.slider');
      });
      return scope;
    },
    init_lib : function (lib, args) {
      // Initialize one registered library after patching the shared helpers
      // onto it. `args` may be a positional array or a map of option objects
      // keyed by library name.
      if (this.libs.hasOwnProperty(lib)) {
        this.patch(this.libs[lib]);
        if (args && args.hasOwnProperty(lib)) {
          // Deep-merge the per-library options into settings (or defaults).
          if (typeof this.libs[lib].settings !== 'undefined') {
            $.extend(true, this.libs[lib].settings, args[lib]);
          } else if (typeof this.libs[lib].defaults !== 'undefined') {
            $.extend(true, this.libs[lib].defaults, args[lib]);
          }
          return this.libs[lib].init.apply(this.libs[lib], [this.scope, args[lib]]);
        }
        args = args instanceof Array ? args : new Array(args);
        return this.libs[lib].init.apply(this.libs[lib], args);
      }
      // Unknown library: return a harmless no-op.
      return function () {};
    },
    patch : function (lib) {
      // Graft the shared Foundation plumbing onto a library object so every
      // plugin exposes the same helper surface (selector, naming, bindings).
      lib.scope = this.scope;
      lib.namespace = this.global.namespace;
      lib.rtl = this.rtl;
      lib['data_options'] = this.utils.data_options;
      lib['attr_name'] = attr_name;
      lib['add_namespace'] = add_namespace;
      lib['bindings'] = bindings;
      lib['S'] = this.utils.S;
    },
inherit : function (scope, methods) {
var methods_arr = methods.split(' '),
i = methods_arr.length;
while (i--) {
if (this.utils.hasOwnProperty(methods_arr[i])) {
scope[methods_arr[i]] = this.utils[methods_arr[i]];
}
}
},
    set_namespace : function () {
      // Description:
      // Don't bother reading the namespace out of the meta tag
      // if the namespace has been set globally in javascript
      //
      // Example:
      // Foundation.global.namespace = 'my-namespace';
      // or make it an empty string:
      // Foundation.global.namespace = '';
      //
      //
      // If the namespace has not been set (is undefined), try to read it out of the meta element.
      // Otherwise use the globally defined namespace, even if it's empty ('')
      var namespace = ( this.global.namespace === undefined ) ? $('.foundation-data-attribute-namespace').css('font-family') : this.global.namespace;
      // Finally, if the namespace is either undefined or false, set it to an empty string.
      // Otherwise use the namespace value.
      this.global.namespace = ( namespace === undefined || /false/i.test(namespace) ) ? '' : namespace;
    },
    // Registry of Foundation plugins, keyed by name; filled by each plugin file.
    libs : {},
// methods that can be inherited in libraries
utils : {
// Description:
// Fast Selector wrapper returns jQuery object. Only use where getElementById
// is not available.
//
// Arguments:
// Selector (String): CSS selector describing the element(s) to be
// returned as a jQuery object.
//
// Scope (String): CSS selector describing the area to be searched. Default
// is document.
//
// Returns:
// Element (jQuery Object): jQuery object containing elements matching the
// selector within the scope.
S : S,
// Description:
// Executes a function a max of once every n milliseconds
//
// Arguments:
// Func (Function): Function to be throttled.
//
// Delay (Integer): Function execution threshold in milliseconds.
//
// Returns:
// Lazy_function (Function): Function with throttling applied.
throttle : function (func, delay) {
var timer = null;
return function () {
var context = this, args = arguments;
if (timer == null) {
timer = setTimeout(function () {
func.apply(context, args);
timer = null;
}, delay);
}
};
},
// Description:
// Executes a function when it stops being invoked for n seconds
// Modified version of _.debounce() http://underscorejs.org
//
// Arguments:
// Func (Function): Function to be debounced.
//
// Delay (Integer): Function execution threshold in milliseconds.
//
// Immediate (Bool): Whether the function should be called at the beginning
// of the delay instead of the end. Default is false.
//
// Returns:
// Lazy_function (Function): Function with debouncing applied.
      debounce : function (func, delay, immediate) {
        // Classic debounce (after _.debounce): `func` runs only once calls
        // have stopped for `delay` ms; with `immediate`, it runs on the
        // leading edge instead and the trailing call is suppressed.
        var timeout, result;
        return function () {
          var context = this, args = arguments;
          var later = function () {
            timeout = null;
            if (!immediate) {
              result = func.apply(context, args);
            }
          };
          var callNow = immediate && !timeout;
          // Every call resets the countdown.
          clearTimeout(timeout);
          timeout = setTimeout(later, delay);
          if (callNow) {
            result = func.apply(context, args);
          }
          return result;
        };
      },
// Description:
// Parses data-options attribute
//
// Arguments:
// El (jQuery Object): Element to be parsed.
//
// Returns:
// Options (Javascript Object): Contents of the element's data-options
// attribute.
      data_options : function (el, data_attr_name) {
        // Parse an element's data-options attribute (semicolon-separated
        // key:value pairs) into an object, coercing 'true'/'false' and
        // numeric strings to their real types. If jQuery has already
        // materialized the data as an object, return it as-is.
        data_attr_name = data_attr_name || 'options';
        var opts = {}, ii, p, opts_arr,
            data_options = function (el) {
              // Respect the configured global attribute namespace, if any.
              var namespace = Foundation.global.namespace;
              if (namespace.length > 0) {
                return el.data(namespace + '-' + data_attr_name);
              }
              return el.data(data_attr_name);
            };
        var cached_options = data_options(el);
        if (typeof cached_options === 'object') {
          return cached_options;
        }
        // The ':' fallback yields one unusable pair, i.e. an empty result.
        opts_arr = (cached_options || ':').split(';');
        ii = opts_arr.length;
        function isNumber (o) {
          return !isNaN (o - 0) && o !== null && o !== '' && o !== false && o !== true;
        }
        function trim (str) {
          if (typeof str === 'string') {
            return $.trim(str);
          }
          return str;
        }
        while (ii--) {
          p = opts_arr[ii].split(':');
          // Re-join the tail so values may themselves contain ':' (e.g. URLs).
          p = [p[0], p.slice(1).join(':')];
          if (/true/i.test(p[1])) {
            p[1] = true;
          }
          if (/false/i.test(p[1])) {
            p[1] = false;
          }
          if (isNumber(p[1])) {
            if (p[1].indexOf('.') === -1) {
              p[1] = parseInt(p[1], 10);
            } else {
              p[1] = parseFloat(p[1]);
            }
          }
          if (p.length === 2 && p[0].length > 0) {
            opts[trim(p[0])] = trim(p[1]);
          }
        }
        return opts;
      },
// Description:
// Adds JS-recognizable media queries
//
// Arguments:
// Media (String): Key string for the media query to be stored as in
// Foundation.media_queries
//
// Class (String): Class name for the generated <meta> tag
register_media : function (media, media_class) {
if (Foundation.media_queries[media] === undefined) {
$('head').append('<meta class="' + media_class + '"/>');
Foundation.media_queries[media] = removeQuotes($('.' + media_class).css('font-family'));
}
},
// Description:
// Add custom CSS within a JS-defined media query
//
// Arguments:
// Rule (String): CSS rule to be appended to the document.
//
// Media (String): Optional media query string for the CSS rule to be
// nested under.
add_custom_rule : function (rule, media) {
if (media === undefined && Foundation.stylesheet) {
Foundation.stylesheet.insertRule(rule, Foundation.stylesheet.cssRules.length);
} else {
var query = Foundation.media_queries[media];
if (query !== undefined) {
Foundation.stylesheet.insertRule('@media ' +
Foundation.media_queries[media] + '{ ' + rule + ' }', Foundation.stylesheet.cssRules.length);
}
}
},
// Description:
// Performs a callback function when an image is fully loaded
//
// Arguments:
// Image (jQuery Object): Image(s) to check if loaded.
//
// Callback (Function): Function to execute when image is fully loaded.
image_loaded : function (images, callback) {
var self = this,
unloaded = images.length;
function pictures_has_height(images) {
var pictures_number = images.length;
for (var i = pictures_number - 1; i >= 0; i--) {
if(images.attr('height') === undefined) {
return false;
};
};
return true;
}
if (unloaded === 0 || pictures_has_height(images)) {
callback(images);
}
images.each(function () {
single_image_loaded(self.S(this), function () {
unloaded -= 1;
if (unloaded === 0) {
callback(images);
}
});
});
},
// Description:
// Returns a random, alphanumeric string
//
// Arguments:
// Length (Integer): Length of string to be generated. Defaults to random
// integer.
//
// Returns:
// Rand (String): Pseudo-random, alphanumeric string.
random_str : function () {
if (!this.fidx) {
this.fidx = 0;
}
this.prefix = this.prefix || [(this.name || 'F'), (+new Date).toString(36)].join('-');
return this.prefix + (this.fidx++).toString(36);
},
// Description:
// Helper for window.matchMedia
//
// Arguments:
// mq (String): Media query
//
// Returns:
// (Boolean): Whether the media query passes or not
      match : function (mq) {
        // True when the given media query currently matches.
        return window.matchMedia(mq).matches;
      },
      // "up" helpers: true at the named breakpoint and above.
      is_small_up : function () {
        return this.match(Foundation.media_queries.small);
      },
      is_medium_up : function () {
        return this.match(Foundation.media_queries.medium);
      },
      is_large_up : function () {
        return this.match(Foundation.media_queries.large);
      },
      is_xlarge_up : function () {
        return this.match(Foundation.media_queries.xlarge);
      },
      is_xxlarge_up : function () {
        return this.match(Foundation.media_queries.xxlarge);
      },
      // "only" helpers: true exclusively within the named breakpoint band,
      // derived by combining the cumulative "up" checks above.
      is_small_only : function () {
        return !this.is_medium_up() && !this.is_large_up() && !this.is_xlarge_up() && !this.is_xxlarge_up();
      },
      is_medium_only : function () {
        return this.is_medium_up() && !this.is_large_up() && !this.is_xlarge_up() && !this.is_xxlarge_up();
      },
      is_large_only : function () {
        return this.is_medium_up() && this.is_large_up() && !this.is_xlarge_up() && !this.is_xxlarge_up();
      },
      is_xlarge_only : function () {
        return this.is_medium_up() && this.is_large_up() && this.is_xlarge_up() && !this.is_xxlarge_up();
      },
      is_xxlarge_only : function () {
        return this.is_medium_up() && this.is_large_up() && this.is_xlarge_up() && this.is_xxlarge_up();
      }
}
};
$.fn.foundation = function () {
var args = Array.prototype.slice.call(arguments, 0);
return this.each(function () {
Foundation.init.apply(Foundation, [this].concat(args));
return this;
});
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.js | foundation.js |
;(function ($, window, document, undefined) {
'use strict';
var Modernizr = Modernizr || false;
  Foundation.libs.joyride = {
    name : 'joyride',
    version : '5.5.3',
    // Per-ride defaults; any of these may be overridden via init options or
    // per-tip data-options.
    defaults : {
      expose : false, // turn on or off the expose feature
      modal : true, // Whether to cover page with modal during the tour
      keyboard : true, // enable left, right and esc keystrokes
      tip_location : 'bottom', // 'top', 'bottom', 'left' or 'right' in relation to parent
      nub_position : 'auto', // override on a per tooltip bases
      scroll_speed : 1500, // Page scrolling speed in milliseconds, 0 = no scroll animation
      scroll_animation : 'linear', // supports 'swing' and 'linear', extend with jQuery UI.
      timer : 0, // 0 = no timer , all other numbers = timer in milliseconds
      start_timer_on_click : true, // true or false - true requires clicking the first button start the timer
      start_offset : 0, // the index of the tooltip you want to start on (index of the li)
      next_button : true, // true or false to control whether a next button is used
      prev_button : true, // true or false to control whether a prev button is used
      tip_animation : 'fade', // 'pop' or 'fade' in each tip
      pause_after : [], // array of indexes where to pause the tour after
      exposed : [], // array of expose elements
      tip_animation_fade_speed : 300, // when tipAnimation = 'fade' this is speed in milliseconds for the transition
      cookie_monster : false, // true or false to control whether cookies are used
      cookie_name : 'joyride', // Name the cookie you'll use
      cookie_domain : false, // Will this cookie be attached to a domain, ie. '.notableapp.com'
      cookie_expires : 365, // set when you would like the cookie to expire.
      tip_container : 'body', // Where will the tip be attached
      abort_on_close : true, // When true, the close event will not fire any callback
      // Fallback placements tried, in order, when a tip does not fit.
      tip_location_patterns : {
        top : ['bottom'],
        bottom : [], // bottom should not need to be repositioned
        left : ['right', 'top', 'bottom'],
        right : ['left', 'top', 'bottom']
      },
      post_ride_callback : function () {}, // A method to call once the tour closes (canceled or complete)
      post_step_callback : function () {}, // A method to call after each step
      pre_step_callback : function () {}, // A method to call before each step
      pre_ride_callback : function () {}, // A method to call before the tour starts (passed index, tip, and cloned exposed element)
      post_expose_callback : function () {}, // A method to call after an element has been exposed
      template : { // HTML segments for tip layout
        link : '<a href="#close" class="joyride-close-tip">×</a>',
        timer : '<div class="joyride-timer-indicator-wrap"><span class="joyride-timer-indicator"></span></div>',
        tip : '<div class="joyride-tip-guide"><span class="joyride-nub"></span></div>',
        wrapper : '<div class="joyride-content-wrapper"></div>',
        button : '<a href="#" class="small button joyride-next-tip"></a>',
        prev_button : '<a href="#" class="small button joyride-prev-tip"></a>',
        modal : '<div class="joyride-modal-bg"></div>',
        expose : '<div class="joyride-expose-wrapper"></div>',
        expose_cover : '<div class="joyride-expose-cover"></div>'
      },
      expose_add_class : '' // One or more space-separated class names to be added to exposed element
    },
init : function (scope, method, options) {
Foundation.inherit(this, 'throttle random_str');
this.settings = this.settings || $.extend({}, this.defaults, (options || method));
this.bindings(method, options)
},
go_next : function () {
if (this.settings.$li.next().length < 1) {
this.end();
} else if (this.settings.timer > 0) {
clearTimeout(this.settings.automate);
this.hide();
this.show();
this.startTimer();
} else {
this.hide();
this.show();
}
},
go_prev : function () {
if (this.settings.$li.prev().length < 1) {
// Do nothing if there are no prev element
} else if (this.settings.timer > 0) {
clearTimeout(this.settings.automate);
this.hide();
this.show(null, true);
this.startTimer();
} else {
this.hide();
this.show(null, true);
}
},
    events : function () {
      // Wire up tour navigation: clicks on next/prev/close controls, keyboard
      // arrows/escape, and a throttled window-resize repositioner.
      var self = this;
      $(this.scope)
        .off('.joyride')
        .on('click.fndtn.joyride', '.joyride-next-tip, .joyride-modal-bg', function (e) {
          e.preventDefault();
          this.go_next()
        }.bind(this))
        .on('click.fndtn.joyride', '.joyride-prev-tip', function (e) {
          e.preventDefault();
          this.go_prev();
        }.bind(this))
        .on('click.fndtn.joyride', '.joyride-close-tip', function (e) {
          e.preventDefault();
          this.end(this.settings.abort_on_close);
        }.bind(this))
        .on('keyup.fndtn.joyride', function (e) {
          // Don't do anything if keystrokes are disabled
          // or if the joyride is not being shown
          if (!this.settings.keyboard || !this.settings.riding) {
            return;
          }
          switch (e.which) {
            case 39: // right arrow
              e.preventDefault();
              this.go_next();
              break;
            case 37: // left arrow
              e.preventDefault();
              this.go_prev();
              break;
            case 27: // escape
              e.preventDefault();
              this.end(this.settings.abort_on_close);
          }
        }.bind(this));
      $(window)
        .off('.joyride')
        .on('resize.fndtn.joyride', self.throttle(function () {
          // On resize, re-expose any exposed elements and reposition the tip.
          if ($('[' + self.attr_name() + ']').length > 0 && self.settings.$next_tip && self.settings.riding) {
            if (self.settings.exposed.length > 0) {
              var $els = $(self.settings.exposed);
              $els.each(function () {
                var $this = $(this);
                self.un_expose($this);
                self.expose($this);
              });
            }
            if (self.is_phone()) {
              self.pos_phone();
            } else {
              self.pos_default(false);
            }
          }
        }, 100));
    },
    start : function () {
      // Build every tip from the tour's <li> definitions and show the first
      // one, unless a cookie records that this ride was already completed.
      var self = this,
          $this = $('[' + this.attr_name() + ']', this.scope),
          integer_settings = ['timer', 'scrollSpeed', 'startOffset', 'tipAnimationFadeSpeed', 'cookieExpires'],
          int_settings_count = integer_settings.length;
      // NOTE(review): `!$this.length > 0` compares a boolean with 0; it
      // happens to behave like `$this.length === 0`, but confirm before "fixing".
      if (!$this.length > 0) {
        return;
      }
      if (!this.settings.init) {
        this.events();
      }
      this.settings = $this.data(this.attr_name(true) + '-init');
      // non configureable settings
      this.settings.$content_el = $this;
      this.settings.$body = $(this.settings.tip_container);
      this.settings.body_offset = $(this.settings.tip_container).position();
      this.settings.$tip_content = this.settings.$content_el.find('> li');
      this.settings.paused = false;
      this.settings.attempts = 0;
      this.settings.riding = true;
      // can we create cookies?
      if (typeof $.cookie !== 'function') {
        this.settings.cookie_monster = false;
      }
      // generate the tips and insert into dom.
      if (!this.settings.cookie_monster || this.settings.cookie_monster && !$.cookie(this.settings.cookie_name)) {
        this.settings.$tip_content.each(function (index) {
          var $this = $(this);
          // NOTE(review): inside this callback `this` is a DOM element, so
          // this line stores a settings object on the element rather than on
          // the library - verify the intent before changing.
          this.settings = $.extend({}, self.defaults, self.data_options($this));
          // Make sure that settings parsed from data_options are integers where necessary
          var i = int_settings_count;
          while (i--) {
            self.settings[integer_settings[i]] = parseInt(self.settings[integer_settings[i]], 10);
          }
          self.create({$li : $this, index : index});
        });
        // show first tip
        if (!this.settings.start_timer_on_click && this.settings.timer > 0) {
          this.show('init');
          this.startTimer();
        } else {
          this.show('init');
        }
      }
    },
    resume : function () {
      // Continue a paused tour from the current step.
      this.set_li();
      this.show();
    },
    tip_template : function (opts) {
      // Assemble one tip's DOM node: tip shell + content + prev/next buttons
      // + close link + optional timer indicator. Returns the raw element.
      var $blank, content;
      opts.tip_class = opts.tip_class || '';
      $blank = $(this.settings.template.tip).addClass(opts.tip_class);
      content = $.trim($(opts.li).html()) +
        this.prev_button_text(opts.prev_button_text, opts.index) +
        this.button_text(opts.button_text) +
        this.settings.template.link +
        this.timer_instance(opts.index);
      $blank.append($(this.settings.template.wrapper));
      // The tip carries its tour index so it can be matched to its <li>.
      $blank.first().attr(this.add_namespace('data-index'), opts.index);
      $('.joyride-content-wrapper', $blank).append(content);
      return $blank[0];
    },
timer_instance : function (index) {
var txt;
if ((index === 0 && this.settings.start_timer_on_click && this.settings.timer > 0) || this.settings.timer === 0) {
txt = '';
} else {
txt = $(this.settings.template.timer)[0].outerHTML;
}
return txt;
},
button_text : function (txt) {
if (this.settings.tip_settings.next_button) {
txt = $.trim(txt) || 'Next';
txt = $(this.settings.template.button).append(txt)[0].outerHTML;
} else {
txt = '';
}
return txt;
},
prev_button_text : function (txt, idx) {
if (this.settings.tip_settings.prev_button) {
txt = $.trim(txt) || 'Previous';
// Add the disabled class to the button if it's the first element
if (idx == 0) {
txt = $(this.settings.template.prev_button).append(txt).addClass('disabled')[0].outerHTML;
} else {
txt = $(this.settings.template.prev_button).append(txt)[0].outerHTML;
}
} else {
txt = '';
}
return txt;
},
    create : function (opts) {
      // Render a single tip (index + source <li>) and append it to the
      // configured tip container. Per-tip data-options override ride settings.
      this.settings.tip_settings = $.extend({}, this.settings, this.data_options(opts.$li));
      var buttonText = opts.$li.attr(this.add_namespace('data-button')) || opts.$li.attr(this.add_namespace('data-text')),
          prevButtonText = opts.$li.attr(this.add_namespace('data-button-prev')) || opts.$li.attr(this.add_namespace('data-prev-text')),
          tipClass = opts.$li.attr('class'),
          $tip_content = $(this.tip_template({
            tip_class : tipClass,
            index : opts.index,
            button_text : buttonText,
            prev_button_text : prevButtonText,
            li : opts.$li
          }));
      $(this.settings.tip_container).append($tip_content);
    },
    show : function (init, is_prev) {
      // Display the current (or first, on init) tip: run callbacks, handle
      // modal/expose overlays, scroll the page, position and animate the tip.
      var $timer = null;
      // are we paused?
      if (this.settings.$li === undefined || ($.inArray(this.settings.$li.index(), this.settings.pause_after) === -1)) {
        // don't go to the next li if the tour was paused
        if (this.settings.paused) {
          this.settings.paused = false;
        } else {
          this.set_li(init, is_prev);
        }
        this.settings.attempts = 0;
        if (this.settings.$li.length && this.settings.$target.length > 0) {
          if (init) { //run when we first start
            this.settings.pre_ride_callback(this.settings.$li.index(), this.settings.$next_tip);
            if (this.settings.modal) {
              this.show_modal();
            }
          }
          this.settings.pre_step_callback(this.settings.$li.index(), this.settings.$next_tip);
          if (this.settings.modal && this.settings.expose) {
            this.expose();
          }
          // Per-tip settings: ride settings overridden by the tip's data-options.
          this.settings.tip_settings = $.extend({}, this.settings, this.data_options(this.settings.$li));
          this.settings.timer = parseInt(this.settings.timer, 10);
          this.settings.tip_settings.tip_location_pattern = this.settings.tip_location_patterns[this.settings.tip_settings.tip_location];
          // scroll and hide bg if not modal and not expose
          if (!/body/i.test(this.settings.$target.selector) && !this.settings.expose) {
            var joyridemodalbg = $('.joyride-modal-bg');
            // NOTE(review): `tipAnimation`/`tipAnimationFadeSpeed` (camelCase)
            // do not match the snake_case settings keys used everywhere else,
            // so these reads are likely undefined - confirm before changing.
            if (/pop/i.test(this.settings.tipAnimation)) {
              joyridemodalbg.hide();
            } else {
              joyridemodalbg.fadeOut(this.settings.tipAnimationFadeSpeed);
            }
            this.scroll_to();
          }
          if (this.is_phone()) {
            this.pos_phone(true);
          } else {
            this.pos_default(true);
          }
          $timer = this.settings.$next_tip.find('.joyride-timer-indicator');
          if (/pop/i.test(this.settings.tip_animation)) {
            $timer.width(0);
            if (this.settings.timer > 0) {
              this.settings.$next_tip.show();
              // Animate the timer bar across its track for the tip's duration.
              setTimeout(function () {
                $timer.animate({
                  width : $timer.parent().width()
                }, this.settings.timer, 'linear');
              }.bind(this), this.settings.tip_animation_fade_speed);
            } else {
              this.settings.$next_tip.show();
            }
          } else if (/fade/i.test(this.settings.tip_animation)) {
            $timer.width(0);
            if (this.settings.timer > 0) {
              this.settings.$next_tip
                .fadeIn(this.settings.tip_animation_fade_speed)
                .show();
              setTimeout(function () {
                $timer.animate({
                  width : $timer.parent().width()
                }, this.settings.timer, 'linear');
              }.bind(this), this.settings.tip_animation_fade_speed);
            } else {
              this.settings.$next_tip.fadeIn(this.settings.tip_animation_fade_speed);
            }
          }
          this.settings.$current_tip = this.settings.$next_tip;
          // skip non-existant targets
        } else if (this.settings.$li && this.settings.$target.length < 1) {
          // Target missing from the page: advance to the next tip.
          this.show(init, is_prev);
        } else {
          this.end();
        }
      } else {
        this.settings.paused = true;
      }
    },
is_phone : function () {
return matchMedia(Foundation.media_queries.small).matches &&
!matchMedia(Foundation.media_queries.medium).matches;
},
    hide : function () {
      // Hide the current tip (and any expose overlay), then fire the
      // post-step callback.
      if (this.settings.modal && this.settings.expose) {
        this.un_expose();
      }
      // NOTE(review): hides the modal background only when modal mode is OFF;
      // the condition reads inverted but matches the shipped behavior - verify.
      if (!this.settings.modal) {
        $('.joyride-modal-bg').hide();
      }
      // Prevent scroll bouncing...wait to remove from layout
      this.settings.$current_tip.css('visibility', 'hidden');
      setTimeout($.proxy(function () {
        this.hide();
        this.css('visibility', 'visible');
      }, this.settings.$current_tip), 0);
      this.settings.post_step_callback(this.settings.$li.index(),
        this.settings.$current_tip);
    },
set_li : function (init, is_prev) {
if (init) {
this.settings.$li = this.settings.$tip_content.eq(this.settings.start_offset);
this.set_next_tip();
this.settings.$current_tip = this.settings.$next_tip;
} else {
if (is_prev) {
this.settings.$li = this.settings.$li.prev();
} else {
this.settings.$li = this.settings.$li.next();
}
this.set_next_tip();
}
this.set_target();
},
set_next_tip : function () {
this.settings.$next_tip = $('.joyride-tip-guide').eq(this.settings.$li.index());
this.settings.$next_tip.data('closed', '');
},
set_target : function () {
var cl = this.settings.$li.attr(this.add_namespace('data-class')),
id = this.settings.$li.attr(this.add_namespace('data-id')),
$sel = function () {
if (id) {
return $(document.getElementById(id));
} else if (cl) {
return $('.' + cl).first();
} else {
return $('body');
}
};
this.settings.$target = $sel();
},
scroll_to : function () {
var window_half, tipOffset;
window_half = $(window).height() / 2;
tipOffset = Math.ceil(this.settings.$target.offset().top - window_half + this.settings.$next_tip.outerHeight());
if (tipOffset != 0) {
$('html, body').stop().animate({
scrollTop : tipOffset
}, this.settings.scroll_speed, 'swing');
}
},
    paused : function () {
      // True when the NEXT step is not listed in pause_after, i.e. the tour
      // may keep advancing. Note the inverted sense relative to the name.
      return ($.inArray((this.settings.$li.index() + 1), this.settings.pause_after) === -1);
    },
    restart : function () {
      // Start the tour over: hide the current tip, clear the position marker,
      // and show from the first step again.
      this.hide();
      this.settings.$li = undefined;
      this.show('init');
    },
pos_default : function (init) {
  // Position the upcoming tip next to its target using the configured
  // tip_location (bottom/top/right/left), then fall through
  // tip_location_pattern until the tip is fully visible on screen.
  var $nub = this.settings.$next_tip.find('.joyride-nub'),
      nub_width = Math.ceil($nub.outerWidth() / 2),
      nub_height = Math.ceil($nub.outerHeight() / 2),
      toggle = init || false;
  // tip must not be "display: none" to calculate position
  if (toggle) {
    this.settings.$next_tip.css('visibility', 'hidden');
    this.settings.$next_tip.show();
  }
  // A <body> target means this stop has no anchor element: center it as a modal.
  if (!/body/i.test(this.settings.$target.selector)) {
    // Optional per-tip pixel nudges supplied via tip_settings.
    var topAdjustment = this.settings.tip_settings.tipAdjustmentY ? parseInt(this.settings.tip_settings.tipAdjustmentY) : 0,
        leftAdjustment = this.settings.tip_settings.tipAdjustmentX ? parseInt(this.settings.tip_settings.tipAdjustmentX) : 0;
    if (this.bottom()) {
      if (this.rtl) {
        this.settings.$next_tip.css({
          top : (this.settings.$target.offset().top + nub_height + this.settings.$target.outerHeight() + topAdjustment),
          left : this.settings.$target.offset().left + this.settings.$target.outerWidth() - this.settings.$next_tip.outerWidth() + leftAdjustment});
      } else {
        this.settings.$next_tip.css({
          top : (this.settings.$target.offset().top + nub_height + this.settings.$target.outerHeight() + topAdjustment),
          left : this.settings.$target.offset().left + leftAdjustment});
      }
      this.nub_position($nub, this.settings.tip_settings.nub_position, 'top');
    } else if (this.top()) {
      if (this.rtl) {
        this.settings.$next_tip.css({
          top : (this.settings.$target.offset().top - this.settings.$next_tip.outerHeight() - nub_height + topAdjustment),
          left : this.settings.$target.offset().left + this.settings.$target.outerWidth() - this.settings.$next_tip.outerWidth()});
      } else {
        this.settings.$next_tip.css({
          top : (this.settings.$target.offset().top - this.settings.$next_tip.outerHeight() - nub_height + topAdjustment),
          left : this.settings.$target.offset().left + leftAdjustment});
      }
      this.nub_position($nub, this.settings.tip_settings.nub_position, 'bottom');
    } else if (this.right()) {
      this.settings.$next_tip.css({
        top : this.settings.$target.offset().top + topAdjustment,
        left : (this.settings.$target.outerWidth() + this.settings.$target.offset().left + nub_width + leftAdjustment)});
      this.nub_position($nub, this.settings.tip_settings.nub_position, 'left');
    } else if (this.left()) {
      this.settings.$next_tip.css({
        top : this.settings.$target.offset().top + topAdjustment,
        left : (this.settings.$target.offset().left - this.settings.$next_tip.outerWidth() - nub_width + leftAdjustment)});
      this.nub_position($nub, this.settings.tip_settings.nub_position, 'right');
    }
    // If any corner landed off-screen, retry recursively with the next
    // location from tip_location_pattern (bounded by the pattern length).
    if (!this.visible(this.corners(this.settings.$next_tip)) && this.settings.attempts < this.settings.tip_settings.tip_location_pattern.length) {
      $nub.removeClass('bottom')
          .removeClass('top')
          .removeClass('right')
          .removeClass('left');
      this.settings.tip_settings.tip_location = this.settings.tip_settings.tip_location_pattern[this.settings.attempts];
      this.settings.attempts++;
      this.pos_default();
    }
  } else if (this.settings.$li.length) {
    this.pos_modal($nub);
  }
  // Restore display state adjusted above for measurement.
  if (toggle) {
    this.settings.$next_tip.hide();
    this.settings.$next_tip.css('visibility', 'visible');
  }
},
pos_phone : function (init) {
  // Small-screen positioning: tips only go above or below the target,
  // with the nub pointing at it; <body> targets get the modal layout.
  var tip_height = this.settings.$next_tip.outerHeight(),
      tip_offset = this.settings.$next_tip.offset(),
      target_height = this.settings.$target.outerHeight(),
      $nub = $('.joyride-nub', this.settings.$next_tip),
      nub_height = Math.ceil($nub.outerHeight() / 2),
      toggle = init || false;
  $nub.removeClass('bottom')
      .removeClass('top')
      .removeClass('right')
      .removeClass('left');
  // Tip must be rendered (but invisible) so outerHeight/offset are real.
  if (toggle) {
    this.settings.$next_tip.css('visibility', 'hidden');
    this.settings.$next_tip.show();
  }
  if (!/body/i.test(this.settings.$target.selector)) {
    if (this.top()) {
      this.settings.$next_tip.offset({top : this.settings.$target.offset().top - tip_height - nub_height});
      $nub.addClass('bottom');
    } else {
      this.settings.$next_tip.offset({top : this.settings.$target.offset().top + target_height + nub_height});
      $nub.addClass('top');
    }
  } else if (this.settings.$li.length) {
    this.pos_modal($nub);
  }
  if (toggle) {
    this.settings.$next_tip.hide();
    this.settings.$next_tip.css('visibility', 'visible');
  }
},
pos_modal : function ($nub) {
this.center();
$nub.hide();
this.show_modal();
},
show_modal : function () {
if (!this.settings.$next_tip.data('closed')) {
var joyridemodalbg = $('.joyride-modal-bg');
if (joyridemodalbg.length < 1) {
var joyridemodalbg = $(this.settings.template.modal);
joyridemodalbg.appendTo('body');
}
if (/pop/i.test(this.settings.tip_animation)) {
joyridemodalbg.show();
} else {
joyridemodalbg.fadeIn(this.settings.tip_animation_fade_speed);
}
}
},
expose : function () {
  // Visually "expose" the current target (or an explicitly passed jQuery
  // element) by wrapping it with the expose template and a click-blocking
  // cover, raising its z-index above the modal overlay. The element's
  // original css/class are stashed in jQuery data for un_expose().
  var expose,
      exposeCover,
      el,
      origCSS,
      origClasses,
      randId = 'expose-' + this.random_str(6);
  if (arguments.length > 0 && arguments[0] instanceof $) {
    el = arguments[0];
  } else if (this.settings.$target && !/body/i.test(this.settings.$target.selector)) {
    el = this.settings.$target;
  } else {
    return false;
  }
  if (el.length < 1) {
    if (window.console) {
      console.error('element not valid', el);
    }
    return false;
  }
  expose = $(this.settings.template.expose);
  this.settings.$body.append(expose);
  // Size the expose wrapper exactly over the element (margins included).
  expose.css({
    top : el.offset().top,
    left : el.offset().left,
    width : el.outerWidth(true),
    height : el.outerHeight(true)
  });
  exposeCover = $(this.settings.template.expose_cover);
  // Remember original styling so un_expose() can restore it.
  origCSS = {
    zIndex : el.css('z-index'),
    position : el.css('position')
  };
  origClasses = el.attr('class') == null ? '' : el.attr('class');
  el.css('z-index', parseInt(expose.css('z-index')) + 1);
  // Static elements need a positioning context for z-index to apply.
  if (origCSS.position == 'static') {
    el.css('position', 'relative');
  }
  el.data('expose-css', origCSS);
  el.data('orig-class', origClasses);
  el.attr('class', origClasses + ' ' + this.settings.expose_add_class);
  exposeCover.css({
    top : el.offset().top,
    left : el.offset().left,
    width : el.outerWidth(true),
    height : el.outerHeight(true)
  });
  if (this.settings.modal) {
    this.show_modal();
  }
  this.settings.$body.append(exposeCover);
  // Tag wrapper, cover and element with a shared random id for teardown.
  expose.addClass(randId);
  exposeCover.addClass(randId);
  el.data('expose', randId);
  this.settings.post_expose_callback(this.settings.$li.index(), this.settings.$next_tip, el);
  this.add_exposed(el);
},
un_expose : function () {
var exposeId,
el,
expose,
origCSS,
origClasses,
clearAll = false;
if (arguments.length > 0 && arguments[0] instanceof $) {
el = arguments[0];
} else if (this.settings.$target && !/body/i.test(this.settings.$target.selector)) {
el = this.settings.$target;
} else {
return false;
}
if (el.length < 1) {
if (window.console) {
console.error('element not valid', el);
}
return false;
}
exposeId = el.data('expose');
expose = $('.' + exposeId);
if (arguments.length > 1) {
clearAll = arguments[1];
}
if (clearAll === true) {
$('.joyride-expose-wrapper,.joyride-expose-cover').remove();
} else {
expose.remove();
}
origCSS = el.data('expose-css');
if (origCSS.zIndex == 'auto') {
el.css('z-index', '');
} else {
el.css('z-index', origCSS.zIndex);
}
if (origCSS.position != el.css('position')) {
if (origCSS.position == 'static') {// this is default, no need to set it.
el.css('position', '');
} else {
el.css('position', origCSS.position);
}
}
origClasses = el.data('orig-class');
el.attr('class', origClasses);
el.removeData('orig-classes');
el.removeData('expose');
el.removeData('expose-z-index');
this.remove_exposed(el);
},
add_exposed : function (el) {
this.settings.exposed = this.settings.exposed || [];
if (el instanceof $ || typeof el === 'object') {
this.settings.exposed.push(el[0]);
} else if (typeof el == 'string') {
this.settings.exposed.push(el);
}
},
remove_exposed : function (el) {
var search, i;
if (el instanceof $) {
search = el[0]
} else if (typeof el == 'string') {
search = el;
}
this.settings.exposed = this.settings.exposed || [];
i = this.settings.exposed.length;
while (i--) {
if (this.settings.exposed[i] == search) {
this.settings.exposed.splice(i, 1);
return;
}
}
},
center : function () {
  // Center the upcoming tip in the current viewport (scroll-aware).
  var $w = $(window);
  this.settings.$next_tip.css({
    top : ((($w.height() - this.settings.$next_tip.outerHeight()) / 2) + $w.scrollTop()),
    left : ((($w.width() - this.settings.$next_tip.outerWidth()) / 2) + $w.scrollLeft())
  });
  return true;
},
// Predicates for the configured tip_location string.
bottom : function () {
  return /bottom/i.test(this.settings.tip_settings.tip_location);
},
top : function () {
  return /top/i.test(this.settings.tip_settings.tip_location);
},
right : function () {
  return /right/i.test(this.settings.tip_settings.tip_location);
},
left : function () {
  return /left/i.test(this.settings.tip_settings.tip_location);
},
corners : function (el) {
  // Report which edges of `el` fall outside the (scroll-anticipated)
  // viewport as [top, right, bottom, left] booleans — true means hidden.
  if (el.length === 0) {
    return [false, false, false, false];
  }
  var w = $(window),
      window_half = w.height() / 2,
      //using this to calculate since scroll may not have finished yet.
      tipOffset = Math.ceil(this.settings.$target.offset().top - window_half + this.settings.$next_tip.outerHeight()),
      right = w.width() + w.scrollLeft(),
      offsetBottom = w.height() + tipOffset,
      bottom = w.height() + w.scrollTop(),
      top = w.scrollTop();
  // Widen the top/bottom bounds toward where the scroll will end up.
  if (tipOffset < top) {
    if (tipOffset < 0) {
      top = 0;
    } else {
      top = tipOffset;
    }
  }
  if (offsetBottom > bottom) {
    bottom = offsetBottom;
  }
  return [
    el.offset().top < top,
    right < el.offset().left + el.outerWidth(),
    bottom < el.offset().top + el.outerHeight(),
    w.scrollLeft() > el.offset().left
  ];
},
visible : function (hidden_corners) {
var i = hidden_corners.length;
while (i--) {
if (hidden_corners[i]) {
return false;
}
}
return true;
},
nub_position : function (nub, pos, def) {
  // Point the nub: 'auto' uses the default side computed by the caller,
  // anything else is an explicit side class from tip_settings.
  if (pos === 'auto') {
    nub.addClass(def);
  } else {
    nub.addClass(pos);
  }
},
startTimer : function () {
  // Auto-advance: re-show the next tip after `timer` ms while stops
  // remain, then reschedule; clears the pending timeout at the end.
  if (this.settings.$li.length) {
    this.settings.automate = setTimeout(function () {
      this.hide();
      this.show();
      this.startTimer();
    }.bind(this), this.settings.timer);
  } else {
    clearTimeout(this.settings.automate);
  }
},
end : function (abort) {
if (this.settings.cookie_monster) {
$.cookie(this.settings.cookie_name, 'ridden', {expires : this.settings.cookie_expires, domain : this.settings.cookie_domain});
}
if (this.settings.timer > 0) {
clearTimeout(this.settings.automate);
}
if (this.settings.modal && this.settings.expose) {
this.un_expose();
}
// Unplug keystrokes listener
$(this.scope).off('keyup.joyride')
this.settings.$next_tip.data('closed', true);
this.settings.riding = false;
$('.joyride-modal-bg').hide();
this.settings.$current_tip.hide();
if (typeof abort === 'undefined' || abort === false) {
this.settings.post_step_callback(this.settings.$li.index(), this.settings.$current_tip);
this.settings.post_ride_callback(this.settings.$li.index(), this.settings.$current_tip);
}
$('.joyride-tip-guide').remove();
},
off : function () {
  // Unbind every joyride handler and remove generated DOM; used when the
  // plugin is torn down entirely.
  $(this.scope).off('.joyride');
  $(window).off('.joyride');
  $('.joyride-close-tip, .joyride-next-tip, .joyride-modal-bg').off('.joyride');
  $('.joyride-tip-guide, .joyride-modal-bg').remove();
  clearTimeout(this.settings.automate);
},
reflow : function () {} // intentional no-op: joyride repositions on show()
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.joyride.js | foundation.joyride.js |
;(function ($, window, document, undefined) {
  'use strict';
  // Magellan: a sticky navigation ("expedition") whose links ("arrivals")
  // highlight as their destinations scroll into view.
  Foundation.libs['magellan-expedition'] = {
    name : 'magellan-expedition',
    version : '5.5.3',
    settings : {
      active_class : 'active',
      threshold : 0, // pixels from the top of the expedition for it to become fixed
      destination_threshold : 20, // pixels from the top of destination for it to be considered active
      throttle_delay : 30, // calculation throttling to increase framerate
      fixed_top : 0, // top distance in pixels assigned to the fixed element on scroll
      offset_by_height : true, // whether to offset the destination by the expedition height. Usually you want this to be true, unless your expedition is on the side.
      duration : 700, // animation duration time
      easing : 'swing' // animation easing
    },
    init : function (scope, method, options) {
      // Pull in the shared throttle helper, then wire bindings/events.
      Foundation.inherit(this, 'throttle');
      this.bindings(method, options);
    },
events : function () {
var self = this,
S = self.S,
settings = self.settings;
// initialize expedition offset
self.set_expedition_position();
S(self.scope)
.off('.magellan')
.on('click.fndtn.magellan', '[' + self.add_namespace('data-magellan-arrival') + '] a[href*=#]', function (e) {
var sameHost = ((this.hostname === location.hostname) || !this.hostname),
samePath = self.filterPathname(location.pathname) === self.filterPathname(this.pathname),
testHash = this.hash.replace(/(:|\.|\/)/g, '\\$1'),
anchor = this;
if (sameHost && samePath && testHash) {
e.preventDefault();
var expedition = $(this).closest('[' + self.attr_name() + ']'),
settings = expedition.data('magellan-expedition-init'),
hash = this.hash.split('#').join(''),
target = $('a[name="' + hash + '"]');
if (target.length === 0) {
target = $('#' + hash);
}
// Account for expedition height if fixed position
var scroll_top = target.offset().top - settings.destination_threshold + 1;
if (settings.offset_by_height) {
scroll_top = scroll_top - expedition.outerHeight();
}
$('html, body').stop().animate({
'scrollTop' : scroll_top
}, settings.duration, settings.easing, function () {
if (history.pushState) {
history.pushState(null, null, anchor.pathname + anchor.search + '#' + hash);
} else {
location.hash = anchor.pathname + anchor.search + '#' + hash;
}
});
}
})
.on('scroll.fndtn.magellan', self.throttle(this.check_for_arrivals.bind(this), settings.throttle_delay));
},
check_for_arrivals : function () {
var self = this;
self.update_arrivals();
self.update_expedition_positions();
},
set_expedition_position : function () {
var self = this;
$('[' + this.attr_name() + '=fixed]', self.scope).each(function (idx, el) {
var expedition = $(this),
settings = expedition.data('magellan-expedition-init'),
styles = expedition.attr('styles'), // save styles
top_offset, fixed_top;
expedition.attr('style', '');
top_offset = expedition.offset().top + settings.threshold;
//set fixed-top by attribute
fixed_top = parseInt(expedition.data('magellan-fixed-top'));
if (!isNaN(fixed_top)) {
self.settings.fixed_top = fixed_top;
}
expedition.data(self.data_attr('magellan-top-offset'), top_offset);
expedition.attr('style', styles);
});
},
update_expedition_positions : function () {
  // Pin each expedition once the page has scrolled past its recorded top
  // offset; a cloned placeholder keeps document height stable while fixed.
  var self = this,
      window_top_offset = $(window).scrollTop();
  $('[' + this.attr_name() + '=fixed]', self.scope).each(function () {
    var expedition = $(this),
        settings = expedition.data('magellan-expedition-init'),
        styles = expedition.attr('style'), // save styles
        top_offset = expedition.data('magellan-top-offset');
    //scroll to the top distance
    // NOTE(review): the threshold test uses self.settings.fixed_top but the
    // css below uses the per-element settings.fixed_top — confirm both are
    // meant to be the same value when data-magellan-fixed-top is set.
    if (window_top_offset + self.settings.fixed_top >= top_offset) {
      // Placeholder allows height calculations to be consistent even when
      // appearing to switch between fixed/non-fixed placement
      var placeholder = expedition.prev('[' + self.add_namespace('data-magellan-expedition-clone') + ']');
      if (placeholder.length === 0) {
        placeholder = expedition.clone();
        placeholder.removeAttr(self.attr_name());
        placeholder.attr(self.add_namespace('data-magellan-expedition-clone'), '');
        expedition.before(placeholder);
      }
      expedition.css({position :'fixed', top : settings.fixed_top}).addClass('fixed');
    } else {
      expedition.prev('[' + self.add_namespace('data-magellan-expedition-clone') + ']').remove();
      expedition.attr('style', styles).css('position', '').css('top', '').removeClass('fixed');
    }
  });
},
update_arrivals : function () {
  // Mark the deepest destination already scrolled past as the active
  // arrival; clear all active classes when none has been reached.
  var self = this,
      window_top_offset = $(window).scrollTop();
  $('[' + this.attr_name() + ']', self.scope).each(function () {
    var expedition = $(this),
        settings = expedition.data(self.attr_name(true) + '-init'),
        offsets = self.offsets(expedition, window_top_offset),
        arrivals = expedition.find('[' + self.add_namespace('data-magellan-arrival') + ']'),
        active_item = false;
    // offsets is sorted ascending, so the last passing item wins.
    offsets.each(function (idx, item) {
      if (item.viewport_offset >= item.top_offset) {
        var arrivals = expedition.find('[' + self.add_namespace('data-magellan-arrival') + ']');
        arrivals.not(item.arrival).removeClass(settings.active_class);
        item.arrival.addClass(settings.active_class);
        active_item = true;
        return true;
      }
    });
    if (!active_item) {
      arrivals.removeClass(settings.active_class);
    }
  });
},
offsets : function (expedition, window_offset) {
  // For each arrival link, compute the scroll position at which its
  // destination is considered reached; results come back sorted ascending
  // by top_offset. Arrivals without a destination are skipped (jQuery.map
  // drops undefined returns).
  var self = this,
      settings = expedition.data(self.attr_name(true) + '-init'),
      viewport_offset = window_offset;
  return expedition.find('[' + self.add_namespace('data-magellan-arrival') + ']').map(function (idx, el) {
    var name = $(this).data(self.data_attr('magellan-arrival')),
        dest = $('[' + self.add_namespace('data-magellan-destination') + '=' + name + ']');
    if (dest.length > 0) {
      var top_offset = dest.offset().top - settings.destination_threshold;
      if (settings.offset_by_height) {
        top_offset = top_offset - expedition.outerHeight();
      }
      top_offset = Math.floor(top_offset);
      return {
        destination : dest,
        arrival : $(this),
        top_offset : top_offset,
        viewport_offset : viewport_offset
      }
    }
  }).sort(function (a, b) {
    if (a.top_offset < b.top_offset) {
      return -1;
    }
    if (a.top_offset > b.top_offset) {
      return 1;
    }
    return 0;
  });
},
data_attr : function (str) {
  // Prefix a jQuery-data key with the configured namespace, if any.
  if (this.namespace.length > 0) {
    return this.namespace + '-' + str;
  }
  return str;
},
off : function () {
  // Unbind all magellan handlers from the scope and the window.
  this.S(this.scope).off('.magellan');
  this.S(window).off('.magellan');
},
filterPathname : function (pathname) {
pathname = pathname || '';
return pathname
.replace(/^\//,'')
.replace(/(?:index|default).[a-zA-Z]{3,4}$/,'')
.replace(/\/$/,'');
},
reflow : function () {
  var self = this;
  // remove placeholder expeditions used for height calculation purposes
  $('[' + self.add_namespace('data-magellan-expedition-clone') + ']', self.scope).remove();
}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.magellan.js | foundation.magellan.js |
;(function ($, window, document, undefined) {
  'use strict';
  // Accordion: expand/collapse panels via their title links, with optional
  // multi-expand and toggleable (click-to-close) behavior.
  Foundation.libs.accordion = {
    name : 'accordion',
    version : '5.5.3',
    settings : {
      content_class : 'content',
      active_class : 'active',
      multi_expand : false,   // allow several panels open at once
      toggleable : true,      // clicking an open panel's title closes it
      callback : function () {}
    },
    init : function (scope, method, options) {
      this.bindings(method, options);
    },
    events : function (instance) {
      // Delegated click handler on panel title links; resolves the target
      // panel from the link's hash and toggles active classes + aria state.
      var self = this;
      var S = this.S;
      self.create(this.S(instance));
      S(this.scope)
        .off('.fndtn.accordion')
        .on('click.fndtn.accordion', '[' + this.attr_name() + '] > dd > a, [' + this.attr_name() + '] > li > a', function (e) {
          var accordion = S(this).closest('[' + self.attr_name() + ']'),
              groupSelector = self.attr_name() + '=' + accordion.attr(self.attr_name()),
              settings = accordion.data(self.attr_name(true) + '-init') || self.settings,
              target = S('#' + this.href.split('#')[1]),
              aunts = $('> dd, > li', accordion),
              siblings = aunts.children('.' + settings.content_class),
              active_content = siblings.filter('.' + settings.active_class);
          e.preventDefault();
          // Named accordions act as one group across the page.
          if (accordion.attr(self.attr_name())) {
            siblings = siblings.add('[' + groupSelector + '] dd > ' + '.' + settings.content_class + ', [' + groupSelector + '] li > ' + '.' + settings.content_class);
            aunts = aunts.add('[' + groupSelector + '] dd, [' + groupSelector + '] li');
          }
          // Toggleable: clicking the already-open panel closes it and returns.
          if (settings.toggleable && target.is(active_content)) {
            target.parent('dd, li').toggleClass(settings.active_class, false);
            target.toggleClass(settings.active_class, false);
            S(this).attr('aria-expanded', function(i, attr){
              return attr === 'true' ? 'false' : 'true';
            });
            settings.callback(target);
            target.triggerHandler('toggled', [accordion]);
            accordion.triggerHandler('toggled', [target]);
            return;
          }
          // Single-expand: close everything else before opening the target.
          if (!settings.multi_expand) {
            siblings.removeClass(settings.active_class);
            aunts.removeClass(settings.active_class);
            aunts.children('a').attr('aria-expanded','false');
          }
          target.addClass(settings.active_class).parent().addClass(settings.active_class);
          settings.callback(target);
          target.triggerHandler('toggled', [accordion]);
          accordion.triggerHandler('toggled', [target]);
          S(this).attr('aria-expanded','true');
        });
    },
    create: function($instance) {
      // Initialize aria-expanded to match current active state and mark
      // multi-expand accordions as aria-multiselectable.
      var self = this,
          accordion = $instance,
          aunts = $('> .accordion-navigation', accordion),
          settings = accordion.data(self.attr_name(true) + '-init') || self.settings;
      aunts.children('a').attr('aria-expanded','false');
      aunts.has('.' + settings.content_class + '.' + settings.active_class).addClass(settings.active_class).children('a').attr('aria-expanded','true');
      if (settings.multi_expand) {
        $instance.attr('aria-multiselectable','true');
      }
    },
    toggle : function(options) {
      // Programmatic API: open/close/toggle panels matched by
      // options.selector within options.$accordion by re-triggering the
      // title link's click handler.
      var options = typeof options !== 'undefined' ? options : {};
      var selector = typeof options.selector !== 'undefined' ? options.selector : '';
      var toggle_state = typeof options.toggle_state !== 'undefined' ? options.toggle_state : '';
      var $accordion = typeof options.$accordion !== 'undefined' ? options.$accordion : this.S(this.scope).closest('[' + this.attr_name() + ']');
      var $items = $accordion.find('> dd' + selector + ', > li' + selector);
      if ( $items.length < 1 ) {
        if ( window.console ) {
          console.error('Selection not found.', selector);
        }
        return false;
      }
      var S = this.S;
      var active_class = this.settings.active_class;
      $items.each(function() {
        var $item = S(this);
        var is_active = $item.hasClass(active_class);
        // Only click when the item isn't already in the requested state.
        if ( ( is_active && toggle_state === 'close' ) || ( !is_active && toggle_state === 'open' ) || toggle_state === '' ) {
          $item.find('> a').trigger('click.fndtn.accordion');
        }
      });
    },
    open : function(options) {
      var options = typeof options !== 'undefined' ? options : {};
      options.toggle_state = 'open';
      this.toggle(options);
    },
    close : function(options) {
      var options = typeof options !== 'undefined' ? options : {};
      options.toggle_state = 'close';
      this.toggle(options);
    },
    off : function () {},     // no global handlers beyond the scoped ones
    reflow : function () {}   // intentional no-op
  };
}(jQuery, window, window.document));
;(function ($, window, document, undefined) {
  'use strict';
  // Interchange: swap images/HTML fragments per media query, driven by
  // data-interchange="[path, (named-or-raw-query)], ..." attributes.
  Foundation.libs.interchange = {
    name : 'interchange',
    version : '5.5.3',
    cache : {},            // uuid -> parsed scenarios for each element
    images_loaded : false, // set once update_images() has scanned the DOM
    nodes_loaded : false,  // set once update_nodes() has scanned the DOM
    settings : {
      load_attr : 'interchange',
      // Named media queries usable in place of raw query strings.
      named_queries : {
        'default' : 'only screen',
        'small' : Foundation.media_queries['small'],
        'small-only' : Foundation.media_queries['small-only'],
        'medium' : Foundation.media_queries['medium'],
        'medium-only' : Foundation.media_queries['medium-only'],
        'large' : Foundation.media_queries['large'],
        'large-only' : Foundation.media_queries['large-only'],
        'xlarge' : Foundation.media_queries['xlarge'],
        'xlarge-only' : Foundation.media_queries['xlarge-only'],
        'xxlarge' : Foundation.media_queries['xxlarge'],
        'landscape' : 'only screen and (orientation: landscape)',
        'portrait' : 'only screen and (orientation: portrait)',
        'retina' : 'only screen and (-webkit-min-device-pixel-ratio: 2),' +
          'only screen and (min--moz-device-pixel-ratio: 2),' +
          'only screen and (-o-min-device-pixel-ratio: 2/1),' +
          'only screen and (min-device-pixel-ratio: 2),' +
          'only screen and (min-resolution: 192dpi),' +
          'only screen and (min-resolution: 2dppx)'
      },
      directives : {
        // Swap an image src, a background image, or fetched HTML content.
        replace : function (el, path, trigger) {
          // The trigger argument, if called within the directive, fires
          // an event named after the directive on the element, passing
          // any parameters along to the event that you pass to trigger.
          //
          // ex. trigger(), trigger([a, b, c]), or trigger(a, b, c)
          //
          // This allows you to bind a callback like so:
          // $('#interchangeContainer').on('replace', function (e, a, b, c) {
          //   console.log($(this).html(), a, b, c);
          // });
          if (el !== null && /IMG/.test(el[0].nodeName)) {
            // NOTE(review): $.each returns the collection itself, so
            // orig_path is a jQuery object (not a URL string) and the
            // regex test below is effectively always false; src is then
            // assigned twice. Upstream used el[0].src (see commented
            // line) — confirm this multi-image variant is intentional.
            var orig_path = $.each(el, function(){this.src = path;});
            // var orig_path = el[0].src;
            if (new RegExp(path, 'i').test(orig_path)) {
              return;
            }
            el.attr("src", path);
            return trigger(el[0].src);
          }
          var last_path = el.data(this.data_attr + '-last-path'),
              self = this;
          // Skip work when the matching path hasn't changed.
          if (last_path == path) {
            return;
          }
          // Image-like paths become background images; anything else is
          // fetched and injected as HTML.
          if (/\.(gif|jpg|jpeg|tiff|png)([?#].*)?/i.test(path)) {
            $(el).css('background-image', 'url(' + path + ')');
            el.data('interchange-last-path', path);
            return trigger(path);
          }
          return $.get(path, function (response) {
            el.html(response);
            el.data(self.data_attr + '-last-path', path);
            trigger();
          });
        }
      }
    },
    init : function (scope, method, options) {
      Foundation.inherit(this, 'throttle random_str');
      this.data_attr = this.set_data_attr();
      $.extend(true, this.settings, method, options);
      this.bindings(method, options);
      this.reflow();
    },
get_media_hash : function () {
var mediaHash = '';
for (var queryName in this.settings.named_queries ) {
mediaHash += matchMedia(this.settings.named_queries[queryName]).matches.toString();
}
return mediaHash;
},
events : function () {
  // Re-evaluate scenarios on (throttled) window resize, but only when the
  // set of matching media queries actually changed since last time.
  var self = this, prevMediaHash;
  $(window)
    .off('.interchange')
    .on('resize.fndtn.interchange', self.throttle(function () {
      var currMediaHash = self.get_media_hash();
      if (currMediaHash !== prevMediaHash) {
        self.resize();
      }
      prevMediaHash = currMediaHash;
    }, 50));
  return this;
},
resize : function () {
  // Apply the winning scenario for every cached element. Defers itself
  // until both image and node scans have completed.
  var cache = this.cache;
  if (!this.images_loaded || !this.nodes_loaded) {
    setTimeout($.proxy(this.resize, this), 50);
    return;
  }
  for (var uuid in cache) {
    if (cache.hasOwnProperty(uuid)) {
      var passed = this.results(uuid, cache[uuid]);
      if (passed) {
        // Invoke the directive (e.g. 'replace') with a trigger closure
        // that fires an event named after the directive on the element.
        this.settings.directives[passed
          .scenario[1]].call(this, passed.el, passed.scenario[0], (function (passed) {
            if (arguments[0] instanceof Array) {
              var args = arguments[0];
            } else {
              var args = Array.prototype.slice.call(arguments, 0);
            }
            return function() {
              passed.el.trigger(passed.scenario[1], args);
            }
          }(passed)));
      }
    }
  }
},
results : function (uuid, scenarios) {
  // Find the last-listed scenario whose media query currently matches for
  // the element tagged with `uuid`; false when none matches.
  var count = scenarios.length;
  if (count > 0) {
    var el = this.S('[' + this.add_namespace('data-uuid') + '="' + uuid + '"]');
    while (count--) {
      var mq, rule = scenarios[count][2];
      // Named queries resolve through settings; anything else is raw CSS.
      if (this.settings.named_queries.hasOwnProperty(rule)) {
        mq = matchMedia(this.settings.named_queries[rule]);
      } else {
        mq = matchMedia(rule);
      }
      if (mq.matches) {
        return {el : el, scenario : scenarios[count]};
      }
    }
  }
  return false;
},
load : function (type, force_update) {
if (typeof this['cached_' + type] === 'undefined' || force_update) {
this['update_' + type]();
}
return this['cached_' + type];
},
update_images : function () {
  // Scan the DOM for <img data-interchange> elements and rebuild the
  // image cache; marks images_loaded and triggers enhancement once the
  // (backwards) loop has visited every element.
  var images = this.S('img[' + this.data_attr + ']'),
      count = images.length,
      i = count,
      loaded_count = 0,
      data_attr = this.data_attr;
  this.cache = {};
  this.cached_images = [];
  this.images_loaded = (count === 0);
  while (i--) {
    loaded_count++;
    if (images[i]) {
      var str = images[i].getAttribute(data_attr) || '';
      if (str.length > 0) {
        this.cached_images.push(images[i]);
      }
    }
    // loaded_count counts iterations, so this fires on the final pass.
    if (loaded_count === count) {
      this.images_loaded = true;
      this.enhance('images');
    }
  }
  return this;
},
update_nodes : function () {
  // Same scan for non-image elements carrying the interchange attribute.
  var nodes = this.S('[' + this.data_attr + ']').not('img'),
      count = nodes.length,
      i = count,
      loaded_count = 0,
      data_attr = this.data_attr;
  this.cached_nodes = [];
  this.nodes_loaded = (count === 0);
  while (i--) {
    loaded_count++;
    var str = nodes[i].getAttribute(data_attr) || '';
    if (str.length > 0) {
      this.cached_nodes.push(nodes[i]);
    }
    if (loaded_count === count) {
      this.nodes_loaded = true;
      this.enhance('nodes');
    }
  }
  return this;
},
enhance : function (type) {
var i = this['cached_' + type].length;
while (i--) {
this.object($(this['cached_' + type][i]));
}
return $(window).trigger('resize.fndtn.interchange');
},
convert_directive : function (directive) {
  // Normalize a directive name; empty/blank defaults to 'replace'.
  var trimmed = this.trim(directive);
  if (trimmed.length > 0) {
    return trimmed;
  }
  return 'replace';
},
parse_scenario : function (scenario) {
  // Split one "[path, directive (media query)]" entry into
  // [path, directive, media_query].
  // This logic had to be made more complex since some users were using commas in the url path
  // So we cannot simply just split on a comma
  var directive_match = scenario[0].match(/(.+),\s*(\w+)\s*$/),
      // getting the mq has gotten a bit complicated since we started accounting for several use cases
      // of URLs. For now we'll continue to match these scenarios, but we may consider having these scenarios
      // as nested objects or arrays in F6.
      // regex: match everything before close parenthesis for mq
      media_query = scenario[1].match(/(.*)\)/);
  // NOTE(review): if scenario[1] contains no ')' media_query is null and
  // media_query[1] below throws — assumes well-formed attribute input.
  if (directive_match) {
    var path = directive_match[1],
        directive = directive_match[2];
  } else {
    var cached_split = scenario[0].split(/,\s*$/),
        path = cached_split[0],
        directive = '';
  }
  return [this.trim(path), this.convert_directive(directive), this.trim(media_query[1])];
},
object : function (el) {
  // Parse an element's interchange attribute into scenario triples and
  // register them in the cache under a per-element uuid.
  var raw_arr = this.parse_data_attr(el),
      scenarios = [],
      i = raw_arr.length;
  if (i > 0) {
    while (i--) {
      // split array between comma delimited content and mq
      // regex: comma, optional space, open parenthesis
      var scenario = raw_arr[i].split(/,\s?\(/);
      if (scenario.length > 1) {
        var params = this.parse_scenario(scenario);
        scenarios.push(params);
      }
    }
  }
  return this.store(el, scenarios);
},
store : function (el, scenarios) {
  // Cache scenarios under a fresh uuid stamped onto the element. If the
  // element already carries a cached uuid, the existing scenarios are
  // returned and the fresh uuid is discarded.
  var uuid = this.random_str(),
      current_uuid = el.data(this.add_namespace('uuid', true));
  if (this.cache[current_uuid]) {
    return this.cache[current_uuid];
  }
  el.attr(this.add_namespace('data-uuid'), uuid);
  return this.cache[uuid] = scenarios;
},
trim : function (str) {
if (typeof str === 'string') {
return $.trim(str);
}
return str;
},
set_data_attr : function (init) {
if (init) {
if (this.namespace.length > 0) {
return this.namespace + '-' + this.settings.load_attr;
}
return this.settings.load_attr;
}
if (this.namespace.length > 0) {
return 'data-' + this.namespace + '-' + this.settings.load_attr;
}
return 'data-' + this.settings.load_attr;
},
parse_data_attr : function (el) {
  // Split the raw attribute value on bracketed groups and keep the
  // substantive entries.
  var raw = el.attr(this.attr_name()).split(/\[(.*?)\]/),
      i = raw.length,
      output = [];
  while (i--) {
    // NOTE(review): the `> 4` length filter (after stripping one leading
    // run of non-word/digit chars) looks like a heuristic to drop
    // delimiter debris — confirm against expected attribute formats.
    if (raw[i].replace(/[\W\d]+/, '').length > 4) {
      output.push(raw[i]);
    }
  }
  return output;
},
reflow : function () {
  // Rescan the DOM and reapply scenarios (force-refresh both caches).
  this.load('images', true);
  this.load('nodes', true);
}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.interchange.js | foundation.interchange.js |
;(function ($, window, document, undefined) {
  'use strict';
  // Off-canvas menus: slide-in panels from any edge, opened either by
  // moving the page ('move'), overlapping one panel ('overlap_single'),
  // or overlapping without moving ('overlap').
  Foundation.libs.offcanvas = {
    name : 'offcanvas',
    version : '5.5.3',
    settings : {
      open_method : 'move',    // 'move' | 'overlap_single' | 'overlap'
      close_on_click : false   // close the panel when a menu link is clicked
    },
    init : function (scope, method, options) {
      this.bindings(method, options);
    },
events : function () {
var self = this,
S = self.S,
move_class = '',
right_postfix = '',
left_postfix = '',
top_postfix = '',
bottom_postfix = '';
if (this.settings.open_method === 'move') {
move_class = 'move-';
right_postfix = 'right';
left_postfix = 'left';
top_postfix = 'top';
bottom_postfix = 'bottom';
} else if (this.settings.open_method === 'overlap_single') {
move_class = 'offcanvas-overlap-';
right_postfix = 'right';
left_postfix = 'left';
top_postfix = 'top';
bottom_postfix = 'bottom';
} else if (this.settings.open_method === 'overlap') {
move_class = 'offcanvas-overlap';
}
S(this.scope).off('.offcanvas')
.on('click.fndtn.offcanvas', '.left-off-canvas-toggle', function (e) {
self.click_toggle_class(e, move_class + right_postfix);
if (self.settings.open_method !== 'overlap') {
S('.left-submenu').removeClass(move_class + right_postfix);
}
$('.left-off-canvas-toggle').attr('aria-expanded', 'true');
})
.on('click.fndtn.offcanvas', '.left-off-canvas-menu a', function (e) {
var settings = self.get_settings(e);
var parent = S(this).parent();
if (settings.close_on_click && !parent.hasClass('has-submenu') && !parent.hasClass('back')) {
self.hide.call(self, move_class + right_postfix, self.get_wrapper(e));
parent.parent().removeClass(move_class + right_postfix);
} else if (S(this).parent().hasClass('has-submenu')) {
e.preventDefault();
S(this).siblings('.left-submenu').toggleClass(move_class + right_postfix);
} else if (parent.hasClass('back')) {
e.preventDefault();
parent.parent().removeClass(move_class + right_postfix);
}
$('.left-off-canvas-toggle').attr('aria-expanded', 'true');
})
//end of left canvas
.on('click.fndtn.offcanvas', '.right-off-canvas-toggle', function (e) {
self.click_toggle_class(e, move_class + left_postfix);
if (self.settings.open_method !== 'overlap') {
S('.right-submenu').removeClass(move_class + left_postfix);
}
$('.right-off-canvas-toggle').attr('aria-expanded', 'true');
})
.on('click.fndtn.offcanvas', '.right-off-canvas-menu a', function (e) {
var settings = self.get_settings(e);
var parent = S(this).parent();
if (settings.close_on_click && !parent.hasClass('has-submenu') && !parent.hasClass('back')) {
self.hide.call(self, move_class + left_postfix, self.get_wrapper(e));
parent.parent().removeClass(move_class + left_postfix);
} else if (S(this).parent().hasClass('has-submenu')) {
e.preventDefault();
S(this).siblings('.right-submenu').toggleClass(move_class + left_postfix);
} else if (parent.hasClass('back')) {
e.preventDefault();
parent.parent().removeClass(move_class + left_postfix);
}
$('.right-off-canvas-toggle').attr('aria-expanded', 'true');
})
//end of right canvas
.on('click.fndtn.offcanvas', '.top-off-canvas-toggle', function (e) {
self.click_toggle_class(e, move_class + bottom_postfix);
if (self.settings.open_method !== 'overlap') {
S('.top-submenu').removeClass(move_class + bottom_postfix);
}
$('.top-off-canvas-toggle').attr('aria-expanded', 'true');
})
.on('click.fndtn.offcanvas', '.top-off-canvas-menu a', function (e) {
var settings = self.get_settings(e);
var parent = S(this).parent();
if (settings.close_on_click && !parent.hasClass('has-submenu') && !parent.hasClass('back')) {
self.hide.call(self, move_class + bottom_postfix, self.get_wrapper(e));
parent.parent().removeClass(move_class + bottom_postfix);
} else if (S(this).parent().hasClass('has-submenu')) {
e.preventDefault();
S(this).siblings('.top-submenu').toggleClass(move_class + bottom_postfix);
} else if (parent.hasClass('back')) {
e.preventDefault();
parent.parent().removeClass(move_class + bottom_postfix);
}
$('.top-off-canvas-toggle').attr('aria-expanded', 'true');
})
//end of top canvas
.on('click.fndtn.offcanvas', '.bottom-off-canvas-toggle', function (e) {
self.click_toggle_class(e, move_class + top_postfix);
if (self.settings.open_method !== 'overlap') {
S('.bottom-submenu').removeClass(move_class + top_postfix);
}
$('.bottom-off-canvas-toggle').attr('aria-expanded', 'true');
})
.on('click.fndtn.offcanvas', '.bottom-off-canvas-menu a', function (e) {
var settings = self.get_settings(e);
var parent = S(this).parent();
if (settings.close_on_click && !parent.hasClass('has-submenu') && !parent.hasClass('back')) {
self.hide.call(self, move_class + top_postfix, self.get_wrapper(e));
parent.parent().removeClass(move_class + top_postfix);
} else if (S(this).parent().hasClass('has-submenu')) {
e.preventDefault();
S(this).siblings('.bottom-submenu').toggleClass(move_class + top_postfix);
} else if (parent.hasClass('back')) {
e.preventDefault();
parent.parent().removeClass(move_class + top_postfix);
}
$('.bottom-off-canvas-toggle').attr('aria-expanded', 'true');
})
//end of bottom
.on('click.fndtn.offcanvas', '.exit-off-canvas', function (e) {
self.click_remove_class(e, move_class + left_postfix);
S('.right-submenu').removeClass(move_class + left_postfix);
if (right_postfix) {
self.click_remove_class(e, move_class + right_postfix);
S('.left-submenu').removeClass(move_class + left_postfix);
}
$('.right-off-canvas-toggle').attr('aria-expanded', 'true');
})
.on('click.fndtn.offcanvas', '.exit-off-canvas', function (e) {
self.click_remove_class(e, move_class + left_postfix);
$('.left-off-canvas-toggle').attr('aria-expanded', 'false');
if (right_postfix) {
self.click_remove_class(e, move_class + right_postfix);
$('.right-off-canvas-toggle').attr('aria-expanded', 'false');
}
})
.on('click.fndtn.offcanvas', '.exit-off-canvas', function (e) {
self.click_remove_class(e, move_class + top_postfix);
S('.bottom-submenu').removeClass(move_class + top_postfix);
if (bottom_postfix) {
self.click_remove_class(e, move_class + bottom_postfix);
S('.top-submenu').removeClass(move_class + top_postfix);
}
$('.bottom-off-canvas-toggle').attr('aria-expanded', 'true');
})
.on('click.fndtn.offcanvas', '.exit-off-canvas', function (e) {
self.click_remove_class(e, move_class + top_postfix);
$('.top-off-canvas-toggle').attr('aria-expanded', 'false');
if (bottom_postfix) {
self.click_remove_class(e, move_class + bottom_postfix);
$('.bottom-off-canvas-toggle').attr('aria-expanded', 'false');
}
});
},
toggle : function (class_name, $off_canvas) {
$off_canvas = $off_canvas || this.get_wrapper();
if ($off_canvas.is('.' + class_name)) {
this.hide(class_name, $off_canvas);
} else {
this.show(class_name, $off_canvas);
}
},
show : function (class_name, $off_canvas) {
$off_canvas = $off_canvas || this.get_wrapper();
$off_canvas.trigger('open.fndtn.offcanvas');
$off_canvas.addClass(class_name);
},
hide : function (class_name, $off_canvas) {
$off_canvas = $off_canvas || this.get_wrapper();
$off_canvas.trigger('close.fndtn.offcanvas');
$off_canvas.removeClass(class_name);
},
click_toggle_class : function (e, class_name) {
e.preventDefault();
var $off_canvas = this.get_wrapper(e);
this.toggle(class_name, $off_canvas);
},
click_remove_class : function (e, class_name) {
e.preventDefault();
var $off_canvas = this.get_wrapper(e);
this.hide(class_name, $off_canvas);
},
get_settings : function (e) {
var offcanvas = this.S(e.target).closest('[' + this.attr_name() + ']');
return offcanvas.data(this.attr_name(true) + '-init') || this.settings;
},
get_wrapper : function (e) {
var $off_canvas = this.S(e ? e.target : this.scope).closest('.off-canvas-wrap');
if ($off_canvas.length === 0) {
$off_canvas = this.S('.off-canvas-wrap');
}
return $off_canvas;
},
// No-op: off-canvas needs no work on a Foundation reflow pass.
reflow : function () {}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.offcanvas.js | foundation.offcanvas.js |
;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.dropdown = {
name : 'dropdown',
version : '5.5.3',
settings : {
  active_class : 'open', // class applied to an open dropdown pane
  disabled_class : 'disabled', // triggers with this class are ignored
  mega_class : 'mega', // full-width dropdowns that skip repositioning
  align : 'bottom', // which side of the trigger to drop from
  is_hover : false, // open on hover instead of click
  hover_timeout : 150, // ms grace period before closing on mouseleave
  opened : function () {}, // callback fired after a pane opens
  closed : function () {} // callback fired after a pane closes
},
// Standard Foundation entry point: mix in throttle (used by the resize
// handler), merge caller options into settings, then wire up events.
init : function (scope, method, options) {
  Foundation.inherit(this, 'throttle');
  $.extend(true, this.settings, method, options);
  this.bindings(method, options);
},
// Attach all delegated dropdown handlers to the scope plus a throttled
// window-resize repositioner. Handler order matters: the document-level
// click handler below relies on trigger clicks being handled first.
events : function (scope) {
  var self = this,
      S = self.S;
  S(this.scope)
    .off('.dropdown')
    // click on a trigger: toggle its pane (unless hover mode on non-touch)
    .on('click.fndtn.dropdown', '[' + this.attr_name() + ']', function (e) {
      var settings = S(this).data(self.attr_name(true) + '-init') || self.settings;
      if (!settings.is_hover || Modernizr.touch) {
        e.preventDefault();
        // inside a reveal modal: keep the click from bubbling up and
        // closing the modal
        if (S(this).parent('[data-reveal-id]').length) {
          e.stopPropagation();
        }
        self.toggle($(this));
      }
    })
    // hover-open: entering either the trigger or the pane cancels any
    // pending close timer and (in hover mode) opens the pane
    .on('mouseenter.fndtn.dropdown', '[' + this.attr_name() + '], [' + this.attr_name() + '-content]', function (e) {
      var $this = S(this),
          dropdown,
          target;
      clearTimeout(self.timeout);
      // the element may be the trigger (carries the data attr) or the pane
      if ($this.data(self.data_attr())) {
        dropdown = S('#' + $this.data(self.data_attr()));
        target = $this;
      } else {
        dropdown = $this;
        target = S('[' + self.attr_name() + '="' + dropdown.attr('id') + '"]');
      }
      var settings = target.data(self.attr_name(true) + '-init') || self.settings;
      if (S(e.currentTarget).data(self.data_attr()) && settings.is_hover) {
        self.closeall.call(self);
      }
      if (settings.is_hover) {
        self.open.apply(self, [dropdown, target]);
      }
    })
    // hover-close: start the grace-period timer when leaving either element
    .on('mouseleave.fndtn.dropdown', '[' + this.attr_name() + '], [' + this.attr_name() + '-content]', function (e) {
      var $this = S(this);
      var settings;
      if ($this.data(self.data_attr())) {
        settings = $this.data(self.data_attr(true) + '-init') || self.settings;
      } else {
        var target = S('[' + self.attr_name() + '="' + S(this).attr('id') + '"]'),
            settings = target.data(self.attr_name(true) + '-init') || self.settings;
      }
      self.timeout = setTimeout(function () {
        if ($this.data(self.data_attr())) {
          if (settings.is_hover) {
            self.close.call(self, S('#' + $this.data(self.data_attr())));
          }
        } else {
          if (settings.is_hover) {
            self.close.call(self, $this);
          }
        }
      }.bind(this), settings.hover_timeout);
    })
    // document-level click: decide whether to close the open panes
    .on('click.fndtn.dropdown', function (e) {
      var parent = S(e.target).closest('[' + self.attr_name() + '-content]');
      var links = parent.find('a');
      // clicking a link inside a pane closes it unless opted out via
      // aria-autoclose="false"
      if (links.length > 0 && parent.attr('aria-autoclose') !== 'false') {
        self.close.call(self, S('[' + self.attr_name() + '-content]'));
      }
      // ignore clicks on elements already detached from the document
      if (e.target !== document && !$.contains(document.documentElement, e.target)) {
        return;
      }
      // clicks on a trigger are handled by the trigger handler above
      if (S(e.target).closest('[' + self.attr_name() + ']').length > 0) {
        return;
      }
      // non-reveal clicks inside an open pane keep it open
      if (!(S(e.target).data('revealId')) &&
        (parent.length > 0 && (S(e.target).is('[' + self.attr_name() + '-content]') ||
          $.contains(parent.first()[0], e.target)))) {
        e.stopPropagation();
        return;
      }
      self.close.call(self, S('[' + self.attr_name() + '-content]'));
    })
    // relay open/close lifecycle events to the user-supplied callbacks
    .on('opened.fndtn.dropdown', '[' + self.attr_name() + '-content]', function () {
      self.settings.opened.call(this);
    })
    .on('closed.fndtn.dropdown', '[' + self.attr_name() + '-content]', function () {
      self.settings.closed.call(this);
    });
  S(window)
    .off('.dropdown')
    .on('resize.fndtn.dropdown', self.throttle(function () {
      self.resize.call(self);
    }, 50));
  this.resize();
},
close : function (dropdown) {
var self = this;
dropdown.each(function (idx) {
var original_target = $('[' + self.attr_name() + '=' + dropdown[idx].id + ']') || $('aria-controls=' + dropdown[idx].id + ']');
original_target.attr('aria-expanded', 'false');
if (self.S(this).hasClass(self.settings.active_class)) {
self.S(this)
.css(Foundation.rtl ? 'right' : 'left', '-99999px')
.attr('aria-hidden', 'true')
.removeClass(self.settings.active_class)
.prev('[' + self.attr_name() + ']')
.removeClass(self.settings.active_class)
.removeData('target');
self.S(this).trigger('closed.fndtn.dropdown', [dropdown]);
}
});
dropdown.removeClass('f-open-' + this.attr_name(true));
},
closeall : function () {
var self = this;
$.each(self.S('.f-open-' + this.attr_name(true)), function () {
self.close.call(self, self.S(this));
});
},
open : function (dropdown, target) {
this
.css(dropdown
.addClass(this.settings.active_class), target);
dropdown.prev('[' + this.attr_name() + ']').addClass(this.settings.active_class);
dropdown.data('target', target.get(0)).trigger('opened.fndtn.dropdown', [dropdown, target]);
dropdown.attr('aria-hidden', 'false');
target.attr('aria-expanded', 'true');
dropdown.focus();
dropdown.addClass('f-open-' + this.attr_name(true));
},
data_attr : function () {
if (this.namespace.length > 0) {
return this.namespace + '-' + this.name;
}
return this.name;
},
toggle : function (target) {
if (target.hasClass(this.settings.disabled_class)) {
return;
}
var dropdown = this.S('#' + target.data(this.data_attr()));
if (dropdown.length === 0) {
// No dropdown found, not continuing
return;
}
this.close.call(this, this.S('[' + this.attr_name() + '-content]').not(dropdown));
if (dropdown.hasClass(this.settings.active_class)) {
this.close.call(this, dropdown);
if (dropdown.data('target') !== target.get(0)) {
this.open.call(this, dropdown, target);
}
} else {
this.open.call(this, dropdown, target);
}
},
resize : function () {
var dropdown = this.S('[' + this.attr_name() + '-content].open');
var target = $(dropdown.data("target"));
if (dropdown.length && target.length) {
this.css(dropdown, target);
}
},
// Position `dropdown` relative to its `target` trigger and return it.
// Small screens get a centred near-full-width pane; panes inside an
// overflow container are dropped straight below the trigger; otherwise
// positioning is delegated to the configured direction handler.
css : function (dropdown, target) {
  var left_offset = Math.max((target.width() - dropdown.width()) / 2, 8),
      settings = target.data(this.attr_name(true) + '-init') || this.settings,
      parentOverflow = dropdown.parent().css('overflow-y') || dropdown.parent().css('overflow');
  this.clear_idx(); // drop the pip rules from the previous positioning pass
  if (this.small()) {
    var p = this.dirs.bottom.call(dropdown, target, settings);
    dropdown.attr('style', '').removeClass('drop-left drop-right drop-top').css({
      position : 'absolute',
      width : '95%',
      'max-width' : 'none',
      top : p.top
    });
    dropdown.css(Foundation.rtl ? 'right' : 'left', left_offset);
  }
  // detect if dropdown is in an overflow container
  else if (parentOverflow !== 'visible') {
    var offset = target[0].offsetTop + target[0].offsetHeight;
    dropdown.attr('style', '').css({
      position : 'absolute',
      top : offset
    });
    dropdown.css(Foundation.rtl ? 'right' : 'left', left_offset);
  }
  else {
    this.style(dropdown, target, settings);
  }
  return dropdown;
},
style : function (dropdown, target, settings) {
var css = $.extend({position : 'absolute'},
this.dirs[settings.align].call(dropdown, target, settings));
dropdown.attr('style', '').css(css);
},
// return CSS property object
// `this` is the dropdown
dirs : {
// Calculate target offset
// Shared geometry pass for all alignments. `this` is the dropdown pane,
// `t` the trigger, `s` the settings. Returns the trigger's position
// relative to the pane's offsetParent, plus miss* flags describing which
// viewport edges the pane would overflow.
_base : function (t, s) {
  var o_p = this.offsetParent(),
      o = o_p.offset(),
      p = t.offset();
  p.top -= o.top;
  p.left -= o.left;
  //set some flags on the p object to pass along
  p.missRight = false;
  p.missTop = false;
  p.missLeft = false;
  p.leftRightFlag = false;
  //lets see if the panel will be off the screen
  //get the actual width of the page and store it
  var actualBodyWidth;
  var windowWidth = window.innerWidth;
  if (document.getElementsByClassName('row')[0]) {
    // a grid is present: use the row width as the content width
    actualBodyWidth = document.getElementsByClassName('row')[0].clientWidth;
  } else {
    actualBodyWidth = windowWidth;
  }
  var actualMarginWidth = (windowWidth - actualBodyWidth) / 2;
  // NOTE(review): actualBoundary is assigned below but never read — dead code
  var actualBoundary = actualBodyWidth;
  if (!this.hasClass('mega') && !s.ignore_repositioning) {
    var outerWidth = this.outerWidth();
    var o_left = t.offset().left;
    //miss top
    if (t.offset().top <= this.outerHeight()) {
      p.missTop = true;
      actualBoundary = windowWidth - actualMarginWidth;
      p.leftRightFlag = true;
    }
    //miss right
    // NOTE(review): the first clause reduces to outerWidth > actualMarginWidth
    // (o_left cancels out), which looks unintended — confirm against upstream
    if (o_left + outerWidth > o_left + actualMarginWidth && o_left - actualMarginWidth > outerWidth) {
      p.missRight = true;
      p.missLeft = false;
    }
    //miss left
    if (o_left - outerWidth <= 0) {
      p.missLeft = true;
      p.missRight = false;
    }
  }
  return p;
},
top : function (t, s) {
var self = Foundation.libs.dropdown,
p = self.dirs._base.call(this, t, s);
this.addClass('drop-top');
if (p.missTop == true) {
p.top = p.top + t.outerHeight() + this.outerHeight();
this.removeClass('drop-top');
}
if (p.missRight == true) {
p.left = p.left - this.outerWidth() + t.outerWidth();
}
if (t.outerWidth() < this.outerWidth() || self.small() || this.hasClass(s.mega_menu)) {
self.adjust_pip(this, t, s, p);
}
if (Foundation.rtl) {
return {left : p.left - this.outerWidth() + t.outerWidth(),
top : p.top - this.outerHeight()};
}
return {left : p.left, top : p.top - this.outerHeight()};
},
bottom : function (t, s) {
var self = Foundation.libs.dropdown,
p = self.dirs._base.call(this, t, s);
if (p.missRight == true) {
p.left = p.left - this.outerWidth() + t.outerWidth();
}
if (t.outerWidth() < this.outerWidth() || self.small() || this.hasClass(s.mega_menu)) {
self.adjust_pip(this, t, s, p);
}
if (self.rtl) {
return {left : p.left - this.outerWidth() + t.outerWidth(), top : p.top + t.outerHeight()};
}
return {left : p.left, top : p.top + t.outerHeight()};
},
left : function (t, s) {
var p = Foundation.libs.dropdown.dirs._base.call(this, t, s);
this.addClass('drop-left');
if (p.missLeft == true) {
p.left = p.left + this.outerWidth();
p.top = p.top + t.outerHeight();
this.removeClass('drop-left');
}
return {left : p.left - this.outerWidth(), top : p.top};
},
right : function (t, s) {
var p = Foundation.libs.dropdown.dirs._base.call(this, t, s);
this.addClass('drop-right');
if (p.missRight == true) {
p.left = p.left - this.outerWidth();
p.top = p.top + t.outerHeight();
this.removeClass('drop-right');
} else {
p.triggeredRight = true;
}
var self = Foundation.libs.dropdown;
if (t.outerWidth() < this.outerWidth() || self.small() || this.hasClass(s.mega_menu)) {
self.adjust_pip(this, t, s, p);
}
return {left : p.left + t.outerWidth(), top : p.top};
}
},
// Insert rule to style psuedo elements
adjust_pip : function (dropdown, target, settings, position) {
var sheet = Foundation.stylesheet,
pip_offset_base = 8;
if (dropdown.hasClass(settings.mega_class)) {
pip_offset_base = position.left + (target.outerWidth() / 2) - 8;
} else if (this.small()) {
pip_offset_base += position.left - 8;
}
this.rule_idx = sheet.cssRules.length;
//default
var sel_before = '.f-dropdown.open:before',
sel_after = '.f-dropdown.open:after',
css_before = 'left: ' + pip_offset_base + 'px;',
css_after = 'left: ' + (pip_offset_base - 1) + 'px;';
if (position.missRight == true) {
pip_offset_base = dropdown.outerWidth() - 23;
sel_before = '.f-dropdown.open:before',
sel_after = '.f-dropdown.open:after',
css_before = 'left: ' + pip_offset_base + 'px;',
css_after = 'left: ' + (pip_offset_base - 1) + 'px;';
}
//just a case where right is fired, but its not missing right
if (position.triggeredRight == true) {
sel_before = '.f-dropdown.open:before',
sel_after = '.f-dropdown.open:after',
css_before = 'left:-12px;',
css_after = 'left:-14px;';
}
if (sheet.insertRule) {
sheet.insertRule([sel_before, '{', css_before, '}'].join(' '), this.rule_idx);
sheet.insertRule([sel_after, '{', css_after, '}'].join(' '), this.rule_idx + 1);
} else {
sheet.addRule(sel_before, css_before, this.rule_idx);
sheet.addRule(sel_after, css_after, this.rule_idx + 1);
}
},
// Remove old dropdown rule index
// Remove the two pip rules inserted by adjust_pip. Deleting the same
// index twice is intentional: after the first deleteRule, the second
// rule shifts down into `rule_idx`.
clear_idx : function () {
  var sheet = Foundation.stylesheet;
  if (typeof this.rule_idx !== 'undefined') {
    sheet.deleteRule(this.rule_idx);
    sheet.deleteRule(this.rule_idx);
    delete this.rule_idx;
  }
},
small : function () {
return matchMedia(Foundation.media_queries.small).matches &&
!matchMedia(Foundation.media_queries.medium).matches;
},
off : function () {
this.S(this.scope).off('.fndtn.dropdown');
this.S('html, body').off('.fndtn.dropdown');
this.S(window).off('.fndtn.dropdown');
this.S('[data-dropdown-content]').off('.fndtn.dropdown');
},
// No-op: dropdown repositioning is driven by the window resize handler.
reflow : function () {}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.dropdown.js | foundation.dropdown.js |
;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.equalizer = {
name : 'equalizer',
version : '5.5.3',
settings : {
  use_tallest : true, // match to the tallest element (false = shortest)
  before_height_change : $.noop, // callback before heights are applied
  after_height_change : $.noop, // callback after heights are applied
  equalize_on_stack : false, // also equalize when elements have wrapped
  act_on_hidden_el: false // include hidden watched elements when measuring
},
// Standard Foundation entry point: mix in image_loaded (so measuring
// waits for images), bind events, then run an initial equalize pass.
init : function (scope, method, options) {
  Foundation.inherit(this, 'image_loaded');
  this.bindings(method, options);
  this.reflow();
},
events : function () {
this.S(window).off('.equalizer').on('resize.fndtn.equalizer', function (e) {
this.reflow();
}.bind(this));
},
equalize : function (equalizer) {
var isStacked = false,
group = equalizer.data('equalizer'),
settings = equalizer.data(this.attr_name(true)+'-init') || this.settings,
vals,
firstTopOffset;
if (settings.act_on_hidden_el) {
vals = group ? equalizer.find('['+this.attr_name()+'-watch="'+group+'"]') : equalizer.find('['+this.attr_name()+'-watch]');
}
else {
vals = group ? equalizer.find('['+this.attr_name()+'-watch="'+group+'"]:visible') : equalizer.find('['+this.attr_name()+'-watch]:visible');
}
if (vals.length === 0) {
return;
}
settings.before_height_change();
equalizer.trigger('before-height-change.fndth.equalizer');
vals.height('inherit');
if (settings.equalize_on_stack === false) {
firstTopOffset = vals.first().offset().top;
vals.each(function () {
if ($(this).offset().top !== firstTopOffset) {
isStacked = true;
return false;
}
});
if (isStacked) {
return;
}
}
var heights = vals.map(function () { return $(this).outerHeight(false) }).get();
if (settings.use_tallest) {
var max = Math.max.apply(null, heights);
vals.css('height', max);
} else {
var min = Math.min.apply(null, heights);
vals.css('height', min);
}
settings.after_height_change();
equalizer.trigger('after-height-change.fndtn.equalizer');
},
// Wait for images, then equalize each container in scope, honouring an
// optional data-equalizer-mq media-query gate.
reflow : function () {
  var self = this;
  this.S('[' + this.attr_name() + ']', this.scope).each(function () {
    var $eq_target = $(this),
        media_query = $eq_target.data('equalizer-mq'),
        ignore_media_query = true;
    if (media_query) {
      // map e.g. "medium-up" onto Foundation.utils.is_medium_up
      media_query = 'is_' + media_query.replace(/-/g, '_');
      if (Foundation.utils.hasOwnProperty(media_query)) {
        ignore_media_query = false;
      }
    }
    // measure only after images have loaded so heights are final
    self.image_loaded(self.S('img', this), function () {
      if (ignore_media_query || Foundation.utils[media_query]()) {
        self.equalize($eq_target)
      } else {
        // below the breakpoint: release any forced heights
        var vals = $eq_target.find('[' + self.attr_name() + '-watch]:visible');
        vals.css('height', 'auto');
      }
    });
  });
}
};
})(jQuery, window, window.document); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.equalizer.js | foundation.equalizer.js |
;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.slider = {
name : 'slider',
version : '5.5.3',
settings : {
  start : 0, // minimum value of the range
  end : 100, // maximum value of the range
  step : 1, // increment the value snaps to
  precision : 2, // decimal places kept in the reported value
  initial : null, // starting value (null = nearest step to the midpoint)
  display_selector : '', // selector for elements mirroring the value
  vertical : false, // orient the slider vertically
  trigger_input_change : false, // also fire change on the hidden input
  on_change : function () {} // callback fired whenever the value changes
},
cache : {}, // cache.active holds the handle currently being dragged
// Standard Foundation entry point: mix in throttle (for the resize
// handler), bind events, then run an initial measure/render pass.
init : function (scope, method, options) {
  Foundation.inherit(this, 'throttle');
  this.bindings(method, options);
  this.reflow();
},
// Wire up drag handling (mouse / touch / pointer events), window-resize
// re-measuring, and two-way binding with any display_selector inputs.
events : function () {
  var self = this;
  $(this.scope)
    .off('.slider')
    // begin a drag when a handle is pressed
    .on('mousedown.fndtn.slider touchstart.fndtn.slider pointerdown.fndtn.slider',
    '[' + self.attr_name() + ']:not(.disabled, [disabled]) .range-slider-handle', function (e) {
      if (!self.cache.active) {
        e.preventDefault();
        self.set_active_slider($(e.target));
      }
    })
    // track the pointer while a drag is in progress
    .on('mousemove.fndtn.slider touchmove.fndtn.slider pointermove.fndtn.slider', function (e) {
      if (!!self.cache.active) {
        e.preventDefault();
        if ($.data(self.cache.active[0], 'settings').vertical) {
          var scroll_offset = 0;
          if (!e.pageY) {
            // clientY-based events need the page scroll added back
            scroll_offset = window.scrollY;
          }
          self.calculate_position(self.cache.active, self.get_cursor_position(e, 'y') + scroll_offset);
        } else {
          self.calculate_position(self.cache.active, self.get_cursor_position(e, 'x'));
        }
      }
    })
    .on('mouseup.fndtn.slider touchend.fndtn.slider pointerup.fndtn.slider', function (e) {
      if(!self.cache.active) {
        // if the user has just clicked into the slider without starting to drag the handle
        var slider = $(e.target).attr('role') === 'slider' ? $(e.target) : $(e.target).closest('.range-slider').find("[role='slider']");
        if (slider.length && (!slider.parent().hasClass('disabled') && !slider.parent().attr('disabled'))) {
          self.set_active_slider(slider);
          if ($.data(self.cache.active[0], 'settings').vertical) {
            var scroll_offset = 0;
            if (!e.pageY) {
              scroll_offset = window.scrollY;
            }
            self.calculate_position(self.cache.active, self.get_cursor_position(e, 'y') + scroll_offset);
          } else {
            self.calculate_position(self.cache.active, self.get_cursor_position(e, 'x'));
          }
        }
      }
      self.remove_active_slider();
    })
    // relay value changes to the user-supplied callback
    .on('change.fndtn.slider', function (e) {
      self.settings.on_change();
    });
  self.S(window)
    .on('resize.fndtn.slider', self.throttle(function (e) {
      self.reflow();
    }, 300));
  // update slider value as users change input value
  this.S('[' + this.attr_name() + ']').each(function () {
    var slider = $(this),
        handle = slider.children('.range-slider-handle')[0],
        settings = self.initialize_settings(handle);
    if (settings.display_selector != '') {
      $(settings.display_selector).each(function(){
        if ($(this).attr('value')) {
          $(this).off('change').on('change', function () {
            slider.foundation("slider", "set_value", $(this).val());
          });
        }
      });
    }
  });
},
get_cursor_position : function (e, xy) {
var pageXY = 'page' + xy.toUpperCase(),
clientXY = 'client' + xy.toUpperCase(),
position;
if (typeof e[pageXY] !== 'undefined') {
position = e[pageXY];
} else if (typeof e.originalEvent[clientXY] !== 'undefined') {
position = e.originalEvent[clientXY];
} else if (e.originalEvent.touches && e.originalEvent.touches[0] && typeof e.originalEvent.touches[0][clientXY] !== 'undefined') {
position = e.originalEvent.touches[0][clientXY];
} else if (e.currentPoint && typeof e.currentPoint[xy] !== 'undefined') {
position = e.currentPoint[xy];
}
return position;
},
// Remember which handle is currently being dragged.
set_active_slider : function ($handle) {
  this.cache.active = $handle;
},
// Forget the dragged handle (drag finished or cancelled).
remove_active_slider : function () {
  this.cache.active = null;
},
// Convert an absolute cursor coordinate into a slider value and render
// it on the next animation frame. Reads the bar/handle geometry cached
// on the handle element by initialize_settings.
calculate_position : function ($handle, cursor_x) {
  var self = this,
      settings = $.data($handle[0], 'settings'),
      handle_l = $.data($handle[0], 'handle_l'),
      handle_o = $.data($handle[0], 'handle_o'),
      bar_l = $.data($handle[0], 'bar_l'),
      bar_o = $.data($handle[0], 'bar_o');
  requestAnimationFrame(function () {
    var pct;
    if (Foundation.rtl && !settings.vertical) {
      // RTL: measure from the right end of the bar
      pct = self.limit_to(((bar_o + bar_l - cursor_x) / bar_l), 0, 1);
    } else {
      pct = self.limit_to(((cursor_x - bar_o) / bar_l), 0, 1);
    }
    // vertical sliders grow bottom-up, so invert the fraction
    pct = settings.vertical ? 1 - pct : pct;
    var norm = self.normalized_value(pct, settings.start, settings.end, settings.step, settings.precision);
    self.set_ui($handle, norm);
  });
},
// Render `value`: translate the handle, size the active segment, and
// sync the data attribute, hidden inputs, ARIA state and any
// display_selector elements.
set_ui : function ($handle, value) {
  var settings = $.data($handle[0], 'settings'),
      handle_l = $.data($handle[0], 'handle_l'),
      bar_l = $.data($handle[0], 'bar_l'),
      norm_pct = this.normalized_percentage(value, settings.start, settings.end),
      handle_offset = norm_pct * (bar_l - handle_l) - 1,
      progress_bar_length = norm_pct * 100,
      $handle_parent = $handle.parent(),
      $hidden_inputs = $handle.parent().children('input[type=hidden]');
  if (Foundation.rtl && !settings.vertical) {
    handle_offset = -handle_offset;
  }
  // vertical: translate upwards from the bottom of the bar
  handle_offset = settings.vertical ? -handle_offset + bar_l - handle_l + 1 : handle_offset;
  this.set_translate($handle, handle_offset, settings.vertical);
  if (settings.vertical) {
    $handle.siblings('.range-slider-active-segment').css('height', progress_bar_length + '%');
  } else {
    $handle.siblings('.range-slider-active-segment').css('width', progress_bar_length + '%');
  }
  $handle_parent.attr(this.attr_name(), value).trigger('change.fndtn.slider');
  $hidden_inputs.val(value);
  if (settings.trigger_input_change) {
    $hidden_inputs.trigger('change.fndtn.slider');
  }
  // set the static ARIA bounds only once
  if (!$handle[0].hasAttribute('aria-valuemin')) {
    $handle.attr({
      'aria-valuemin' : settings.start,
      'aria-valuemax' : settings.end
    });
  }
  $handle.attr('aria-valuenow', value);
  // mirror the value into any configured display elements
  if (settings.display_selector != '') {
    $(settings.display_selector).each(function () {
      if (this.hasAttribute('value')) {
        $(this).val(value);
      } else {
        $(this).text(value);
      }
    });
  }
},
normalized_percentage : function (val, start, end) {
return Math.min(1, (val - start) / (end - start));
},
normalized_value : function (val, start, end, step, precision) {
var range = end - start,
point = val * range,
mod = (point - (point % step)) / step,
rem = point % step,
round = ( rem >= step * 0.5 ? step : 0);
return ((mod * step + round) + start).toFixed(precision);
},
set_translate : function (ele, offset, vertical) {
if (vertical) {
$(ele)
.css('-webkit-transform', 'translateY(' + offset + 'px)')
.css('-moz-transform', 'translateY(' + offset + 'px)')
.css('-ms-transform', 'translateY(' + offset + 'px)')
.css('-o-transform', 'translateY(' + offset + 'px)')
.css('transform', 'translateY(' + offset + 'px)');
} else {
$(ele)
.css('-webkit-transform', 'translateX(' + offset + 'px)')
.css('-moz-transform', 'translateX(' + offset + 'px)')
.css('-ms-transform', 'translateX(' + offset + 'px)')
.css('-o-transform', 'translateX(' + offset + 'px)')
.css('transform', 'translateX(' + offset + 'px)');
}
},
limit_to : function (val, min, max) {
return Math.min(Math.max(val, min), max);
},
// Merge the defaults with the slider's data-options and cache the
// bar/handle geometry on the handle element. Returns the settings.
initialize_settings : function (handle) {
  var settings = $.extend({}, this.settings, this.data_options($(handle).parent())),
      decimal_places_match_result;
  if (settings.precision === null) {
    // derive precision from the number of decimals in `step`
    decimal_places_match_result = ('' + settings.step).match(/\.([\d]*)/);
    settings.precision = decimal_places_match_result && decimal_places_match_result[1] ? decimal_places_match_result[1].length : 0;
  }
  // cache bar origin/length and handle origin/length along the drag axis
  if (settings.vertical) {
    $.data(handle, 'bar_o', $(handle).parent().offset().top);
    $.data(handle, 'bar_l', $(handle).parent().outerHeight());
    $.data(handle, 'handle_o', $(handle).offset().top);
    $.data(handle, 'handle_l', $(handle).outerHeight());
  } else {
    $.data(handle, 'bar_o', $(handle).parent().offset().left);
    $.data(handle, 'bar_l', $(handle).parent().outerWidth());
    $.data(handle, 'handle_o', $(handle).offset().left);
    $.data(handle, 'handle_l', $(handle).outerWidth());
  }
  $.data(handle, 'bar', $(handle).parent());
  return $.data(handle, 'settings', settings);
},
// Render the handle at settings.initial, or — when no initial value was
// given — at the step nearest the midpoint of the range.
set_initial_position : function ($ele) {
  var settings = $.data($ele.children('.range-slider-handle')[0], 'settings'),
      initial = ((typeof settings.initial == 'number' && !isNaN(settings.initial)) ? settings.initial : Math.floor((settings.end - settings.start) * 0.5 / settings.step) * settings.step + settings.start),
      $handle = $ele.children('.range-slider-handle');
  this.set_ui($handle, initial);
},
set_value : function (value) {
var self = this;
$('[' + self.attr_name() + ']', this.scope).each(function () {
$(this).attr(self.attr_name(), value);
});
if (!!$(this.scope).attr(self.attr_name())) {
$(this.scope).attr(self.attr_name(), value);
}
self.reflow();
},
// Re-measure every slider and re-render its current value, or the
// initial position when no value attribute is present yet.
reflow : function () {
  var self = this;
  self.S('[' + this.attr_name() + ']').each(function () {
    var handle = $(this).children('.range-slider-handle')[0],
        val = $(this).attr(self.attr_name());
    self.initialize_settings(handle);
    if (val) {
      self.set_ui($(handle), parseFloat(val));
    } else {
      self.set_initial_position($(this));
    }
  });
}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.slider.js | foundation.slider.js |
;(function ($, window, document, undefined) {
'use strict';
var noop = function () {};
var Orbit = function (el, settings) {
// Don't reinitialize plugin
if (el.hasClass(settings.slides_container_class)) {
return this;
}
var self = this,
container,
slides_container = el,
number_container,
bullets_container,
timer_container,
idx = 0,
animate,
timer,
locked = false,
adjust_height_after = false;
self.slides = function () {
return slides_container.children(settings.slide_selector);
};
self.slides().first().addClass(settings.active_slide_class);
self.update_slide_number = function (index) {
if (settings.slide_number) {
number_container.find('span:first').text(parseInt(index) + 1);
number_container.find('span:last').text(self.slides().length);
}
if (settings.bullets) {
bullets_container.children().removeClass(settings.bullets_active_class);
$(bullets_container.children().get(index)).addClass(settings.bullets_active_class);
}
};
self.update_active_link = function (index) {
var link = $('[data-orbit-link="' + self.slides().eq(index).attr('data-orbit-slide') + '"]');
link.siblings().removeClass(settings.bullets_active_class);
link.addClass(settings.bullets_active_class);
};
self.build_markup = function () {
slides_container.wrap('<div class="' + settings.container_class + '"></div>');
container = slides_container.parent();
slides_container.addClass(settings.slides_container_class);
if (settings.stack_on_small) {
container.addClass(settings.stack_on_small_class);
}
if (settings.navigation_arrows) {
container.append($('<a href="#"><span></span></a>').addClass(settings.prev_class));
container.append($('<a href="#"><span></span></a>').addClass(settings.next_class));
}
if (settings.timer) {
timer_container = $('<div>').addClass(settings.timer_container_class);
timer_container.append('<span>');
timer_container.append($('<div>').addClass(settings.timer_progress_class));
timer_container.addClass(settings.timer_paused_class);
container.append(timer_container);
}
if (settings.slide_number) {
number_container = $('<div>').addClass(settings.slide_number_class);
number_container.append('<span></span> ' + settings.slide_number_text + ' <span></span>');
container.append(number_container);
}
if (settings.bullets) {
bullets_container = $('<ol>').addClass(settings.bullets_container_class);
container.append(bullets_container);
bullets_container.wrap('<div class="orbit-bullets-container"></div>');
self.slides().each(function (idx, el) {
var bullet = $('<li>').attr('data-orbit-slide', idx).on('click', self.link_bullet);;
bullets_container.append(bullet);
});
}
};
// Transition from the current slide (idx) to next_idx.
// Normalizes out-of-range targets (wrapping only when settings.circular),
// fires before/after hooks, and optionally restarts the auto-advance timer.
// Returns false when the move is rejected (same slide, non-circular edge,
// or single-slide deck).
self._goto = function (next_idx, start_timer) {
  // if (locked) {return false;}
  if (next_idx === idx) {return false;}
  if (typeof timer === 'object') {timer.restart();}
  var slides = self.slides();
  var dir = 'next';
  locked = true;
  if (next_idx < idx) {dir = 'prev';}
  // Wrap around the ends only in circular mode; otherwise refuse the move.
  if (next_idx >= slides.length) {
    if (!settings.circular) {
      return false;
    }
    next_idx = 0;
  } else if (next_idx < 0) {
    if (!settings.circular) {
      return false;
    }
    next_idx = slides.length - 1;
  }
  var current = $(slides.get(idx));
  var next = $(slides.get(next_idx));
  // Stack the incoming slide above the outgoing one during the animation.
  current.css('zIndex', 2);
  current.removeClass(settings.active_slide_class);
  next.css('zIndex', 4).addClass(settings.active_slide_class);
  slides_container.trigger('before-slide-change.fndtn.orbit');
  settings.before_slide_change();
  self.update_active_link(next_idx);
  // Runs after the slide animation: commits idx, releases the lock,
  // optionally restarts the timer, and fires the "after" hooks.
  var callback = function () {
    var unlock = function () {
      idx = next_idx;
      locked = false;
      if (start_timer === true) {timer = self.create_timer(); timer.start();}
      self.update_slide_number(idx);
      slides_container.trigger('after-slide-change.fndtn.orbit', [{slide_number : idx, total_slides : slides.length}]);
      settings.after_slide_change(idx, slides.length);
    };
    // In variable-height mode, ease the container to the new slide's height first.
    if (slides_container.outerHeight() != next.outerHeight() && settings.variable_height) {
      slides_container.animate({'height': next.outerHeight()}, 250, 'linear', unlock);
    } else {
      unlock();
    }
  };
  // A single-slide deck has nothing to animate; just run the bookkeeping.
  if (slides.length === 1) {callback(); return false;}
  var start_animation = function () {
    if (dir === 'next') {animate.next(current, next, callback);}
    if (dir === 'prev') {animate.prev(current, next, callback);}
  };
  // Grow the container before animating when the incoming slide is taller.
  if (next.outerHeight() > slides_container.outerHeight() && settings.variable_height) {
    slides_container.animate({'height': next.outerHeight()}, 250, 'linear', start_animation);
  } else {
    start_animation();
  }
};
// Handler for the "next" arrow: consume the triggering event entirely,
// then step forward one slide.
self.next = function (e) {
  e.preventDefault();
  e.stopImmediatePropagation();
  self._goto(idx + 1);
};
// Handler for the "previous" arrow: consume the triggering event entirely,
// then step back one slide.
self.prev = function (e) {
  e.preventDefault();
  e.stopImmediatePropagation();
  self._goto(idx - 1);
};
// Click handler for external [data-orbit-link] triggers anywhere in the
// document: find the slide whose data-orbit-slide matches the link value
// and jump to it.
self.link_custom = function (e) {
  e.preventDefault();
  var link = $(this).attr('data-orbit-link');
  // Note: the assignment inside the condition trims the value in place.
  if ((typeof link === 'string') && (link = $.trim(link)) != '') {
    var slide = container.find('[data-orbit-slide=' + link + ']');
    // slide.index() is -1 when no matching slide exists.
    if (slide.index() != -1) {self._goto(slide.index());}
  }
};
// Jump to the slide referenced by a bullet's data-orbit-slide value.
// The value may be a numeric slide index or a named slide identifier.
self.link_bullet = function (e) {
  var index = $(this).attr('data-orbit-slide');
  // Note: the assignment inside the condition trims the value in place.
  if ((typeof index === 'string') && (index = $.trim(index)) != '') {
    // Radix 10 avoids legacy octal interpretation of values with
    // leading zeros in older engines.
    if (isNaN(parseInt(index, 10))) {
      // Not a number: treat it as a named slide and look it up.
      var slide = container.find('[data-orbit-slide=' + index + ']');
      if (slide.index() != -1) {self._goto(slide.index() + 1);}
    } else {
      // Numeric index: go straight to that slide.
      self._goto(parseInt(index, 10));
    }
  }
};
// Fired by the Timer when its period elapses: advance to the following
// slide and ask _goto to restart the timer once the transition completes.
self.timer_callback = function () {
  var restart_timer = true;
  self._goto(idx + 1, restart_timer);
};
// Size the slides container. In variable-height mode it hugs the active
// slide; otherwise it must be tall enough for the tallest slide.
self.compute_dimensions = function () {
  var height = $(self.slides().get(idx)).outerHeight();
  if (!settings.variable_height) {
    self.slides().each(function () {
      height = Math.max(height, $(this).outerHeight());
    });
  }
  slides_container.height(height);
};
// Build a fresh Timer bound to this container's timer element; it invokes
// timer_callback each time the auto-advance interval elapses.
self.create_timer = function () {
  var timer_el = container.find('.' + settings.timer_container_class);
  return new Timer(timer_el, settings, self.timer_callback);
};
// Pause the auto-advance timer, if one has been created.
self.stop_timer = function () {
  if (typeof timer === 'object') {timer.stop();}
};
// Toggle playback: if the timer element is marked paused, (re)start the
// timer (creating it lazily on first use); otherwise stop it.
self.toggle_timer = function () {
  var t = container.find('.' + settings.timer_container_class);
  if (t.hasClass(settings.timer_paused_class)) {
    if (typeof timer === 'undefined') {timer = self.create_timer();}
    timer.start();
  } else {
    if (typeof timer === 'object') {timer.stop();}
  }
};
// One-time setup: build the DOM chrome, pick the animation strategy,
// and wire every click/touch/hover/resize handler for this instance.
self.init = function () {
  self.build_markup();
  if (settings.timer) {
    timer = self.create_timer();
    // Only start auto-advancing once the slide images have loaded.
    Foundation.utils.image_loaded(this.slides().children('img'), timer.start);
  }
  // Default to fading; 'slide' switches to the sliding strategy.
  animate = new FadeAnimation(settings, slides_container);
  if (settings.animation === 'slide') {
    animate = new SlideAnimation(settings, slides_container);
  }
  container.on('click', '.' + settings.next_class, self.next);
  container.on('click', '.' + settings.prev_class, self.prev);
  if (settings.next_on_click) {
    container.on('click', '.' + settings.slides_container_class + ' [data-orbit-slide]', self.link_bullet);
  }
  container.on('click', self.toggle_timer);
  if (settings.swipe) {
    // Track touch gestures: record the start point, then on move decide
    // whether the gesture is a horizontal swipe (slide change) or a
    // vertical scroll (let the browser handle it).
    container.on('touchstart.fndtn.orbit', function (e) {
      if (!e.touches) {e = e.originalEvent;}
      var data = {
        start_page_x : e.touches[0].pageX,
        start_page_y : e.touches[0].pageY,
        start_time : (new Date()).getTime(),
        delta_x : 0,
        is_scrolling : undefined
      };
      container.data('swipe-transition', data);
      e.stopPropagation();
    })
    .on('touchmove.fndtn.orbit', function (e) {
      if (!e.touches) {
        e = e.originalEvent;
      }
      // Ignore pinch/zoom events
      if (e.touches.length > 1 || e.scale && e.scale !== 1) {
        return;
      }
      var data = container.data('swipe-transition');
      if (typeof data === 'undefined') {data = {};}
      data.delta_x = e.touches[0].pageX - data.start_page_x;
      // A mostly-vertical movement is a scroll, not a swipe.
      if ( typeof data.is_scrolling === 'undefined') {
        data.is_scrolling = !!( data.is_scrolling || Math.abs(data.delta_x) < Math.abs(e.touches[0].pageY - data.start_page_y) );
      }
      if (!data.is_scrolling && !data.active) {
        e.preventDefault();
        var direction = (data.delta_x < 0) ? (idx + 1) : (idx - 1);
        data.active = true;
        self._goto(direction);
      }
    })
    .on('touchend.fndtn.orbit', function (e) {
      container.data('swipe-transition', {});
      e.stopPropagation();
    })
  }
  container.on('mouseenter.fndtn.orbit', function (e) {
    if (settings.timer && settings.pause_on_hover) {
      self.stop_timer();
    }
  })
  .on('mouseleave.fndtn.orbit', function (e) {
    if (settings.timer && settings.resume_on_mouseout) {
      timer.start();
    }
  });
  $(document).on('click', '[data-orbit-link]', self.link_custom);
  $(window).on('load resize', self.compute_dimensions);
  Foundation.utils.image_loaded(this.slides().children('img'), self.compute_dimensions);
  // Once images are in, hide the preloader and announce readiness.
  Foundation.utils.image_loaded(this.slides().children('img'), function () {
    container.prev('.' + settings.preloader_class).css('display', 'none');
    self.update_slide_number(0);
    self.update_active_link(0);
    slides_container.trigger('ready.fndtn.orbit');
  });
};
self.init();
};
// Countdown timer driving Orbit's auto-advance. Animates a progress bar
// inside `el` over `duration` ms and invokes `callback` when time is up.
// `left` holds the remaining milliseconds; -1 means "not started / reset".
var Timer = function (el, settings, callback) {
  var self = this,
      duration = settings.timer_speed,
      progress = el.find('.' + settings.timer_progress_class),
      start,
      timeout,
      left = -1;
  // Replace the progress bar with a clone at `w` percent width; cloning
  // clears any in-flight jQuery animation on the old element.
  this.update_progress = function (w) {
    var new_progress = progress.clone();
    new_progress.attr('style', '');
    new_progress.css('width', w + '%');
    progress.replaceWith(new_progress);
    progress = new_progress;
  };
  // Cancel the pending tick and reset to the paused, zero-progress state.
  this.restart = function () {
    clearTimeout(timeout);
    el.addClass(settings.timer_paused_class);
    left = -1;
    self.update_progress(0);
  };
  // Start (or resume) the countdown; no-op when already running.
  this.start = function () {
    if (!el.hasClass(settings.timer_paused_class)) {return true;}
    left = (left === -1) ? duration : left;
    el.removeClass(settings.timer_paused_class);
    start = new Date().getTime();
    progress.animate({'width' : '100%'}, left, 'linear');
    timeout = setTimeout(function () {
      self.restart();
      callback();
    }, left);
    el.trigger('timer-started.fndtn.orbit')
  };
  // Pause the countdown, remembering how much time remains.
  this.stop = function () {
    if (el.hasClass(settings.timer_paused_class)) {return true;}
    clearTimeout(timeout);
    el.addClass(settings.timer_paused_class);
    var end = new Date().getTime();
    left = left - (end - start);
    var w = 100 - ((left / duration) * 100);
    self.update_progress(w);
    el.trigger('timer-stopped.fndtn.orbit');
  };
};
// Horizontal slide transition: slides sit side by side and are moved by
// animating their leading margin (marginRight in RTL documents).
var SlideAnimation = function (settings, container) {
  var duration = settings.animation_speed;
  var is_rtl = ($('html[dir=rtl]').length === 1);
  var margin = is_rtl ? 'marginRight' : 'marginLeft';
  var animMargin = {};
  animMargin[margin] = '0%';
  // NOTE(review): the outgoing slide is always animated via marginLeft
  // below, even though `margin` resolves to marginRight in RTL — confirm
  // whether RTL slide-out is intended to behave this way.
  this.next = function (current, next, callback) {
    current.animate({marginLeft : '-100%'}, duration);
    next.animate(animMargin, duration, function () {
      // Park the outgoing slide off-stage once the incoming one is in place.
      current.css(margin, '100%');
      callback();
    });
  };
  this.prev = function (current, prev, callback) {
    current.animate({marginLeft : '100%'}, duration);
    prev.css(margin, '-100%');
    prev.animate(animMargin, duration, function () {
      current.css(margin, '100%');
      callback();
    });
  };
};
// Cross-fade transition: the incoming slide is stacked over the outgoing
// one (margin 0), faded in from near-transparent, and the outgoing slide
// is then parked off-stage by resetting its margin.
// (Removed unused locals `is_rtl`/`margin` — they were computed but never
// read, and cost a DOM query per instance.)
var FadeAnimation = function (settings, container) {
  var duration = settings.animation_speed;
  this.next = function (current, next, callback) {
    next.css({'margin' : '0%', 'opacity' : '0.01'});
    next.animate({'opacity' : '1'}, duration, 'linear', function () {
      current.css('margin', '100%');
      callback();
    });
  };
  this.prev = function (current, prev, callback) {
    prev.css({'margin' : '0%', 'opacity' : '0.01'});
    prev.animate({'opacity' : '1'}, duration, 'linear', function () {
      current.css('margin', '100%');
      callback();
    });
  };
};
Foundation.libs = Foundation.libs || {};
// Plugin descriptor registered with Foundation: default settings plus the
// init/events/reflow lifecycle hooks the framework calls.
Foundation.libs.orbit = {
  name : 'orbit',
  version : '5.5.3',
  settings : {
    animation : 'slide',
    timer_speed : 10000,
    pause_on_hover : true,
    resume_on_mouseout : false,
    next_on_click : true,
    animation_speed : 500,
    stack_on_small : false,
    navigation_arrows : true,
    slide_number : true,
    slide_number_text : 'of',
    container_class : 'orbit-container',
    stack_on_small_class : 'orbit-stack-on-small',
    next_class : 'orbit-next',
    prev_class : 'orbit-prev',
    timer_container_class : 'orbit-timer',
    timer_paused_class : 'paused',
    timer_progress_class : 'orbit-progress',
    slides_container_class : 'orbit-slides-container',
    preloader_class : 'preloader',
    slide_selector : '*',
    bullets_container_class : 'orbit-bullets',
    bullets_active_class : 'active',
    slide_number_class : 'orbit-slide-number',
    caption_class : 'orbit-caption',
    active_slide_class : 'active',
    orbit_transition_class : 'orbit-transitioning',
    bullets : true,
    circular : true,
    timer : true,
    variable_height : false,
    swipe : true,
    before_slide_change : noop,
    after_slide_change : noop
  },
  // Framework entry point: delegate to the shared binding machinery.
  // (Removed an unused `var self = this;`.)
  init : function (scope, method, options) {
    this.bindings(method, options);
  },
  // Instantiate one Orbit controller per matched element and remember it
  // on the element so reflow() can find it later.
  events : function (instance) {
    var orbit_instance = new Orbit(this.S(instance), this.S(instance).data('orbit-init'));
    this.S(instance).data(this.name + '-instance', orbit_instance);
  },
  // Recompute container heights for the scoped instance(s).
  // (Removed an unused `opts` local that was computed but never read.)
  reflow : function () {
    var self = this;
    if (self.S(self.scope).is('[data-orbit]')) {
      var $el = self.S(self.scope);
      var instance = $el.data(self.name + '-instance');
      instance.compute_dimensions();
    } else {
      self.S('[data-orbit]', self.scope).each(function (idx, el) {
        var $el = self.S(el);
        var instance = $el.data(self.name + '-instance');
        instance.compute_dimensions();
      });
    }
  }
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.orbit.js | foundation.orbit.js |
;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.tab = {
name : 'tab',
version : '5.5.3',
settings : {
active_class : 'active',
callback : function () {},
deep_linking : false,
scroll_to_content : true,
is_hover : false
},
default_tab_hashes : [],
// Plugin entry point: remember the initially-active tabs, bind handlers,
// and immediately apply any deep link present in the location hash.
init : function (scope, method, options) {
  var self = this,
      S = this.S;
  // Store the default active tabs which will be referenced when the
  // location hash is absent, as in the case of navigating the tabs and
  // returning to the first viewing via the browser Back button.
  S('[' + this.attr_name() + '] > .active > a', this.scope).each(function () {
    self.default_tab_hashes.push(this.hash);
  });
  this.bindings(method, options);
  this.handle_location_hash_change();
},
// Bind keyboard, click, and hover activation for tab titles, plus the
// hashchange listener that drives deep linking.
events : function () {
  var self = this,
      S = this.S;
  // Shared activation path for key and click events.
  var usual_tab_behavior = function (e, target) {
    var settings = S(target).closest('[' + self.attr_name() + ']').data(self.attr_name(true) + '-init');
    if (!settings.is_hover || Modernizr.touch) {
      // if the user did not press the Tab key, prevent the default action
      var keyCode = e.keyCode || e.which;
      if (keyCode !== 9) {
        e.preventDefault();
        e.stopPropagation();
      }
      self.toggle_active_tab(S(target).parent());
    }
  };
  S(this.scope)
    .off('.tab')
    // Key event: focus/tab key
    .on('keydown.fndtn.tab', '[' + this.attr_name() + '] > * > a', function(e) {
      var keyCode = e.keyCode || e.which;
      // if user pressed tab key
      if (keyCode === 13 || keyCode === 32) { // enter or space
        var el = this;
        usual_tab_behavior(e, el);
      }
    })
    // Click event: tab title
    .on('click.fndtn.tab', '[' + this.attr_name() + '] > * > a', function(e) {
      var el = this;
      usual_tab_behavior(e, el);
    })
    // Hover event: tab title
    .on('mouseenter.fndtn.tab', '[' + this.attr_name() + '] > * > a', function (e) {
      var settings = S(this).closest('[' + self.attr_name() + ']').data(self.attr_name(true) + '-init');
      if (settings.is_hover) {
        self.toggle_active_tab(S(this).parent());
      }
    });
  // Location hash change event
  S(window).on('hashchange.fndtn.tab', function (e) {
    e.preventDefault();
    self.handle_location_hash_change();
  });
},
// When deep linking is enabled, resolve the current location hash to a
// tab and activate it; with an empty hash, restore the default tabs.
handle_location_hash_change : function () {
  var self = this,
      S = this.S;
  S('[' + this.attr_name() + ']', this.scope).each(function () {
    var settings = S(this).data(self.attr_name(true) + '-init');
    if (settings.deep_linking) {
      // Match the location hash to a label
      var hash;
      if (settings.scroll_to_content) {
        hash = self.scope.location.hash;
      } else {
        // prefix the hash to prevent anchor scrolling
        hash = self.scope.location.hash.replace('fndtn-', '');
      }
      if (hash != '') {
        // Check whether the location hash references a tab content div or
        // another element on the page (inside or outside the tab content div)
        var hash_element = S(hash);
        if (hash_element.hasClass('content') && hash_element.parent().hasClass('tabs-content')) {
          // Tab content div
          self.toggle_active_tab($('[' + self.attr_name() + '] > * > a[href=' + hash + ']').parent());
        } else {
          // Not the tab content div. If inside the tab content, find the
          // containing tab and toggle it as active.
          var hash_tab_container_id = hash_element.closest('.content').attr('id');
          if (hash_tab_container_id != undefined) {
            self.toggle_active_tab($('[' + self.attr_name() + '] > * > a[href=#' + hash_tab_container_id + ']').parent(), hash);
          }
        }
      } else {
        // Reference the default tab hashes which were initialized in the init function
        for (var ind = 0; ind < self.default_tab_hashes.length; ind++) {
          self.toggle_active_tab($('[' + self.attr_name() + '] > * > a[href=' + self.default_tab_hashes[ind] + ']').parent());
        }
      }
    }
  });
},
// Activate `tab` (an <li>): update the location hash when deep linking,
// swap active classes on the tab and its target panel, and maintain the
// ARIA attributes plus arrow-key navigation between tabs.
toggle_active_tab : function (tab, location_hash) {
  var self = this,
      S = self.S,
      tabs = tab.closest('[' + this.attr_name() + ']'),
      tab_link = tab.find('a'),
      anchor = tab.children('a').first(),
      target_hash = '#' + anchor.attr('href').split('#')[1],
      target = S(target_hash),
      siblings = tab.siblings(),
      settings = tabs.data(this.attr_name(true) + '-init'),
      interpret_keyup_action = function (e) {
        // Light modification of Heydon Pickering's Practical ARIA Examples: http://heydonworks.com/practical_aria_examples/js/a11y.js
        // define current, previous and next (possible) tabs
        var $original = $(this);
        var $prev = $(this).parents('li').prev().children('[role="tab"]');
        var $next = $(this).parents('li').next().children('[role="tab"]');
        var $target;
        // find the direction (prev or next)
        switch (e.keyCode) {
          case 37:
            $target = $prev;
            break;
          case 39:
            $target = $next;
            break;
          default:
            $target = false
            break;
        }
        if ($target.length) {
          // Move keyboard focus and ARIA selection to the neighbouring tab.
          $original.attr({
            'tabindex' : '-1',
            'aria-selected' : null
          });
          $target.attr({
            'tabindex' : '0',
            'aria-selected' : true
          }).focus();
        }
        // Hide panels
        $('[role="tabpanel"]')
          .attr('aria-hidden', 'true');
        // Show panel which corresponds to target
        $('#' + $(document.activeElement).attr('href').substring(1))
          .attr('aria-hidden', null);
      },
      go_to_hash = function(hash) {
        // This function allows correct behaviour of the browser's back button when deep linking is enabled. Without it
        // the user would get continually redirected to the default hash.
        var default_hash = settings.scroll_to_content ? self.default_tab_hashes[0] : 'fndtn-' + self.default_tab_hashes[0].replace('#', '');
        if (hash !== default_hash || window.location.hash) {
          window.location.hash = hash;
        }
      };
  // allow usage of data-tab-content attribute instead of href
  if (anchor.data('tab-content')) {
    target_hash = '#' + anchor.data('tab-content').split('#')[1];
    target = S(target_hash);
  }
  if (settings.deep_linking) {
    if (settings.scroll_to_content) {
      // retain current hash to scroll to content
      go_to_hash(location_hash || target_hash);
      if (location_hash == undefined || location_hash == target_hash) {
        tab.parent()[0].scrollIntoView();
      } else {
        S(target_hash)[0].scrollIntoView();
      }
    } else {
      // prefix the hashes so that the browser doesn't scroll down
      if (location_hash != undefined) {
        go_to_hash('fndtn-' + location_hash.replace('#', ''));
      } else {
        go_to_hash('fndtn-' + target_hash.replace('#', ''));
      }
    }
  }
  // WARNING: The activation and deactivation of the tab content must
  // occur after the deep linking in order to properly refresh the browser
  // window (notably in Chrome).
  // Clean up multiple attr instances to done once
  tab.addClass(settings.active_class).triggerHandler('opened');
  tab_link.attr({'aria-selected' : 'true', tabindex : 0});
  siblings.removeClass(settings.active_class)
  siblings.find('a').attr({'aria-selected' : 'false'/*, tabindex : -1*/});
  target.siblings().removeClass(settings.active_class).attr({'aria-hidden' : 'true'/*, tabindex : -1*/});
  target.addClass(settings.active_class).attr('aria-hidden', 'false').removeAttr('tabindex');
  settings.callback(tab);
  target.triggerHandler('toggled', [target]);
  tabs.triggerHandler('toggled', [tab]);
  tab_link.off('keydown').on('keydown', interpret_keyup_action );
},
data_attr : function (str) {
if (this.namespace.length > 0) {
return this.namespace + '-' + str;
}
return str;
},
// No teardown needed for tabs; stub required by the plugin interface.
off : function () {},
// No re-layout needed on resize; stub required by the plugin interface.
reflow : function () {}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.tab.js | foundation.tab.js |
;(function ($, window, document, undefined) {
'use strict';
var openModals = [];
Foundation.libs.reveal = {
name : 'reveal',
version : '5.5.3',
locked : false,
settings : {
animation : 'fadeAndPop',
animation_speed : 250,
close_on_background_click : true,
close_on_esc : true,
dismiss_modal_class : 'close-reveal-modal',
multiple_opened : false,
bg_class : 'reveal-modal-bg',
root_element : 'body',
open : function(){},
opened : function(){},
close : function(){},
closed : function(){},
on_ajax_error: $.noop,
bg : $('.reveal-modal-bg'),
css : {
open : {
'opacity' : 0,
'visibility' : 'visible',
'display' : 'block'
},
close : {
'opacity' : 1,
'visibility' : 'hidden',
'display' : 'none'
}
}
},
// Plugin entry point: deep-merge per-call options into the shared
// settings, then attach the event bindings.
init : function (scope, method, options) {
  $.extend(true, this.settings, method, options);
  this.bindings(method, options);
},
// Wire the open triggers ([data-reveal-id] elements), the close targets
// (dismiss button / background overlay), and the user-supplied lifecycle
// callbacks for every reveal modal in scope.
events : function (scope) {
  var self = this,
      S = self.S;
  S(this.scope)
    .off('.reveal')
    .on('click.fndtn.reveal', '[' + this.add_namespace('data-reveal-id') + ']:not([disabled])', function (e) {
      e.preventDefault();
      if (!self.locked) {
        var element = S(this),
            ajax = element.data(self.data_attr('reveal-ajax')),
            replaceContentSel = element.data(self.data_attr('reveal-replace-content'));
        // Lock until the open/close animation settles to avoid re-entry.
        self.locked = true;
        if (typeof ajax === 'undefined') {
          self.open.call(self, element);
        } else {
          var url = ajax === true ? element.attr('href') : ajax;
          // NOTE(review): open() declares only two parameters, so the third
          // argument (replaceContentSel) is never received — confirm upstream.
          self.open.call(self, element, {url : url}, { replaceContentSel : replaceContentSel });
        }
      }
    });
  S(document)
    .on('click.fndtn.reveal', this.close_targets(), function (e) {
      e.preventDefault();
      if (!self.locked) {
        var settings = S('[' + self.attr_name() + '].open').data(self.attr_name(true) + '-init') || self.settings,
            bg_clicked = S(e.target)[0] === S('.' + settings.bg_class)[0];
        if (bg_clicked) {
          if (settings.close_on_background_click) {
            e.stopPropagation();
          } else {
            return;
          }
        }
        self.locked = true;
        self.close.call(self, bg_clicked ? S('[' + self.attr_name() + '].open:not(.toback)') : S(this).closest('[' + self.attr_name() + ']'));
      }
    });
  // Bind lifecycle callbacks directly when modals exist now, otherwise
  // delegate so late-added modals still fire them.
  if (S('[' + self.attr_name() + ']', this.scope).length > 0) {
    S(this.scope)
      // .off('.reveal')
      .on('open.fndtn.reveal', this.settings.open)
      .on('opened.fndtn.reveal', this.settings.opened)
      .on('opened.fndtn.reveal', this.open_video)
      .on('close.fndtn.reveal', this.settings.close)
      .on('closed.fndtn.reveal', this.settings.closed)
      .on('closed.fndtn.reveal', this.close_video);
  } else {
    S(this.scope)
      // .off('.reveal')
      .on('open.fndtn.reveal', '[' + self.attr_name() + ']', this.settings.open)
      .on('opened.fndtn.reveal', '[' + self.attr_name() + ']', this.settings.opened)
      .on('opened.fndtn.reveal', '[' + self.attr_name() + ']', this.open_video)
      .on('close.fndtn.reveal', '[' + self.attr_name() + ']', this.settings.close)
      .on('closed.fndtn.reveal', '[' + self.attr_name() + ']', this.settings.closed)
      .on('closed.fndtn.reveal', '[' + self.attr_name() + ']', this.close_video);
  }
  return true;
},
// PATCH #3: turning on key up capture only when a reveal window is open
// PATCH #3: turning on key up capture only when a reveal window is open
key_up_on : function (scope) {
  var self = this;
  // PATCH #1: fixing multiple keyup event trigger from single key press
  self.S('body').off('keyup.fndtn.reveal').on('keyup.fndtn.reveal', function ( event ) {
    var open_modal = self.S('[' + self.attr_name() + '].open'),
        settings = open_modal.data(self.attr_name(true) + '-init') || self.settings ;
    // PATCH #2: making sure that the close event can be called only while unlocked,
    // so that multiple keyup.fndtn.reveal events don't prevent clean closing of the reveal window.
    if ( settings && event.which === 27 && settings.close_on_esc && !self.locked) { // 27 is the keycode for the Escape key
      self.close.call(self, open_modal);
    }
  });
  return true;
},
// PATCH #3: turning on key up capture only when a reveal window is open
key_up_off : function (scope) {
this.S('body').off('keyup.fndtn.reveal');
return true;
},
// Open a modal. `target` is either the trigger element (its
// data-reveal-id names the modal) or an ajax settings object/url; when
// ajax settings carry a url the modal content is fetched first.
open : function (target, ajax_settings) {
  var self = this,
      modal;
  if (target) {
    if (typeof target.selector !== 'undefined') {
      // Find the named node; only use the first one found, since the rest of the code assumes there's only one node
      modal = self.S('#' + target.data(self.data_attr('reveal-id'))).first();
    } else {
      modal = self.S(this.scope);
      ajax_settings = target;
    }
  } else {
    modal = self.S(this.scope);
  }
  var settings = modal.data(self.attr_name(true) + '-init');
  settings = settings || this.settings;
  // Clicking the same trigger while its modal is open toggles it closed.
  if (modal.hasClass('open') && target !== undefined && target.attr('data-reveal-id') == modal.attr('id')) {
    return self.close(modal);
  }
  if (!modal.hasClass('open')) {
    var open_modal = self.S('[' + self.attr_name() + '].open');
    // Cache the CSS top and measured offset once; used by the pop animation.
    if (typeof modal.data('css-top') === 'undefined') {
      modal.data('css-top', parseInt(modal.css('top'), 10))
        .data('offset', this.cache_offset(modal));
    }
    modal.attr('tabindex','0').attr('aria-hidden','false');
    this.key_up_on(modal); // PATCH #3: turning on key up capture only when a reveal window is open
    // Prevent namespace event from triggering twice
    modal.on('open.fndtn.reveal', function(e) {
      if (e.namespace !== 'fndtn.reveal') return;
    });
    modal.on('open.fndtn.reveal').trigger('open.fndtn.reveal');
    if (open_modal.length < 1) {
      this.toggle_bg(modal, true);
    }
    if (typeof ajax_settings === 'string') {
      ajax_settings = {
        url : ajax_settings
      };
    }
    // Hide (or background) any currently-open modal, then show this one.
    var openModal = function() {
      if(open_modal.length > 0) {
        if(settings.multiple_opened) {
          self.to_back(open_modal);
        } else {
          self.hide(open_modal, settings.css.close);
        }
      }
      // bl: add the open_modal that isn't already in the background to the openModals array
      if(settings.multiple_opened) {
        openModals.push(modal);
      }
      self.show(modal, settings.css.open);
    };
    if (typeof ajax_settings === 'undefined' || !ajax_settings.url) {
      openModal();
    } else {
      var old_success = typeof ajax_settings.success !== 'undefined' ? ajax_settings.success : null;
      $.extend(ajax_settings, {
        success : function (data, textStatus, jqXHR) {
          if ( $.isFunction(old_success) ) {
            var result = old_success(data, textStatus, jqXHR);
            if (typeof result == 'string') {
              data = result;
            }
          }
          // NOTE(review): 'options' is not declared in this scope, so the
          // typeof guard is always 'undefined' and replaceContentSel is
          // effectively ignored — confirm against upstream before relying on it.
          if (typeof options !== 'undefined' && typeof options.replaceContentSel !== 'undefined') {
            modal.find(options.replaceContentSel).html(data);
          } else {
            modal.html(data);
          }
          self.S(modal).foundation('section', 'reflow');
          self.S(modal).children().foundation();
          openModal();
        }
      });
      // check for if user initialized with error callback
      if (settings.on_ajax_error !== $.noop) {
        $.extend(ajax_settings, {
          error : settings.on_ajax_error
        });
      }
      $.ajax(ajax_settings);
    }
  }
  self.S(window).trigger('resize');
},
// Close a modal, maintaining the openModals stack when multiple_opened is
// enabled: the previous modal in the stack (if any) is brought back to
// the front, and the background is hidden when no modal remains.
close : function (modal) {
  var modal = modal && modal.length ? modal : this.S(this.scope),
      open_modals = this.S('[' + this.attr_name() + '].open'),
      settings = modal.data(this.attr_name(true) + '-init') || this.settings,
      self = this;
  if (open_modals.length > 0) {
    // (removeAttr only uses its first argument; the '0' is ignored.)
    modal.removeAttr('tabindex','0').attr('aria-hidden','true');
    this.locked = true;
    this.key_up_off(modal); // PATCH #3: turning on key up capture only when a reveal window is open
    modal.trigger('close.fndtn.reveal');
    if ((settings.multiple_opened && open_modals.length === 1) || !settings.multiple_opened || modal.length > 1) {
      self.toggle_bg(modal, false);
      self.to_front(modal);
    }
    if (settings.multiple_opened) {
      var isCurrent = modal.is(':not(.toback)');
      self.hide(modal, settings.css.close, settings);
      if(isCurrent) {
        // remove the last modal since it is now closed
        openModals.pop();
      } else {
        // if this isn't the current modal, then find it in the array and remove it
        openModals = $.grep(openModals, function(elt) {
          var isThis = elt[0]===modal[0];
          if(isThis) {
            // since it's not currently in the front, put it in the front now that it is hidden
            // so that if it's re-opened, it won't be .toback
            self.to_front(modal);
          }
          return !isThis;
        });
      }
      // finally, show the next modal in the stack, if there is one
      if(openModals.length>0) {
        self.to_front(openModals[openModals.length - 1]);
      }
    } else {
      self.hide(open_modals, settings.css.close, settings);
    }
  }
},
close_targets : function () {
var base = '.' + this.settings.dismiss_modal_class;
if (this.settings.close_on_background_click) {
return base + ', .' + this.settings.bg_class;
}
return base;
},
// Show or hide the shared background overlay, creating it on first use.
// `state` true forces show, false forces hide; when it already matches
// the overlay's visibility nothing happens.
toggle_bg : function (modal, state) {
  if (this.S('.' + this.settings.bg_class).length === 0) {
    this.settings.bg = $('<div />', {'class': this.settings.bg_class})
      .appendTo('body').hide();
  }
  var visible = this.settings.bg.filter(':visible').length > 0;
  if ( state != visible ) {
    if ( state == undefined ? visible : !state ) {
      this.hide(this.settings.bg);
    } else {
      this.show(this.settings.bg);
    }
  }
},
// Show a modal (when `css` is given) or the background overlay (when it
// is not). Modals may be re-parented under settings.root_element for the
// duration they are open, and are animated per settings.animation.
show : function (el, css) {
  // is modal
  if (css) {
    var settings = el.data(this.attr_name(true) + '-init') || this.settings,
        root_element = settings.root_element,
        context = this;
    // Temporarily move the modal under root_element; a hidden placeholder
    // marks its original position so it can be restored on close.
    if (el.parent(root_element).length === 0) {
      var placeholder = el.wrap('<div style="display: none;" />').parent();
      el.on('closed.fndtn.reveal.wrapped', function () {
        el.detach().appendTo(placeholder);
        el.unwrap().unbind('closed.fndtn.reveal.wrapped');
      });
      el.detach().appendTo(root_element);
    }
    var animData = getAnimationData(settings.animation);
    if (!animData.animate) {
      this.locked = false;
    }
    // "Pop" animation: drop in from above the viewport to the cached top.
    if (animData.pop) {
      css.top = $(window).scrollTop() - el.data('offset') + 'px';
      var end_css = {
        top: $(window).scrollTop() + el.data('css-top') + 'px',
        opacity: 1
      };
      return setTimeout(function () {
        return el
          .css(css)
          .animate(end_css, settings.animation_speed, 'linear', function () {
            context.locked = false;
            el.trigger('opened.fndtn.reveal');
          })
          .addClass('open');
      }, settings.animation_speed / 2);
    }
    css.top = $(window).scrollTop() + el.data('css-top') + 'px';
    if (animData.fade) {
      var end_css = {opacity: 1};
      return setTimeout(function () {
        return el
          .css(css)
          .animate(end_css, settings.animation_speed, 'linear', function () {
            context.locked = false;
            el.trigger('opened.fndtn.reveal');
          })
          .addClass('open');
      }, settings.animation_speed / 2);
    }
    // No animation configured: show immediately.
    return el.css(css).show().css({opacity : 1}).addClass('open').trigger('opened.fndtn.reveal');
  }
  var settings = this.settings;
  // should we animate the background?
  if (getAnimationData(settings.animation).fade) {
    return el.fadeIn(settings.animation_speed / 2);
  }
  this.locked = false;
  return el.show();
},
// Push a modal behind the active one (used with multiple_opened).
to_back : function(el) {
  el.addClass('toback');
},
// Bring a previously backgrounded modal forward again.
to_front : function(el) {
  el.removeClass('toback');
},
// Hide a modal (when `css` is given) or the background overlay (when it
// is not), mirroring show(): pop slides up and out, fade dims to zero,
// otherwise the element is hidden immediately.
hide : function (el, css) {
  // is modal
  if (css) {
    var settings = el.data(this.attr_name(true) + '-init'),
        context = this;
    settings = settings || this.settings;
    var animData = getAnimationData(settings.animation);
    if (!animData.animate) {
      this.locked = false;
    }
    if (animData.pop) {
      var end_css = {
        top: - $(window).scrollTop() - el.data('offset') + 'px',
        opacity: 0
      };
      return setTimeout(function () {
        return el
          .animate(end_css, settings.animation_speed, 'linear', function () {
            context.locked = false;
            el.css(css).trigger('closed.fndtn.reveal');
          })
          .removeClass('open');
      }, settings.animation_speed / 2);
    }
    if (animData.fade) {
      var end_css = {opacity : 0};
      return setTimeout(function () {
        return el
          .animate(end_css, settings.animation_speed, 'linear', function () {
            context.locked = false;
            el.css(css).trigger('closed.fndtn.reveal');
          })
          .removeClass('open');
      }, settings.animation_speed / 2);
    }
    return el.hide().css(css).removeClass('open').trigger('closed.fndtn.reveal');
  }
  var settings = this.settings;
  // should we animate the background?
  if (getAnimationData(settings.animation).fade) {
    return el.fadeOut(settings.animation_speed / 2);
  }
  return el.hide();
},
// On modal close: stash the embedded video's src in data-src and reload
// the iframe so playback stops, then hide the video wrapper.
close_video : function (e) {
  var video = $('.flex-video', e.target),
      iframe = $('iframe', video);
  if (iframe.length > 0) {
    iframe.attr('data-src', iframe[0].src);
    // Re-assigning the same src forces the iframe to reload (stop playing).
    iframe.attr('src', iframe.attr('src'));
    video.hide();
  }
},
// On modal open: restore the embedded video's src (saved by close_video)
// or bounce the current src to reload the iframe, then show the wrapper.
open_video : function (e) {
  var video = $('.flex-video', e.target),
      iframe = video.find('iframe');
  if (iframe.length > 0) {
    var data_src = iframe.attr('data-src');
    if (typeof data_src === 'string') {
      iframe[0].src = iframe.attr('data-src');
    } else {
      // No saved src: clear then restore to force a reload.
      var src = iframe[0].src;
      iframe[0].src = undefined;
      iframe[0].src = src;
    }
    video.show();
  }
},
data_attr : function (str) {
if (this.namespace.length > 0) {
return this.namespace + '-' + str;
}
return str;
},
cache_offset : function (modal) {
var offset = modal.show().height() + parseInt(modal.css('top'), 10) + modal.scrollY;
modal.hide();
return offset;
},
// Unbind every reveal handler bound within this scope.
off : function () {
  $(this.scope).off('.fndtn.reveal');
},
// Reveal needs no re-layout on resize; stub required by the plugin interface.
reflow : function () {}
};
/*
* getAnimationData('popAndFade') // {animate: true, pop: true, fade: true}
* getAnimationData('fade') // {animate: true, pop: false, fade: true}
* getAnimationData('pop') // {animate: true, pop: true, fade: false}
* getAnimationData('foo') // {animate: false, pop: false, fade: false}
* getAnimationData(null) // {animate: false, pop: false, fade: false}
*/
// Parse an animation-name string into capability flags. Any string (or
// non-string, which is coerced) containing "pop" and/or "fade" animates.
function getAnimationData(str) {
  var flags = {animate : false, pop : false, fade : false};
  if (/pop/i.test(str)) {
    flags.pop = true;
    flags.animate = true;
  }
  if (/fade/i.test(str)) {
    flags.fade = true;
    flags.animate = true;
  }
  return flags;
}
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.reveal.js | foundation.reveal.js |
;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.tooltip = {
name : 'tooltip',
version : '5.5.3',
settings : {
additional_inheritable_classes : [],
tooltip_class : '.tooltip',
append_to : 'body',
touch_close_text : 'Tap To Close',
disable_for_touch : false,
hover_delay : 200,
fade_in_duration : 150,
fade_out_duration : 150,
show_on : 'all',
tip_template : function (selector, content) {
return '<span data-selector="' + selector + '" id="' + selector + '" class="'
+ Foundation.libs.tooltip.settings.tooltip_class.substring(1)
+ '" role="tooltip">' + content + '<span class="nub"></span></span>';
}
},
cache : {},
// Plugin entry point: pull in the shared random_str helper (used to mint
// tip selectors), then attach the event bindings.
init : function (scope, method, options) {
  Foundation.inherit(this, 'random_str');
  this.bindings(method, options);
},
should_show : function (target, tip) {
var settings = $.extend({}, this.settings, this.data_options(target));
if (settings.show_on === 'all') {
return true;
} else if (this.small() && settings.show_on === 'small') {
return true;
} else if (this.medium() && settings.show_on === 'medium') {
return true;
} else if (this.large() && settings.show_on === 'large') {
return true;
}
return false;
},
// True when the viewport matches the named Foundation media query.
medium : function () {
  return matchMedia(Foundation.media_queries['medium']).matches;
},
large : function () {
  return matchMedia(Foundation.media_queries['large']).matches;
},
// Build the tip for `instance` and bind the mouse/touch handlers that
// show, hide, and toggle tips, including the hover-delay timers.
events : function (instance) {
  var self = this,
      S = self.S;
  self.create(this.S(instance));
  // Show after the configured hover delay (or immediately for touch);
  // the pending timeout is stored on the DOM element itself.
  function _startShow(elt, $this, immediate) {
    if (elt.timer) {
      return;
    }
    if (immediate) {
      elt.timer = null;
      self.showTip($this);
    } else {
      elt.timer = setTimeout(function () {
        elt.timer = null;
        self.showTip($this);
      }.bind(elt), self.settings.hover_delay);
    }
  }
  // Cancel any pending show and hide the tip.
  function _startHide(elt, $this) {
    if (elt.timer) {
      clearTimeout(elt.timer);
      elt.timer = null;
    }
    self.hide($this);
  }
  $(this.scope)
    .off('.tooltip')
    .on('mouseenter.fndtn.tooltip mouseleave.fndtn.tooltip touchstart.fndtn.tooltip MSPointerDown.fndtn.tooltip',
      '[' + this.attr_name() + ']', function (e) {
      var $this = S(this),
          settings = $.extend({}, self.settings, self.data_options($this)),
          is_touch = false;
      if (Modernizr.touch && /touchstart|MSPointerDown/i.test(e.type) && S(e.target).is('a')) {
        return false;
      }
      if (/mouse/i.test(e.type) && self.ie_touch(e)) {
        return false;
      }
      if ($this.hasClass('open')) {
        if (Modernizr.touch && /touchstart|MSPointerDown/i.test(e.type)) {
          e.preventDefault();
        }
        self.hide($this);
      } else {
        if (settings.disable_for_touch && Modernizr.touch && /touchstart|MSPointerDown/i.test(e.type)) {
          return;
        } else if (!settings.disable_for_touch && Modernizr.touch && /touchstart|MSPointerDown/i.test(e.type)) {
          e.preventDefault();
          S(settings.tooltip_class + '.open').hide();
          is_touch = true;
          // close other open tooltips on touch
          if ($('.open[' + self.attr_name() + ']').length > 0) {
            var prevOpen = S($('.open[' + self.attr_name() + ']')[0]);
            self.hide(prevOpen);
          }
        }
        if (/enter|over/i.test(e.type)) {
          _startShow(this, $this);
        } else if (e.type === 'mouseout' || e.type === 'mouseleave') {
          _startHide(this, $this);
        } else {
          // Touch/pointer events show immediately, without the hover delay.
          _startShow(this, $this, true);
        }
      }
    })
    .on('mouseleave.fndtn.tooltip touchstart.fndtn.tooltip MSPointerDown.fndtn.tooltip', '[' + this.attr_name() + '].open', function (e) {
      if (/mouse/i.test(e.type) && self.ie_touch(e)) {
        return false;
      }
      if ($(this).data('tooltip-open-event-type') == 'touch' && e.type == 'mouseleave') {
        return;
      } else if ($(this).data('tooltip-open-event-type') == 'mouse' && /MSPointerDown|touchstart/i.test(e.type)) {
        // Mouse-opened tip tapped on a touch device: switch to touch mode.
        self.convert_to_touch($(this));
      } else {
        _startHide(this, $(this));
      }
    })
    .on('DOMNodeRemoved DOMAttrModified', '[' + this.attr_name() + ']:not(a)', function (e) {
      _startHide(this, S(this));
    });
},
ie_touch : function (e) {
// How do I distinguish between IE11 and Windows Phone 8?????
return false;
},
showTip : function ($target) {
var $tip = this.getTip($target);
if (this.should_show($target, $tip)) {
return this.show($target);
}
return;
},
getTip : function ($target) {
var selector = this.selector($target),
settings = $.extend({}, this.settings, this.data_options($target)),
tip = null;
if (selector) {
tip = this.S('span[data-selector="' + selector + '"]' + settings.tooltip_class);
}
return (typeof tip === 'object') ? tip : false;
},
selector : function ($target) {
var dataSelector = $target.attr(this.attr_name()) || $target.attr('data-selector');
if (typeof dataSelector != 'string') {
dataSelector = this.random_str(6);
$target
.attr('data-selector', dataSelector)
.attr('aria-describedby', dataSelector);
}
return dataSelector;
},
create : function ($target) {
var self = this,
settings = $.extend({}, this.settings, this.data_options($target)),
tip_template = this.settings.tip_template;
if (typeof settings.tip_template === 'string' && window.hasOwnProperty(settings.tip_template)) {
tip_template = window[settings.tip_template];
}
var $tip = $(tip_template(this.selector($target), $('<div></div>').html($target.attr('title')).html())),
classes = this.inheritable_classes($target);
$tip.addClass(classes).appendTo(settings.append_to);
if (Modernizr.touch) {
$tip.append('<span class="tap-to-close">' + settings.touch_close_text + '</span>');
$tip.on('touchstart.fndtn.tooltip MSPointerDown.fndtn.tooltip', function (e) {
self.hide($target);
});
}
$target.removeAttr('title').attr('title', '');
},
// Position `tip` relative to `target`, honouring directional classes
// (tip-top / tip-left / tip-right) and small-screen overrides.
// The tip is measured while invisible-but-shown, then hidden again so
// the caller controls the actual reveal (see show()).
reposition : function (target, tip, classes) {
var width, nub, nubHeight, nubWidth, objPos;
// Show the tip invisibly so outerHeight/outerWidth measure correctly.
tip.css('visibility', 'hidden').show();
width = target.data('width');
nub = tip.children('.nub');
nubHeight = nub.outerHeight();
nubWidth = nub.outerWidth();
if (this.small()) {
// Small screens: full-width tip regardless of data-width.
tip.css({'width' : '100%'});
} else {
tip.css({'width' : (width) ? width : 'auto'});
}
// Helper: set the four offsets on obj, falling back to 'auto'.
// NOTE(review): the `bottom` and `width` parameters are accepted but
// `width` is never applied — only top/bottom/left/right are set.
objPos = function (obj, top, right, bottom, left, width) {
return obj.css({
'top' : (top) ? top : 'auto',
'bottom' : (bottom) ? bottom : 'auto',
'left' : (left) ? left : 'auto',
'right' : (right) ? right : 'auto'
}).end();
};
var o_top = target.offset().top;
var o_left = target.offset().left;
var outerHeight = target.outerHeight();
// Default placement: 10px below the target, left-aligned with it.
objPos(tip, (o_top + outerHeight + 10), 'auto', 'auto', o_left);
if (this.small()) {
// Small screens: pin the tip 12.5px from the left edge and point
// the nub back at the target.
objPos(tip, (o_top + outerHeight + 10), 'auto', 'auto', 12.5, $(this.scope).width());
tip.addClass('tip-override');
objPos(nub, -nubHeight, 'auto', 'auto', o_left);
} else {
if (Foundation.rtl) {
// Right-to-left layouts: align the tip's right edge with the target's.
nub.addClass('rtl');
o_left = o_left + target.outerWidth() - tip.outerWidth();
}
objPos(tip, (o_top + outerHeight + 10), 'auto', 'auto', o_left);
// reset nub from small styles, if they've been applied
if (nub.attr('style')) {
nub.removeAttr('style');
}
tip.removeClass('tip-override');
var tip_outerHeight = tip.outerHeight();
if (classes && classes.indexOf('tip-top') > -1) {
// Place the tip above the target.
if (Foundation.rtl) {
nub.addClass('rtl');
}
objPos(tip, (o_top - tip_outerHeight), 'auto', 'auto', o_left)
.removeClass('tip-override');
} else if (classes && classes.indexOf('tip-left') > -1) {
// Place the tip to the left, vertically centred on the target.
objPos(tip, (o_top + (outerHeight / 2) - (tip_outerHeight / 2)), 'auto', 'auto', (o_left - tip.outerWidth() - nubHeight))
.removeClass('tip-override');
nub.removeClass('rtl');
} else if (classes && classes.indexOf('tip-right') > -1) {
// Place the tip to the right, vertically centred on the target.
objPos(tip, (o_top + (outerHeight / 2) - (tip_outerHeight / 2)), 'auto', 'auto', (o_left + target.outerWidth() + nubHeight))
.removeClass('tip-override');
nub.removeClass('rtl');
}
}
// Restore visibility but keep the tip hidden; show() fades it in.
tip.css('visibility', 'visible').hide();
},
small : function () {
return matchMedia(Foundation.media_queries.small).matches &&
!matchMedia(Foundation.media_queries.medium).matches;
},
inheritable_classes : function ($target) {
var settings = $.extend({}, this.settings, this.data_options($target)),
inheritables = ['tip-top', 'tip-left', 'tip-bottom', 'tip-right', 'radius', 'round'].concat(settings.additional_inheritable_classes),
classes = $target.attr('class'),
filtered = classes ? $.map(classes.split(' '), function (el, i) {
if ($.inArray(el, inheritables) !== -1) {
return el;
}
}).join(' ') : '';
return $.trim(filtered);
},
convert_to_touch : function ($target) {
var self = this,
$tip = self.getTip($target),
settings = $.extend({}, self.settings, self.data_options($target));
if ($tip.find('.tap-to-close').length === 0) {
$tip.append('<span class="tap-to-close">' + settings.touch_close_text + '</span>');
$tip.on('click.fndtn.tooltip.tapclose touchstart.fndtn.tooltip.tapclose MSPointerDown.fndtn.tooltip.tapclose', function (e) {
self.hide($target);
});
}
$target.data('tooltip-open-event-type', 'touch');
},
show : function ($target) {
var $tip = this.getTip($target);
if ($target.data('tooltip-open-event-type') == 'touch') {
this.convert_to_touch($target);
}
this.reposition($target, $tip, $target.attr('class'));
$target.addClass('open');
$tip.fadeIn(this.settings.fade_in_duration);
},
hide : function ($target) {
var $tip = this.getTip($target);
$tip.fadeOut(this.settings.fade_out_duration, function () {
$tip.find('.tap-to-close').remove();
$tip.off('click.fndtn.tooltip.tapclose MSPointerDown.fndtn.tapclose');
$target.removeClass('open');
});
},
off : function () {
var self = this;
this.S(this.scope).off('.fndtn.tooltip');
this.S(this.settings.tooltip_class).each(function (i) {
$('[' + self.attr_name() + ']').eq(i).attr('title', $(this).text());
}).remove();
},
reflow : function () {}
};
}(jQuery, window, window.document)); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/foundation/foundation.tooltip.js | foundation.tooltip.js |
!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){function c(a){var b="length"in a&&a.length,c=_.type(a);return"function"===c||_.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}function d(a,b,c){if(_.isFunction(b))return _.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return _.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(ha.test(b))return _.filter(b,a,c);b=_.filter(b,a)}return _.grep(a,function(a){return U.call(b,a)>=0!==c})}function e(a,b){for(;(a=a[b])&&1!==a.nodeType;);return a}function f(a){var b=oa[a]={};return _.each(a.match(na)||[],function(a,c){b[c]=!0}),b}function g(){Z.removeEventListener("DOMContentLoaded",g,!1),a.removeEventListener("load",g,!1),_.ready()}function h(){Object.defineProperty(this.cache={},0,{get:function(){return{}}}),this.expando=_.expando+h.uid++}function i(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(ua,"-$1").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:ta.test(c)?_.parseJSON(c):c}catch(e){}sa.set(a,b,c)}else c=void 0;return c}function j(){return!0}function k(){return!1}function l(){try{return Z.activeElement}catch(a){}}function m(a,b){return _.nodeName(a,"table")&&_.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function n(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function o(a){var b=Ka.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function p(a,b){for(var c=0,d=a.length;d>c;c++)ra.set(a[c],"globalEval",!b||ra.get(b[c],"globalEval"))}function q(a,b){var 
c,d,e,f,g,h,i,j;if(1===b.nodeType){if(ra.hasData(a)&&(f=ra.access(a),g=ra.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)_.event.add(b,e,j[e][c])}sa.hasData(a)&&(h=sa.access(a),i=_.extend({},h),sa.set(b,i))}}function r(a,b){var c=a.getElementsByTagName?a.getElementsByTagName(b||"*"):a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&_.nodeName(a,b)?_.merge([a],c):c}function s(a,b){var c=b.nodeName.toLowerCase();"input"===c&&ya.test(a.type)?b.checked=a.checked:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}function t(b,c){var d,e=_(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:_.css(e[0],"display");return e.detach(),f}function u(a){var b=Z,c=Oa[a];return c||(c=t(a,b),"none"!==c&&c||(Na=(Na||_("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=Na[0].contentDocument,b.write(),b.close(),c=t(a,b),Na.detach()),Oa[a]=c),c}function v(a,b,c){var d,e,f,g,h=a.style;return c=c||Ra(a),c&&(g=c.getPropertyValue(b)||c[b]),c&&(""!==g||_.contains(a.ownerDocument,a)||(g=_.style(a,b)),Qa.test(g)&&Pa.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0!==g?g+"":g}function w(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}function x(a,b){if(b in a)return b;for(var c=b[0].toUpperCase()+b.slice(1),d=b,e=Xa.length;e--;)if(b=Xa[e]+c,b in a)return b;return d}function y(a,b,c){var d=Ta.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function z(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=_.css(a,c+wa[f],!0,e)),d?("content"===c&&(g-=_.css(a,"padding"+wa[f],!0,e)),"margin"!==c&&(g-=_.css(a,"border"+wa[f]+"Width",!0,e))):(g+=_.css(a,"padding"+wa[f],!0,e),"padding"!==c&&(g+=_.css(a,"border"+wa[f]+"Width",!0,e)));return g}function 
A(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ra(a),g="border-box"===_.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=v(a,b,f),(0>e||null==e)&&(e=a.style[b]),Qa.test(e))return e;d=g&&(Y.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+z(a,b,c||(g?"border":"content"),d,f)+"px"}function B(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=ra.get(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&xa(d)&&(f[g]=ra.access(d,"olddisplay",u(d.nodeName)))):(e=xa(d),"none"===c&&e||ra.set(d,"olddisplay",e?c:_.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function C(a,b,c,d,e){return new C.prototype.init(a,b,c,d,e)}function D(){return setTimeout(function(){Ya=void 0}),Ya=_.now()}function E(a,b){var c,d=0,e={height:a};for(b=b?1:0;4>d;d+=2-b)c=wa[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function F(a,b,c){for(var d,e=(cb[b]||[]).concat(cb["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function G(a,b,c){var d,e,f,g,h,i,j,k,l=this,m={},n=a.style,o=a.nodeType&&xa(a),p=ra.get(a,"fxshow");c.queue||(h=_._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,l.always(function(){l.always(function(){h.unqueued--,_.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[n.overflow,n.overflowX,n.overflowY],j=_.css(a,"display"),k="none"===j?ra.get(a,"olddisplay")||u(a.nodeName):j,"inline"===k&&"none"===_.css(a,"float")&&(n.display="inline-block")),c.overflow&&(n.overflow="hidden",l.always(function(){n.overflow=c.overflow[0],n.overflowX=c.overflow[1],n.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],$a.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(o?"hide":"show")){if("show"!==e||!p||void 0===p[d])continue;o=!0}m[d]=p&&p[d]||_.style(a,d)}else 
j=void 0;if(_.isEmptyObject(m))"inline"===("none"===j?u(a.nodeName):j)&&(n.display=j);else{p?"hidden"in p&&(o=p.hidden):p=ra.access(a,"fxshow",{}),f&&(p.hidden=!o),o?_(a).show():l.done(function(){_(a).hide()}),l.done(function(){var b;ra.remove(a,"fxshow");for(b in m)_.style(a,b,m[b])});for(d in m)g=F(o?p[d]:0,d,l),d in p||(p[d]=g.start,o&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function H(a,b){var c,d,e,f,g;for(c in a)if(d=_.camelCase(c),e=b[d],f=a[c],_.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=_.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function I(a,b,c){var d,e,f=0,g=bb.length,h=_.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=Ya||D(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:_.extend({},b),opts:_.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:Ya||D(),duration:c.duration,tweens:[],createTween:function(b,c){var d=_.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(H(k,j.opts.specialEasing);g>f;f++)if(d=bb[f].call(j,a,k,j.opts))return d;return _.map(k,F,j),_.isFunction(j.opts.start)&&j.opts.start.call(a,j),_.fx.timer(_.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}function J(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(na)||[];if(_.isFunction(c))for(;d=f[e++];)"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function K(a,b,c,d){function e(h){var i;return 
f[h]=!0,_.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||g||f[j]?g?!(i=j):void 0:(b.dataTypes.unshift(j),e(j),!1)}),i}var f={},g=a===tb;return e(b.dataTypes[0])||!f["*"]&&e("*")}function L(a,b){var c,d,e=_.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&_.extend(!0,a,d),a}function M(a,b,c){for(var d,e,f,g,h=a.contents,i=a.dataTypes;"*"===i[0];)i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function N(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];for(f=k.shift();f;)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}function O(a,b,c,d){var e;if(_.isArray(b))_.each(b,function(b,e){c||yb.test(a)?d(a,e):O(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==_.type(b))d(a,b);else for(e in b)O(a+"["+e+"]",b[e],c,d)}function P(a){return _.isWindow(a)?a:9===a.nodeType&&a.defaultView}var Q=[],R=Q.slice,S=Q.concat,T=Q.push,U=Q.indexOf,V={},W=V.toString,X=V.hasOwnProperty,Y={},Z=a.document,$="2.1.4",_=function(a,b){return new _.fn.init(a,b)},aa=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,ba=/^-ms-/,ca=/-([\da-z])/gi,da=function(a,b){return b.toUpperCase()};_.fn=_.prototype={jquery:$,constructor:_,selector:"",length:0,toArray:function(){return R.call(this)},get:function(a){return 
null!=a?0>a?this[a+this.length]:this[a]:R.call(this)},pushStack:function(a){var b=_.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return _.each(this,a,b)},map:function(a){return this.pushStack(_.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(R.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:T,sort:Q.sort,splice:Q.splice},_.extend=_.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||_.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(_.isPlainObject(d)||(e=_.isArray(d)))?(e?(e=!1,f=c&&_.isArray(c)?c:[]):f=c&&_.isPlainObject(c)?c:{},g[b]=_.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},_.extend({expando:"jQuery"+($+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===_.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){return!_.isArray(a)&&a-parseFloat(a)+1>=0},isPlainObject:function(a){return"object"!==_.type(a)||a.nodeType||_.isWindow(a)?!1:a.constructor&&!X.call(a.constructor.prototype,"isPrototypeOf")?!1:!0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?V[W.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=_.trim(a),a&&(1===a.indexOf("use strict")?(b=Z.createElement("script"),b.text=a,Z.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(ba,"ms-").replace(ca,da)},nodeName:function(a,b){return 
a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,d){var e,f=0,g=a.length,h=c(a);if(d){if(h)for(;g>f&&(e=b.apply(a[f],d),e!==!1);f++);else for(f in a)if(e=b.apply(a[f],d),e===!1)break}else if(h)for(;g>f&&(e=b.call(a[f],f,a[f]),e!==!1);f++);else for(f in a)if(e=b.call(a[f],f,a[f]),e===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(aa,"")},makeArray:function(a,b){var d=b||[];return null!=a&&(c(Object(a))?_.merge(d,"string"==typeof a?[a]:a):T.call(d,a)),d},inArray:function(a,b,c){return null==b?-1:U.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,d){var e,f=0,g=a.length,h=c(a),i=[];if(h)for(;g>f;f++)e=b(a[f],f,d),null!=e&&i.push(e);else for(f in a)e=b(a[f],f,d),null!=e&&i.push(e);return S.apply([],i)},guid:1,proxy:function(a,b){var c,d,e;return"string"==typeof b&&(c=a[b],b=a,a=c),_.isFunction(a)?(d=R.call(arguments,2),e=function(){return a.apply(b||this,d.concat(R.call(arguments)))},e.guid=a.guid=a.guid||_.guid++,e):void 0},now:Date.now,support:Y}),_.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){V["[object "+b+"]"]=b.toLowerCase()});var ea=/*!
* Sizzle CSS Selector Engine v2.2.0-pre
* http://sizzlejs.com/
*
* Copyright 2008, 2014 jQuery Foundation, Inc. and other contributors
* Released under the MIT license
* http://jquery.org/license
*
* Date: 2014-12-16
*/
function(a){function b(a,b,c,d){var e,f,g,h,i,j,l,n,o,p;if((b?b.ownerDocument||b:O)!==G&&F(b),b=b||G,c=c||[],h=b.nodeType,"string"!=typeof a||!a||1!==h&&9!==h&&11!==h)return c;if(!d&&I){if(11!==h&&(e=sa.exec(a)))if(g=e[1]){if(9===h){if(f=b.getElementById(g),!f||!f.parentNode)return c;if(f.id===g)return c.push(f),c}else if(b.ownerDocument&&(f=b.ownerDocument.getElementById(g))&&M(b,f)&&f.id===g)return c.push(f),c}else{if(e[2])return $.apply(c,b.getElementsByTagName(a)),c;if((g=e[3])&&v.getElementsByClassName)return $.apply(c,b.getElementsByClassName(g)),c}if(v.qsa&&(!J||!J.test(a))){if(n=l=N,o=b,p=1!==h&&a,1===h&&"object"!==b.nodeName.toLowerCase()){for(j=z(a),(l=b.getAttribute("id"))?n=l.replace(ua,"\\$&"):b.setAttribute("id",n),n="[id='"+n+"'] ",i=j.length;i--;)j[i]=n+m(j[i]);o=ta.test(a)&&k(b.parentNode)||b,p=j.join(",")}if(p)try{return $.apply(c,o.querySelectorAll(p)),c}catch(q){}finally{l||b.removeAttribute("id")}}}return B(a.replace(ia,"$1"),b,c,d)}function c(){function a(c,d){return b.push(c+" ")>w.cacheLength&&delete a[b.shift()],a[c+" "]=d}var b=[];return a}function d(a){return a[N]=!0,a}function e(a){var b=G.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function f(a,b){for(var c=a.split("|"),d=a.length;d--;)w.attrHandle[c[d]]=b}function g(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||V)-(~a.sourceIndex||V);if(d)return d;if(c)for(;c=c.nextSibling;)if(c===b)return-1;return a?1:-1}function h(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function i(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function j(a){return d(function(b){return b=+b,d(function(c,d){for(var e,f=a([],c.length,b),g=f.length;g--;)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function k(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}function l(){}function m(a){for(var 
b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function n(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=Q++;return b.first?function(b,c,f){for(;b=b[d];)if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[P,f];if(g){for(;b=b[d];)if((1===b.nodeType||e)&&a(b,c,g))return!0}else for(;b=b[d];)if(1===b.nodeType||e){if(i=b[N]||(b[N]={}),(h=i[d])&&h[0]===P&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function o(a){return a.length>1?function(b,c,d){for(var e=a.length;e--;)if(!a[e](b,c,d))return!1;return!0}:a[0]}function p(a,c,d){for(var e=0,f=c.length;f>e;e++)b(a,c[e],d);return d}function q(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function r(a,b,c,e,f,g){return e&&!e[N]&&(e=r(e)),f&&!f[N]&&(f=r(f,g)),d(function(d,g,h,i){var j,k,l,m=[],n=[],o=g.length,r=d||p(b||"*",h.nodeType?[h]:h,[]),s=!a||!d&&b?r:q(r,m,a,h,i),t=c?f||(d?a:o||e)?[]:g:s;if(c&&c(s,t,h,i),e)for(j=q(t,n),e(j,[],h,i),k=j.length;k--;)(l=j[k])&&(t[n[k]]=!(s[n[k]]=l));if(d){if(f||a){if(f){for(j=[],k=t.length;k--;)(l=t[k])&&j.push(s[k]=l);f(null,t=[],j,i)}for(k=t.length;k--;)(l=t[k])&&(j=f?aa(d,l):m[k])>-1&&(d[j]=!(g[j]=l))}}else t=q(t===g?t.splice(o,t.length):t),f?f(null,g,t,i):$.apply(g,t)})}function s(a){for(var b,c,d,e=a.length,f=w.relative[a[0].type],g=f||w.relative[" "],h=f?1:0,i=n(function(a){return a===b},g,!0),j=n(function(a){return aa(b,a)>-1},g,!0),k=[function(a,c,d){var e=!f&&(d||c!==C)||((b=c).nodeType?i(a,c,d):j(a,c,d));return b=null,e}];e>h;h++)if(c=w.relative[a[h].type])k=[n(o(k),c)];else{if(c=w.filter[a[h].type].apply(null,a[h].matches),c[N]){for(d=++h;e>d&&!w.relative[a[d].type];d++);return r(h>1&&o(k),h>1&&m(a.slice(0,h-1).concat({value:" "===a[h-2].type?"*":""})).replace(ia,"$1"),c,d>h&&s(a.slice(h,d)),e>d&&s(a=a.slice(d)),e>d&&m(a))}k.push(c)}return o(k)}function t(a,c){var e=c.length>0,f=a.length>0,g=function(d,g,h,i,j){var 
k,l,m,n=0,o="0",p=d&&[],r=[],s=C,t=d||f&&w.find.TAG("*",j),u=P+=null==s?1:Math.random()||.1,v=t.length;for(j&&(C=g!==G&&g);o!==v&&null!=(k=t[o]);o++){if(f&&k){for(l=0;m=a[l++];)if(m(k,g,h)){i.push(k);break}j&&(P=u)}e&&((k=!m&&k)&&n--,d&&p.push(k))}if(n+=o,e&&o!==n){for(l=0;m=c[l++];)m(p,r,g,h);if(d){if(n>0)for(;o--;)p[o]||r[o]||(r[o]=Y.call(i));r=q(r)}$.apply(i,r),j&&!d&&r.length>0&&n+c.length>1&&b.uniqueSort(i)}return j&&(P=u,C=s),p};return e?d(g):g}var u,v,w,x,y,z,A,B,C,D,E,F,G,H,I,J,K,L,M,N="sizzle"+1*new Date,O=a.document,P=0,Q=0,R=c(),S=c(),T=c(),U=function(a,b){return a===b&&(E=!0),0},V=1<<31,W={}.hasOwnProperty,X=[],Y=X.pop,Z=X.push,$=X.push,_=X.slice,aa=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},ba="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",ca="[\\x20\\t\\r\\n\\f]",da="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",ea=da.replace("w","w#"),fa="\\["+ca+"*("+da+")(?:"+ca+"*([*^$|!~]?=)"+ca+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+ea+"))|)"+ca+"*\\]",ga=":("+da+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+fa+")*)|.*)\\)|)",ha=new RegExp(ca+"+","g"),ia=new RegExp("^"+ca+"+|((?:^|[^\\\\])(?:\\\\.)*)"+ca+"+$","g"),ja=new RegExp("^"+ca+"*,"+ca+"*"),ka=new RegExp("^"+ca+"*([>+~]|"+ca+")"+ca+"*"),la=new RegExp("="+ca+"*([^\\]'\"]*?)"+ca+"*\\]","g"),ma=new RegExp(ga),na=new RegExp("^"+ea+"$"),oa={ID:new RegExp("^#("+da+")"),CLASS:new RegExp("^\\.("+da+")"),TAG:new RegExp("^("+da.replace("w","w*")+")"),ATTR:new RegExp("^"+fa),PSEUDO:new RegExp("^"+ga),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+ca+"*(even|odd|(([+-]|)(\\d*)n|)"+ca+"*(?:([+-]|)"+ca+"*(\\d+)|))"+ca+"*\\)|)","i"),bool:new RegExp("^(?:"+ba+")$","i"),needsContext:new 
RegExp("^"+ca+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+ca+"*((?:-\\d)?\\d*)"+ca+"*\\)|)(?=[^-]|$)","i")},pa=/^(?:input|select|textarea|button)$/i,qa=/^h\d$/i,ra=/^[^{]+\{\s*\[native \w/,sa=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ta=/[+~]/,ua=/'|\\/g,va=new RegExp("\\\\([\\da-f]{1,6}"+ca+"?|("+ca+")|.)","ig"),wa=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},xa=function(){F()};try{$.apply(X=_.call(O.childNodes),O.childNodes),X[O.childNodes.length].nodeType}catch(ya){$={apply:X.length?function(a,b){Z.apply(a,_.call(b))}:function(a,b){for(var c=a.length,d=0;a[c++]=b[d++];);a.length=c-1}}}v=b.support={},y=b.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},F=b.setDocument=function(a){var b,c,d=a?a.ownerDocument||a:O;return d!==G&&9===d.nodeType&&d.documentElement?(G=d,H=d.documentElement,c=d.defaultView,c&&c!==c.top&&(c.addEventListener?c.addEventListener("unload",xa,!1):c.attachEvent&&c.attachEvent("onunload",xa)),I=!y(d),v.attributes=e(function(a){return a.className="i",!a.getAttribute("className")}),v.getElementsByTagName=e(function(a){return a.appendChild(d.createComment("")),!a.getElementsByTagName("*").length}),v.getElementsByClassName=ra.test(d.getElementsByClassName),v.getById=e(function(a){return H.appendChild(a).id=N,!d.getElementsByName||!d.getElementsByName(N).length}),v.getById?(w.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&I){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},w.filter.ID=function(a){var b=a.replace(va,wa);return function(a){return a.getAttribute("id")===b}}):(delete w.find.ID,w.filter.ID=function(a){var b=a.replace(va,wa);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),w.find.TAG=v.getElementsByTagName?function(a,b){return"undefined"!=typeof 
b.getElementsByTagName?b.getElementsByTagName(a):v.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){for(;c=f[e++];)1===c.nodeType&&d.push(c);return d}return f},w.find.CLASS=v.getElementsByClassName&&function(a,b){return I?b.getElementsByClassName(a):void 0},K=[],J=[],(v.qsa=ra.test(d.querySelectorAll))&&(e(function(a){H.appendChild(a).innerHTML="<a id='"+N+"'></a><select id='"+N+"-\f]' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&J.push("[*^$]="+ca+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||J.push("\\["+ca+"*(?:value|"+ba+")"),a.querySelectorAll("[id~="+N+"-]").length||J.push("~="),a.querySelectorAll(":checked").length||J.push(":checked"),a.querySelectorAll("a#"+N+"+*").length||J.push(".#.+[+~]")}),e(function(a){var b=d.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&J.push("name"+ca+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||J.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),J.push(",.*:")})),(v.matchesSelector=ra.test(L=H.matches||H.webkitMatchesSelector||H.mozMatchesSelector||H.oMatchesSelector||H.msMatchesSelector))&&e(function(a){v.disconnectedMatch=L.call(a,"div"),L.call(a,"[s!='']:x"),K.push("!=",ga)}),J=J.length&&new RegExp(J.join("|")),K=K.length&&new RegExp(K.join("|")),b=ra.test(H.compareDocumentPosition),M=b||ra.test(H.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)for(;b=b.parentNode;)if(b===a)return!0;return!1},U=b?function(a,b){if(a===b)return E=!0,0;var c=!a.compareDocumentPosition-!b.compareDocumentPosition;return 
c?c:(c=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&c||!v.sortDetached&&b.compareDocumentPosition(a)===c?a===d||a.ownerDocument===O&&M(O,a)?-1:b===d||b.ownerDocument===O&&M(O,b)?1:D?aa(D,a)-aa(D,b):0:4&c?-1:1)}:function(a,b){if(a===b)return E=!0,0;var c,e=0,f=a.parentNode,h=b.parentNode,i=[a],j=[b];if(!f||!h)return a===d?-1:b===d?1:f?-1:h?1:D?aa(D,a)-aa(D,b):0;if(f===h)return g(a,b);for(c=a;c=c.parentNode;)i.unshift(c);for(c=b;c=c.parentNode;)j.unshift(c);for(;i[e]===j[e];)e++;return e?g(i[e],j[e]):i[e]===O?-1:j[e]===O?1:0},d):G},b.matches=function(a,c){return b(a,null,null,c)},b.matchesSelector=function(a,c){if((a.ownerDocument||a)!==G&&F(a),c=c.replace(la,"='$1']"),v.matchesSelector&&I&&(!K||!K.test(c))&&(!J||!J.test(c)))try{var d=L.call(a,c);if(d||v.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return b(c,G,null,[a]).length>0},b.contains=function(a,b){return(a.ownerDocument||a)!==G&&F(a),M(a,b)},b.attr=function(a,b){(a.ownerDocument||a)!==G&&F(a);var c=w.attrHandle[b.toLowerCase()],d=c&&W.call(w.attrHandle,b.toLowerCase())?c(a,b,!I):void 0;return void 0!==d?d:v.attributes||!I?a.getAttribute(b):(d=a.getAttributeNode(b))&&d.specified?d.value:null},b.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},b.uniqueSort=function(a){var b,c=[],d=0,e=0;if(E=!v.detectDuplicates,D=!v.sortStable&&a.slice(0),a.sort(U),E){for(;b=a[e++];)b===a[e]&&(d=c.push(e));for(;d--;)a.splice(c[d],1)}return D=null,a},x=b.getText=function(a){var b,c="",d=0,e=a.nodeType;if(e){if(1===e||9===e||11===e){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=x(a)}else if(3===e||4===e)return a.nodeValue}else for(;b=a[d++];)c+=x(b);return c},w=b.selectors={cacheLength:50,createPseudo:d,match:oa,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," 
":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(va,wa),a[3]=(a[3]||a[4]||a[5]||"").replace(va,wa),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||b.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&b.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return oa.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&ma.test(c)&&(b=z(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(va,wa).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=R[a+" "];return b||(b=new RegExp("(^|"+ca+")"+a+"("+ca+"|$)"))&&R(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,c,d){return function(e){var f=b.attr(e,a);return null==f?"!="===c:c?(f+="","="===c?f===d:"!="===c?f!==d:"^="===c?d&&0===f.indexOf(d):"*="===c?d&&f.indexOf(d)>-1:"$="===c?d&&f.slice(-d.length)===d:"~="===c?(" "+f.replace(ha," ")+" ").indexOf(d)>-1:"|="===c?f===d||f.slice(0,d.length+1)===d+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var 
j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){for(;p;){for(l=b;l=l[p];)if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){for(k=q[N]||(q[N]={}),j=k[a]||[],n=j[0]===P&&j[1],m=j[0]===P&&j[2],l=n&&q.childNodes[n];l=++n&&l&&l[p]||(m=n=0)||o.pop();)if(1===l.nodeType&&++m&&l===b){k[a]=[P,n,m];break}}else if(s&&(j=(b[N]||(b[N]={}))[a])&&j[0]===P)m=j[1];else for(;(l=++n&&l&&l[p]||(m=n=0)||o.pop())&&((h?l.nodeName.toLowerCase()!==r:1!==l.nodeType)||!++m||(s&&((l[N]||(l[N]={}))[a]=[P,m]),l!==b)););return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,c){var e,f=w.pseudos[a]||w.setFilters[a.toLowerCase()]||b.error("unsupported pseudo: "+a);return f[N]?f(c):f.length>1?(e=[a,a,"",c],w.setFilters.hasOwnProperty(a.toLowerCase())?d(function(a,b){for(var d,e=f(a,c),g=e.length;g--;)d=aa(a,e[g]),a[d]=!(b[d]=e[g])}):function(a){return f(a,0,e)}):f}},pseudos:{not:d(function(a){var b=[],c=[],e=A(a.replace(ia,"$1"));return e[N]?d(function(a,b,c,d){for(var f,g=e(a,null,d,[]),h=a.length;h--;)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,d,f){return b[0]=a,e(b,null,f,c),b[0]=null,!c.pop()}}),has:d(function(a){return function(c){return b(a,c).length>0}}),contains:d(function(a){return a=a.replace(va,wa),function(b){return(b.textContent||b.innerText||x(b)).indexOf(a)>-1}}),lang:d(function(a){return na.test(a||"")||b.error("unsupported lang: "+a),a=a.replace(va,wa).toLowerCase(),function(b){var c;do if(c=I?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===H},focus:function(a){return a===G.activeElement&&(!G.hasFocus||G.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return 
a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!w.pseudos.empty(a)},header:function(a){return qa.test(a.nodeName)},input:function(a){return pa.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:j(function(){return[0]}),last:j(function(a,b){return[b-1]}),eq:j(function(a,b,c){return[0>c?c+b:c]}),even:j(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:j(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:j(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:j(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},w.pseudos.nth=w.pseudos.eq;for(u in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})w.pseudos[u]=h(u);for(u in{submit:!0,reset:!0})w.pseudos[u]=i(u);return l.prototype=w.filters=w.pseudos,w.setFilters=new l,z=b.tokenize=function(a,c){var d,e,f,g,h,i,j,k=S[a+" "];if(k)return c?0:k.slice(0);for(h=a,i=[],j=w.preFilter;h;){(!d||(e=ja.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),d=!1,(e=ka.exec(h))&&(d=e.shift(),f.push({value:d,type:e[0].replace(ia," ")}),h=h.slice(d.length));for(g in w.filter)!(e=oa[g].exec(h))||j[g]&&!(e=j[g](e))||(d=e.shift(),f.push({value:d,type:g,matches:e}),h=h.slice(d.length));if(!d)break}return c?h.length:h?b.error(a):S(a,i).slice(0)},A=b.compile=function(a,b){var c,d=[],e=[],f=T[a+" "];if(!f){for(b||(b=z(a)),c=b.length;c--;)f=s(b[c]),f[N]?d.push(f):e.push(f);f=T(a,t(e,d)),f.selector=a}return f},B=b.select=function(a,b,c,d){var 
e,f,g,h,i,j="function"==typeof a&&a,l=!d&&z(a=j.selector||a);if(c=c||[],1===l.length){if(f=l[0]=l[0].slice(0),f.length>2&&"ID"===(g=f[0]).type&&v.getById&&9===b.nodeType&&I&&w.relative[f[1].type]){if(b=(w.find.ID(g.matches[0].replace(va,wa),b)||[])[0],!b)return c;j&&(b=b.parentNode),a=a.slice(f.shift().value.length)}for(e=oa.needsContext.test(a)?0:f.length;e--&&(g=f[e],!w.relative[h=g.type]);)if((i=w.find[h])&&(d=i(g.matches[0].replace(va,wa),ta.test(f[0].type)&&k(b.parentNode)||b))){if(f.splice(e,1),a=d.length&&m(f),!a)return $.apply(c,d),c;break}}return(j||A(a,l))(d,b,!I,c,ta.test(a)&&k(b.parentNode)||b),c},v.sortStable=N.split("").sort(U).join("")===N,v.detectDuplicates=!!E,F(),v.sortDetached=e(function(a){return 1&a.compareDocumentPosition(G.createElement("div"))}),e(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||f("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),v.attributes&&e(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||f("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),e(function(a){return null==a.getAttribute("disabled")})||f(ba,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),b}(a);_.find=ea,_.expr=ea.selectors,_.expr[":"]=_.expr.pseudos,_.unique=ea.uniqueSort,_.text=ea.getText,_.isXMLDoc=ea.isXML,_.contains=ea.contains;var fa=_.expr.match.needsContext,ga=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,ha=/^.[^:#\[\.,]*$/;_.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?_.find.matchesSelector(d,a)?[d]:[]:_.find.matches(a,_.grep(b,function(a){return 1===a.nodeType}))},_.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return 
this.pushStack(_(a).filter(function(){for(b=0;c>b;b++)if(_.contains(e[b],this))return!0}));for(b=0;c>b;b++)_.find(a,e[b],d);return d=this.pushStack(c>1?_.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(d(this,a||[],!1))},not:function(a){return this.pushStack(d(this,a||[],!0))},is:function(a){return!!d(this,"string"==typeof a&&fa.test(a)?_(a):a||[],!1).length}});var ia,ja=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,ka=_.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof a){if(c="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:ja.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||ia).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof _?b[0]:b,_.merge(this,_.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:Z,!0)),ga.test(c[1])&&_.isPlainObject(b))for(c in b)_.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}return d=Z.getElementById(c[2]),d&&d.parentNode&&(this.length=1,this[0]=d),this.context=Z,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):_.isFunction(a)?"undefined"!=typeof ia.ready?ia.ready(a):a(_):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),_.makeArray(a,this))};ka.prototype=_.fn,ia=_(Z);var la=/^(?:parents|prev(?:Until|All))/,ma={children:!0,contents:!0,next:!0,prev:!0};_.extend({dir:function(a,b,c){for(var d=[],e=void 0!==c;(a=a[b])&&9!==a.nodeType;)if(1===a.nodeType){if(e&&_(a).is(c))break;d.push(a)}return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),_.fn.extend({has:function(a){var b=_(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(_.contains(this,b[a]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=fa.test(a)||"string"!=typeof a?_(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&_.find.matchesSelector(c,a))){f.push(c);break}return 
this.pushStack(f.length>1?_.unique(f):f)},index:function(a){return a?"string"==typeof a?U.call(_(a),this[0]):U.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(_.unique(_.merge(this.get(),_(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}}),_.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return _.dir(a,"parentNode")},parentsUntil:function(a,b,c){return _.dir(a,"parentNode",c)},next:function(a){return e(a,"nextSibling")},prev:function(a){return e(a,"previousSibling")},nextAll:function(a){return _.dir(a,"nextSibling")},prevAll:function(a){return _.dir(a,"previousSibling")},nextUntil:function(a,b,c){return _.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return _.dir(a,"previousSibling",c)},siblings:function(a){return _.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return _.sibling(a.firstChild)},contents:function(a){return a.contentDocument||_.merge([],a.childNodes)}},function(a,b){_.fn[a]=function(c,d){var e=_.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=_.filter(d,e)),this.length>1&&(ma[a]||_.unique(e),la.test(a)&&e.reverse()),this.pushStack(e)}});var na=/\S+/g,oa={};_.Callbacks=function(a){a="string"==typeof a?oa[a]||f(a):_.extend({},a);var b,c,d,e,g,h,i=[],j=!a.once&&[],k=function(f){for(b=a.memory&&f,c=!0,h=e||0,e=0,g=i.length,d=!0;i&&g>h;h++)if(i[h].apply(f[0],f[1])===!1&&a.stopOnFalse){b=!1;break}d=!1,i&&(j?j.length&&k(j.shift()):b?i=[]:l.disable())},l={add:function(){if(i){var c=i.length;!function f(b){_.each(b,function(b,c){var d=_.type(c);"function"===d?a.unique&&l.has(c)||i.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),d?g=i.length:b&&(e=c,k(b))}return this},remove:function(){return i&&_.each(arguments,function(a,b){for(var c;(c=_.inArray(b,i,c))>-1;)i.splice(c,1),d&&(g>=c&&g--,h>=c&&h--)}),this},has:function(a){return 
a?_.inArray(a,i)>-1:!(!i||!i.length)},empty:function(){return i=[],g=0,this},disable:function(){return i=j=b=void 0,this},disabled:function(){return!i},lock:function(){return j=void 0,b||l.disable(),this},locked:function(){return!j},fireWith:function(a,b){return!i||c&&!j||(b=b||[],b=[a,b.slice?b.slice():b],d?j.push(b):k(b)),this},fire:function(){return l.fireWith(this,arguments),this},fired:function(){return!!c}};return l},_.extend({Deferred:function(a){var b=[["resolve","done",_.Callbacks("once memory"),"resolved"],["reject","fail",_.Callbacks("once memory"),"rejected"],["notify","progress",_.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return _.Deferred(function(c){_.each(b,function(b,f){var g=_.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&_.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?_.extend(a,d):d}},e={};return d.pipe=d.then,_.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b,c,d,e=0,f=R.call(arguments),g=f.length,h=1!==g||a&&_.isFunction(a.promise)?g:0,i=1===h?a:_.Deferred(),j=function(a,c,d){return function(e){c[a]=this,d[a]=arguments.length>1?R.call(arguments):e,d===b?i.notifyWith(c,d):--h||i.resolveWith(c,d)}};if(g>1)for(b=new Array(g),c=new Array(g),d=new Array(g);g>e;e++)f[e]&&_.isFunction(f[e].promise)?f[e].promise().done(j(e,d,f)).fail(i.reject).progress(j(e,c,b)):--h;return h||i.resolveWith(d,f),i.promise()}});var pa;_.fn.ready=function(a){return 
_.ready.promise().done(a),this},_.extend({isReady:!1,readyWait:1,holdReady:function(a){a?_.readyWait++:_.ready(!0)},ready:function(a){(a===!0?--_.readyWait:_.isReady)||(_.isReady=!0,a!==!0&&--_.readyWait>0||(pa.resolveWith(Z,[_]),_.fn.triggerHandler&&(_(Z).triggerHandler("ready"),_(Z).off("ready"))))}}),_.ready.promise=function(b){return pa||(pa=_.Deferred(),"complete"===Z.readyState?setTimeout(_.ready):(Z.addEventListener("DOMContentLoaded",g,!1),a.addEventListener("load",g,!1))),pa.promise(b)},_.ready.promise();var qa=_.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===_.type(c)){e=!0;for(h in c)_.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,_.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(_(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f};_.acceptData=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType},h.uid=1,h.accepts=_.acceptData,h.prototype={key:function(a){if(!h.accepts(a))return 0;var b={},c=a[this.expando];if(!c){c=h.uid++;try{b[this.expando]={value:c},Object.defineProperties(a,b)}catch(d){b[this.expando]=c,_.extend(a,b)}}return this.cache[c]||(this.cache[c]={}),c},set:function(a,b,c){var d,e=this.key(a),f=this.cache[e];if("string"==typeof b)f[b]=c;else if(_.isEmptyObject(f))_.extend(this.cache[e],b);else for(d in b)f[d]=b[d];return f},get:function(a,b){var c=this.cache[this.key(a)];return void 0===b?c:c[b]},access:function(a,b,c){var d;return void 0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,_.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=this.key(a),g=this.cache[f];if(void 0===b)this.cache[f]={};else{_.isArray(b)?d=b.concat(b.map(_.camelCase)):(e=_.camelCase(b),b in g?d=[b,e]:(d=e,d=d in g?[d]:d.match(na)||[])),c=d.length;for(;c--;)delete 
g[d[c]]}},hasData:function(a){return!_.isEmptyObject(this.cache[a[this.expando]]||{})},discard:function(a){a[this.expando]&&delete this.cache[a[this.expando]]}};var ra=new h,sa=new h,ta=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,ua=/([A-Z])/g;_.extend({hasData:function(a){return sa.hasData(a)||ra.hasData(a)},data:function(a,b,c){return sa.access(a,b,c)},removeData:function(a,b){sa.remove(a,b)},_data:function(a,b,c){return ra.access(a,b,c)},_removeData:function(a,b){ra.remove(a,b)}}),_.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=sa.get(f),1===f.nodeType&&!ra.get(f,"hasDataAttrs"))){for(c=g.length;c--;)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=_.camelCase(d.slice(5)),i(f,d,e[d])));ra.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){sa.set(this,a)}):qa(this,function(b){var c,d=_.camelCase(a);if(f&&void 0===b){if(c=sa.get(f,a),void 0!==c)return c;if(c=sa.get(f,d),void 0!==c)return c;if(c=i(f,d,void 0),void 0!==c)return c}else this.each(function(){var c=sa.get(this,d);sa.set(this,d,b),-1!==a.indexOf("-")&&void 0!==c&&sa.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){sa.remove(this,a)})}}),_.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=ra.get(a,b),c&&(!d||_.isArray(c)?d=ra.access(a,b,_.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=_.queue(a,b),d=c.length,e=c.shift(),f=_._queueHooks(a,b),g=function(){_.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return ra.get(a,c)||ra.access(a,c,{empty:_.Callbacks("once memory").add(function(){ra.remove(a,[b+"queue",c])})})}}),_.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?_.queue(this[0],a):void 0===b?this:this.each(function(){var 
c=_.queue(this,a,b);_._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&_.dequeue(this,a)})},dequeue:function(a){return this.each(function(){_.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=_.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};for("string"!=typeof a&&(b=a,a=void 0),a=a||"fx";g--;)c=ra.get(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var va=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,wa=["Top","Right","Bottom","Left"],xa=function(a,b){return a=b||a,"none"===_.css(a,"display")||!_.contains(a.ownerDocument,a)},ya=/^(?:checkbox|radio)$/i;!function(){var a=Z.createDocumentFragment(),b=a.appendChild(Z.createElement("div")),c=Z.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),Y.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="<textarea>x</textarea>",Y.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var za="undefined";Y.focusinBubbles="onfocusin"in a;var Aa=/^key/,Ba=/^(?:mouse|pointer|contextmenu)|click/,Ca=/^(?:focusinfocus|focusoutblur)$/,Da=/^([^.]*)(?:\.(.+)|)$/;_.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,o,p,q=ra.get(a);if(q)for(c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=_.guid++),(i=q.events)||(i=q.events={}),(g=q.handle)||(g=q.handle=function(b){return typeof _!==za&&_.event.triggered!==b.type?_.event.dispatch.apply(a,arguments):void 
0}),b=(b||"").match(na)||[""],j=b.length;j--;)h=Da.exec(b[j])||[],n=p=h[1],o=(h[2]||"").split(".").sort(),n&&(l=_.event.special[n]||{},n=(e?l.delegateType:l.bindType)||n,l=_.event.special[n]||{},k=_.extend({type:n,origType:p,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&_.expr.match.needsContext.test(e),namespace:o.join(".")},f),(m=i[n])||(m=i[n]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,o,g)!==!1||a.addEventListener&&a.addEventListener(n,g,!1)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),_.event.global[n]=!0)},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,o,p,q=ra.hasData(a)&&ra.get(a);if(q&&(i=q.events)){for(b=(b||"").match(na)||[""],j=b.length;j--;)if(h=Da.exec(b[j])||[],n=p=h[1],o=(h[2]||"").split(".").sort(),n){for(l=_.event.special[n]||{},n=(d?l.delegateType:l.bindType)||n,m=i[n]||[],h=h[2]&&new RegExp("(^|\\.)"+o.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;f--;)k=m[f],!e&&p!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,o,q.handle)!==!1||_.removeEvent(a,n,q.handle),delete i[n])}else for(n in i)_.event.remove(a,n+b[j],c,d,!0);_.isEmptyObject(i)&&(delete q.handle,ra.remove(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,j,k,l,m=[d||Z],n=X.call(b,"type")?b.type:b,o=X.call(b,"namespace")?b.namespace.split("."):[];if(g=h=d=d||Z,3!==d.nodeType&&8!==d.nodeType&&!Ca.test(n+_.event.triggered)&&(n.indexOf(".")>=0&&(o=n.split("."),n=o.shift(),o.sort()),j=n.indexOf(":")<0&&"on"+n,b=b[_.expando]?b:new _.Event(n,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=o.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+o.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),
c=null==c?[b]:_.makeArray(c,[b]),l=_.event.special[n]||{},e||!l.trigger||l.trigger.apply(d,c)!==!1)){if(!e&&!l.noBubble&&!_.isWindow(d)){for(i=l.delegateType||n,Ca.test(i+n)||(g=g.parentNode);g;g=g.parentNode)m.push(g),h=g;h===(d.ownerDocument||Z)&&m.push(h.defaultView||h.parentWindow||a)}for(f=0;(g=m[f++])&&!b.isPropagationStopped();)b.type=f>1?i:l.bindType||n,k=(ra.get(g,"events")||{})[b.type]&&ra.get(g,"handle"),k&&k.apply(g,c),k=j&&g[j],k&&k.apply&&_.acceptData(g)&&(b.result=k.apply(g,c),b.result===!1&&b.preventDefault());return b.type=n,e||b.isDefaultPrevented()||l._default&&l._default.apply(m.pop(),c)!==!1||!_.acceptData(d)||j&&_.isFunction(d[n])&&!_.isWindow(d)&&(h=d[j],h&&(d[j]=null),_.event.triggered=n,d[n](),_.event.triggered=void 0,h&&(d[j]=h)),b.result}},dispatch:function(a){a=_.event.fix(a);var b,c,d,e,f,g=[],h=R.call(arguments),i=(ra.get(this,"events")||{})[a.type]||[],j=_.event.special[a.type]||{};if(h[0]=a,a.delegateTarget=this,!j.preDispatch||j.preDispatch.call(this,a)!==!1){for(g=_.event.handlers.call(this,a,i),b=0;(e=g[b++])&&!a.isPropagationStopped();)for(a.currentTarget=e.elem,c=0;(f=e.handlers[c++])&&!a.isImmediatePropagationStopped();)(!a.namespace_re||a.namespace_re.test(f.namespace))&&(a.handleObj=f,a.data=f.data,d=((_.event.special[f.origType]||{}).handle||f.handler).apply(e.elem,h),void 0!==d&&(a.result=d)===!1&&(a.preventDefault(),a.stopPropagation()));return j.postDispatch&&j.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!==this;i=i.parentNode||this)if(i.disabled!==!0||"click"!==a.type){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?_(e,this).index(i)>=0:_.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget 
shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button;return null==a.pageX&&null!=b.clientX&&(c=a.target.ownerDocument||Z,d=c.documentElement,e=c.body,a.pageX=b.clientX+(d&&d.scrollLeft||e&&e.scrollLeft||0)-(d&&d.clientLeft||e&&e.clientLeft||0),a.pageY=b.clientY+(d&&d.scrollTop||e&&e.scrollTop||0)-(d&&d.clientTop||e&&e.clientTop||0)),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},fix:function(a){if(a[_.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];for(g||(this.fixHooks[e]=g=Ba.test(e)?this.mouseHooks:Aa.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new _.Event(f),b=d.length;b--;)c=d[b],a[c]=f[c];return a.target||(a.target=Z),3===a.target.nodeType&&(a.target=a.target.parentNode),g.filter?g.filter(a,f):a},special:{load:{noBubble:!0},focus:{trigger:function(){return this!==l()&&this.focus?(this.focus(),!1):void 0},delegateType:"focusin"},blur:{trigger:function(){return this===l()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return"checkbox"===this.type&&this.click&&_.nodeName(this,"input")?(this.click(),!1):void 0},_default:function(a){return _.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=_.extend(new _.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?_.event.trigger(e,null,b):_.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},_.removeEvent=function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)},_.Event=function(a,b){return this instanceof 
_.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?j:k):this.type=a,b&&_.extend(this,b),this.timeStamp=a&&a.timeStamp||_.now(),void(this[_.expando]=!0)):new _.Event(a,b)},_.Event.prototype={isDefaultPrevented:k,isPropagationStopped:k,isImmediatePropagationStopped:k,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=j,a&&a.preventDefault&&a.preventDefault()},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=j,a&&a.stopPropagation&&a.stopPropagation()},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=j,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},_.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){_.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!_.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),Y.focusinBubbles||_.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){_.event.simulate(b,a.target,_.event.fix(a),!0)};_.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=ra.access(d,b);e||d.addEventListener(a,c,!0),ra.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=ra.access(d,b)-1;e?ra.access(d,b,e):(d.removeEventListener(a,c,!0),ra.remove(d,b))}}}),_.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(g in a)this.on(g,b,c,a[g],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=k;else if(!d)return this;return 1===e&&(f=d,d=function(a){return 
_().off(a),f.apply(this,arguments)},d.guid=f.guid||(f.guid=_.guid++)),this.each(function(){_.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,_(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=k),this.each(function(){_.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){_.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?_.event.trigger(a,b,c,!0):void 0}});var Ea=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,Fa=/<([\w:]+)/,Ga=/<|&#?\w+;/,Ha=/<(?:script|style|link)/i,Ia=/checked\s*(?:[^=]|=\s*.checked.)/i,Ja=/^$|\/(?:java|ecma)script/i,Ka=/^true\/(.*)/,La=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,Ma={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};Ma.optgroup=Ma.option,Ma.tbody=Ma.tfoot=Ma.colgroup=Ma.caption=Ma.thead,Ma.th=Ma.td,_.extend({clone:function(a,b,c){var d,e,f,g,h=a.cloneNode(!0),i=_.contains(a.ownerDocument,a);if(!(Y.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||_.isXMLDoc(a)))for(g=r(h),f=r(a),d=0,e=f.length;e>d;d++)s(f[d],g[d]);if(b)if(c)for(f=f||r(a),g=g||r(h),d=0,e=f.length;e>d;d++)q(f[d],g[d]);else q(a,h);return g=r(h,"script"),g.length>0&&p(g,!i&&r(a,"script")),h},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,k=b.createDocumentFragment(),l=[],m=0,n=a.length;n>m;m++)if(e=a[m],e||0===e)if("object"===_.type(e))_.merge(l,e.nodeType?[e]:e);else 
if(Ga.test(e)){for(f=f||k.appendChild(b.createElement("div")),g=(Fa.exec(e)||["",""])[1].toLowerCase(),h=Ma[g]||Ma._default,f.innerHTML=h[1]+e.replace(Ea,"<$1></$2>")+h[2],j=h[0];j--;)f=f.lastChild;_.merge(l,f.childNodes),f=k.firstChild,f.textContent=""}else l.push(b.createTextNode(e));for(k.textContent="",m=0;e=l[m++];)if((!d||-1===_.inArray(e,d))&&(i=_.contains(e.ownerDocument,e),f=r(k.appendChild(e),"script"),i&&p(f),c))for(j=0;e=f[j++];)Ja.test(e.type||"")&&c.push(e);return k},cleanData:function(a){for(var b,c,d,e,f=_.event.special,g=0;void 0!==(c=a[g]);g++){if(_.acceptData(c)&&(e=c[ra.expando],e&&(b=ra.cache[e]))){if(b.events)for(d in b.events)f[d]?_.event.remove(c,d):_.removeEvent(c,d,b.handle);ra.cache[e]&&delete ra.cache[e]}delete sa.cache[c[sa.expando]]}}}),_.fn.extend({text:function(a){return qa(this,function(a){return void 0===a?_.text(this):this.empty().each(function(){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&(this.textContent=a)})},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=m(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=m(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?_.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||_.cleanData(r(c)),c.parentNode&&(b&&_.contains(c.ownerDocument,c)&&p(r(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(_.cleanData(r(a,!1)),a.textContent="");return this},clone:function(a,b){return 
a=null==a?!1:a,b=null==b?a:b,this.map(function(){return _.clone(this,a,b)})},html:function(a){return qa(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!Ha.test(a)&&!Ma[(Fa.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Ea,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(_.cleanData(r(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,_.cleanData(r(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=S.apply([],a);var c,d,e,f,g,h,i=0,j=this.length,k=this,l=j-1,m=a[0],p=_.isFunction(m);if(p||j>1&&"string"==typeof m&&!Y.checkClone&&Ia.test(m))return this.each(function(c){var d=k.eq(c);p&&(a[0]=m.call(this,c,d.html())),d.domManip(a,b)});if(j&&(c=_.buildFragment(a,this[0].ownerDocument,!1,this),d=c.firstChild,1===c.childNodes.length&&(c=d),d)){for(e=_.map(r(c,"script"),n),f=e.length;j>i;i++)g=c,i!==l&&(g=_.clone(g,!0,!0),f&&_.merge(e,r(g,"script"))),b.call(this[i],g,i);if(f)for(h=e[e.length-1].ownerDocument,_.map(e,o),i=0;f>i;i++)g=e[i],Ja.test(g.type||"")&&!ra.access(g,"globalEval")&&_.contains(h,g)&&(g.src?_._evalUrl&&_._evalUrl(g.src):_.globalEval(g.textContent.replace(La,"")))}return this}}),_.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){_.fn[a]=function(a){for(var c,d=[],e=_(a),f=e.length-1,g=0;f>=g;g++)c=g===f?this:this.clone(!0),_(e[g])[b](c),T.apply(d,c.get());return this.pushStack(d)}});var Na,Oa={},Pa=/^margin/,Qa=new RegExp("^("+va+")(?!px)[a-z%]+$","i"),Ra=function(b){return b.ownerDocument.defaultView.opener?b.ownerDocument.defaultView.getComputedStyle(b,null):a.getComputedStyle(b,null)};!function(){function 
b(){g.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",g.innerHTML="",e.appendChild(f);var b=a.getComputedStyle(g,null);c="1%"!==b.top,d="4px"===b.width,e.removeChild(f)}var c,d,e=Z.documentElement,f=Z.createElement("div"),g=Z.createElement("div");g.style&&(g.style.backgroundClip="content-box",g.cloneNode(!0).style.backgroundClip="",Y.clearCloneStyle="content-box"===g.style.backgroundClip,f.style.cssText="border:0;width:0;height:0;top:0;left:-9999px;margin-top:1px;position:absolute",f.appendChild(g),a.getComputedStyle&&_.extend(Y,{pixelPosition:function(){return b(),c},boxSizingReliable:function(){return null==d&&b(),d},reliableMarginRight:function(){var b,c=g.appendChild(Z.createElement("div"));return c.style.cssText=g.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",c.style.marginRight=c.style.width="0",g.style.width="1px",e.appendChild(f),b=!parseFloat(a.getComputedStyle(c,null).marginRight),e.removeChild(f),g.removeChild(c),b}}))}(),_.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var Sa=/^(none|table(?!-c[ea]).+)/,Ta=new RegExp("^("+va+")(.*)$","i"),Ua=new RegExp("^([+-])=("+va+")","i"),Va={position:"absolute",visibility:"hidden",display:"block"},Wa={letterSpacing:"0",fontWeight:"400"},Xa=["Webkit","O","Moz","ms"];_.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=v(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=_.camelCase(b),i=a.style;return 
b=_.cssProps[h]||(_.cssProps[h]=x(i,h)),g=_.cssHooks[b]||_.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=Ua.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(_.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||_.cssNumber[h]||(c+="px"),Y.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=_.camelCase(b);return b=_.cssProps[h]||(_.cssProps[h]=x(a.style,h)),g=_.cssHooks[b]||_.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=v(a,b,d)),"normal"===e&&b in Wa&&(e=Wa[b]),""===c||c?(f=parseFloat(e),c===!0||_.isNumeric(f)?f||0:e):e}}),_.each(["height","width"],function(a,b){_.cssHooks[b]={get:function(a,c,d){return c?Sa.test(_.css(a,"display"))&&0===a.offsetWidth?_.swap(a,Va,function(){return A(a,b,d)}):A(a,b,d):void 0},set:function(a,c,d){var e=d&&Ra(a);return y(a,c,d?z(a,b,d,"border-box"===_.css(a,"boxSizing",!1,e),e):0)}}}),_.cssHooks.marginRight=w(Y.reliableMarginRight,function(a,b){return b?_.swap(a,{display:"inline-block"},v,[a,"marginRight"]):void 0}),_.each({margin:"",padding:"",border:"Width"},function(a,b){_.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+wa[d]+b]=f[d]||f[d-2]||f[0];return e}},Pa.test(a)||(_.cssHooks[a+b].set=y)}),_.fn.extend({css:function(a,b){return qa(this,function(a,b,c){var d,e,f={},g=0;if(_.isArray(b)){for(d=Ra(a),e=b.length;e>g;g++)f[b[g]]=_.css(a,b[g],!1,d);return f}return void 0!==c?_.style(a,b,c):_.css(a,b)},a,b,arguments.length>1)},show:function(){return B(this,!0)},hide:function(){return B(this)},toggle:function(a){return"boolean"==typeof 
a?a?this.show():this.hide():this.each(function(){xa(this)?_(this).show():_(this).hide()})}}),_.Tween=C,C.prototype={constructor:C,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(_.cssNumber[c]?"":"px")},cur:function(){var a=C.propHooks[this.prop];return a&&a.get?a.get(this):C.propHooks._default.get(this)},run:function(a){var b,c=C.propHooks[this.prop];return this.options.duration?this.pos=b=_.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):C.propHooks._default.set(this),this}},C.prototype.init.prototype=C.prototype,C.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=_.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){_.fx.step[a.prop]?_.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[_.cssProps[a.prop]]||_.cssHooks[a.prop])?_.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},C.propHooks.scrollTop=C.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},_.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},_.fx=C.prototype.init,_.fx.step={};var Ya,Za,$a=/^(?:toggle|show|hide)$/,_a=new RegExp("^(?:([+-])=|)("+va+")([a-z%]*)$","i"),ab=/queueHooks$/,bb=[G],cb={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=_a.exec(b),f=e&&e[3]||(_.cssNumber[a]?"":"px"),g=(_.cssNumber[a]||"px"!==f&&+d)&&_a.exec(_.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,_.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};_.Animation=_.extend(I,{tweener:function(a,b){_.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var 
c,d=0,e=a.length;e>d;d++)c=a[d],cb[c]=cb[c]||[],cb[c].unshift(b)},prefilter:function(a,b){b?bb.unshift(a):bb.push(a)}}),_.speed=function(a,b,c){var d=a&&"object"==typeof a?_.extend({},a):{complete:c||!c&&b||_.isFunction(a)&&a,duration:a,easing:c&&b||b&&!_.isFunction(b)&&b};return d.duration=_.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in _.fx.speeds?_.fx.speeds[d.duration]:_.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){_.isFunction(d.old)&&d.old.call(this),d.queue&&_.dequeue(this,d.queue)},d},_.fn.extend({fadeTo:function(a,b,c,d){return this.filter(xa).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=_.isEmptyObject(a),f=_.speed(b,c,d),g=function(){var b=I(this,_.extend({},a),f);(e||ra.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=_.timers,g=ra.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&ab.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&_.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=ra.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=_.timers,g=d?d.length:0;for(c.finish=!0,_.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),_.each(["toggle","show","hide"],function(a,b){var c=_.fn[b];_.fn[b]=function(a,d,e){return null==a||"boolean"==typeof 
a?c.apply(this,arguments):this.animate(E(b,!0),a,d,e)}}),_.each({slideDown:E("show"),slideUp:E("hide"),slideToggle:E("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){_.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),_.timers=[],_.fx.tick=function(){var a,b=0,c=_.timers;for(Ya=_.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||_.fx.stop(),Ya=void 0},_.fx.timer=function(a){_.timers.push(a),a()?_.fx.start():_.timers.pop()},_.fx.interval=13,_.fx.start=function(){Za||(Za=setInterval(_.fx.tick,_.fx.interval))},_.fx.stop=function(){clearInterval(Za),Za=null},_.fx.speeds={slow:600,fast:200,_default:400},_.fn.delay=function(a,b){return a=_.fx?_.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a=Z.createElement("input"),b=Z.createElement("select"),c=b.appendChild(Z.createElement("option"));a.type="checkbox",Y.checkOn=""!==a.value,Y.optSelected=c.selected,b.disabled=!0,Y.optDisabled=!c.disabled,a=Z.createElement("input"),a.value="t",a.type="radio",Y.radioValue="t"===a.value}();var db,eb,fb=_.expr.attrHandle;_.fn.extend({attr:function(a,b){return qa(this,_.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){_.removeAttr(this,a)})}}),_.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===za?_.prop(a,b,c):(1===f&&_.isXMLDoc(a)||(b=b.toLowerCase(),d=_.attrHooks[b]||(_.expr.match.bool.test(b)?eb:db)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=_.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void _.removeAttr(a,b))},removeAttr:function(a,b){var 
c,d,e=0,f=b&&b.match(na);if(f&&1===a.nodeType)for(;c=f[e++];)d=_.propFix[c]||c,_.expr.match.bool.test(c)&&(a[d]=!1),a.removeAttribute(c)},attrHooks:{type:{set:function(a,b){if(!Y.radioValue&&"radio"===b&&_.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),eb={set:function(a,b,c){return b===!1?_.removeAttr(a,c):a.setAttribute(c,c),c}},_.each(_.expr.match.bool.source.match(/\w+/g),function(a,b){var c=fb[b]||_.find.attr;fb[b]=function(a,b,d){var e,f;return d||(f=fb[b],fb[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,fb[b]=f),e}});var gb=/^(?:input|select|textarea|button)$/i;_.fn.extend({prop:function(a,b){return qa(this,_.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[_.propFix[a]||a]})}}),_.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!_.isXMLDoc(a),f&&(b=_.propFix[b]||b,e=_.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){return a.hasAttribute("tabindex")||gb.test(a.nodeName)||a.href?a.tabIndex:-1}}}}),Y.optSelected||(_.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null}}),_.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){_.propFix[this.toLowerCase()]=this});var hb=/[\t\r\n\f]/g;_.fn.extend({addClass:function(a){var b,c,d,e,f,g,h="string"==typeof a&&a,i=0,j=this.length;if(_.isFunction(a))return this.each(function(b){_(this).addClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(na)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(hb," "):" ")){for(f=0;e=b[f++];)d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=_.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var 
b,c,d,e,f,g,h=0===arguments.length||"string"==typeof a&&a,i=0,j=this.length;if(_.isFunction(a))return this.each(function(b){_(this).removeClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(na)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(hb," "):"")){for(f=0;e=b[f++];)for(;d.indexOf(" "+e+" ")>=0;)d=d.replace(" "+e+" "," ");g=a?_.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):_.isFunction(a)?this.each(function(c){_(this).toggleClass(a.call(this,c,this.className,b),b)}):this.each(function(){if("string"===c)for(var b,d=0,e=_(this),f=a.match(na)||[];b=f[d++];)e.hasClass(b)?e.removeClass(b):e.addClass(b);else(c===za||"boolean"===c)&&(this.className&&ra.set(this,"__className__",this.className),this.className=this.className||a===!1?"":ra.get(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(hb," ").indexOf(b)>=0)return!0;return!1}});var ib=/\r/g;_.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=_.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,_(this).val()):a,null==e?e="":"number"==typeof e?e+="":_.isArray(e)&&(e=_.map(e,function(a){return null==a?"":a+""})),b=_.valHooks[this.type]||_.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=_.valHooks[e.type]||_.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(ib,""):null==c?"":c)}}}),_.extend({valHooks:{option:{get:function(a){var b=_.find.attr(a,"value");return null!=b?b:_.trim(_.text(a))}},select:{get:function(a){for(var 
b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],(c.selected||i===e)&&(Y.optDisabled?!c.disabled:null===c.getAttribute("disabled"))&&(!c.parentNode.disabled||!_.nodeName(c.parentNode,"optgroup"))){if(b=_(c).val(),f)return b;g.push(b)}return g},set:function(a,b){for(var c,d,e=a.options,f=_.makeArray(b),g=e.length;g--;)d=e[g],(d.selected=_.inArray(d.value,f)>=0)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),_.each(["radio","checkbox"],function(){_.valHooks[this]={set:function(a,b){return _.isArray(b)?a.checked=_.inArray(_(a).val(),b)>=0:void 0}},Y.checkOn||(_.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})}),_.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){_.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),_.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var jb=_.now(),kb=/\?/;_.parseJSON=function(a){return JSON.parse(a+"")},_.parseXML=function(a){var b,c;if(!a||"string"!=typeof a)return null;try{c=new DOMParser,b=c.parseFromString(a,"text/xml")}catch(d){b=void 0}return(!b||b.getElementsByTagName("parsererror").length)&&_.error("Invalid XML: "+a),b};var lb=/#.*$/,mb=/([?&])_=[^&]*/,nb=/^(.*?):[ 
\t]*([^\r\n]*)$/gm,ob=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,pb=/^(?:GET|HEAD)$/,qb=/^\/\//,rb=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,sb={},tb={},ub="*/".concat("*"),vb=a.location.href,wb=rb.exec(vb.toLowerCase())||[];_.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:vb,type:"GET",isLocal:ob.test(wb[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":ub,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":_.parseJSON,"text xml":_.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?L(L(a,_.ajaxSettings),b):L(_.ajaxSettings,a)},ajaxPrefilter:J(sb),ajaxTransport:J(tb),ajax:function(a,b){function c(a,b,c,g){var i,k,r,s,u,w=b;2!==t&&(t=2,h&&clearTimeout(h),d=void 0,f=g||"",v.readyState=a>0?4:0,i=a>=200&&300>a||304===a,c&&(s=M(l,v,c)),s=N(l,s,v,i),i?(l.ifModified&&(u=v.getResponseHeader("Last-Modified"),u&&(_.lastModified[e]=u),u=v.getResponseHeader("etag"),u&&(_.etag[e]=u)),204===a||"HEAD"===l.type?w="nocontent":304===a?w="notmodified":(w=s.state,k=s.data,r=s.error,i=!r)):(r=w,(a||!w)&&(w="error",0>a&&(a=0))),v.status=a,v.statusText=(b||w)+"",i?o.resolveWith(m,[k,w,v]):o.rejectWith(m,[v,w,r]),v.statusCode(q),q=void 0,j&&n.trigger(i?"ajaxSuccess":"ajaxError",[v,l,i?k:r]),p.fireWith(m,[v,w]),j&&(n.trigger("ajaxComplete",[v,l]),--_.active||_.event.trigger("ajaxStop")))}"object"==typeof a&&(b=a,a=void 0),b=b||{};var d,e,f,g,h,i,j,k,l=_.ajaxSetup({},b),m=l.context||l,n=l.context&&(m.nodeType||m.jquery)?_(m):_.event,o=_.Deferred(),p=_.Callbacks("once memory"),q=l.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var 
b;if(2===t){if(!g)for(g={};b=nb.exec(f);)g[b[1].toLowerCase()]=b[2];b=g[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?f:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(l.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return d&&d.abort(b),c(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,l.url=((a||l.url||vb)+"").replace(lb,"").replace(qb,wb[1]+"//"),l.type=b.method||b.type||l.method||l.type,l.dataTypes=_.trim(l.dataType||"*").toLowerCase().match(na)||[""],null==l.crossDomain&&(i=rb.exec(l.url.toLowerCase()),l.crossDomain=!(!i||i[1]===wb[1]&&i[2]===wb[2]&&(i[3]||("http:"===i[1]?"80":"443"))===(wb[3]||("http:"===wb[1]?"80":"443")))),l.data&&l.processData&&"string"!=typeof l.data&&(l.data=_.param(l.data,l.traditional)),K(sb,l,b,v),2===t)return v;j=_.event&&l.global,j&&0===_.active++&&_.event.trigger("ajaxStart"),l.type=l.type.toUpperCase(),l.hasContent=!pb.test(l.type),e=l.url,l.hasContent||(l.data&&(e=l.url+=(kb.test(e)?"&":"?")+l.data,delete l.data),l.cache===!1&&(l.url=mb.test(e)?e.replace(mb,"$1_="+jb++):e+(kb.test(e)?"&":"?")+"_="+jb++)),l.ifModified&&(_.lastModified[e]&&v.setRequestHeader("If-Modified-Since",_.lastModified[e]),_.etag[e]&&v.setRequestHeader("If-None-Match",_.etag[e])),(l.data&&l.hasContent&&l.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",l.contentType),v.setRequestHeader("Accept",l.dataTypes[0]&&l.accepts[l.dataTypes[0]]?l.accepts[l.dataTypes[0]]+("*"!==l.dataTypes[0]?", "+ub+"; q=0.01":""):l.accepts["*"]);for(k in l.headers)v.setRequestHeader(k,l.headers[k]);if(l.beforeSend&&(l.beforeSend.call(m,v,l)===!1||2===t))return v.abort();u="abort";for(k 
in{success:1,error:1,complete:1})v[k](l[k]);if(d=K(tb,l,b,v)){v.readyState=1,j&&n.trigger("ajaxSend",[v,l]),l.async&&l.timeout>0&&(h=setTimeout(function(){v.abort("timeout")},l.timeout));try{t=1,d.send(r,c)}catch(w){if(!(2>t))throw w;c(-1,w)}}else c(-1,"No Transport");return v},getJSON:function(a,b,c){return _.get(a,b,c,"json")},getScript:function(a,b){return _.get(a,void 0,b,"script")}}),_.each(["get","post"],function(a,b){_[b]=function(a,c,d,e){return _.isFunction(c)&&(e=e||d,d=c,c=void 0),_.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),_._evalUrl=function(a){return _.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},_.fn.extend({wrapAll:function(a){var b;return _.isFunction(a)?this.each(function(b){_(this).wrapAll(a.call(this,b))}):(this[0]&&(b=_(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){for(var a=this;a.firstElementChild;)a=a.firstElementChild;return a}).append(this)),this)},wrapInner:function(a){return _.isFunction(a)?this.each(function(b){_(this).wrapInner(a.call(this,b))}):this.each(function(){var b=_(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=_.isFunction(a);return this.each(function(c){_(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){_.nodeName(this,"body")||_(this).replaceWith(this.childNodes)}).end()}}),_.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0},_.expr.filters.visible=function(a){return!_.expr.filters.hidden(a)};var xb=/%20/g,yb=/\[\]$/,zb=/\r?\n/g,Ab=/^(?:submit|button|image|reset|file)$/i,Bb=/^(?:input|select|textarea|keygen)/i;_.param=function(a,b){var c,d=[],e=function(a,b){b=_.isFunction(b)?b():null==b?"":b,
d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=_.ajaxSettings&&_.ajaxSettings.traditional),_.isArray(a)||a.jquery&&!_.isPlainObject(a))_.each(a,function(){e(this.name,this.value)});else for(c in a)O(c,a[c],b,e);return d.join("&").replace(xb,"+")},_.fn.extend({serialize:function(){return _.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=_.prop(this,"elements");return a?_.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!_(this).is(":disabled")&&Bb.test(this.nodeName)&&!Ab.test(a)&&(this.checked||!ya.test(a))}).map(function(a,b){var c=_(this).val();return null==c?null:_.isArray(c)?_.map(c,function(a){return{name:b.name,value:a.replace(zb,"\r\n")}}):{name:b.name,value:c.replace(zb,"\r\n")}}).get()}}),_.ajaxSettings.xhr=function(){try{return new XMLHttpRequest}catch(a){}};var Cb=0,Db={},Eb={0:200,1223:204},Fb=_.ajaxSettings.xhr();a.attachEvent&&a.attachEvent("onunload",function(){for(var a in Db)Db[a]()}),Y.cors=!!Fb&&"withCredentials"in Fb,Y.ajax=Fb=!!Fb,_.ajaxTransport(function(a){var b;return Y.cors||Fb&&!a.crossDomain?{send:function(c,d){var e,f=a.xhr(),g=++Cb;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)f.setRequestHeader(e,c[e]);b=function(a){return function(){b&&(delete Db[g],b=f.onload=f.onerror=null,"abort"===a?f.abort():"error"===a?d(f.status,f.statusText):d(Eb[f.status]||f.status,f.statusText,"string"==typeof f.responseText?{text:f.responseText}:void 0,f.getAllResponseHeaders()))}},f.onload=b(),f.onerror=b("error"),b=Db[g]=b("abort");try{f.send(a.hasContent&&a.data||null)}catch(h){if(b)throw h}},abort:function(){b&&b()}}:void 0}),_.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, 
application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return _.globalEval(a),a}}}),_.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),_.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(d,e){b=_("<script>").prop({async:!0,charset:a.scriptCharset,src:a.url}).on("load error",c=function(a){b.remove(),c=null,a&&e("error"===a.type?404:200,a.type)}),Z.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Gb=[],Hb=/(=)\?(?=&|$)|\?\?/;_.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Gb.pop()||_.expando+"_"+jb++;return this[a]=!0,a}}),_.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Hb.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Hb.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=_.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Hb,"$1"+e):b.jsonp!==!1&&(b.url+=(kb.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||_.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Gb.push(e)),g&&_.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),_.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||Z;var d=ga.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=_.buildFragment([a],b,e),e&&e.length&&_(e).remove(),_.merge([],d.childNodes))};var Ib=_.fn.load;_.fn.load=function(a,b,c){if("string"!=typeof a&&Ib)return Ib.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=_.trim(a.slice(h)),a=a.slice(0,h)),_.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof 
b&&(e="POST"),g.length>0&&_.ajax({url:a,type:e,dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?_("<div>").append(_.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,f||[a.responseText,b,a])}),this},_.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){_.fn[b]=function(a){return this.on(b,a)}}),_.expr.filters.animated=function(a){return _.grep(_.timers,function(b){return a===b.elem}).length};var Jb=a.document.documentElement;_.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=_.css(a,"position"),l=_(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=_.css(a,"top"),i=_.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),_.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},_.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){_.offset.setOffset(this,a,b)});var b,c,d=this[0],e={top:0,left:0},f=d&&d.ownerDocument;if(f)return b=f.documentElement,_.contains(b,d)?(typeof d.getBoundingClientRect!==za&&(e=d.getBoundingClientRect()),c=P(f),{top:e.top+c.pageYOffset-b.clientTop,left:e.left+c.pageXOffset-b.clientLeft}):e},position:function(){if(this[0]){var a,b,c=this[0],d={top:0,left:0};return"fixed"===_.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),_.nodeName(a[0],"html")||(d=a.offset()),d.top+=_.css(a[0],"borderTopWidth",!0),d.left+=_.css(a[0],"borderLeftWidth",!0)),{top:b.top-d.top-_.css(c,"marginTop",!0),left:b.left-d.left-_.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||Jb;a&&!_.nodeName(a,"html")&&"static"===_.css(a,"position");)a=a.offsetParent;return a||Jb})}}),_.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(b,c){var 
d="pageYOffset"===c;_.fn[b]=function(e){return qa(this,function(b,e,f){var g=P(b);return void 0===f?g?g[c]:b[e]:void(g?g.scrollTo(d?a.pageXOffset:f,d?f:a.pageYOffset):b[e]=f)},b,e,arguments.length,null)}}),_.each(["top","left"],function(a,b){_.cssHooks[b]=w(Y.pixelPosition,function(a,c){return c?(c=v(a,b),Qa.test(c)?_(a).position()[b]+"px":c):void 0})}),_.each({Height:"height",Width:"width"},function(a,b){_.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){_.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return qa(this,function(b,c,d){var e;return _.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?_.css(b,c,g):_.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),_.fn.size=function(){return this.length},_.fn.andSelf=_.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return _});var Kb=a.jQuery,Lb=a.$;return _.noConflict=function(b){return a.$===_&&(a.$=Lb),b&&a.jQuery===_&&(a.jQuery=Kb),_},typeof b===za&&(a.jQuery=a.$=_),_}); | zurb-foundation | /zurb-foundation-5.5.3.tar.gz/zurb-foundation-5.5.3/js/vendor/jquery.js | jquery.js |
/*
 * jQuery "placeholder" polyfill plugin (minified vendored copy — presumably
 * Mathias Bynens' jquery-placeholder; TODO confirm exact upstream/version).
 *
 * Purpose: emulate the HTML5 `placeholder` attribute on <input>/<textarea>
 * in browsers that lack native support.
 *
 * What the visible code does:
 *  - UMD-style wrapper: registers via AMD `define(["jquery"], ...)` when
 *    available, otherwise invokes the factory with the global `jQuery`.
 *  - Feature-detects native support with
 *    `"placeholder" in document.createElement("input"/"textarea")`, and
 *    force-disables the native path under Opera Mini
 *    (`"[object OperaMini]" == Object.prototype.toString.call(window.operamini)`).
 *  - When both input and textarea support exist, `$.fn.placeholder` becomes a
 *    no-op returning `this`, with `.input`/`.textarea` flags set to true.
 *  - Otherwise it binds `focus.placeholder`/`blur.placeholder` handlers that
 *    copy the placeholder text into the field's value and toggle the
 *    "placeholder" CSS class; for password inputs it clones a sibling
 *    type="text" input (attributes copied via the local `b(element)` helper,
 *    skipping jQuery's internal `jQuery<digits>` expando attributes) and
 *    shows/hides the pair, moving the `id` between them via
 *    "placeholder-id"/"placeholder-password"/"placeholder-textinput" data.
 *  - Installs `$.valHooks` / `$.propHooks` get/set overrides (for whichever of
 *    input/textarea lacks support) so `.val()` reads "" — not the placeholder
 *    text — while the placeholder is displayed, and setting a value routes
 *    through the same show/hide logic.
 *  - On `submit.placeholder` (delegated on document for "form") it clears
 *    placeholder text before serialization and restores it 10ms later; on
 *    `beforeunload.placeholder` it blanks every ".placeholder" field so the
 *    placeholder text is not persisted by back/forward form restoration
 *    (NOTE(review): that rationale is inferred from the handler's effect —
 *    confirm against upstream docs).
 *
 * Code below is intentionally left byte-identical (minified); do not hand-edit.
 */
!function(a){"function"==typeof define&&define.amd?define(["jquery"],a):a(jQuery)}(function(a){function b(b){var c={},d=/^jQuery\d+$/;return a.each(b.attributes,function(a,b){b.specified&&!d.test(b.name)&&(c[b.name]=b.value)}),c}function c(b,c){var d=this,f=a(d);if(d.value==f.attr("placeholder")&&f.hasClass("placeholder"))if(f.data("placeholder-password")){if(f=f.hide().nextAll('input[type="password"]:first').show().attr("id",f.removeAttr("id").data("placeholder-id")),b===!0)return f[0].value=c;f.focus()}else d.value="",f.removeClass("placeholder"),d==e()&&d.select()}function d(){var d,e=this,f=a(e),g=this.id;if(""===e.value){if("password"===e.type){if(!f.data("placeholder-textinput")){try{d=f.clone().attr({type:"text"})}catch(h){d=a("<input>").attr(a.extend(b(this),{type:"text"}))}d.removeAttr("name").data({"placeholder-password":f,"placeholder-id":g}).bind("focus.placeholder",c),f.data({"placeholder-textinput":d,"placeholder-id":g}).before(d)}f=f.removeAttr("id").hide().prevAll('input[type="text"]:first').attr("id",g).show()}f.addClass("placeholder"),f[0].value=f.attr("placeholder")}else f.removeClass("placeholder")}function e(){try{return document.activeElement}catch(a){}}var f,g,h="[object OperaMini]"==Object.prototype.toString.call(window.operamini),i="placeholder"in document.createElement("input")&&!h,j="placeholder"in document.createElement("textarea")&&!h,k=a.valHooks,l=a.propHooks;i&&j?(g=a.fn.placeholder=function(){return this},g.input=g.textarea=!0):(g=a.fn.placeholder=function(){var a=this;return a.filter((i?"textarea":":input")+"[placeholder]").not(".placeholder").bind({"focus.placeholder":c,"blur.placeholder":d}).data("placeholder-enabled",!0).trigger("blur.placeholder"),a},g.input=i,g.textarea=j,f={get:function(b){var c=a(b),d=c.data("placeholder-password");return d?d[0].value:c.data("placeholder-enabled")&&c.hasClass("placeholder")?"":b.value},set:function(b,f){var g=a(b),h=g.data("placeholder-password");return 
h?h[0].value=f:g.data("placeholder-enabled")?(""===f?(b.value=f,b!=e()&&d.call(b)):g.hasClass("placeholder")?c.call(b,!0,f)||(b.value=f):b.value=f,g):b.value=f}},i||(k.input=f,l.value=f),j||(k.textarea=f,l.value=f),a(function(){a(document).delegate("form","submit.placeholder",function(){var b=a(".placeholder",this).each(c);setTimeout(function(){b.each(d)},10)})}),a(window).bind("beforeunload.placeholder",function(){a(".placeholder").each(function(){this.value=""})}))});
/*
 * FastClick (MINIFIED VENDOR FILE — do not edit by hand).
 * Removes the ~300 ms click delay on touch browsers by tracking
 * touchstart/touchmove/touchend and synthesizing an immediate click event,
 * with per-platform workarounds (Android, iOS, BB10, Windows Phone) and a
 * notNeeded() check that bails out on browsers/viewports where the native
 * delay is already absent. Exposed as FastClick.attach(layer, options);
 * supports AMD, CommonJS, and a window.FastClick global.
 */
!function(){"use strict";/**
 * @preserve FastClick: polyfill to remove click delays on browsers with touch UIs.
 *
 * @codingstandard ftlabs-jsv2
 * @copyright The Financial Times Limited [All Rights Reserved]
 * @license MIT License (see LICENSE.txt)
 */
function a(b,d){function e(a,b){return function(){return a.apply(b,arguments)}}var f;if(d=d||{},this.trackingClick=!1,this.trackingClickStart=0,this.targetElement=null,this.touchStartX=0,this.touchStartY=0,this.lastTouchIdentifier=0,this.touchBoundary=d.touchBoundary||10,this.layer=b,this.tapDelay=d.tapDelay||200,this.tapTimeout=d.tapTimeout||700,!a.notNeeded(b)){for(var g=["onMouse","onClick","onTouchStart","onTouchMove","onTouchEnd","onTouchCancel"],h=this,i=0,j=g.length;j>i;i++)h[g[i]]=e(h[g[i]],h);c&&(b.addEventListener("mouseover",this.onMouse,!0),b.addEventListener("mousedown",this.onMouse,!0),b.addEventListener("mouseup",this.onMouse,!0)),b.addEventListener("click",this.onClick,!0),b.addEventListener("touchstart",this.onTouchStart,!1),b.addEventListener("touchmove",this.onTouchMove,!1),b.addEventListener("touchend",this.onTouchEnd,!1),b.addEventListener("touchcancel",this.onTouchCancel,!1),Event.prototype.stopImmediatePropagation||(b.removeEventListener=function(a,c,d){var e=Node.prototype.removeEventListener;"click"===a?e.call(b,a,c.hijacked||c,d):e.call(b,a,c,d)},b.addEventListener=function(a,c,d){var e=Node.prototype.addEventListener;"click"===a?e.call(b,a,c.hijacked||(c.hijacked=function(a){a.propagationStopped||c(a)}),d):e.call(b,a,c,d)}),"function"==typeof b.onclick&&(f=b.onclick,b.addEventListener("click",function(a){f(a)},!1),b.onclick=null)}}var b=navigator.userAgent.indexOf("Windows Phone")>=0,c=navigator.userAgent.indexOf("Android")>0&&!b,d=/iP(ad|hone|od)/.test(navigator.userAgent)&&!b,e=d&&/OS 4_\d(_\d)?/.test(navigator.userAgent),f=d&&/OS 
[6-7]_\d/.test(navigator.userAgent),g=navigator.userAgent.indexOf("BB10")>0;a.prototype.needsClick=function(a){switch(a.nodeName.toLowerCase()){case"button":case"select":case"textarea":if(a.disabled)return!0;break;case"input":if(d&&"file"===a.type||a.disabled)return!0;break;case"label":case"iframe":case"video":return!0}return/\bneedsclick\b/.test(a.className)},a.prototype.needsFocus=function(a){switch(a.nodeName.toLowerCase()){case"textarea":return!0;case"select":return!c;case"input":switch(a.type){case"button":case"checkbox":case"file":case"image":case"radio":case"submit":return!1}return!a.disabled&&!a.readOnly;default:return/\bneedsfocus\b/.test(a.className)}},a.prototype.sendClick=function(a,b){var c,d;document.activeElement&&document.activeElement!==a&&document.activeElement.blur(),d=b.changedTouches[0],c=document.createEvent("MouseEvents"),c.initMouseEvent(this.determineEventType(a),!0,!0,window,1,d.screenX,d.screenY,d.clientX,d.clientY,!1,!1,!1,!1,0,null),c.forwardedTouchEvent=!0,a.dispatchEvent(c)},a.prototype.determineEventType=function(a){return c&&"select"===a.tagName.toLowerCase()?"mousedown":"click"},a.prototype.focus=function(a){var b;d&&a.setSelectionRange&&0!==a.type.indexOf("date")&&"time"!==a.type&&"month"!==a.type?(b=a.value.length,a.setSelectionRange(b,b)):a.focus()},a.prototype.updateScrollParent=function(a){var b,c;if(b=a.fastClickScrollParent,!b||!b.contains(a)){c=a;do{if(c.scrollHeight>c.offsetHeight){b=c,a.fastClickScrollParent=c;break}c=c.parentElement}while(c)}b&&(b.fastClickLastScrollTop=b.scrollTop)},a.prototype.getTargetElementFromEventTarget=function(a){return a.nodeType===Node.TEXT_NODE?a.parentNode:a},a.prototype.onTouchStart=function(a){var b,c,f;if(a.targetTouches.length>1)return!0;if(b=this.getTargetElementFromEventTarget(a.target),c=a.targetTouches[0],d){if(f=window.getSelection(),f.rangeCount&&!f.isCollapsed)return!0;if(!e){if(c.identifier&&c.identifier===this.lastTouchIdentifier)return 
a.preventDefault(),!1;this.lastTouchIdentifier=c.identifier,this.updateScrollParent(b)}}return this.trackingClick=!0,this.trackingClickStart=a.timeStamp,this.targetElement=b,this.touchStartX=c.pageX,this.touchStartY=c.pageY,a.timeStamp-this.lastClickTime<this.tapDelay&&a.preventDefault(),!0},a.prototype.touchHasMoved=function(a){var b=a.changedTouches[0],c=this.touchBoundary;return Math.abs(b.pageX-this.touchStartX)>c||Math.abs(b.pageY-this.touchStartY)>c?!0:!1},a.prototype.onTouchMove=function(a){return this.trackingClick?((this.targetElement!==this.getTargetElementFromEventTarget(a.target)||this.touchHasMoved(a))&&(this.trackingClick=!1,this.targetElement=null),!0):!0},a.prototype.findControl=function(a){return void 0!==a.control?a.control:a.htmlFor?document.getElementById(a.htmlFor):a.querySelector("button, input:not([type=hidden]), keygen, meter, output, progress, select, textarea")},a.prototype.onTouchEnd=function(a){var b,g,h,i,j,k=this.targetElement;if(!this.trackingClick)return!0;if(a.timeStamp-this.lastClickTime<this.tapDelay)return this.cancelNextClick=!0,!0;if(a.timeStamp-this.trackingClickStart>this.tapTimeout)return!0;if(this.cancelNextClick=!1,this.lastClickTime=a.timeStamp,g=this.trackingClickStart,this.trackingClick=!1,this.trackingClickStart=0,f&&(j=a.changedTouches[0],k=document.elementFromPoint(j.pageX-window.pageXOffset,j.pageY-window.pageYOffset)||k,k.fastClickScrollParent=this.targetElement.fastClickScrollParent),h=k.tagName.toLowerCase(),"label"===h){if(b=this.findControl(k)){if(this.focus(k),c)return!1;k=b}}else if(this.needsFocus(k))return a.timeStamp-g>100||d&&window.top!==window&&"input"===h?(this.targetElement=null,!1):(this.focus(k),this.sendClick(k,a),d&&"select"===h||(this.targetElement=null,a.preventDefault()),!1);return 
d&&!e&&(i=k.fastClickScrollParent,i&&i.fastClickLastScrollTop!==i.scrollTop)?!0:(this.needsClick(k)||(a.preventDefault(),this.sendClick(k,a)),!1)},a.prototype.onTouchCancel=function(){this.trackingClick=!1,this.targetElement=null},a.prototype.onMouse=function(a){return this.targetElement?a.forwardedTouchEvent?!0:a.cancelable&&(!this.needsClick(this.targetElement)||this.cancelNextClick)?(a.stopImmediatePropagation?a.stopImmediatePropagation():a.propagationStopped=!0,a.stopPropagation(),a.preventDefault(),!1):!0:!0},a.prototype.onClick=function(a){var b;return this.trackingClick?(this.targetElement=null,this.trackingClick=!1,!0):"submit"===a.target.type&&0===a.detail?!0:(b=this.onMouse(a),b||(this.targetElement=null),b)},a.prototype.destroy=function(){var a=this.layer;c&&(a.removeEventListener("mouseover",this.onMouse,!0),a.removeEventListener("mousedown",this.onMouse,!0),a.removeEventListener("mouseup",this.onMouse,!0)),a.removeEventListener("click",this.onClick,!0),a.removeEventListener("touchstart",this.onTouchStart,!1),a.removeEventListener("touchmove",this.onTouchMove,!1),a.removeEventListener("touchend",this.onTouchEnd,!1),a.removeEventListener("touchcancel",this.onTouchCancel,!1)},a.notNeeded=function(a){var b,d,e,f;if("undefined"==typeof 
window.ontouchstart)return!0;if(d=+(/Chrome\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1]){if(!c)return!0;if(b=document.querySelector("meta[name=viewport]")){if(-1!==b.content.indexOf("user-scalable=no"))return!0;if(d>31&&document.documentElement.scrollWidth<=window.outerWidth)return!0}}if(g&&(e=navigator.userAgent.match(/Version\/([0-9]*)\.([0-9]*)/),e[1]>=10&&e[2]>=3&&(b=document.querySelector("meta[name=viewport]")))){if(-1!==b.content.indexOf("user-scalable=no"))return!0;if(document.documentElement.scrollWidth<=window.outerWidth)return!0}return"none"===a.style.msTouchAction||"manipulation"===a.style.touchAction?!0:(f=+(/Firefox\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1],f>=27&&(b=document.querySelector("meta[name=viewport]"),b&&(-1!==b.content.indexOf("user-scalable=no")||document.documentElement.scrollWidth<=window.outerWidth))?!0:"none"===a.style.touchAction||"manipulation"===a.style.touchAction?!0:!1)},a.attach=function(b,c){return new a(b,c)},"function"==typeof define&&"object"==typeof define.amd&&define.amd?define(function(){return a}):"undefined"!=typeof module&&module.exports?(module.exports=a.attach,module.exports.FastClick=a):window.FastClick=a}();
/*
 * Modernizr 2.8.3 with bundled html5shiv 3.7.0 (MINIFIED VENDOR FILE — do not
 * edit by hand). Feature-detects HTML5/CSS3 capabilities (canvas, flexbox,
 * touch, localstorage, transforms, media queries, input types, etc.),
 * publishes the results on window.Modernizr, and mirrors them as
 * "feature"/"no-feature" classes on <html>. The embedded html5shiv makes
 * unknown HTML5 elements styleable in old IE.
 */
window.Modernizr=function(a,b,c){function d(a){t.cssText=a}function e(a,b){return d(x.join(a+";")+(b||""))}function f(a,b){return typeof a===b}function g(a,b){return!!~(""+a).indexOf(b)}function h(a,b){for(var d in a){var e=a[d];if(!g(e,"-")&&t[e]!==c)return"pfx"==b?e:!0}return!1}function i(a,b,d){for(var e in a){var g=b[a[e]];if(g!==c)return d===!1?a[e]:f(g,"function")?g.bind(d||b):g}return!1}function j(a,b,c){var d=a.charAt(0).toUpperCase()+a.slice(1),e=(a+" "+z.join(d+" ")+d).split(" ");return f(b,"string")||f(b,"undefined")?h(e,b):(e=(a+" "+A.join(d+" ")+d).split(" "),i(e,b,c))}function k(){o.input=function(c){for(var d=0,e=c.length;e>d;d++)E[c[d]]=!!(c[d]in u);return E.list&&(E.list=!(!b.createElement("datalist")||!a.HTMLDataListElement)),E}("autocomplete autofocus list placeholder max min multiple pattern required step".split(" ")),o.inputtypes=function(a){for(var d,e,f,g=0,h=a.length;h>g;g++)u.setAttribute("type",e=a[g]),d="text"!==u.type,d&&(u.value=v,u.style.cssText="position:absolute;visibility:hidden;",/^range$/.test(e)&&u.style.WebkitAppearance!==c?(q.appendChild(u),f=b.defaultView,d=f.getComputedStyle&&"textfield"!==f.getComputedStyle(u,null).WebkitAppearance&&0!==u.offsetHeight,q.removeChild(u)):/^(search|tel)$/.test(e)||(d=/^(url|email)$/.test(e)?u.checkValidity&&u.checkValidity()===!1:u.value!=v)),D[a[g]]=!!d;return D}("search tel url email datetime date month week time datetime-local number range color".split(" "))}var l,m,n="2.8.3",o={},p=!0,q=b.documentElement,r="modernizr",s=b.createElement(r),t=s.style,u=b.createElement("input"),v=":)",w={}.toString,x=" -webkit- -moz- -o- -ms- ".split(" "),y="Webkit Moz O ms",z=y.split(" "),A=y.toLowerCase().split(" "),B={svg:"http://www.w3.org/2000/svg"},C={},D={},E={},F=[],G=F.slice,H=function(a,c,d,e){var f,g,h,i,j=b.createElement("div"),k=b.body,l=k||b.createElement("body");if(parseInt(d,10))for(;d--;)h=b.createElement("div"),h.id=e?e[d]:r+(d+1),j.appendChild(h);return f=["­",'<style 
id="s',r,'">',a,"</style>"].join(""),j.id=r,(k?j:l).innerHTML+=f,l.appendChild(j),k||(l.style.background="",l.style.overflow="hidden",i=q.style.overflow,q.style.overflow="hidden",q.appendChild(l)),g=c(j,a),k?j.parentNode.removeChild(j):(l.parentNode.removeChild(l),q.style.overflow=i),!!g},I=function(b){var c=a.matchMedia||a.msMatchMedia;if(c)return c(b)&&c(b).matches||!1;var d;return H("@media "+b+" { #"+r+" { position: absolute; } }",function(b){d="absolute"==(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle).position}),d},J=function(){function a(a,e){e=e||b.createElement(d[a]||"div"),a="on"+a;var g=a in e;return g||(e.setAttribute||(e=b.createElement("div")),e.setAttribute&&e.removeAttribute&&(e.setAttribute(a,""),g=f(e[a],"function"),f(e[a],"undefined")||(e[a]=c),e.removeAttribute(a))),e=null,g}var d={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return a}(),K={}.hasOwnProperty;m=f(K,"undefined")||f(K.call,"undefined")?function(a,b){return b in a&&f(a.constructor.prototype[b],"undefined")}:function(a,b){return K.call(a,b)},Function.prototype.bind||(Function.prototype.bind=function(a){var b=this;if("function"!=typeof b)throw new TypeError;var c=G.call(arguments,1),d=function(){if(this instanceof d){var e=function(){};e.prototype=b.prototype;var f=new e,g=b.apply(f,c.concat(G.call(arguments)));return Object(g)===g?g:f}return b.apply(a,c.concat(G.call(arguments)))};return d}),C.flexbox=function(){return j("flexWrap")},C.flexboxlegacy=function(){return j("boxDirection")},C.canvas=function(){var a=b.createElement("canvas");return!(!a.getContext||!a.getContext("2d"))},C.canvastext=function(){return!(!o.canvas||!f(b.createElement("canvas").getContext("2d").fillText,"function"))},C.webgl=function(){return!!a.WebGLRenderingContext},C.touch=function(){var c;return"ontouchstart"in a||a.DocumentTouch&&b instanceof DocumentTouch?c=!0:H(["@media 
(",x.join("touch-enabled),("),r,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(a){c=9===a.offsetTop}),c},C.geolocation=function(){return"geolocation"in navigator},C.postmessage=function(){return!!a.postMessage},C.websqldatabase=function(){return!!a.openDatabase},C.indexedDB=function(){return!!j("indexedDB",a)},C.hashchange=function(){return J("hashchange",a)&&(b.documentMode===c||b.documentMode>7)},C.history=function(){return!(!a.history||!history.pushState)},C.draganddrop=function(){var a=b.createElement("div");return"draggable"in a||"ondragstart"in a&&"ondrop"in a},C.websockets=function(){return"WebSocket"in a||"MozWebSocket"in a},C.rgba=function(){return d("background-color:rgba(150,255,150,.5)"),g(t.backgroundColor,"rgba")},C.hsla=function(){return d("background-color:hsla(120,40%,100%,.5)"),g(t.backgroundColor,"rgba")||g(t.backgroundColor,"hsla")},C.multiplebgs=function(){return d("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(t.background)},C.backgroundsize=function(){return j("backgroundSize")},C.borderimage=function(){return j("borderImage")},C.borderradius=function(){return j("borderRadius")},C.boxshadow=function(){return j("boxShadow")},C.textshadow=function(){return""===b.createElement("div").style.textShadow},C.opacity=function(){return e("opacity:.55"),/^0.55$/.test(t.opacity)},C.cssanimations=function(){return j("animationName")},C.csscolumns=function(){return j("columnCount")},C.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";return d((a+"-webkit- ".split(" ").join(b+a)+x.join(c+a)).slice(0,-a.length)),g(t.backgroundImage,"gradient")},C.cssreflections=function(){return j("boxReflect")},C.csstransforms=function(){return!!j("transform")},C.csstransforms3d=function(){var a=!!j("perspective");return a&&"webkitPerspective"in q.style&&H("@media 
(transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(b,c){a=9===b.offsetLeft&&3===b.offsetHeight}),a},C.csstransitions=function(){return j("transition")},C.fontface=function(){var a;return H('@font-face {font-family:"font";src:url("https://")}',function(c,d){var e=b.getElementById("smodernizr"),f=e.sheet||e.styleSheet,g=f?f.cssRules&&f.cssRules[0]?f.cssRules[0].cssText:f.cssText||"":"";a=/src/i.test(g)&&0===g.indexOf(d.split(" ")[0])}),a},C.generatedcontent=function(){var a;return H(["#",r,"{font:0/0 a}#",r,':after{content:"',v,'";visibility:hidden;font:3px/1 a}'].join(""),function(b){a=b.offsetHeight>=3}),a},C.video=function(){var a=b.createElement("video"),c=!1;try{(c=!!a.canPlayType)&&(c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),c.h264=a.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,""))}catch(d){}return c},C.audio=function(){var a=b.createElement("audio"),c=!1;try{(c=!!a.canPlayType)&&(c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),c.mp3=a.canPlayType("audio/mpeg;").replace(/^no$/,""),c.wav=a.canPlayType('audio/wav; codecs="1"').replace(/^no$/,""),c.m4a=(a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;")).replace(/^no$/,""))}catch(d){}return c},C.localstorage=function(){try{return localStorage.setItem(r,r),localStorage.removeItem(r),!0}catch(a){return!1}},C.sessionstorage=function(){try{return sessionStorage.setItem(r,r),sessionStorage.removeItem(r),!0}catch(a){return!1}},C.webworkers=function(){return!!a.Worker},C.applicationcache=function(){return!!a.applicationCache},C.svg=function(){return!!b.createElementNS&&!!b.createElementNS(B.svg,"svg").createSVGRect},C.inlinesvg=function(){var a=b.createElement("div");return 
a.innerHTML="<svg/>",(a.firstChild&&a.firstChild.namespaceURI)==B.svg},C.smil=function(){return!!b.createElementNS&&/SVGAnimate/.test(w.call(b.createElementNS(B.svg,"animate")))},C.svgclippaths=function(){return!!b.createElementNS&&/SVGClipPath/.test(w.call(b.createElementNS(B.svg,"clipPath")))};for(var L in C)m(C,L)&&(l=L.toLowerCase(),o[l]=C[L](),F.push((o[l]?"":"no-")+l));return o.input||k(),o.addTest=function(a,b){if("object"==typeof a)for(var d in a)m(a,d)&&o.addTest(d,a[d]);else{if(a=a.toLowerCase(),o[a]!==c)return o;b="function"==typeof b?b():b,"undefined"!=typeof p&&p&&(q.className+=" "+(b?"":"no-")+a),o[a]=b}return o},d(""),s=u=null,function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=s.elements;return"string"==typeof a?a.split(" "):a}function e(a){var b=r[a[p]];return b||(b={},q++,a[p]=q,r[q]=b),b}function f(a,c,d){if(c||(c=b),k)return c.createElement(a);d||(d=e(c));var f;return f=d.cache[a]?d.cache[a].cloneNode():o.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!f.canHaveChildren||n.test(a)||f.tagUrn?f:d.frag.appendChild(f)}function g(a,c){if(a||(a=b),k)return a.createDocumentFragment();c=c||e(a);for(var f=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)f.createElement(h[g]);return f}function h(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return s.shivMethods?f(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(s,b.frag)}function i(a){a||(a=b);var 
d=e(a);return!s.shivCSS||j||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),k||h(a,d),a}var j,k,l="3.7.0",m=a.html5||{},n=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,o=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,p="_html5shiv",q=0,r={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",j="hidden"in a,k=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){j=!0,k=!0}}();var s={elements:m.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output progress section summary template time video",version:l,shivCSS:m.shivCSS!==!1,supportsUnknownElements:k,shivMethods:m.shivMethods!==!1,type:"default",shivDocument:i,createElement:f,createDocumentFragment:g};a.html5=s,i(b)}(this,b),o._version=n,o._prefixes=x,o._domPrefixes=A,o._cssomPrefixes=z,o.mq=I,o.hasEvent=J,o.testProp=function(a){return h([a])},o.testAllProps=j,o.testStyles=H,o.prefixed=function(a,b,c){return b?j(a,b,c):j(a,"pfx")},q.className=q.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(p?" js "+F.join(" "):""),o}(this,this.document);
<!-- Banner -->

<!-- PROJECT SHIELDS -->
[![GitHub Release][releases-shield]][releases]
[![Python Versions][python-versions-shield]][pypi]
![Project Stage][project-stage-shield]
![Project Maintenance][maintenance-shield]
[![License][license-shield]](LICENSE)
[![GitHub Activity][commits-shield]][commits-url]
[![PyPi Downloads][downloads-shield]][downloads-url]
[![GitHub Last Commit][last-commit-shield]][commits-url]
[![Stargazers][stars-shield]][stars-url]
[![Issues][issues-shield]][issues-url]
[![Code Quality][code-quality-shield]][code-quality]
[![Build Status][build-shield]][build-url]
[![Typing Status][typing-shield]][typing-url]
[![Maintainability][maintainability-shield]][maintainability-url]
[![Code Coverage][codecov-shield]][codecov-url]
Asynchronous Python client for the open datasets of Zurich (Switzerland).
## About
A Python package with which you can retrieve data from the Open Data Platform of Zurich via [their API][api]. This package was initially created to retrieve only parking data from the API, but the code base is structured so that it is easy to extend to other datasets from the same platform.
## Installation
```bash
pip install zurich
```
## Datasets
You can read the following datasets with this package:
- [Parking spaces for disabled / Behindertenparkplätze][disabled_parkings] (413 locations)
<details>
<summary>Click here to get more details</summary>
### Disabled parkings
| Variable | Type | Description |
| :------- | :--- | :---------- |
| `spot_id` | int | The ID of the parking spot |
| `address` | str | The address of the parking spot |
| `longitude` | float | The longitude of the parking spot |
| `latitude` | float | The latitude of the parking spot |
</details>
## Example
```python
import asyncio
from zurich import ODPZurich
async def main() -> None:
"""Show example on using the Open Data API client."""
async with ODPZurich() as client:
disabled_parkings = await client.disabled_parkings()
print(disabled_parkings)
if __name__ == "__main__":
asyncio.run(main())
```
## Use cases
[NIPKaart.nl][nipkaart]
A website that provides insight into where disabled parking spaces are, based
on data from users and municipalities. Operates mainly in the Netherlands, but
also has plans to process data from abroad.
## Contributing
This is an active open-source project. We are always open to people who want to
use the code or contribute to it.
We've set up a separate document for our
[contribution guidelines](CONTRIBUTING.md).
Thank you for being involved! :heart_eyes:
## Setting up development environment
This Python project is fully managed using the [Poetry][poetry] dependency
manager.
You need at least:
- Python 3.9+
- [Poetry][poetry-install]
Install all packages, including all development requirements:
```bash
poetry install
```
Poetry creates a virtual environment by default, where it installs all
necessary pip packages; to enter or exit the venv run the following commands:
```bash
poetry shell
exit
```
Setup the pre-commit check, you must run this inside the virtual environment:
```bash
pre-commit install
```
*Now you're all set to get started!*
As this repository uses the [pre-commit][pre-commit] framework, all changes
are linted and tested with each commit. You can run all checks and tests
manually, using the following command:
```bash
poetry run pre-commit run --all-files
```
To run just the Python tests:
```bash
poetry run pytest
```
## License
MIT License
Copyright (c) 2022-2023 Klaas Schoute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
[api]: https://data.stadt-zuerich.ch/
[disabled_parkings]: https://data.stadt-zuerich.ch/dataset/geo_behindertenparkplaetze
[nipkaart]: https://www.nipkaart.nl
<!-- MARKDOWN LINKS & IMAGES -->
[build-shield]: https://github.com/klaasnicolaas/python-zurich/actions/workflows/tests.yaml/badge.svg
[build-url]: https://github.com/klaasnicolaas/python-zurich/actions/workflows/tests.yaml
[code-quality-shield]: https://github.com/klaasnicolaas/python-zurich/actions/workflows/codeql.yaml/badge.svg
[code-quality]: https://github.com/klaasnicolaas/python-zurich/actions/workflows/codeql.yaml
[commits-shield]: https://img.shields.io/github/commit-activity/y/klaasnicolaas/python-zurich.svg
[commits-url]: https://github.com/klaasnicolaas/python-zurich/commits/main
[codecov-shield]: https://codecov.io/gh/klaasnicolaas/python-zurich/branch/main/graph/badge.svg?token=CLytQU0E0f
[codecov-url]: https://codecov.io/gh/klaasnicolaas/python-zurich
[downloads-shield]: https://img.shields.io/pypi/dm/zurich
[downloads-url]: https://pypistats.org/packages/zurich
[issues-shield]: https://img.shields.io/github/issues/klaasnicolaas/python-zurich.svg
[issues-url]: https://github.com/klaasnicolaas/python-zurich/issues
[license-shield]: https://img.shields.io/github/license/klaasnicolaas/python-zurich.svg
[last-commit-shield]: https://img.shields.io/github/last-commit/klaasnicolaas/python-zurich.svg
[maintenance-shield]: https://img.shields.io/maintenance/yes/2023.svg
[maintainability-shield]: https://api.codeclimate.com/v1/badges/b4c705c4b91ea931d411/maintainability
[maintainability-url]: https://codeclimate.com/github/klaasnicolaas/python-zurich/maintainability
[project-stage-shield]: https://img.shields.io/badge/project%20stage-experimental-yellow.svg
[pypi]: https://pypi.org/project/zurich/
[python-versions-shield]: https://img.shields.io/pypi/pyversions/zurich
[typing-shield]: https://github.com/klaasnicolaas/python-zurich/actions/workflows/typing.yaml/badge.svg
[typing-url]: https://github.com/klaasnicolaas/python-zurich/actions/workflows/typing.yaml
[releases-shield]: https://img.shields.io/github/release/klaasnicolaas/python-zurich.svg
[releases]: https://github.com/klaasnicolaas/python-zurich/releases
[stars-shield]: https://img.shields.io/github/stars/klaasnicolaas/python-zurich.svg
[stars-url]: https://github.com/klaasnicolaas/python-zurich/stargazers
[poetry-install]: https://python-poetry.org/docs/#installation
[poetry]: https://python-poetry.org
[pre-commit]: https://pre-commit.com
| zurich | /zurich-0.2.0.tar.gz/zurich-0.2.0/README.md | README.md |
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Attention 机制适合于seq2seq模型 即 具有 encoder decoder
"""
class Attention(nn.Module):
    """Luong-style attention scorer for seq2seq (encoder/decoder) models.

    Supported score functions (``method``), with ``h`` the decoder's
    last-layer hidden state and ``s`` one encoder output step:

    * ``"dot"``:     score = s · h          (requires encoder_hidden_size
                                             == decoder_hidden_size)
    * ``"general"``: score = (Wa s) · h     (Wa projects encoder outputs into
                                             the decoder hidden space)
    * ``"concat"``:  score = Va · tanh(Wa [h; s])

    The forward pass returns softmax-normalized weights over the source
    sequence, one distribution per batch element.
    """

    def __init__(self, config, method="dot"):
        """
        Parameters
        ----------
        config : object exposing ``encoder_hidden_size`` and
            ``decoder_hidden_size`` attributes.
        method : str
            One of ``"dot"``, ``"general"``, ``"concat"``.
        """
        super(Attention, self).__init__()
        assert method in ["dot", "general", "concat"], "method error"
        self.method = method
        if self.method == "general":
            self.Wa = nn.Linear(config.encoder_hidden_size, config.decoder_hidden_size, bias=False)
        elif self.method == "concat":
            self.Wa = nn.Linear(config.encoder_hidden_size + config.decoder_hidden_size, config.decoder_hidden_size,
                                bias=False)
            self.Va = nn.Linear(config.decoder_hidden_size, 1, bias=False)

    def forward(self, hidden_state, encoder_output):
        """
        Parameters
        ----------
        hidden_state : Tensor [num_layer, batch_size, decoder_hidden_size]
            Decoder hidden state; only the last layer is used.
        encoder_output : Tensor [batch_size, seq_len, encoder_hidden_size]

        Returns
        -------
        Tensor [batch_size, seq_len]
            Attention weights, softmax-normalized along ``seq_len``.
        """
        if self.method == "dot":
            hidden_state = hidden_state[-1].unsqueeze(dim=-1)  # [batch_size, decoder_hidden_size, 1]
            att = torch.bmm(encoder_output, hidden_state).squeeze(dim=-1)  # [batch_size, seq_len]
        elif self.method == "general":
            encoder_output = self.Wa(encoder_output)  # [batch_size, seq_len, decoder_hidden_size]
            hidden_state = hidden_state[-1].unsqueeze(dim=-1)  # [batch_size, decoder_hidden_size, 1]
            att = torch.bmm(encoder_output, hidden_state).squeeze(dim=-1)  # [batch_size, seq_len]
        else:  # "concat"
            # BUGFIX: the old code called .repeat(1, seq_len, 1) directly on the
            # 2-D tensor hidden_state[-1] ([batch, hidden]); torch then treats it
            # as [1, batch, hidden] and produces [1, batch*seq_len, hidden],
            # which breaks the torch.cat below. Insert the seq_len axis first.
            hidden_state = hidden_state[-1].unsqueeze(1).repeat(
                1, encoder_output.size(1), 1)  # [batch_size, seq_len, decoder_hidden_size]
            cat_ed = torch.cat([hidden_state, encoder_output], dim=-1)
            # torch.tanh replaces the deprecated F.tanh.
            att = self.Va(torch.tanh(self.Wa(cat_ed))).squeeze(dim=-1)  # [batch_size, seq_len]
        att_weight = F.softmax(att, dim=-1)  # [batch_size, seq_len]
        return att_weight
import pickle
from dataset import get_dataloader
class WordSequence:
    """Word <-> index vocabulary for text models.

    Usage: call :meth:`fit` once per tokenized sentence to accumulate word
    frequencies, then :meth:`build_vocab` to freeze the mapping, then
    :meth:`transform` / :meth:`inverse_transform` to convert between token
    lists and index lists.

    Reserved entries: index 0 is ``"UNK"`` (out-of-vocabulary), index 1 is
    ``"PAD"`` (padding).
    """

    UNK_TAG = "UNK"  # token representing unknown / out-of-vocabulary words
    PAD_TAG = "PAD"  # token used to pad short sentences
    UNK = 0
    PAD = 1

    def __init__(self):
        # word -> index; the two special tokens always occupy slots 0 and 1.
        self.dict = {self.UNK_TAG: self.UNK, self.PAD_TAG: self.PAD}
        self.count = {}  # word -> raw frequency, accumulated by fit()

    def fit(self, sentence):
        """Accumulate word frequencies from one tokenized sentence.

        Parameters
        ----------
        sentence : iterable of str, e.g. ``["word1", "word2", ...]``
        """
        for word in sentence:
            self.count[word] = self.count.get(word, 0) + 1

    def build_vocab(self, min_len=0, max_len=None, max_features=None):
        """Freeze the vocabulary from the accumulated counts.

        Parameters
        ----------
        min_len : int or None
            Keep only words occurring strictly more than this many times.
        max_len : int or None
            Keep only words occurring strictly fewer than this many times.
        max_features : int or None
            If given, keep only the ``max_features`` most frequent words.
        """
        if min_len is not None:
            self.count = {word: value for word, value in self.count.items() if value > min_len}
        if max_len is not None:
            self.count = {word: value for word, value in self.count.items() if value < max_len}
        if max_features is not None:
            # Keep the max_features most frequent words.
            temp = sorted(self.count.items(), key=lambda x: x[-1], reverse=True)[:max_features]
            self.count = dict(temp)
        for word in self.count:
            self.dict[word] = len(self.dict)  # assign the next free index
        # index -> word, used by inverse_transform().
        self.inverse_dict = dict(zip(self.dict.values(), self.dict.keys()))

    def transform(self, sentence, max_len=None):
        """Convert a tokenized sentence into a list of word indices.

        Parameters
        ----------
        sentence : list of str
        max_len : int or None
            If given, truncate to ``max_len`` or right-pad with PAD.

        Returns
        -------
        list of int
            Unknown words map to ``UNK`` (0).
        """
        if max_len is not None:
            if len(sentence) > max_len:
                sentence = sentence[:max_len]
            else:
                sentence = sentence + [self.PAD_TAG] * (max_len - len(sentence))
        return [self.dict.get(word, self.UNK) for word in sentence]

    def inverse_transform(self, indices):
        """Convert a list of indices back into a list of words.

        Unknown indices map to ``UNK_TAG``. Requires build_vocab() to have
        been called (it creates ``inverse_dict``).
        """
        return [self.inverse_dict.get(i, self.UNK_TAG) for i in indices]

    def __len__(self):
        return len(self.dict)

    def sava_ws_model(self, ws_model_path="./models/ws.pkl"):
        """Pickle this vocabulary object to disk.

        NOTE: the method name keeps its historical typo ("sava") so existing
        callers do not break.
        """
        # BUGFIX: use a context manager so the file handle is closed; the old
        # code passed an unclosed open() result straight to pickle.dump.
        with open(ws_model_path, "wb") as f:
            pickle.dump(self, f)

    def train_save_vocab_model(self):
        """Build the vocabulary from the train/test dataloaders and save it.

        Relies on the project-local ``get_dataloader`` imported at module
        level; each batch yields ``(reviews, labels)``.
        """
        dl_train = get_dataloader(train=True)  # all training sentences
        dl_test = get_dataloader(train=False)  # all test sentences
        for reviews, labels in dl_train:
            for label in labels:
                # NOTE(review): fit() iterates its argument, so a string label
                # is counted character by character — confirm this is intended.
                self.fit(label)
            for sentence in reviews:
                self.fit(sentence)
        for reviews, labels in dl_test:
            for label in labels:
                self.fit(label)
            for sentence in reviews:
                self.fit(sentence)
        self.build_vocab()
        print("词典长度为:", len(self))
        self.sava_ws_model(ws_model_path="models/ws.pkl")
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import numpy as np
from collections import Counter
class Metrics(object):
    """Token-level evaluation of sequence-labelling output: per-tag
    precision, recall, F1, a weighted average, and a confusion matrix.

    NOTE(review): this excerpt calls helpers that are not defined here —
    module-level ``flatten_lists`` and the methods ``count_correct_tags``,
    ``cal_precision``, ``cal_recall``, ``cal_f1``, ``_remove_Otags`` and
    ``_cal_weighted_average``. They are presumably defined elsewhere in the
    project; confirm before reusing this class stand-alone.
    """

    def __init__(self, golden_tags, predict_tags, remove_O=False):
        # Flatten nested per-sentence tag lists:
        # [[t1, t2], [t3, t4]...] --> [t1, t2, t3, t4...]
        self.golden_tags = flatten_lists(golden_tags)
        self.predict_tags = flatten_lists(predict_tags)
        if remove_O:  # drop the "O" (outside) tag so only entity tags are scored
            self._remove_Otags()
        # Helper quantities shared by the score computations below.
        self.tagset = set(self.golden_tags)
        self.correct_tags_number = self.count_correct_tags()
        self.predict_tags_counter = Counter(self.predict_tags)
        self.golden_tags_counter = Counter(self.golden_tags)
        # Per-tag precision
        self.precision_scores = self.cal_precision()
        # Per-tag recall
        self.recall_scores = self.cal_recall()
        # Per-tag F1 score
        self.f1_scores = self.cal_f1()

    def report_scores(self):
        """Print a per-tag score table, in this format:

                      precision    recall  f1-score   support
              B-LOC      0.775     0.757     0.766      1084
              I-LOC      0.601     0.631     0.616       325
             B-MISC      0.698     0.499     0.582       339
             I-MISC      0.644     0.567     0.603       557
              B-ORG      0.795     0.801     0.798      1400
              I-ORG      0.831     0.773     0.801      1104
              B-PER      0.812     0.876     0.843       735
              I-PER      0.873     0.931     0.901       634

          avg/total      0.779     0.764     0.770      6178
        """
        # Print the header row.
        header_format = '{:>9s} {:>9} {:>9} {:>9} {:>9}'
        header = ['precision', 'recall', 'f1-score', 'support']
        print(header_format.format('', *header))
        row_format = '{:>9s} {:>9.4f} {:>9.4f} {:>9.4f} {:>9}'
        # Print precision / recall / f1 for every tag.
        for tag in self.tagset:
            print(row_format.format(
                tag,
                self.precision_scores[tag],
                self.recall_scores[tag],
                self.f1_scores[tag],
                self.golden_tags_counter[tag]
            ))
        # Compute and print the (support-)weighted averages.
        avg_metrics = self._cal_weighted_average()
        print(row_format.format(
            'avg/total',
            avg_metrics['precision'],
            avg_metrics['recall'],
            avg_metrics['f1_score'],
            len(self.golden_tags)
        ))

    def report_confusion_matrix(self):
        """Compute and print the tag confusion matrix."""
        print("\nConfusion Matrix:")
        tag_list = list(self.tagset)
        # Initialize the matrix: matrix[i][j] counts how often the i-th golden
        # tag was predicted as the j-th tag.
        tags_size = len(tag_list)
        matrix = []
        for i in range(tags_size):
            matrix.append([0] * tags_size)
        # Walk the two aligned tag sequences.
        for golden_tag, predict_tag in zip(self.golden_tags, self.predict_tags):
            try:
                row = tag_list.index(golden_tag)
                col = tag_list.index(predict_tag)
                matrix[row][col] += 1
            except ValueError:  # a few tags occur only in predict_tags, not in golden_tags; skip them
                continue
        # Print the matrix with a header row of tags.
        row_format_ = '{:>7} ' * (tags_size + 1)
        print(row_format_.format("", *tag_list))
        for i, row in enumerate(matrix):
            print(row_format_.format(tag_list[i], *row))
def get_score(tag, pred):
    """Print and return accuracy plus macro-averaged precision/recall/F1.

    Fixes over the previous version: the unused ``global a`` declaration
    is removed, and each value is no longer wrapped in a one-element list
    only to be averaged with ``np.mean`` — both were dead weight.  The
    printed output is unchanged; the scores are additionally returned so
    callers can use them programmatically (previously ``None``).

    :param tag: gold labels
    :param pred: predicted labels
    :return: tuple ``(accuracy, precision, recall, f1)``
    """
    cur_acc = accuracy_score(tag, pred)
    cur_pre = precision_score(tag, pred, average='macro')
    cur_rec = recall_score(tag, pred, average='macro')
    cur_f1 = f1_score(tag, pred, average='macro')
    print("ave_acc: ", cur_acc, "ave_pre: ", cur_pre, "ave_recall: ", cur_rec,
          "ave_f1: ",
          cur_f1)
    return cur_acc, cur_pre, cur_rec, cur_f1
Zut
===
Reusable Python, Django and PostgreSQL utilities.
## Install
From PyPI:
pip install zut
From Git, latest version:
pip install git+https://gitlab.com/ipamo/zut.git@main
Use SSH instead of HTTPS url:
pip install git+ssh://[email protected]/ipamo/zut.git@main
Specific version, including extra dependencies:
pip install git+https://gitlab.com/ipamo/[email protected]#egg=zut[extra]
In a `requirements.txt` file, including extra dependencies:
zut[extra] @ git+https://gitlab.com/ipamo/[email protected]#egg=zut[extra]
## Dev quick start
Install Python, its package manager (`pip`) and PostgreSQL.
Under Linux, also install password manager `pass` (used as _credentials manager_).
Windows pre-requisites:
- Download [Python](https://www.python.org/downloads/) and install it.
- Download [PostgreSql](https://www.enterprisedb.com/downloads/postgres-postgresql-downloads), install it, and add binaries (`C:\Program Files\PostgreSQL\14\bin`) to PATH.
Linux (Debian) pre-requisites:
sudo apt install python3-venv python3-pip postgresql pass
Create Python virtual environment (example for Windows):
python -m venv .venv # Debian: python3 -m venv .venv
.\.venv\Scripts\activate # Linux: source .venv/bin/activate
pip install --upgrade pip setuptools wheel
pip install -r requirements.txt
Create test database (cf. parameters in `tests/settings.py`). Example:
sudo -u postgres psql -c "create database test_zut encoding 'utf8' template 'template0'"
For Linux, configure password manager `pass`. Example:
# Import your GPG key, show key identifier and mark key as trusted
gpg --import my-private-gpg-key.asc
gpg --list-secret-keys
gpg --edit-key [email protected]
trust
5
o
q
# Initialize "pass" with your GPG key
pass init [email protected]
Run tests:
python -m unittest
Run commands :
python -m zut --help
## Publish library
Configure `~/.pypirc`. Example:
```conf
[distutils]
index-servers =
pypi
testpypi
zut
[pypi]
username = __token__
password = # use project-scoped token instead
[testpypi]
# user-scoped token
username = __token__
password = pypi-xxxxx...
# -----------------------------------------------------------------------------
# Project-scoped token
# Usage example: twine --repository zut
#
[zut]
repository = https://upload.pypi.org/legacy/
username = __token__
password = pypi-xxxxx...
```
Prepare distribution:
pip install twine # if not already done
python -m zut checkversion
python tools.py clean
python setup.py sdist bdist_wheel
twine check dist/*
Upload tarball on PyPI:
# $env:HTTPS_PROXY="..." # if necessary
# $env:TWINE_CERT="C:\...\ca-certificates.crt" # if necessary
twine upload --repository zut dist/*
| zut | /zut-0.3.1.tar.gz/zut-0.3.1/README.md | README.md |
# zutilities
A collection of Python utilities
## Table of Contents
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Usage](#usage)
- [License](#license)
## Prerequisites
You'll need to have Python installed in order to use `zutilities`. Start by downloading and installing [Python](https://www.python.org/downloads/).
> *Note: Python 3 is recommended, however `zutilities` will probably work just fine with most versions of Python 2*
## Installation
```
python -m pip install zutilities
```
## Usage
`zutilities`.**jprint**(list_or_dict, indent=2)
Prints a list or dictionary as formatted JSON.
```
>>> zutilities.jprint([{'key1':'value1','key2':'value2'}])
[
{
"key1": "value1",
"key2": "value2"
}
]
```
`zutilities`.**read_json_file**(json_file)
Reads a JSON file from the filesystem and returns a list or dictionary.
```
>>> j = zutilities.read_json_file('file.json')
>>> j
[{'key1': 'value1', 'key2': 'value2'}]
```
`zutilities`.**get_logger**(log_level=20, format=default_log_format, streams=[sys.stdout])
Returns a _logging.RootLogger_ object with preferred defaults set. The _default_log_format_ is '[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s'
```
>>> logr = zutilities.get_logger()
>>> logr.info('Hello World')
[2021-10-08 21:08:40,353] {<stdin>:1} INFO - Hello world
```
## License
This project is licensed under the MIT License
| zutilities | /zutilities-0.1.2.tar.gz/zutilities-0.1.2/README.md | README.md |
# zutnlp
#### 介绍
一款基于fastNLP的工具库
#### 软件架构
软件架构说明
#### 安装教程
1. pip install zutnlp
2. xxxx
3. xxxx
#### 使用说明
1. xxxx
2. xxxx
3. xxxx
#### 参与贡献
1. Fork 本仓库
2. 新建 Feat_xxx 分支
3. 提交代码
4. 新建 Pull Request
#### 特技
1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md
2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com)
3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目
4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目
5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help)
6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
| zutnlp | /zutnlp-0.0.1.tar.gz/zutnlp-0.0.1/README.md | README.md |
# zutool
[](
<https://badge.fury.io/py/zutool>
) [](
<https://codeclimate.com/github/eggplants/zutool/maintainability>
) [](
<https://results.pre-commit.ci/latest/github/eggplants/zutool/master>
) [](
<https://codeclimate.com/github/eggplants/zutool/test_coverage>
) [](
<https://github.com/eggplants/zutool/actions/workflows/test.yml>
) [](
<https://github.com/eggplants/zutool/actions/workflows/release.yml>
)
[ ](
<https://github.com/eggplants/zutool/pkgs/container/zutool>
)
Unofficial zutool (頭痛ール: <https://zutool.jp/>) API Wrapper
## Install
```bash
pip install zutool
```
## As Library
```python
import zutool as z
# see: <https://nlftp.mlit.go.jp/ksj/gml/codelist/PrefCd.html>
area_code = "13" # 東京都
z.get_pain_status(area_code)
keyword = "東京都"
z.get_weather_point(keyword)
# see: <https://geoshape.ex.nii.ac.jp/city/code/?13113>
city_code = "13113" # 東京都渋谷区
z.get_weather_status(city_code)
city_code = "13101" # 東京都千代田区
z.get_otenki_asp(city_code)
```
## As CLI
```shellsession
$ zutool -h
usage: zutool [-h] [-j] {pain_status,ps,weather_point,wp,weather_status,ws,otenki_asp,oa} ...
Get info of zutool <https://zutool.jp/>.
positional arguments:
{pain_status,ps,weather_point,wp,weather_status,ws,otenki_asp,oa}
pain_status (ps) get pain status by prefecture
weather_point (wp) search weather point
weather_status (ws) get pain status by city
otenki_asp (oa) get weather infomations
optional arguments:
-h, --help show this help message and exit
-j, --json print as json (default: False)
```
### `pain_status (ps)`
```shellsession
$ zutool ps -h
usage: zutool pain_status [-h] [-s CODE] area_code
positional arguments:
area_code see: <https://nlftp.mlit.go.jp/ksj/gml/codelist/PrefCd.html> (ex. `13`)
optional arguments:
-h, --help show this help message and exit
-s CODE set weather point code as default (ex. `13113`) (default: None)
```
```shellsession
$ zutool ps 01
今のみんなの体調は? <北海道|01>
(集計時間: 12時-18時台)
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ 😃😃😃😃😃😃😃😃 17.098445595855% ┃
┃ 😐😐😐😐😐😐😐😐😐😐😐😐😐😐😐 31.60621761658% ┃
┃ 😞😞😞😞😞😞😞😞😞😞😞😞😞😞😞😞😞😞 37.823834196891% ┃
┃ 🤯🤯🤯🤯🤯🤯 13.471502590674% ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ [😃・・・普通, 😐・・・少し痛い, 😞・・・痛い, 🤯・・・かなり痛い] │
└────────────────────────────────────────────────────────┘
```
### `weather_point (wp)`
```shellsession
$ zutool wp -h
usage: zutool weather_point [-h] [-k] keyword
positional arguments:
keyword keyword for searching city_code (ex. `東京都`)
optional arguments:
-h, --help show this help message and exit
-k, --kata with kata column in non-json output (default: False)
```
```shellsession
$ zutool wp "港区"
「港区」の検索結果
┏━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓
┃ 地域コード ┃ 地域名 ┃
┡━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩
│ 13103 │ 東京都港区 │
│ 23111 │ 愛知県名古屋市港区 │
│ 27107 │ 大阪府大阪市港区 │
└────────────┴────────────────────┘
```
### `weather_status (ws)`
```shellsession
$ zutool ws -h
usage: zutool weather_status [-h] [-n N [N ...]] city_code
positional arguments:
city_code see: <https://geoshape.ex.nii.ac.jp/city/code/> (ex. `13113`)
optional arguments:
-h, --help show this help message and exit
-n N [N ...] specify day number to show (default: [0])
```
```shellsession
$ zutool ws 13113
<東京都渋谷区|13113>の気圧予報
today = 2023-08-15 20:00:00+09:00
┏━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┓
┃ 0 ┃ 1 ┃ 2 ┃ 3 ┃ 4 ┃ 5 ┃ 6 ┃ 7 ┃ 8 ┃ 9 ┃ 10 ┃ 11 ┃
┡━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━┩
│ ☁ │ ☁ │ ☔ │ ☔ │ ☁ │ ☔ │ ☁ │ ☁ │ ☔ │ ☔ │ ☁ │ ☁ │
│ 28.4℃ │ 27.5℃ │ 27.3℃ │ 26.5℃ │ 26.9℃ │ 26.7℃ │ 26.9℃ │ 27.9℃ │ 28.4℃ │ 28.4℃ │ 29.1℃ │ 30.7℃ │
│ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │
│ 1004.8 │ 1004.2 │ 1004.3 │ 1004.3 │ 1004.6 │ 1004.9 │ 1005.2 │ 1005.4 │ 1005.8 │ 1006.0 │ 1005.8 │ 1005.3 │
│ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │ やや警戒 │
└──────────┴──────────┴──────────┴──────────┴──────────┴──────────┴──────────┴──────────┴──────────┴──────────┴──────────┴──────────┘
┏━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┓
┃ 12 ┃ 13 ┃ 14 ┃ 15 ┃ 16 ┃ 17 ┃ 18 ┃ 19 ┃ 20 ┃ 21 ┃ 22 ┃ 23 ┃
┡━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━┩
│ ☁ │ ☀ │ ☀ │ ☀ │ ☀ │ ☔ │ ☁ │ ☁ │ ☔ │ ☔ │ ☔ │ ☔ │
│ 31.1℃ │ 32.2℃ │ 31.9℃ │ 31.6℃ │ 31.3℃ │ 29.9℃ │ 29.3℃ │ 29.2℃ │ 28.4℃ │ 27.9℃ │ 27.5℃ │ 27.2℃ │
│ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │ ↗ │
│ 1005.1 │ 1004.9 │ 1004.9 │ 1004.6 │ 1004.7 │ 1004.8 │ 1005.2 │ 1005.7 │ 1006.3 │ 1006.5 │ 1006.5 │ 1006.4 │
│ やや警戒 │ やや警戒 │ やや警戒 │ 通常_0 │ 通常_0 │ 通常_0 │ 通常_0 │ 通常_0 │ 通常_0 │ やや警戒 │ やや警戒 │ やや警戒 │
└──────────┴──────────┴──────────┴────────┴────────┴────────┴────────┴────────┴────────┴──────────┴──────────┴──────────┘
```
### `otenki_asp (oa)`
```shellsession
$ zutool oa -h
usage: zutool otenki_asp [-h] [-n N [N ...]] {01101,04101,13101,15103,17201,23106,27128,34101,39201,40133,47201}
positional arguments:
{01101,04101,13101,15103,17201,23106,27128,34101,39201,40133,47201}
see: <https://geoshape.ex.nii.ac.jp/city/code/> (ex. `13113`)
optional arguments:
-h, --help show this help message and exit
-n N [N ...] specify day number to show (default: [0, 1, 2, 3, 4, 5, 6])
```
```shellsession
$ zutool oa 13101
<東京|13101>の天気情報
┏━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━┳━━━━━━━━━━┓
┃ 日付 ┃ 天気 ┃ 降水確率 ┃ 最高気温 ┃ 最低気温 ┃ 最大風速 ┃ 最大風速時風向 ┃ 気圧予報レベル ┃ 最小湿度 ┃
┡━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━╇━━━━━━━━━━┩
│ 08/02 │ 晴れ │ 10.0 │ 35.0 │ 26.0 │ 11.2 │ 8.0 │ 2.0 │ 60.5 │
│ 08/03 │ 晴れ │ 10.0 │ 36.0 │ 26.0 │ 11.8 │ 8.0 │ 4.0 │ 63.6 │
│ 08/04 │ 晴れ │ 0.0 │ 36.0 │ 26.0 │ 10.0 │ 8.0 │ 2.0 │ 59.6 │
│ 08/05 │ 晴れのち雨 │ 30.0 │ 36.0 │ 27.0 │ 11.8 │ 8.0 │ 1.0 │ 64.3 │
│ 08/06 │ 雨のち晴れ │ 30.0 │ 36.0 │ 27.0 │ 10.3 │ 8.0 │ 2.0 │ 61.9 │
│ 08/07 │ 晴れのち雨 │ 50.0 │ 33.0 │ 26.0 │ 7.2 │ 2.0 │ 2.0 │ 63.6 │
│ 08/08 │ 雨一時晴れ │ 80.0 │ 33.0 │ 26.0 │ 6.2 │ 6.0 │ 1.0 │ 79.5 │
└───────┴────────────┴──────────┴──────────┴──────────┴──────────┴────────────────┴────────────────┴──────────┘
```
| zutool | /zutool-0.0.6.tar.gz/zutool-0.0.6/README.md | README.md |
import requests
import urllib.parse
import yaml
class ZuulRESTException(Exception):
    """Raised when the Zuul REST API returns an error response."""
    pass
class BearerAuth(requests.auth.AuthBase):
"""Custom authentication helper class.
Authentication helper class to work around requests' default behavior
of using ~/.netrc to authenticate despite having set an explicit
authorization header.
See also https://github.com/psf/requests/issues/3929
"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = 'Bearer %s' % self._token
return r
class ZuulRESTClient(object):
    """Basic client for Zuul's REST API"""
    def __init__(self, url, verify=False, auth_token=None):
        # Base URL of the Zuul web service; a trailing slash is enforced
        # so urljoin() resolves relative API paths correctly.
        self.url = url
        if not self.url.endswith('/'):
            self.url += '/'
        self.auth_token = auth_token
        # NOTE(review): verify defaults to False, i.e. SSL certificate
        # verification is off unless the caller opts in.
        self.verify = verify
        self.base_url = urllib.parse.urljoin(self.url, 'api/')
        self.session = requests.Session()
        self.session.verify = self.verify
        if self.auth_token:
            self.session.auth = BearerAuth(self.auth_token)
        # Lazily-populated cache for the /info endpoint (see ``info``).
        self.info_ = None
    @property
    def info(self):
        """Return the Zuul info data.

        Useful to get capabilities and tenant info."""
        if self.info_ is None:
            url = urllib.parse.urljoin(
                self.base_url,
                'info')
            req = self.session.get(url)
            self._check_request_status(req)
            self.info_ = req.json().get('info', {})
        return self.info_
    def _check_request_status(self, req):
        """Raise ZuulRESTException with a friendly message on HTTP errors."""
        try:
            req.raise_for_status()
        except Exception as e:
            if req.status_code == 401:
                raise ZuulRESTException(
                    'Unauthorized - your token might be invalid or expired.')
            elif req.status_code == 403:
                raise ZuulRESTException(
                    'Insufficient privileges to perform the action.')
            else:
                raise ZuulRESTException(
                    'Unknown error code %s: "%s"' % (req.status_code, e))
    def _check_scope(self, tenant):
        """Ensure *tenant* matches the API's tenant scope, if it has one."""
        scope = self.info.get("tenant", None)
        if (
            (scope is not None)
            and (tenant not in [None, ""])
            and scope != tenant
        ):
            raise Exception(
                "Tenant %s and tenant scope %s do not match" % (tenant, scope)
            )
    def autohold(self, tenant, project, job, change, ref,
                 reason, count, node_hold_expiration):
        """Create an autohold request; requires an auth token."""
        if not self.auth_token:
            raise Exception('Auth Token required')
        args = {"reason": reason,
                "count": count,
                "job": job,
                "change": change,
                "ref": ref,
                "node_hold_expiration": node_hold_expiration}
        # A tenant-scoped API omits the tenant path segment from the URL.
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'project/%s/autohold' % project
        else:
            suffix = 'tenant/%s/project/%s/autohold' % (tenant, project)
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        req = self.session.post(url, json=args)
        self._check_request_status(req)
        return req.json()
    def autohold_list(self, tenant):
        """List autohold requests for *tenant*."""
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'autohold'
        else:
            suffix = 'tenant/%s/autohold' % tenant
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        # auth not needed here
        req = self.session.get(url)
        self._check_request_status(req)
        resp = req.json()
        return resp
    def autohold_delete(self, id, tenant):
        """Delete autohold request *id*; requires an auth token."""
        if not self.auth_token:
            raise Exception('Auth Token required')
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'autohold/%s' % id
        else:
            suffix = 'tenant/%s/autohold/%s' % (tenant, id)
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        req = self.session.delete(url)
        self._check_request_status(req)
        # DELETE doesn't return a body, just the HTTP code
        return (req.status_code == 204)
    def autohold_info(self, id, tenant):
        """Return detailed info for autohold request *id*."""
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'autohold/%s' % id
        else:
            suffix = 'tenant/%s/autohold/%s' % (tenant, id)
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        # auth not needed here
        req = self.session.get(url)
        self._check_request_status(req)
        resp = req.json()
        return resp
    def enqueue(self, tenant, pipeline, project, change):
        """Enqueue *change* into *pipeline*; requires an auth token."""
        if not self.auth_token:
            raise Exception('Auth Token required')
        args = {"change": change,
                "pipeline": pipeline}
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'project/%s/enqueue' % project
        else:
            suffix = 'tenant/%s/project/%s/enqueue' % (tenant, project)
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        req = self.session.post(url, json=args)
        self._check_request_status(req)
        return req.json()
    def enqueue_ref(self, tenant, pipeline, project, ref, oldrev, newrev):
        """Enqueue a ref (trigger event replay); requires an auth token."""
        if not self.auth_token:
            raise Exception('Auth Token required')
        args = {"ref": ref,
                "oldrev": oldrev,
                "newrev": newrev,
                "pipeline": pipeline}
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'project/%s/enqueue' % project
        else:
            suffix = 'tenant/%s/project/%s/enqueue' % (tenant, project)
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        req = self.session.post(url, json=args)
        self._check_request_status(req)
        return req.json()
    def dequeue(self, tenant, pipeline, project, change=None, ref=None):
        """Dequeue a buildset by change OR ref; requires an auth token."""
        if not self.auth_token:
            raise Exception('Auth Token required')
        args = {"pipeline": pipeline}
        # Exactly one of change/ref must be given.
        if change and not ref:
            args['change'] = change
        elif ref and not change:
            args['ref'] = ref
        else:
            raise Exception('need change OR ref')
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'project/%s/dequeue' % project
        else:
            suffix = 'tenant/%s/project/%s/dequeue' % (tenant, project)
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        req = self.session.post(url, json=args)
        self._check_request_status(req)
        return req.json()
    def promote(self, tenant, pipeline, change_ids):
        """Promote *change_ids* within *pipeline*; requires an auth token."""
        if not self.auth_token:
            raise Exception('Auth Token required')
        args = {'pipeline': pipeline,
                'changes': change_ids}
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'promote'
        else:
            suffix = 'tenant/%s/promote' % tenant
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        req = self.session.post(url, json=args)
        self._check_request_status(req)
        return req.json()
    def get_key(self, tenant, project):
        """Return the project's public key as text."""
        if self.info.get('tenant'):
            self._check_scope(tenant)
            suffix = 'key/%s.pub' % project
        else:
            suffix = 'tenant/%s/key/%s.pub' % (tenant, project)
        url = urllib.parse.urljoin(
            self.base_url,
            suffix)
        req = self.session.get(url)
        self._check_request_status(req)
        return req.text
    def builds(self, tenant, **kwargs):
        """Query builds, filtered by the keyword arguments listed below."""
        # check kwargs
        allowed_args = {'project', 'pipeline', 'change', 'branch', 'patchset',
                        'ref', 'newrev', 'uuid', 'job_name', 'voting',
                        'node_name', 'result', 'final', 'held',
                        'limit', 'skip'}
        if not set(kwargs.keys()).issubset(allowed_args):
            raise Exception(
                'Allowed arguments are %s' % ', '.join(allowed_args))
        # params aliases kwargs, so these defaults flow into the GET below.
        params = kwargs
        if 'limit' not in params:
            params['limit'] = 50
        if 'skip' not in params:
            params['skip'] = 0
        if self.info.get("tenant"):
            self._check_scope(tenant)
            suffix = "builds"
        else:
            suffix = "tenant/%s/builds" % tenant
        url = urllib.parse.urljoin(self.base_url, suffix)
        req = self.session.get(url, params=kwargs)
        self._check_request_status(req)
        return req.json()
    def build(self, tenant, uuid):
        """Return build details, augmented with job output/inventory info."""
        if self.info.get("tenant"):
            self._check_scope(tenant)
            suffix = "build/%s" % uuid
        else:
            suffix = "tenant/%s/build/%s" % (tenant, uuid)
        url = urllib.parse.urljoin(self.base_url, suffix)
        req = self.session.get(url)
        self._check_request_status(req)
        build_info = req.json()
        build_info['job_output_url'] = urllib.parse.urljoin(
            build_info['log_url'], 'job-output.txt')
        inventory_url = urllib.parse.urljoin(
            build_info['log_url'], 'zuul-info/inventory.yaml')
        # Best effort: the inventory may be missing or unreadable, in
        # which case the error is recorded instead of raised.
        try:
            raw_inventory = self.session.get(inventory_url)
            build_info['inventory'] = yaml.load(raw_inventory.text,
                                                Loader=yaml.SafeLoader)
        except Exception as e:
            build_info['inventory'] = {'error': str(e)}
        return build_info
import argparse
import configparser
import logging
import os
from pathlib import Path
import shutil
import sys
import tempfile
import textwrap
from zuulclient.api import ZuulRESTClient
from zuulclient.utils import get_default
from zuulclient.utils import encrypt_with_openssl
from zuulclient.utils import formatters
# Base directories used to locate the default configuration files.
_HOME = Path(os.path.expandvars('$HOME'))
# Honors the XDG base-directory convention, falling back to ~/.config.
_XDG_CONFIG_HOME = Path(os.environ.get(
    'XDG_CONFIG_HOME',
    _HOME / '.config'))
class ArgumentException(Exception):
    """Raised for invalid or inconsistent command-line arguments."""
    pass
class ZuulClient():
app_name = 'zuul-client'
app_description = 'Zuul User CLI'
log = logging.getLogger("zuul-client")
default_config_locations = [
_XDG_CONFIG_HOME / 'zuul' / 'client.conf',
_HOME / '.zuul.conf'
]
    def __init__(self):
        # Populated later by parseArguments() and readConfig() respectively.
        self.args = None
        self.config = None
    def _get_version(self):
        """Return a human-readable zuul-client version string."""
        from zuulclient.version import version_info
        return "Zuul-client version: %s" % version_info.release_string()
    def createParser(self):
        """Build and return the top-level argparse parser."""
        parser = argparse.ArgumentParser(
            description=self.app_description,
            formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.add_argument('-c', dest='config',
                            help='specify the config file')
        parser.add_argument('--version', dest='version', action='version',
                            version=self._get_version(),
                            help='show zuul version')
        parser.add_argument('-v', dest='verbose', action='store_true',
                            help='verbose output')
        parser.add_argument('--auth-token', dest='auth_token',
                            required=False,
                            default=None,
                            help='Authentication Token, required by '
                            'admin commands')
        parser.add_argument('--zuul-url', dest='zuul_url',
                            required=False,
                            default=None,
                            help='Zuul base URL, needed if using the '
                            'client without a configuration file')
        parser.add_argument('--use-config', dest='zuul_config',
                            required=False,
                            default=None,
                            help='A predefined configuration in the '
                            'zuul-client configuration file')
        parser.add_argument('--insecure', dest='verify_ssl',
                            required=False,
                            action='store_false',
                            help='Do not verify SSL connection to Zuul '
                            '(Defaults to False)')
        parser.add_argument('--format', choices=['JSON', 'text'],
                            default='text', required=False,
                            help='The output format, when applicable')
        self.createCommandParsers(parser)
        return parser
    def createCommandParsers(self, parser):
        """Register every subcommand parser on *parser*."""
        subparsers = parser.add_subparsers(title='commands',
                                           description='valid commands',
                                           help='additional help')
        self.add_autohold_subparser(subparsers)
        self.add_autohold_delete_subparser(subparsers)
        self.add_autohold_info_subparser(subparsers)
        self.add_autohold_list_subparser(subparsers)
        self.add_enqueue_subparser(subparsers)
        self.add_enqueue_ref_subparser(subparsers)
        self.add_dequeue_subparser(subparsers)
        self.add_promote_subparser(subparsers)
        self.add_encrypt_subparser(subparsers)
        self.add_builds_list_subparser(subparsers)
        self.add_build_info_subparser(subparsers)
        return subparsers
    def parseArguments(self, args=None):
        """Parse CLI arguments into ``self.args`` and validate them.

        Raises ArgumentException when --zuul-url / --use-config are both
        (or neither) given, or when subcommand arguments are inconsistent.
        """
        self.parser = self.createParser()
        self.args = self.parser.parse_args(args)
        # Exactly one of --zuul-url / --use-config must be supplied.
        if (
            (self.args.zuul_url and self.args.zuul_config) or
            (not self.args.zuul_url and not self.args.zuul_config)
        ):
            raise ArgumentException(
                'Either specify --zuul-url or use a config file')
        if not getattr(self.args, 'func', None):
            self.parser.print_help()
            sys.exit(1)
        if self.args.func == self.enqueue_ref:
            # if oldrev or newrev is set, ensure they're not the same
            if (self.args.oldrev is not None) or \
               (self.args.newrev is not None):
                if self.args.oldrev == self.args.newrev:
                    raise ArgumentException(
                        "The old and new revisions must not be the same.")
            # if they're not set, we pad them out to zero
            if self.args.oldrev is None:
                self.args.oldrev = '0000000000000000000000000000000000000000'
            if self.args.newrev is None:
                self.args.newrev = '0000000000000000000000000000000000000000'
        if self.args.func == self.dequeue:
            if self.args.change is None and self.args.ref is None:
                raise ArgumentException("Change or ref needed.")
            if self.args.change is not None and self.args.ref is not None:
                raise ArgumentException(
                    "The 'change' and 'ref' arguments are mutually exclusive.")
@property
def formatter(self):
if self.args.format == 'JSON':
return formatters.JSONFormatter
elif self.args.format == 'text':
return formatters.PrettyTableFormatter
else:
raise Exception('Unsupported formatter: %s' % self.args.format)
    def readConfig(self):
        """Load the first existing config file into ``self.config``.

        Raises ArgumentException when no candidate file exists.
        """
        # Only ZUUL_* environment variables are exposed to interpolation.
        safe_env = {
            k: v for k, v in os.environ.items()
            if k.startswith('ZUUL_')
        }
        self.config = configparser.ConfigParser(safe_env)
        if self.args.config:
            locations = [self.args.config]
        else:
            locations = self.default_config_locations
        for fp in locations:
            if os.path.exists(os.path.expanduser(fp)):
                self.config.read(os.path.expanduser(fp))
                return
        # NOTE(review): with -c the entries are plain strings, so
        # x.as_posix() in this error path would raise — confirm.
        raise ArgumentException(
            "Unable to locate config "
            "file in %s" % ', '.join([x.as_posix() for x in locations]))
    def setup_logging(self):
        """Configure console logging and an optional per-config log file."""
        config_args = dict(
            format='%(levelname)-8s - %(message)s'
        )
        if self.args.verbose:
            config_args['level'] = logging.DEBUG
        else:
            config_args['level'] = logging.ERROR
        # set logging across all components (urllib etc)
        logging.basicConfig(**config_args)
        # An optional file handler can be declared in the selected
        # configuration section via 'log_file' / 'log_level'.
        if self.args.zuul_config and\
           self.args.zuul_config in self.config.sections():
            zuul_conf = self.args.zuul_config
            log_file = get_default(self.config,
                                   zuul_conf, 'log_file', None)
            if log_file is not None:
                fh = logging.FileHandler(log_file)
                f_loglevel = get_default(self.config,
                                         zuul_conf, 'log_level', 'INFO')
                fh.setLevel(getattr(logging, f_loglevel, 'INFO'))
                f_formatter = logging.Formatter(
                    fmt='%(asctime)s %(name)s %(levelname)-8s - %(message)s',
                    datefmt='%x %X'
                )
                fh.setFormatter(f_formatter)
                self.log.addHandler(fh)
    def _main(self, args=None):
        """Parse args, run the selected subcommand, return an exit code.

        :return: 0 on success, 1 when the subcommand reports failure
        """
        # TODO make func return specific return codes
        try:
            self.parseArguments(args)
            if not self.args.zuul_url:
                self.readConfig()
            self.setup_logging()
            ret = self.args.func()
        except ArgumentException:
            # Show the most specific help available for the failed command.
            if self.args.func:
                name = self.args.func.__name__
                parser = getattr(self, 'cmd_' + name, self.parser)
            else:
                parser = self.parser
            parser.print_help()
            print()
            raise
        if ret:
            self.log.info('Command %s completed '
                          'successfully' % self.args.func.__name__)
            return 0
        else:
            self.log.error('Command %s completed '
                           'with error(s)' % self.args.func.__name__)
            return 1
def main(self):
try:
sys.exit(self._main())
except Exception as e:
self.log.exception(
'Failed with the following exception: %s ' % e
)
sys.exit(1)
    def _check_tenant_scope(self, client):
        """Validate the requested tenant against the API's tenant scope.

        NOTE(review): ``self.tenant()`` is defined later in this class
        (not shown in this chunk).
        """
        tenant_scope = client.info.get("tenant", None)
        tenant = self.tenant()
        if tenant != "":
            # A tenant was requested: it must match a scoped API, if any.
            if tenant_scope is not None and tenant_scope != tenant:
                raise ArgumentException(
                    "Error: Zuul API URL %s is "
                    'scoped to tenant "%s"' % (client.base_url, tenant_scope)
                )
        else:
            # No tenant requested: only valid against a tenant-scoped API.
            if tenant_scope is None:
                raise ArgumentException(
                    "Error: the --tenant argument or the 'tenant' "
                    "field in the configuration file is required"
                )
    def add_autohold_subparser(self, subparsers):
        """Register the ``autohold`` subcommand."""
        cmd_autohold = subparsers.add_parser(
            'autohold', help='hold nodes for failed job')
        cmd_autohold.add_argument('--tenant', help='tenant name',
                                  required=False, default='')
        cmd_autohold.add_argument('--project', help='project name',
                                  required=True)
        cmd_autohold.add_argument('--job', help='job name',
                                  required=True)
        cmd_autohold.add_argument('--change',
                                  help='specific change to hold nodes for',
                                  required=False, default='')
        cmd_autohold.add_argument('--ref', help='git ref to hold nodes for',
                                  required=False, default='')
        cmd_autohold.add_argument('--reason', help='reason for the hold',
                                  required=True)
        cmd_autohold.add_argument('--count',
                                  help='number of job runs (default: 1)',
                                  required=False, type=int, default=1)
        cmd_autohold.add_argument(
            '--node-hold-expiration',
            help=('how long in seconds should the node set be in HOLD status '
                  '(default: scheduler\'s default_hold_expiration value)'),
            required=False, type=int)
        cmd_autohold.set_defaults(func=self.autohold)
        self.cmd_autohold = cmd_autohold
    def autohold(self):
        """Handle the ``autohold`` subcommand via the REST client.

        NOTE(review): ``self.get_client()`` and ``self.tenant()`` are
        defined later in this class (not shown in this chunk).
        """
        # change and ref are mutually exclusive selectors.
        if self.args.change and self.args.ref:
            raise Exception(
                "Change and ref can't be both used for the same request")
        if "," in self.args.change:
            raise Exception("Error: change argument can not contain any ','")
        node_hold_expiration = self.args.node_hold_expiration
        client = self.get_client()
        self._check_tenant_scope(client)
        kwargs = dict(
            tenant=self.tenant(),
            project=self.args.project,
            job=self.args.job,
            change=self.args.change,
            ref=self.args.ref,
            reason=self.args.reason,
            count=self.args.count,
            node_hold_expiration=node_hold_expiration)
        self.log.info('Invoking autohold with arguments: %s' % kwargs)
        r = client.autohold(**kwargs)
        return r
    def add_autohold_delete_subparser(self, subparsers):
        """Register the ``autohold-delete`` subcommand."""
        cmd_autohold_delete = subparsers.add_parser(
            'autohold-delete', help='delete autohold request')
        cmd_autohold_delete.set_defaults(func=self.autohold_delete)
        cmd_autohold_delete.add_argument('--tenant', help='tenant name',
                                         required=False, default='')
        cmd_autohold_delete.add_argument('id', metavar='REQUEST_ID',
                                         help='the hold request ID')
        self.cmd_autohold_delete = cmd_autohold_delete
    def autohold_delete(self):
        """Handle the ``autohold-delete`` subcommand."""
        client = self.get_client()
        self._check_tenant_scope(client)
        kwargs = dict(
            id=self.args.id,
            tenant=self.tenant()
        )
        self.log.info('Invoking autohold-delete with arguments: %s' % kwargs)
        return client.autohold_delete(**kwargs)
    def add_autohold_info_subparser(self, subparsers):
        """Register the ``autohold-info`` subcommand."""
        cmd_autohold_info = subparsers.add_parser(
            'autohold-info', help='retrieve autohold request detailed info')
        cmd_autohold_info.set_defaults(func=self.autohold_info)
        cmd_autohold_info.add_argument('--tenant', help='tenant name',
                                       required=False, default='')
        cmd_autohold_info.add_argument('id', metavar='REQUEST_ID',
                                       help='the hold request ID')
        self.cmd_autohold_info = cmd_autohold_info
    def autohold_info(self):
        """Handle the ``autohold-info`` subcommand; print the request."""
        client = self.get_client()
        self._check_tenant_scope(client)
        request = client.autohold_info(self.args.id, self.tenant())
        if not request:
            print("Autohold request not found")
            return False
        formatted_result = self.formatter('AutoholdQuery')(request)
        print(formatted_result)
        return True
    def add_autohold_list_subparser(self, subparsers):
        """Register the ``autohold-list`` subcommand."""
        cmd_autohold_list = subparsers.add_parser(
            'autohold-list', help='list autohold requests')
        cmd_autohold_list.add_argument('--tenant', help='tenant name',
                                       required=False, default='')
        cmd_autohold_list.set_defaults(func=self.autohold_list)
        self.cmd_autohold_list = cmd_autohold_list
    def autohold_list(self):
        """Handle the ``autohold-list`` subcommand; print all requests."""
        client = self.get_client()
        self._check_tenant_scope(client)
        requests = client.autohold_list(tenant=self.tenant())
        if not requests:
            print("No autohold requests found")
            return True
        formatted_result = self.formatter('AutoholdQueries')(requests)
        print(formatted_result)
        return True
    def add_enqueue_subparser(self, subparsers):
        """Register the ``enqueue`` subcommand."""
        cmd_enqueue = subparsers.add_parser('enqueue', help='enqueue a change')
        cmd_enqueue.add_argument('--tenant', help='tenant name',
                                 required=False, default='')
        cmd_enqueue.add_argument('--pipeline', help='pipeline name',
                                 required=True)
        cmd_enqueue.add_argument('--project', help='project name',
                                 required=True)
        cmd_enqueue.add_argument('--change', help='change id',
                                 required=True)
        cmd_enqueue.set_defaults(func=self.enqueue)
        self.cmd_enqueue = cmd_enqueue
    def enqueue(self):
        """Handle the ``enqueue`` subcommand via the REST client."""
        client = self.get_client()
        self._check_tenant_scope(client)
        kwargs = dict(
            tenant=self.tenant(),
            pipeline=self.args.pipeline,
            project=self.args.project,
            change=self.args.change
        )
        self.log.info('Invoking enqueue with arguments: %s' % kwargs)
        r = client.enqueue(**kwargs)
        return r
def add_enqueue_ref_subparser(self, subparsers):
cmd_enqueue_ref = subparsers.add_parser(
'enqueue-ref', help='enqueue a ref',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Submit a trigger event
Directly enqueue a trigger event. This is usually used
to manually "replay" a trigger received from an external
source such as gerrit.'''))
cmd_enqueue_ref.add_argument('--tenant', help='tenant name',
required=False, default='')
cmd_enqueue_ref.add_argument('--pipeline', help='pipeline name',
required=True)
cmd_enqueue_ref.add_argument('--project', help='project name',
required=True)
cmd_enqueue_ref.add_argument('--ref', help='ref name',
required=True)
cmd_enqueue_ref.add_argument(
'--oldrev', help='old revision', default=None)
cmd_enqueue_ref.add_argument(
'--newrev', help='new revision', default=None)
cmd_enqueue_ref.set_defaults(func=self.enqueue_ref)
self.cmd_enqueue_ref = cmd_enqueue_ref
def enqueue_ref(self):
client = self.get_client()
self._check_tenant_scope(client)
kwargs = dict(
tenant=self.tenant(),
pipeline=self.args.pipeline,
project=self.args.project,
ref=self.args.ref,
oldrev=self.args.oldrev,
newrev=self.args.newrev
)
self.log.info('Invoking enqueue-ref with arguments: %s' % kwargs)
r = client.enqueue_ref(**kwargs)
return r
def add_dequeue_subparser(self, subparsers):
cmd_dequeue = subparsers.add_parser('dequeue',
help='dequeue a buildset by its '
'change or ref')
cmd_dequeue.add_argument('--tenant', help='tenant name',
required=False, default='')
cmd_dequeue.add_argument('--pipeline', help='pipeline name',
required=True)
cmd_dequeue.add_argument('--project', help='project name',
required=True)
cmd_dequeue.add_argument('--change', help='change id',
default=None)
cmd_dequeue.add_argument('--ref', help='ref name',
default=None)
cmd_dequeue.set_defaults(func=self.dequeue)
self.cmd_dequeue = cmd_dequeue
def dequeue(self):
client = self.get_client()
self._check_tenant_scope(client)
kwargs = dict(
tenant=self.tenant(),
pipeline=self.args.pipeline,
project=self.args.project,
change=self.args.change,
ref=self.args.ref
)
self.log.info('Invoking dequeue with arguments: %s' % kwargs)
r = client.dequeue(**kwargs)
return r
def add_promote_subparser(self, subparsers):
cmd_promote = subparsers.add_parser('promote',
help='promote one or more changes')
cmd_promote.add_argument('--tenant', help='tenant name',
required=False, default='')
cmd_promote.add_argument('--pipeline', help='pipeline name',
required=True)
cmd_promote.add_argument('--changes', help='change ids',
required=True, nargs='+')
cmd_promote.set_defaults(func=self.promote)
self.cmd_promote = cmd_promote
def promote(self):
client = self.get_client()
self._check_tenant_scope(client)
kwargs = dict(
tenant=self.tenant(),
pipeline=self.args.pipeline,
change_ids=self.args.changes
)
self.log.info('Invoking promote with arguments: %s' % kwargs)
r = client.promote(**kwargs)
return r
    def get_client(self):
        """Instantiate a ZuulRESTClient from CLI arguments or config file.

        An explicit --zuul-url argument always wins; otherwise the URL,
        SSL-verification flag and auth token come from the configuration
        file.  A --auth-token argument overrides the config token.

        :returns: a configured ZuulRESTClient.
        :raises Exception: if no usable connection information is found,
            or the chosen config section has no "url" value.
        """
        if self.args.zuul_url:
            self.log.debug(
                'Using Zuul URL provided as argument to instantiate client')
            client = ZuulRESTClient(self.args.zuul_url,
                                    self.args.verify_ssl,
                                    self.args.auth_token)
            return client
        conf_sections = self.config.sections()
        # With exactly one section and no explicit --zuul-config, the
        # single section is used implicitly.
        if len(conf_sections) == 1 and self.args.zuul_config is None:
            zuul_conf = conf_sections[0]
            self.log.debug(
                'Using section "%s" found in '
                'config to instantiate client' % zuul_conf)
        elif self.args.zuul_config and self.args.zuul_config in conf_sections:
            zuul_conf = self.args.zuul_config
        else:
            raise Exception('Unable to find a way to connect to Zuul, '
                            'provide the "--zuul-url" argument or set up a '
                            'zuul-client configuration file.')
        server = get_default(self.config,
                             zuul_conf, 'url', None)
        verify = get_default(self.config, zuul_conf,
                             'verify_ssl',
                             self.args.verify_ssl)
        # Allow token override by CLI argument
        auth_token = self.args.auth_token or get_default(self.config,
                                                         zuul_conf,
                                                         'auth_token',
                                                         None)
        if server is None:
            raise Exception('Missing "url" configuration value')
        client = ZuulRESTClient(server, verify, auth_token)
        return client
def tenant(self):
if self.args.tenant == "":
if self.config is not None:
config_tenant = ""
conf_sections = self.config.sections()
if (
self.args.zuul_config
and self.args.zuul_config in conf_sections
):
zuul_conf = self.args.zuul_config
config_tenant = get_default(
self.config, zuul_conf, "tenant", ""
)
return config_tenant
return self.args.tenant
def add_encrypt_subparser(self, subparsers):
cmd_encrypt = subparsers.add_parser(
'encrypt', help='Encrypt a secret to be used in a project\'s jobs')
cmd_encrypt.add_argument('--public-key',
help='path to project public key '
'(bypass API call)',
metavar='/path/to/pubkey',
required=False, default=None)
cmd_encrypt.add_argument('--tenant', help='tenant name',
required=False, default='')
cmd_encrypt.add_argument('--project', help='project name',
required=False, default=None)
cmd_encrypt.add_argument('--no-strip', action='store_true',
help='Do not strip whitespace from beginning '
'or end of input. Ignored when '
'--infile is used.',
default=False)
cmd_encrypt.add_argument('--secret-name',
default=None,
help='How the secret should be named. If not '
'supplied, a placeholder will be used.')
cmd_encrypt.add_argument('--field-name',
default=None,
help='How the name of the secret variable. '
'If not supplied, a placeholder will be '
'used.')
cmd_encrypt.add_argument('--infile',
default=None,
help='A filename whose contents will be '
'encrypted. If not supplied, the value '
'will be read from standard input.\n'
'If entering the secret manually, press '
'Ctrl+d when finished to process the '
'secret.')
cmd_encrypt.add_argument('--outfile',
default=None,
help='A filename to which the encrypted '
'value will be written. If not '
'supplied, the value will be written '
'to standard output.')
cmd_encrypt.set_defaults(func=self.encrypt)
self.cmd_encrypt = cmd_encrypt
    def encrypt(self):
        """Encrypt a secret with a project's public key.

        The key is either read from a local file (--public-key) or
        fetched from the API; openssl then produces PKCS1-OAEP
        ciphertext chunks which are rendered as a Zuul "secret" YAML
        snippet and printed or written to --outfile.

        :returns: True on success, False if encryption failed.
        :raises ArgumentException: if neither a public key nor a
            project was provided.
        """
        if self.args.project is None and self.args.public_key is None:
            raise ArgumentException(
                'Either provide a public key or a project to continue'
            )
        strip = not self.args.no_strip
        if self.args.infile:
            # File input is used verbatim; stripping only applies to
            # interactively entered secrets.
            strip = False
            try:
                with open(self.args.infile) as f:
                    plaintext = f.read()
            except FileNotFoundError:
                raise Exception('File "%s" not found' % self.args.infile)
            except PermissionError:
                raise Exception(
                    'Insufficient rights to open %s' % self.args.infile)
        else:
            plaintext = sys.stdin.read()
        if strip:
            plaintext = plaintext.strip()
        # delete=False because openssl opens the key file by name; it is
        # removed explicitly in the finally block below.
        pubkey_file = tempfile.NamedTemporaryFile(delete=False)
        self.log.debug('Creating temporary key file %s' % pubkey_file.name)
        try:
            if self.args.public_key is not None:
                self.log.debug('Using local public key')
                shutil.copy(self.args.public_key, pubkey_file.name)
            else:
                client = self.get_client()
                self._check_tenant_scope(client)
                key = client.get_key(self.tenant(), self.args.project)
                pubkey_file.write(str.encode(key))
                pubkey_file.close()
            self.log.debug('Invoking openssl')
            ciphertext_chunks = encrypt_with_openssl(pubkey_file.name,
                                                     plaintext,
                                                     self.log)
            output = textwrap.dedent(
                '''
                - secret:
                    name: {}
                    data:
                      {}: !encrypted/pkcs1-oaep
                '''.format(self.args.secret_name or '<name>',
                           self.args.field_name or '<fieldname>'))
            # Wrap the base64 chunks to match Zuul's YAML layout.
            twrap = textwrap.TextWrapper(width=79,
                                         initial_indent=' ' * 8,
                                         subsequent_indent=' ' * 10)
            for chunk in ciphertext_chunks:
                chunk = twrap.fill('- ' + chunk)
                output += chunk + '\n'
            if self.args.outfile:
                with open(self.args.outfile, "w") as f:
                    f.write(output)
            else:
                print(output)
            return_code = True
        except ArgumentException as e:
            # do not log and re-raise, caught later
            raise e
        except Exception as e:
            self.log.exception(e)
            return_code = False
        finally:
            self.log.debug('Deleting temporary key file %s' % pubkey_file.name)
            os.unlink(pubkey_file.name)
        return return_code
def add_build_info_subparser(self, subparsers):
cmd_build_info = subparsers.add_parser(
'build-info', help='Get info on a specific build')
cmd_build_info.add_argument(
'--tenant', help='tenant name', required=False, default='')
cmd_build_info.add_argument(
'--uuid', help='build UUID', required=True)
cmd_build_info.add_argument(
'--show-job-output', default=False, action='store_true',
help='Only download the job\'s output to the console')
cmd_build_info.add_argument(
'--show-artifacts', default=False, action='store_true',
help='Display only artifacts information for the build')
cmd_build_info.add_argument(
'--show-inventory', default=False, action='store_true',
help='Display only ansible inventory information for the build')
cmd_build_info.set_defaults(func=self.build_info)
self.cmd_build_info = cmd_build_info
def build_info(self):
if sum(map(lambda x: x and 1 or 0,
[self.args.show_artifacts,
self.args.show_job_output,
self.args.show_inventory])
) > 1:
raise Exception(
'--show-artifacts, --show-job-output and '
'--show-inventory are mutually exclusive'
)
client = self.get_client()
self._check_tenant_scope(client)
build = client.build(self.tenant(), self.args.uuid)
if not build:
print('Build not found')
return False
if self.args.show_job_output:
output = client.session.get(build['job_output_url'])
client._check_request_status(output)
formatted_result = output.text
elif self.args.show_artifacts:
formatted_result = self.formatter('Artifacts')(
build.get('artifacts', [])
)
elif self.args.show_inventory:
formatted_result = self.formatter('Inventory')(
build.get('inventory', {})
)
else:
formatted_result = self.formatter('Build')(build)
print(formatted_result)
return True
def add_builds_list_subparser(self, subparsers):
cmd_builds = subparsers.add_parser(
'builds', help='List builds matching search criteria')
cmd_builds.add_argument(
'--tenant', help='tenant name', required=False, default='')
cmd_builds.add_argument(
'--project', help='project name')
cmd_builds.add_argument(
'--pipeline', help='pipeline name')
cmd_builds.add_argument(
'--change', help='change reference')
cmd_builds.add_argument(
'--branch', help='branch name')
cmd_builds.add_argument(
'--patchset', help='patchset number')
cmd_builds.add_argument(
'--ref', help='ref name')
cmd_builds.add_argument(
'--newrev', help='the applied revision')
cmd_builds.add_argument(
'--job', help='job name')
cmd_builds.add_argument(
'--voting', help='show voting builds only',
action='store_true', default=False)
cmd_builds.add_argument(
'--non-voting', help='show non-voting builds only',
action='store_true', default=False)
cmd_builds.add_argument(
'--node', help='node name')
cmd_builds.add_argument(
'--result', help='build result')
cmd_builds.add_argument(
'--final', help='show final builds only',
action='store_true', default=False)
cmd_builds.add_argument(
'--held', help='show held builds only',
action='store_true', default=False)
cmd_builds.add_argument(
'--limit', help='maximum amount of results to return',
default=50, type=int)
cmd_builds.add_argument(
'--skip', help='how many results to skip',
default=0, type=int)
cmd_builds.set_defaults(func=self.builds)
self.cmd_builds = cmd_builds
def builds(self):
if self.args.voting and self.args.non_voting:
raise Exception('--voting and --non-voting are mutually exclusive')
filters = {'limit': self.args.limit,
'skip': self.args.skip}
if self.args.project:
filters['project'] = self.args.project
if self.args.pipeline:
filters['pipeline'] = self.args.pipeline
if self.args.change:
filters['change'] = self.args.change
if self.args.branch:
filters['branch'] = self.args.branch
if self.args.patchset:
filters['patchset'] = self.args.patchset
if self.args.ref:
filters['ref'] = self.args.ref
if self.args.newrev:
filters['newrev'] = self.args.newrev
if self.args.job:
filters['job_name'] = self.args.job
if self.args.voting:
filters['voting'] = True
if self.args.non_voting:
filters['voting'] = False
if self.args.node:
filters['node'] = self.args.node
if self.args.result:
filters['result'] = self.args.result
if self.args.final:
filters['final'] = True
if self.args.held:
filters['held'] = True
client = self.get_client()
self._check_tenant_scope(client)
request = client.builds(tenant=self.tenant(), **filters)
formatted_result = self.formatter('Builds')(request)
print(formatted_result)
return True
def main():
    """Console-script entry point: run the Zuul CLI client."""
    client = ZuulClient()
    client.main()
import time
from dateutil.parser import isoparse
import prettytable
import json
import yaml
class BaseFormatter:
    """Base class turning Zuul API data structures into display strings.

    Subclasses implement the per-data-type ``format<Type>`` methods;
    ``__call__`` dispatches on the ``data_type`` chosen at creation.
    """
    def __init__(self, data_type):
        self.data_type = data_type
    def __call__(self, data):
        """Format data according to the type of data being displayed."""
        try:
            formatter = getattr(self, 'format' + self.data_type)
        except AttributeError:
            # Only a *missing* formatter means the type is unsupported.
            # The previous broad "except Exception" also swallowed
            # errors raised while formatting and misreported them as an
            # unsupported type; those must propagate unmasked.
            raise Exception('Unsupported data type "%s"' % self.data_type)
        return formatter(data)
    def formatBuildNodes(self, data):
        raise NotImplementedError
    def formatAutoholdQueries(self, data):
        raise NotImplementedError
    def formatAutoholdQuery(self, data):
        raise NotImplementedError
    def formatJobResource(self, data):
        raise NotImplementedError
    def formatArtifacts(self, data):
        raise NotImplementedError
    def formatInventory(self, data):
        raise NotImplementedError
    def formatBuild(self, data):
        raise NotImplementedError
    def formatBuildSet(self, data):
        raise NotImplementedError
    def formatBuilds(self, data):
        raise NotImplementedError
    def formatBuildSets(self, data):
        raise NotImplementedError
class JSONFormatter(BaseFormatter):
    """Formatter emitting the raw API data as pretty-printed JSON."""
    def __call__(self, data) -> str:
        # Every data type is rendered the same way, so the per-type
        # dispatch of the base class is bypassed entirely.
        return json.dumps(data, indent=2, sort_keys=True)
class PrettyTableFormatter(BaseFormatter):
    """Format Zuul data in a nice human-readable way for the CLI."""
    def formatAutoholdQuery(self, data) -> str:
        """Render one autohold request as a multi-line text block."""
        text = ""
        text += "ID: %s\n" % data.get('id', 'N/A')
        text += "Tenant: %s\n" % data.get('tenant', 'N/A')
        text += "Project: %s\n" % data.get('project', 'N/A')
        text += "Job: %s\n" % data.get('job', 'N/A')
        text += "Ref Filter: %s\n" % data.get('ref_filter', 'N/A')
        # 'count' fallback — presumably for older API responses that
        # predate 'max_count'; TODO confirm against server versions.
        text += "Max Count: %s\n" % (data.get('max_count', None) or
                                     data.get('count', 'N/A'))
        text += "Current Count: %s\n" % data.get('current_count', 'N/A')
        text += "Node Expiration: %s\n" % (
            data.get('node_expiration', None) or
            data.get('node_hold_expiration', 'N/A')
        )
        text += "Request Expiration: %s\n" % (
            data.get('expired', None) and time.ctime(data['expired']) or
            'N/A'
        )
        text += "Reason: %s\n" % data.get('reason', 'N/A')
        text += "Held Nodes:\n"
        for buildnodes in data.get('nodes', []):
            text += self.formatBuildNodes(buildnodes)
        return text
    def formatBuildNodes(self, data) -> str:
        """Render the (build, node) pairs of one held-nodes entry."""
        table = prettytable.PrettyTable(field_names=['Build ID', 'Node ID'])
        for node in data.get('nodes', []):
            table.add_row([data.get('build', 'N/A'), node])
        return str(table)
    def formatAutoholdQueries(self, data) -> str:
        """Render a list of autohold requests as a summary table."""
        table = prettytable.PrettyTable(
            field_names=[
                'ID', 'Tenant', 'Project', 'Job', 'Ref Filter',
                'Max Count', 'Reason'
            ])
        for request in data:
            table.add_row([
                request.get('id', 'N/A'),
                request.get('tenant', 'N/A'),
                request.get('project', 'N/A'),
                request.get('job', 'N/A'),
                request.get('ref_filter', 'N/A'),
                request.get('max_count', None) or request.get('count', 'N/A'),
                request.get('reason', 'N/A'),
            ])
        return str(table)
    def formatBuild(self, data) -> str:
        """Render one build as a multi-line text block."""
        output = ''
        # This is based on the web UI
        output += 'UUID: %s\n' % data.get('uuid', 'N/A')
        output += '=' * len('UUID: %s' % data.get('uuid', 'N/A')) + '\n'
        output += 'Result: %s\n' % data.get('result', 'N/A')
        output += 'Pipeline: %s\n' % data.get('pipeline', 'N/A')
        output += 'Project: %s\n' % data.get('project', 'N/A')
        output += 'Job: %s\n' % data.get('job_name', 'N/A')
        if data.get('newrev'):
            output += 'Ref: %s\n' % data.get('ref', 'N/A')
            output += 'New Rev: %s\n' % data['newrev']
        if data.get('change') and data.get('patchset'):
            output += 'Change: %s\n' % (str(data['change']) + ',' +
                                        str(data['patchset']))
            output += 'Branch: %s\n' % data.get('branch', 'N/A')
        output += 'Ref URL: %s\n' % data.get('ref_url', 'N/A')
        output += 'Event ID: %s\n' % data.get('event_id', 'N/A')
        output += 'Buildset ID: %s\n' % data.get('buildset',
                                                 {}).get('uuid', 'N/A')
        output += 'Start time: %s\n' % (
            data.get('start_time') and
            isoparse(data['start_time']) or
            'N/A'
        )
        output += 'End time: %s\n' % (
            data.get('end_time') and
            isoparse(data['end_time']) or
            'N/A'
        )
        output += 'Duration: %s\n' % data.get('duration', 'N/A')
        output += 'Voting: %s\n' % (data.get('voting') and 'Yes' or 'No')
        output += 'Log URL: %s\n' % data.get('log_url', 'N/A')
        output += 'Node: %s\n' % data.get('node_name', 'N/A')
        provides = data.get('provides', [])
        if provides:
            output += 'Provides:\n'
            for resource in provides:
                output += '- %s\n' % self.formatJobResource(resource)
        # 'final'/'held' may legitimately be False, so test for None
        # explicitly rather than relying on truthiness.
        if data.get('final', None) is not None:
            output += 'Final: %s\n' % (data['final'] and 'Yes' or 'No')
        else:
            output += 'Final: N/A\n'
        if data.get('held', None) is not None:
            output += 'Held: %s' % (data['held'] and 'Yes' or 'No')
        else:
            output += 'Held: N/A'
        return output
    def formatArtifacts(self, data) -> str:
        """Render a build's artifacts as a name/url table."""
        table = prettytable.PrettyTable(
            field_names=['name', 'url']
        )
        for artifact in data:
            table.add_row([artifact.get('name', 'N/A'),
                           artifact.get('url', 'N/A')])
        return str(table)
    def formatInventory(self, data) -> str:
        """Render the ansible inventory as YAML."""
        return yaml.dump(data, default_flow_style=False)
    def formatBuildSet(self, data) -> str:
        """Render one buildset as a multi-line text block."""
        # This is based on the web UI
        output = ''
        output += 'UUID: %s\n' % data.get('uuid', 'N/A')
        output += '=' * len('UUID: %s' % data.get('uuid', 'N/A')) + '\n'
        output += 'Result: %s\n' % data.get('result', 'N/A')
        if data.get('newrev'):
            output += 'Ref: %s\n' % data.get('ref', 'N/A')
            output += 'New Rev: %s\n' % data['newrev']
        if data.get('change') and data.get('patchset'):
            output += 'Change: %s\n' % (str(data['change']) + ',' +
                                        str(data['patchset']))
        output += 'Project: %s\n' % data.get('project', 'N/A')
        output += 'Branch: %s\n' % data.get('branch', 'N/A')
        output += 'Pipeline: %s\n' % data.get('pipeline', 'N/A')
        output += 'Event ID: %s\n' % data.get('event_id', 'N/A')
        output += 'Message: %s' % data.get('message', 'N/A')
        return output
    def formatBuildSets(self, data) -> str:
        """Render a list of buildsets as a summary table."""
        table = prettytable.PrettyTable(
            field_names=[
                'ID', 'Project', 'Branch', 'Pipeline', 'Change or Ref',
                'Result', 'Event ID'
            ]
        )
        for buildset in data:
            if buildset.get('change') and buildset.get('patchset'):
                change = (
                    str(buildset['change']) + ',' +
                    str(buildset['patchset'])
                )
            else:
                change = buildset.get('ref', 'N/A')
            table.add_row([
                buildset.get('uuid', 'N/A'),
                buildset.get('project', 'N/A'),
                buildset.get('branch', 'N/A'),
                buildset.get('pipeline', 'N/A'),
                change,
                buildset.get('result', 'N/A'),
                buildset.get('event_id', 'N/A')
            ])
        return str(table)
    def formatBuilds(self, data) -> str:
        """Render a list of builds as a summary table."""
        table = prettytable.PrettyTable(
            field_names=[
                'ID', 'Job', 'Project', 'Branch', 'Pipeline', 'Change or Ref',
                'Duration (s)', 'Start time', 'Result', 'Event ID'
            ]
        )
        for build in data:
            if build.get('change') and build.get('patchset'):
                change = str(build['change']) + ',' + str(build['patchset'])
            else:
                change = build.get('ref', 'N/A')
            start_time = (
                build.get('start_time') and
                isoparse(build['start_time']) or
                'N/A'
            )
            table.add_row([
                build.get('uuid', 'N/A'),
                build.get('job_name', 'N/A'),
                build.get('project', 'N/A'),
                build.get('branch', 'N/A'),
                build.get('pipeline', 'N/A'),
                change,
                build.get('duration', 'N/A'),
                start_time,
                build.get('result', 'N/A'),
                build.get('event_id', 'N/A')
            ])
        return str(table)
    def formatJobResource(self, data) -> str:
        """Render a job-provided resource by its name."""
        return data.get('name', 'N/A')
import base64
import math
import os
import re
import subprocess
def get_default(config, section, option, default=None, expand_user=False):
    """Read ``option`` from ``section`` of ``config`` with a fallback.

    The type of ``default`` selects the parser applied to the raw
    config value: bools use getboolean(), ints use getint(), anything
    else is returned as a plain string.  When ``expand_user`` is true,
    a leading "~" in a truthy value is expanded to the home directory.
    """
    if not config.has_option(section, option):
        value = default
    elif isinstance(default, bool):
        # bool must be tested before int: True/False are also ints.
        value = config.getboolean(section, option)
    elif isinstance(default, int):
        value = config.getint(section, option)
    else:
        value = config.get(section, option)
    if expand_user and value:
        value = os.path.expanduser(value)
    return value
def encrypt_with_openssl(pubkey_path, plaintext, logger=None):
    """Encrypt ``plaintext`` with the RSA public key at ``pubkey_path``.

    The plaintext is split into chunks small enough for PKCS1-OAEP and
    each chunk is encrypted with the ``openssl rsautl`` command.

    :returns: a list of base64-encoded ciphertext chunks.
    :raises Exception: if openssl is missing, fails, or the key length
        cannot be determined from the key file.
    """
    cmd = ['openssl', 'version']
    if logger:
        logger.debug('Invoking "%s"' % ' '.join(cmd))
    try:
        openssl_version = subprocess.check_output(
            cmd).split()[1]
    except FileNotFoundError:
        raise Exception('"openssl" is not installed on the system')
    cmd = ['openssl', 'rsa', '-text', '-pubin', '-in', pubkey_path]
    if logger:
        logger.debug('Invoking "%s"' % ' '.join(cmd))
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode != 0:
        raise Exception('openssl failure (Return code %s)' % p.returncode)
    output = stdout.decode('utf-8')
    # openssl 0.x and 1.x+ report the modulus size differently.
    if openssl_version.startswith(b'0.'):
        key_length_re = r'^Modulus \((?P<key_length>\d+) bit\):$'
    else:
        key_length_re = r'^(|RSA )Public-Key: \((?P<key_length>\d+) bit\)$'
    m = re.match(key_length_re, output, re.MULTILINE)
    if m is None:
        # Previously this fell through to an opaque AttributeError on
        # m.group(); fail with an actionable message instead.
        raise Exception(
            'Could not determine key length from "%s"' % pubkey_path)
    nbits = int(m.group('key_length'))
    nbytes = int(nbits / 8)
    max_bytes = nbytes - 42  # PKCS1-OAEP overhead
    chunks = int(math.ceil(float(len(plaintext)) / max_bytes))
    ciphertext_chunks = []
    if logger:
        logger.info(
            'Public key length: {} bits ({} bytes)'.format(nbits, nbytes))
        logger.info(
            'Max plaintext length per chunk: {} bytes'.format(max_bytes))
        logger.info(
            'Input plaintext length: {} bytes'.format(len(plaintext)))
        logger.info('Number of chunks: {}'.format(chunks))
    cmd = ['openssl', 'rsautl', '-encrypt',
           '-oaep', '-pubin', '-inkey',
           pubkey_path]
    if logger:
        logger.debug('Invoking "%s" with each data chunk:' % ' '.join(cmd))
    for count in range(chunks):
        chunk = plaintext[int(count * max_bytes):
                          int((count + 1) * max_bytes)]
        p = subprocess.Popen(cmd,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        if logger:
            logger.debug('\tchunk %s' % (count + 1))
        (stdout, stderr) = p.communicate(str.encode(chunk))
        if p.returncode != 0:
            raise Exception('openssl failure (Return code %s)' % p.returncode)
        ciphertext_chunks.append(base64.b64encode(stdout).decode('utf-8'))
    return ciphertext_chunks
# zuul-lint
## Validate from the command line
```
pip install zuul-lint
zuul-lint .zuul.yaml
```
## Validate with pre-commit
Add the code below to your `.pre-commit-config.yaml` file:
```yaml
- repo: https://github.com/pycontribs/zuul-lint.git
rev: "0.1"
hooks:
- id: zuul-lint
```
## Validate with VS Code
To ease editing Zuul CI configuration files, we added experimental support for
a Zuul JSON Schema. This should enable validation and auto-completion in
code editors.
For example, in [VS Code](1) you can use the [YAML](2) extension to enable schema
validation by adding the following to `settings.json`:
```json
"yaml.schemas": {
"https://raw.githubusercontent.com/pycontribs/zuul-lint/master/zuul_lint/zuul-schema.json": ["*zuul.d/*.yaml", "*/.zuul.yaml"]
},
"yaml.customTags": [
"!encrypted/pkcs1-oaep array"
],
"sortJSON.orderOverride": ["title", "name", "$schema", "version", "description", "type"],
"sortJSON.orderUnderride": ["definitions"]
```
[1]: https://code.visualstudio.com/
[2]: https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml
| zuul-lint | /zuul-lint-0.1.2.tar.gz/zuul-lint-0.1.2/README.md | README.md |
import argparse
import base64
import os
import sys
import logging
import cherrypy
import hashlib
import json
import typing
import functools
import yaml
from . import filesystem
from . import storage
from . import swift
import jwt
# Map of storage backend names (as referenced by the configuration
# file) to their driver implementations.
DRIVERS = {
    'filesystem': filesystem.Driver,
    'swift': swift.Driver,
}
class Authorization(cherrypy.Tool):
    """CherryPy tool implementing registry token authentication.

    Clients obtain a JWT bearer token from the /auth/token endpoint and
    present it on subsequent requests.  Tokens carry an access level
    ('read' or 'write') and are signed with the configured secret.
    """
    log = logging.getLogger("registry.authz")
    READ = 'read'
    WRITE = 'write'
    AUTH = 'auth'

    def __init__(self, secret, users, public_url):
        self.secret = secret
        self.public_url = public_url
        # username -> password maps per access level.
        self.rw = {}
        self.ro = {}
        self.anonymous_read = True
        for user in users:
            if user['access'] == self.WRITE:
                self.rw[user['name']] = user['pass']
            if user['access'] == self.READ:
                self.ro[user['name']] = user['pass']
                # Any read-only user disables anonymous read access.
                self.anonymous_read = False
        if self.anonymous_read:
            self.log.info("Anonymous read access enabled")
        else:
            self.log.info("Anonymous read access disabled")
        cherrypy.Tool.__init__(self, 'before_handler',
                               self.check_auth,
                               priority=1)

    def check(self, store, user, password):
        """Return True iff user exists in store with matching password."""
        if user not in store:
            return False
        return store[user] == password

    def unauthorized(self, scope):
        """Raise a 401 carrying the www-authenticate challenge."""
        cherrypy.response.headers['www-authenticate'] = (
            'Bearer realm="%s/auth/token",scope="%s"' % (
                self.public_url, scope)
        )
        raise cherrypy.HTTPError(401, 'Authentication required')

    def check_auth(self, level=READ):
        """Verify the request bears a token for the required level."""
        auth_header = cherrypy.request.headers.get('authorization')
        if auth_header and 'Bearer' in auth_header:
            token = auth_header.split()[1]
            # Validate against the configured secret.  This previously
            # used the hard-coded literal 'secret', which ignored the
            # configuration and made tokens trivially forgeable by
            # anyone who read the source.
            payload = jwt.decode(token, self.secret, algorithms=['HS256'])
            if payload.get('level') in [level, self.WRITE]:
                self.log.debug('Auth ok %s', level)
                return
        self.log.debug('Unauthorized %s', level)
        self.unauthorized(level)

    def _get_level(self, scope):
        """Derive the access level implied by a requested token scope."""
        level = None
        if not isinstance(scope, list):
            scope = scope.split(' ')
        for resource_scope in scope:
            parts = resource_scope.split(':')
            if parts[0] == 'repository' and 'push' in parts[2]:
                level = self.WRITE
            if (parts[0] == 'repository' and 'pull' in parts[2]
                    and level is None):
                level = self.READ
        if level is None:
            if self.anonymous_read:
                # No scope was provided, so this is an authentication
                # request; treat it as requesting 'write' access so
                # that we validate the password.
                level = self.WRITE
            else:
                level = self.READ
        return level

    @cherrypy.expose
    @cherrypy.tools.json_out(content_type='application/json; charset=utf-8')
    def token(self, **kw):
        """Issue a signed JWT bearer token for the requested scope."""
        # If the scope of the token requested is for pushing an image,
        # that corresponds to 'write' level access, so we verify the
        # password.
        #
        # If the scope of the token is not specified, we treat it as
        # 'write' since it probably means the client is performing
        # login validation. The _get_level method takes care of that.
        #
        # If the scope requested is for pulling an image, we always
        # grant a read-level token. This covers the case where no
        # authentication credentials are supplied, and also an
        # interesting edge case: the docker client, when configured
        # with a registry mirror, will, bless it's little heart, send
        # the *docker hub* credentials to that mirror. In order for
        # us to act as a a stand-in for docker hub, we need to accept
        # those credentials.
        auth_header = cherrypy.request.headers.get('authorization')
        level = self._get_level(kw.get('scope', ''))
        self.log.info('Authenticate level %s', level)
        if level == self.WRITE:
            self._check_creds(auth_header, [self.rw], level)
        elif level == self.READ and not self.anonymous_read:
            self._check_creds(auth_header, [self.rw, self.ro], level)
        # If we permit anonymous read and we're requesting read, no
        # check is performed.
        self.log.debug('Generate %s token', level)
        # Sign with the configured secret (was the literal 'secret').
        token = jwt.encode({'level': level}, self.secret, algorithm='HS256')
        return {'token': token,
                'access_token': token}

    def _check_creds(self, auth_header, credstores, level):
        """Validate Basic credentials against the given credstores."""
        # If the password is okay, fall through; otherwise call
        # unauthorized for the side effect of raising an exception.
        if auth_header and 'Basic' in auth_header:
            cred = auth_header.split()[1]
            cred = base64.decodebytes(cred.encode('utf8')).decode('utf8')
            user, pw = cred.split(':', 1)
            # Return true on the first credstore with the user, false otherwise
            if not next(filter(
                    lambda cs: self.check(cs, user, pw), credstores), False):
                self.unauthorized(level)
        else:
            self.unauthorized(level)
class RegistryAPI:
"""Registry API server.
Implements the container registry protocol as documented in
https://docs.docker.com/registry/spec/api/
"""
log = logging.getLogger("registry.api")
DEFAULT_NAMESPACE = '_local'
# A list of content types ordered by preference. Manifest lists
# come first so that multi-arch builds are supported.
CONTENT_TYPES = [
'application/vnd.docker.distribution.manifest.list.v2+json',
'application/vnd.oci.image.index.v1+json',
'application/vnd.docker.distribution.manifest.v2+json',
'application/vnd.oci.image.manifest.v1+json',
]
def __init__(self, store, namespaced, authz, conf):
self.storage = store
self.authz = authz
self.namespaced = namespaced
self.conf = conf
def get_namespace(self, repository):
if not self.namespaced:
return (self.DEFAULT_NAMESPACE, repository)
parts = repository.split('/')
return (parts[0], '/'.join(parts[1:]))
    def not_found(self):
        # Standard 404 used by handlers when a requested object is absent.
        raise cherrypy.HTTPError(404)
@cherrypy.expose
@cherrypy.tools.json_out(content_type='application/json; charset=utf-8')
def version_check(self):
self.log.info('Version check')
return {'version': '1.0'}
res = cherrypy.response
res.headers['Distribution-API-Version'] = 'registry/2.0'
    @cherrypy.expose
    # By default CherryPy will try to encode the body/add a charset to
    # headers if the response type is text/*. However, since it's
    # changing unicode things that may alter the body length, CherryPy
    # deletes the Content-Length so that the framework will
    # automatically re-caclulate it when the response is sent.
    #
    # This poses a problem for blob HEAD requests which return a blank
    # body -- we don't really have a Content-Type for a blank body so
    # it defaults to text/html and goes into the charset detection
    # path where the Content-Length set would get set to zero.
    # Clients handle this in different and confusing ways; doing
    # things sending back invalid manifests several steps later.
    #
    # Disabling the add_charset tool here is important to avoid this
    # behaviour and send a correct Content-Length.
    @cherrypy.config(**{'tools.encode.add_charset': False})
    def head_blob(self, repository, digest):
        """Answer a blob HEAD request with digest and size headers only."""
        namespace, repository = self.get_namespace(repository)
        size = self.storage.blob_size(namespace, digest)
        if size is None:
            self.log.info('Head blob %s %s %s not found',
                          namespace, repository, digest)
            return self.not_found()
        self.log.info('Head blob %s %s %s size %s',
                      namespace, repository, digest, size)
        res = cherrypy.response
        res.headers['Docker-Content-Digest'] = digest
        res.headers['Content-Length'] = str(size)
        return ''
@cherrypy.expose
@cherrypy.config(**{'response.stream': True})
def get_blob(self, repository, digest, ns=None):
# The ns parameter is supplied by some image clients (like the one
# found in buildx). We specify it here so that cherrypy doesn't 404
# when receiving that parameter, but we ignore it otherwise.
namespace, repository = self.get_namespace(repository)
self.log.info('Get blob %s %s %s', namespace, repository, digest)
size, data_iter = self.storage.stream_blob(namespace, digest)
if data_iter is None:
return self.not_found()
res = cherrypy.response
res.headers['Docker-Content-Digest'] = digest
res.headers['Content-Type'] = 'application/octet-stream'
if size is not None:
res.headers['Content-Length'] = str(size)
return data_iter
@cherrypy.expose
@cherrypy.tools.json_out(content_type='application/json; charset=utf-8')
def get_tags(self, repository):
namespace, repository = self.get_namespace(repository)
self.log.info('Get tags %s %s', namespace, repository)
tags = self.storage.list_tags(namespace, repository)
return {'name': repository,
'tags': [t.name for t in tags]}
@cherrypy.expose
@cherrypy.config(**{'tools.check_auth.level': Authorization.WRITE})
def start_upload(self, repository, digest=None):
orig_repository = repository
namespace, repository = self.get_namespace(repository)
method = cherrypy.request.method
uuid = self.storage.start_upload(namespace)
self.log.info('[u: %s] Start upload %s %s %s digest %s',
uuid, method, namespace, repository, digest)
res = cherrypy.response
res.headers['Location'] = '/v2/%s/blobs/uploads/%s' % (
orig_repository, uuid)
res.headers['Docker-Upload-UUID'] = uuid
res.headers['Range'] = '0-0'
res.headers['Content-Length'] = '0'
res.status = '202 Accepted'
@cherrypy.expose
@cherrypy.config(**{'tools.check_auth.level': Authorization.WRITE})
def upload_chunk(self, repository, uuid):
orig_repository = repository
namespace, repository = self.get_namespace(repository)
self.log.info('[u: %s] Upload chunk %s %s',
uuid, namespace, repository)
old_length, new_length = self.storage.upload_chunk(
namespace, uuid, cherrypy.request.body)
res = cherrypy.response
res.headers['Location'] = '/v2/%s/blobs/uploads/%s' % (
orig_repository, uuid)
res.headers['Docker-Upload-UUID'] = uuid
res.headers['Content-Length'] = '0'
# Be careful to not be off-by-one, range starts at 0
res.headers['Range'] = '0-%s' % (new_length - 1,)
res.status = '202 Accepted'
self.log.info(
'[u: %s] Finish Upload chunk %s %s', uuid, repository, new_length)
@cherrypy.expose
@cherrypy.config(**{'tools.check_auth.level': Authorization.WRITE})
def finish_upload(self, repository, uuid, digest):
    """Finalize an upload (PUT): store the last chunk and seal the blob.

    The final (possibly empty) request body is appended as a chunk,
    then the accumulated chunks are verified against ``digest`` and
    promoted to an addressable blob by the storage layer.

    :arg str repository: The (possibly namespaced) repository name.
    :arg str uuid: The upload UUID issued by start_upload.
    :arg str digest: The sha256 digest claimed by the client.
    """
    # Keep the client-supplied name for the Location header.
    orig_repository = repository
    namespace, repository = self.get_namespace(repository)
    self.log.info('[u: %s] Upload final chunk %s %s digest %s',
                  uuid, namespace, repository, digest)
    old_length, new_length = self.storage.upload_chunk(
        namespace, uuid, cherrypy.request.body)
    self.log.debug('[u: %s] Store upload %s %s',
                   uuid, namespace, repository)
    self.storage.store_upload(namespace, uuid, digest)
    self.log.info('[u: %s] Upload complete %s %s digest %s',
                  uuid, namespace, repository, digest)
    res = cherrypy.response
    res.headers['Location'] = '/v2/%s/blobs/%s' % (orig_repository, digest)
    res.headers['Docker-Content-Digest'] = digest
    res.headers['Content-Range'] = '%s-%s' % (old_length, new_length)
    res.headers['Content-Length'] = '0'
    res.status = '201 Created'
def _validate_manifest(self, namespace, request):
    """Validate a pushed manifest body.

    Only v2 manifests are inspected; every other content type is
    returned unmodified.  For v2 manifests, verify that the config
    and each layer carry a size attribute and that each layer's size
    matches the blob actually stored in the backend.

    :arg str namespace: The registry namespace.
    :arg request: The cherrypy request whose body is the manifest.
    :returns: The raw manifest body (bytes).
    :raises cherrypy.HTTPError: 400 when validation fails.
    """
    body = request.body.read()
    content_type = request.headers.get('Content-Type')
    # Only v2 manifests are validated
    if (content_type !=
            'application/vnd.docker.distribution.manifest.v2+json'):
        return body
    data = json.loads(body)
    # We should not be missing a size in the manifest. At one
    # point we did accept this but it turned out to be related to
    # zuul-registry returning invalid data in HEAD requests.
    if 'size' not in data['config']:
        msg = 'Manifest missing size attribute, can not create'
        raise cherrypy.HTTPError(400, msg)
    # Validate layer sizes
    for layer in data['layers']:
        digest = layer['digest']
        # Check for the size key before hitting the backend; the
        # storage lookup is pointless when the manifest is malformed.
        if 'size' not in layer:
            msg = 'Client push error: layer %s missing size ' % digest
            raise cherrypy.HTTPError(400, msg)
        size = layer['size']
        actual_size = self.storage.blob_size(namespace, digest)
        if size == actual_size:
            continue
        msg = ("Manifest has invalid size for layer %s "
               "(size:%d actual:%d)" % (digest, size, actual_size))
        self.log.error(msg)
        # We don't delete layers here as they may be used by
        # different images with valid manifests. Return an error to
        # the client so it can try again.
        raise cherrypy.HTTPError(400, msg)
    return body
@cherrypy.expose
@cherrypy.config(**{'tools.check_auth.level': Authorization.WRITE})
def put_manifest(self, repository, ref):
    """Store a manifest under a tag (docker manifests API, PUT).

    The manifest body is stored as a blob addressed by its sha256,
    and the tag record maps the request Content-Type to that digest
    (one tag may hold several manifest flavors).

    :arg str repository: The (possibly namespaced) repository name.
    :arg str ref: The tag name.
    """
    namespace, repository = self.get_namespace(repository)
    body = self._validate_manifest(namespace, cherrypy.request)
    hasher = hashlib.sha256()
    hasher.update(body)
    digest = 'sha256:' + hasher.hexdigest()
    self.log.info('Put manifest %s %s %s digest %s',
                  namespace, repository, ref, digest)
    self.storage.put_blob(namespace, digest, body)
    manifest = self.storage.get_manifest(namespace, repository, ref)
    if manifest is None:
        manifest = {}
    else:
        manifest = json.loads(manifest)
    # Record (or overwrite) the digest for this manifest content type.
    manifest[cherrypy.request.headers['Content-Type']] = digest
    self.storage.put_manifest(
        namespace, repository, ref, json.dumps(manifest).encode('utf8'))
    res = cherrypy.response
    res.headers['Location'] = '/v2/%s/manifests/%s' % (repository, ref)
    res.headers['Docker-Content-Digest'] = digest
    res.status = '201 Created'
@cherrypy.expose
# see prior note; this avoids destroying Content-Length on HEAD requests
@cherrypy.config(**{'tools.encode.add_charset': False})
def get_manifest(self, repository, ref, ns=None):
    """Return a manifest by tag or digest (docker manifests API, GET/HEAD).

    :arg str repository: The (possibly namespaced) repository name.
    :arg str ref: A tag name or a "sha256:..." digest.
    :arg ns: Ignored; see comment below.
    """
    # The ns parameter is supplied by some image clients (like the one
    # found in buildx). We specify it here so that cherrypy doesn't 404
    # when receiving that parameter, but we ignore it otherwise.
    namespace, repository = self.get_namespace(repository)
    method = cherrypy.request.method
    headers = cherrypy.request.headers
    res = cherrypy.response
    self.log.info(
        '%s manifest %s %s %s', method, namespace, repository, ref)
    if ref.startswith('sha256:'):
        # Digest lookup: the manifest body is stored directly as a blob.
        manifest = self.storage.get_blob(namespace, ref)
        if manifest is None:
            self.log.error('Manifest %s %s not found', repository, ref)
            return self.not_found()
        res.headers['Content-Type'] = json.loads(manifest)['mediaType']
        res.headers['Docker-Content-Digest'] = ref
        if method == 'HEAD':
            # HEAD requests just return a blank body with the size
            # of the manifest in Content-Length
            size = self.storage.blob_size(namespace, ref)
            res.headers['Content-Length'] = size
            return ''
        return manifest
    # looking up by tag
    manifest = self.storage.get_manifest(namespace, repository, ref)
    if manifest is None:
        manifest = {}
    else:
        # The tag record is a content-type -> digest mapping.
        manifest = json.loads(manifest)
    accept = [x.strip() for x in headers['Accept'].split(',')]
    # Resort content types by ones that we know about in our
    # preference order, followed by ones we don't know about in
    # the original order.
    content_types = ([h for h in self.CONTENT_TYPES if h in accept] +
                     [h for h in accept if h not in self.CONTENT_TYPES])
    for ct in content_types:
        if ct in manifest:
            self.log.debug('Manifest %s %s digest found %s',
                           repository, ref, manifest[ct])
            data = self.storage.get_blob(namespace, manifest[ct])
            if not data:
                self.log.error(
                    'Blob %s %s not found', namespace, manifest[ct])
                return self.not_found()
            res.headers['Content-Type'] = ct
            hasher = hashlib.sha256()
            hasher.update(data)
            self.log.debug('Retrieved sha256 %s', hasher.hexdigest())
            res.headers['Docker-Content-Digest'] = manifest[ct]
            if method == 'HEAD':
                # See comment above about head response
                res.headers['Content-Length'] = len(data)
                return ''
            return data
    self.log.error('Manifest %s %s not found', repository, ref)
    return self.not_found()
class RegistryServer:
    """Top-level server: loads config, wires storage, auth, and the
    cherrypy route map, and controls the engine lifecycle."""
    log = logging.getLogger("registry.server")

    def __init__(self, config_path):
        """Build the server from a YAML config file.

        :arg str config_path: Path to the registry config file; values
            of the form %(ZUUL_*) are substituted from the environment.
        """
        self.log.info("Loading config from %s", config_path)
        self.conf = RegistryServer.load_config(
            config_path, os.environ)['registry']

        # TODO: pyopenssl?
        if 'tls-key' in self.conf:
            cherrypy.server.ssl_module = 'builtin'
            cherrypy.server.ssl_certificate = self.conf['tls-cert']
            cherrypy.server.ssl_private_key = self.conf['tls-key']

        driver = self.conf['storage']['driver']
        backend = DRIVERS[driver](self.conf['storage'])
        self.store = storage.Storage(backend, self.conf['storage'])

        authz = Authorization(self.conf['secret'], self.conf['users'],
                              self.conf['public-url'])

        route_map = cherrypy.dispatch.RoutesDispatcher()
        # NOTE(review): the second positional argument to RegistryAPI is
        # a bare False; its meaning is not visible here -- confirm
        # against the RegistryAPI constructor.
        api = RegistryAPI(self.store,
                          False,
                          authz,
                          self.conf)
        cherrypy.tools.check_auth = authz

        # Docker registry v2 protocol endpoints.
        route_map.connect('api', '/v2/',
                          controller=api, action='version_check')
        route_map.connect('api', '/v2/{repository:.*}/blobs/uploads/',
                          controller=api, action='start_upload')
        route_map.connect('api', '/v2/{repository:.*}/blobs/uploads/{uuid}',
                          conditions=dict(method=['PATCH']),
                          controller=api, action='upload_chunk')
        route_map.connect('api', '/v2/{repository:.*}/blobs/uploads/{uuid}',
                          conditions=dict(method=['PUT']),
                          controller=api, action='finish_upload')
        route_map.connect('api', '/v2/{repository:.*}/manifests/{ref}',
                          conditions=dict(method=['PUT']),
                          controller=api, action='put_manifest')
        route_map.connect('api', '/v2/{repository:.*}/manifests/{ref}',
                          conditions=dict(method=['GET', 'HEAD']),
                          controller=api, action='get_manifest')
        route_map.connect('api', '/v2/{repository:.*}/blobs/{digest}',
                          conditions=dict(method=['HEAD']),
                          controller=api, action='head_blob')
        route_map.connect('api', '/v2/{repository:.*}/blobs/{digest}',
                          conditions=dict(method=['GET']),
                          controller=api, action='get_blob')
        route_map.connect('api', '/v2/{repository:.*}/tags/list',
                          conditions=dict(method=['GET']),
                          controller=api, action='get_tags')
        route_map.connect('authz', '/auth/token',
                          controller=authz, action='token')

        # Auth checking is enabled everywhere except the token endpoint
        # itself.
        conf = {
            '/': {
                'request.dispatch': route_map,
                'tools.check_auth.on': True,
            },
            '/auth': {
                'tools.check_auth.on': False,
            }
        }
        cherrypy.config.update({
            'global': {
                'environment': 'production',
                'server.max_request_body_size': 1e12,
                'server.socket_host': self.conf['address'],
                'server.socket_port': self.conf['port'],
            },
        })

        cherrypy.tree.mount(api, '/', config=conf)

    @staticmethod
    def load_config(path: str, env: typing.Dict[str, str]) -> typing.Any:
        """Replace path content value of the form %(ZUUL_ENV_NAME) with environment,
        Then return the yaml load result"""
        with open(path) as f:
            return yaml.safe_load(functools.reduce(
                lambda config, env_item: config.replace(
                    f"%({env_item[0]})", env_item[1]),
                [(k, v) for k, v in env.items() if k.startswith('ZUUL_')],
                f.read()
            ))

    @property
    def port(self):
        # The port cherrypy actually bound.
        return cherrypy.server.bound_addr[1]

    def start(self):
        self.log.info("Registry starting")
        cherrypy.engine.start()

    def stop(self):
        self.log.info("Registry stopping")
        cherrypy.engine.exit()
        # Not strictly necessary, but without this, if the server is
        # started again (e.g., in the unit tests) it will reuse the
        # same host/port settings.
        cherrypy.server.httpserver = None

    def prune(self):
        # Delegate pruning to the storage layer.
        self.store.prune()
def main():
    """Command-line entry point: parse args, configure logging, and
    either serve or prune the registry."""
    parser = argparse.ArgumentParser(
        description='Zuul registry server')
    parser.add_argument('-c', dest='config',
                        help='Config file path',
                        default='/conf/registry.yaml')
    parser.add_argument('-d', dest='debug',
                        help='Debug log level',
                        action='store_true')
    parser.add_argument('command',
                        nargs='?',
                        help='Command: serve, prune',
                        default='serve')
    args = parser.parse_args()
    logformat = '%(asctime)s %(levelname)s %(name)s: %(message)s'
    if args.debug or os.environ.get('DEBUG') == '1':
        logging.basicConfig(level=logging.DEBUG, format=logformat)
        logging.getLogger("openstack").setLevel(logging.DEBUG)
        logging.getLogger("urllib3").setLevel(logging.DEBUG)
        logging.getLogger("requests").setLevel(logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO, format=logformat)
        logging.getLogger("openstack").setLevel(logging.INFO)
        logging.getLogger("urllib3").setLevel(logging.ERROR)
        logging.getLogger("requests").setLevel(logging.ERROR)
        cherrypy.log.access_log.propagate = False
    # These libraries are noisy regardless of debug mode.
    logging.getLogger("keystoneauth").setLevel(logging.ERROR)
    logging.getLogger("stevedore").setLevel(logging.ERROR)
    s = RegistryServer(args.config)
    if args.command == 'serve':
        s.start()
        cherrypy.engine.block()
    elif args.command == 'prune':
        s.prune()
    else:
        # Bug fix: the original passed args.command as a second
        # positional argument to print(), printing a tuple-like pair
        # instead of interpolating it into the message.
        print("Unknown command: %s" % args.command)
        sys.exit(1)
import logging
import openstack
import os
import keystoneauth1
import tempfile
import time
import json
import dateutil.parser
from . import storageutils
POST_ATTEMPTS = 3
SWIFT_CHUNK_SIZE = 64 * 1024
def retry_function(func):
    """Call ``func`` with retries, re-raising NotFound immediately.

    Up to POST_ATTEMPTS attempts are made; other exceptions are logged
    and retried with a linearly growing sleep, and the final failure
    is re-raised.
    """
    attempt = 0
    while True:
        attempt += 1
        try:
            return func()
        except keystoneauth1.exceptions.http.NotFound:
            # A 404 is authoritative; retrying will not help.
            raise
        except Exception:
            if attempt >= POST_ATTEMPTS:
                raise
            logging.exception("Error on attempt %d" % attempt)
            time.sleep(attempt * 10)
class SwiftDriver(storageutils.StorageDriver):
    """OpenStack Swift implementation of the storage driver API.

    All objects live in a single container.  Uploads and copies are
    verified by comparing Swift's Etag header with the SDK-computed
    md5 stored in the X-Object-Meta-X-Sdk-Md5 metadata header.
    """
    log = logging.getLogger('registry.swift')

    def __init__(self, conf):
        self.cloud_name = conf['cloud']
        self.container_name = conf['container']
        self.conn = openstack.connect(cloud=self.cloud_name)
        container = retry_function(
            lambda: self.conn.get_container(self.container_name))
        if not container:
            self.log.info("Creating container %s", self.container_name)
            retry_function(
                lambda: self.conn.create_container(
                    name=self.container_name, public=False))
        endpoint = self.conn.object_store.get_endpoint()
        self.url = os.path.join(endpoint, self.container_name)

    def get_url(self, path):
        """Return the full object-store URL for ``path``."""
        return os.path.join(self.url, path)

    def list_objects(self, path):
        """Return ObjectInfo entries one level below ``path``."""
        self.log.debug("List objects %s", path)
        url = self.get_url('') + '?prefix=%s&delimiter=/&format=json' % (path,)
        ret = retry_function(
            lambda: self.conn.session.get(url).content.decode('utf8'))
        data = json.loads(ret)
        ret = []
        for obj in data:
            if 'subdir' in obj:
                # Pseudo-directory entry produced by the delimiter;
                # Swift supplies no timestamp for these.
                objpath = obj['subdir']
                name = obj['subdir'].split('/')[-2]
                ctime = time.time()
                isdir = True
            else:
                objpath = obj['name']
                name = obj['name'].split('/')[-1]
                ctime = dateutil.parser.parse(
                    obj['last_modified'] + 'Z').timestamp()
                isdir = False
            ret.append(storageutils.ObjectInfo(
                objpath, name, ctime, isdir))
        return ret

    def get_object_size(self, path):
        """Return the object size in bytes, or None if it does not exist."""
        try:
            ret = retry_function(
                lambda: self.conn.session.head(self.get_url(path)))
        except keystoneauth1.exceptions.http.NotFound:
            return None
        return int(ret.headers['Content-Length'])

    def put_object(self, path, data, uuid=None):
        """Upload an object, spooling through a temporary file.

        After uploading, the object is HEADed and Swift's Etag is
        compared with the SDK-computed md5 to detect corruption.
        """
        name = None
        try:
            with tempfile.NamedTemporaryFile('wb', delete=False) as f:
                name = f.name
                if isinstance(data, bytes):
                    f.write(data)
                else:
                    for chunk in data:
                        f.write(chunk)
            retry_function(
                lambda: self.conn.object_store.upload_object(
                    self.container_name,
                    path,
                    filename=name))

            # Get the md5sum and size of the object, and make sure it
            # matches the upload.
            ret = retry_function(lambda: self.conn.session.head(
                self.get_url(path)))
            try:
                size = int(ret.headers.get('Content-Length', ''))
            except ValueError:
                size = None
            md5 = ret.headers.get('Etag', '')
            sdk_md5 = ret.headers.get('X-Object-Meta-X-Sdk-Md5', '')
            self.log.debug("[u: %s] Upload object %s "
                           "md5: %s sdkmd5: %s size: %s",
                           uuid, path, md5, sdk_md5, size)
            if md5 != sdk_md5:
                raise Exception("Swift and SDK md5s did not match (u: %s)" %
                                uuid)
        finally:
            if name:
                os.unlink(name)

    def get_object(self, path):
        """Return the full object contents, or None if it does not exist."""
        try:
            ret = retry_function(
                lambda: self.conn.session.get(self.get_url(path)))
        except keystoneauth1.exceptions.http.NotFound:
            return None
        return ret.content

    def stream_object(self, path):
        """Return (size, chunk-generator), or (None, None) if missing."""
        try:
            ret = retry_function(
                lambda: self.conn.session.get(self.get_url(path), stream=True))
        except keystoneauth1.exceptions.http.NotFound:
            return None, None
        try:
            size = int(ret.headers.get('Content-Length', ''))
        except ValueError:
            size = None
        return size, ret.iter_content(chunk_size=SWIFT_CHUNK_SIZE)

    def delete_object(self, path):
        """Delete the object at ``path``."""
        retry_function(
            lambda: self.conn.session.delete(
                self.get_url(path)))

    def move_object(self, src_path, dst_path, uuid=None):
        """Server-side COPY then DELETE, verifying metadata both sides."""
        dst = os.path.join(self.container_name, dst_path)

        # Get the md5sum and size of the object, and make sure it
        # matches on both sides of the copy.
        ret = retry_function(lambda: self.conn.session.head(
            self.get_url(src_path)))
        try:
            size = int(ret.headers.get('Content-Length', ''))
        except ValueError:
            size = None
        md5 = ret.headers.get('Etag', '')
        sdk_md5 = ret.headers.get('X-Object-Meta-X-Sdk-Md5', '')
        old_md = dict(md5=md5, sdk_md5=sdk_md5, size=size)
        self.log.debug("[u: %s] Move object %s %s %s",
                       uuid, src_path, dst_path, old_md)
        if md5 != sdk_md5:
            raise Exception("Swift and SDK md5s did not match at start "
                            "of copy (u: %s) %s" % (uuid, old_md))

        # FIXME: The multipart-manifest argument below means that in
        # the event this docker chunk is a large object, we intend to
        # copy the manifest but not the underlying large object
        # segments. That seems incorrect, and we should actually just
        # recast the large object segments into docker chunks and
        # discard this manifest. But first we should verify that's
        # what's happening -- it's not clear we ever hit a segment
        # limit in practice, so we may never have a large object
        # chunk.
        retry_function(
            lambda: self.conn.session.request(
                # Bug fix: the query parameter was misspelled
                # "multipart-manfest", which Swift silently ignored.
                self.get_url(src_path) + "?multipart-manifest=get",
                'COPY',
                headers={'Destination': dst}
            ))

        # Get the md5sum and size of the object, and make sure it
        # matches on both sides of the copy.
        ret = retry_function(lambda: self.conn.session.head(
            self.get_url(dst_path)))
        try:
            size = int(ret.headers.get('Content-Length', ''))
        except ValueError:
            size = None
        md5 = ret.headers.get('Etag', '')
        sdk_md5 = ret.headers.get('X-Object-Meta-X-Sdk-Md5', '')
        new_md = dict(md5=md5, sdk_md5=sdk_md5, size=size)
        self.log.debug("[u: %s] Moved object %s %s %s",
                       uuid, src_path, dst_path, new_md)
        if md5 != sdk_md5:
            raise Exception("Swift and SDK md5s did not match at end of copy "
                            "(u: %s) %s" % (uuid, new_md))
        if old_md != new_md:
            raise Exception("Object metadata did not match after copy "
                            "(u: %s) old: %s new: %s" % (uuid, old_md, new_md))

        retry_function(
            lambda: self.conn.session.delete(
                self.get_url(src_path)))

    def cat_objects(self, path, chunks, uuid=None):
        """Create a static large object at ``path`` referencing ``chunks``.

        Each chunk's Etag, SDK md5, and size are verified against the
        metadata recorded at upload time before building the manifest.
        """
        manifest = []
        # TODO: Would it be better to move 1-chunk objects?
        # TODO: We can leak the upload chunks here if a blob is uploaded
        # concurrently by two different clients. We should update the prune
        # system to clean them up.
        for chunk in chunks:
            ret = retry_function(
                lambda: self.conn.session.head(self.get_url(chunk['path'])))
            size = int(ret.headers['Content-Length'])
            if size == 0:
                continue
            etag = ret.headers['Etag']
            sdk_md5 = ret.headers['X-Object-Meta-X-Sdk-Md5']
            if not (sdk_md5 == etag == chunk['md5']):
                raise Exception("Object metadata did not match during cat "
                                "(u: %s) orig: %s sdk: %s etag: %s" % (
                                    uuid, chunk['md5'], sdk_md5, etag))
            if not (size == chunk['size']):
                raise Exception("Object metadata did not match during cat "
                                "(u: %s) orig: %s size: %s" % (
                                    uuid, chunk['size'], size))
            manifest.append({'path':
                             os.path.join(self.container_name, chunk['path']),
                             'etag': ret.headers['Etag'],
                             'size_bytes': ret.headers['Content-Length']})
        retry_function(lambda:
                       self.conn.session.put(
                           self.get_url(path) + "?multipart-manifest=put",
                           data=json.dumps(manifest)))


Driver = SwiftDriver
from abc import ABCMeta, abstractmethod
class ObjectInfo:
    # Lightweight record describing one stored object (or pseudo
    # directory) as returned by StorageDriver.list_objects.
    def __init__(self, path, name, ctime, isdir):
        """Store object metadata.

        :arg str path: Full object path.
        :arg str name: Basename of the object.
        :arg float ctime: Creation/modification timestamp in seconds.
        :arg bool isdir: Whether this entry represents a directory.
        """
        self.path = path
        self.name = name
        self.ctime = ctime
        self.isdir = isdir
class StorageDriver(metaclass=ABCMeta):
    """Base class for storage drivers.

    Storage drivers should implement all of the methods in this class.
    This is a low-level API with no knowledge of the intended use as
    an image registry.  This makes it easy to add backend drivers
    since the storage abstraction layer is designed to deal with the
    lowest common denominator.
    """

    @abstractmethod
    def __init__(self, conf):
        """Initialize a driver.

        :arg dict conf: The 'storage' section from the config file.
        """
        pass

    @abstractmethod
    def list_objects(self, path):
        """List objects at path.

        Returns a list of objects rooted at `path`, one level deep.

        :arg str path: The object path.
        :returns: A list of ObjectInfo objects, one for each object.
        :rtype: ObjectInfo
        """
        pass

    @abstractmethod
    def get_object_size(self, path):
        """Return the size of object at path.

        :arg str path: The object path.
        :returns: The size of the object in bytes.
        :rtype: int
        """
        pass

    @abstractmethod
    def put_object(self, path, data, uuid=None):
        """Store an object.

        Store the contents of `data` at `path`.  The `data` parameter
        may be a bytearray or a generator which produces bytearrays.

        :arg str path: The object path.
        :arg bytearray data: The data to store.
        :arg str uuid: Optional upload UUID used for logging/diagnostics.
            Added to match the concrete driver implementations, which
            all accept it.
        """
        pass

    @abstractmethod
    def get_object(self, path):
        """Retrieve an object.

        Return the contents of the object at `path`.

        :arg str path: The object path.
        :returns: The contents of the object.
        :rtype: bytearray
        """
        pass

    @abstractmethod
    def stream_object(self, path):
        """Retrieve an object, streaming.

        Return a generator with the content of the object at `path`.

        :arg str path: The object path.
        :returns: The size and contents of the object.
        :rtype: tuple of (int or None, generator-of-bytearray or None)
        """
        pass

    @abstractmethod
    def delete_object(self, path):
        """Delete an object.

        Delete the object stored at `path`.

        :arg str path: The object path.
        """
        pass

    @abstractmethod
    def move_object(self, src_path, dst_path, uuid=None):
        """Move an object.

        Move the object from `src_path` to `dst_path`.

        :arg str src_path: The original path.
        :arg str dst_path: The new path.
        :arg str uuid: Optional upload UUID used for logging/diagnostics.
        """
        pass

    @abstractmethod
    def cat_objects(self, path, chunks, uuid=None):
        """Concatenate objects.

        Concatenate one or more objects to create a new object.
        The original objects are deleted.

        :arg str path: The new path.
        :arg list chunks: A list of paths of objects to concatenate.
        :arg str uuid: Optional upload UUID used for logging/diagnostics.
        """
        pass
import base64
import json
import logging
import os
import queue
import rehash
import hashlib
import threading
import time
from uuid import uuid4
class UploadRecord:
    """Serializable state of an in-progress chunked upload.

    Tracks the chunks uploaded so far (size and md5 of each) together
    with a resumable sha256 of the entire upload.  The record is
    serialized into object storage alongside the upload data and
    updated as each chunk arrives, so different writers (e.g. behind a
    round-robin load balancer) can append chunks without re-reading
    all previous data to recompute the hash.

    The registry protocol guarantees that upload chunks are
    sequential, so this does not need to be locked for use by multiple
    writers.
    """

    def __init__(self):
        self.chunks = []
        self.hasher = rehash.sha256()

    @property
    def count(self):
        """Number of chunks recorded so far."""
        return len(self.chunks)

    @property
    def size(self):
        """Total number of bytes across all chunks."""
        total = 0
        for chunk in self.chunks:
            total += chunk['size']
        return total

    @property
    def digest(self):
        """The sha256 digest, in docker 'sha256:...' form, of the data."""
        return 'sha256:' + self.hasher.hexdigest()

    def load(self, data):
        """Restore state previously produced by dump().

        :arg bytes data: utf8 JSON produced by dump().
        """
        record = json.loads(data.decode('utf8'))
        self.chunks = record['chunks']
        state = record['hash_state']
        # The raw hash state is binary; it travels base64-encoded.
        state['md_data'] = base64.decodebytes(
            state['md_data'].encode('ascii'))
        self.hasher.__setstate__(state)

    def dump(self):
        """Serialize the chunk list and hash state to utf8 JSON bytes."""
        state = self.hasher.__getstate__()
        state['md_data'] = base64.encodebytes(
            state['md_data']).decode('ascii')
        return json.dumps(dict(chunks=self.chunks,
                               hash_state=state)).encode('utf8')
class UploadStreamer:
    """Bridge between a writer thread and a streaming reader.

    Chunks passed to write() are buffered on an internal queue and
    handed out by the iterator; writing None terminates the stream.
    Pass an instance to `put_object` to stream data produced by
    another thread.
    """

    def __init__(self):
        self.queue = queue.Queue()

    def write(self, data):
        """Buffer a chunk (or the None end-of-stream sentinel)."""
        self.queue.put(data)

    def __iter__(self):
        # Yield buffered chunks until the None sentinel arrives.
        for chunk in iter(self.queue.get, None):
            yield chunk
class Storage:
    """Storage abstraction layer.

    This class abstracts different storage backends, providing a
    convenience API to the registry.

    Most of these methods take a namespace argument.  The namespace
    is, essentially, an entire registry isolated from the other
    namespaces.  They may even have duplicate object data.  This
    allows us to support serving multiple registries from the same
    process (without confusing the contents of them).
    """
    # Clients have 1 hour to complete an upload before we start
    # deleting stale objects.
    upload_exp = 60 * 60
    log = logging.getLogger('registry.storage')

    def __init__(self, backend, conf):
        # backend is a StorageDriver; conf is the 'storage' config section.
        self.backend = backend
        if 'expiration' in conf:
            self.manifest_exp = conf['expiration']
        else:
            self.manifest_exp = None

    def blob_size(self, namespace, digest):
        # Size in bytes of the blob, or None when it does not exist
        # (see the backend drivers' get_object_size).
        path = os.path.join(namespace, 'blobs', digest, 'data')
        return self.backend.get_object_size(path)

    def put_blob(self, namespace, digest, data):
        # Store blob data addressed by its digest.
        path = os.path.join(namespace, 'blobs', digest, 'data')
        return self.backend.put_object(path, data)

    def get_blob(self, namespace, digest):
        # Return the blob contents, or None when missing.
        path = os.path.join(namespace, 'blobs', digest, 'data')
        return self.backend.get_object(path)

    def stream_blob(self, namespace, digest):
        # Return (size, chunk-generator) for the blob.
        path = os.path.join(namespace, 'blobs', digest, 'data')
        return self.backend.stream_object(path)

    def start_upload(self, namespace):
        """Start an upload.

        Create an empty UploadRecord and store it.  Later methods will
        add to it.  The uuid attribute of the UploadRecord uniquely
        identifies the upload.

        Uploads have one or more chunks.  See `upload_chunk`.
        """
        uuid = uuid4().hex
        upload = UploadRecord()
        self._update_upload(namespace, uuid, upload)
        return uuid

    def _get_upload(self, namespace, uuid):
        # Load and deserialize the upload's metadata record.
        path = os.path.join(namespace, 'uploads', uuid, 'metadata')
        data = self.backend.get_object(path)
        upload = UploadRecord()
        upload.load(data)
        return upload

    def _update_upload(self, namespace, uuid, upload):
        # Persist the upload's metadata record (chunk list + hash state).
        path = os.path.join(namespace, 'uploads', uuid, 'metadata')
        self.log.debug("[u: %s] Update upload metadata chunks: %s",
                       uuid, upload.chunks)
        self.backend.put_object(path, upload.dump(), uuid)

    def upload_chunk(self, namespace, uuid, fp):
        """Add a chunk to an upload.

        Uploads contain one or more chunk of data which are ultimately
        concatenated into one blob.

        This streams the data from `fp` and writes it into the
        registry.

        :arg namespace str: The registry namespace.
        :arg uuid str: The UUID of the upload.
        :arg file fp: An open file pointer to the source data.
        """
        upload = self._get_upload(namespace, uuid)
        path = os.path.join(namespace, 'uploads', uuid, str(upload.count + 1))
        # A worker thread streams the data to the backend while this
        # thread reads the request body, hashes, and fills the buffer.
        streamer = UploadStreamer()
        t = threading.Thread(target=self.backend.put_object,
                             args=(path, streamer, uuid))
        t.start()
        size = 0
        # This calculates the md5 of just this chunk for internal
        # integrity checking; it is not the overall hash of the layer
        # (that's a running calculation in the upload record).
        chunk_hasher = hashlib.md5()
        while True:
            try:
                d = fp.read(4096)
            except ValueError:
                # We get this on an empty body
                d = b''
            if not d:
                break
            upload.hasher.update(d)
            chunk_hasher.update(d)
            size += len(d)
            streamer.write(d)
        # None is the end-of-stream sentinel for the streamer.
        streamer.write(None)
        t.join()
        upload.chunks.append(dict(size=size, md5=chunk_hasher.hexdigest()))
        self._update_upload(namespace, uuid, upload)
        # (old total, new total) sizes, used by the API layer to build
        # the Range/Content-Range response headers.
        return upload.size - size, upload.size

    def store_upload(self, namespace, uuid, digest):
        """Complete an upload.

        Verify the supplied digest matches the uploaded data, and if
        so, stores the uploaded data as a blob in the registry.  Until
        this is called, the upload is incomplete and the data blob is
        not addressible.
        """
        upload = self._get_upload(namespace, uuid)
        if digest != upload.digest:
            raise Exception('Digest does not match %s %s' %
                            (digest, upload.digest))
        # Move the chunks into the blob dir to get them out of the
        # uploads dir.
        chunks = []
        for i, chunk in enumerate(upload.chunks):
            src_path = os.path.join(namespace, 'uploads', uuid, str(i + 1))
            dst_path = os.path.join(namespace, 'blobs', digest,
                                    'uploads', uuid, str(i + 1))
            chunks.append(dict(path=dst_path,
                               md5=chunk['md5'], size=chunk['size']))
            self.backend.move_object(src_path, dst_path, uuid)
        # Concatenate the chunks into one blob.
        path = os.path.join(namespace, 'blobs', digest, 'data')
        self.backend.cat_objects(path, chunks, uuid)
        path = os.path.join(namespace, 'uploads', uuid, 'metadata')
        self.backend.delete_object(path)

    def put_manifest(self, namespace, repo, tag, data):
        # Store the tag record (a content-type -> digest mapping).
        path = os.path.join(namespace, 'repos', repo, 'manifests', tag)
        self.backend.put_object(path, data)

    def get_manifest(self, namespace, repo, tag):
        # Return the tag record, or None when the tag does not exist.
        path = os.path.join(namespace, 'repos', repo, 'manifests', tag)
        return self.backend.get_object(path)

    def list_tags(self, namespace, repo):
        # Return ObjectInfo entries for each tag in the repository.
        path = os.path.join(namespace, 'repos', repo, 'manifests')
        return self.backend.list_objects(path)

    def prune(self):
        """Prune the registry

        Prune all namespaces in the registry according to configured
        expiration times.
        """
        now = time.time()
        upload_target = now - self.upload_exp
        if self.manifest_exp:
            manifest_target = now - self.manifest_exp
        else:
            manifest_target = None
        for namespace in self.backend.list_objects(''):
            uploadpath = os.path.join(namespace.path, 'uploads/')
            for upload in self.backend.list_objects(uploadpath):
                self._prune(upload, upload_target)
            # Without a manifest expiration, only stale uploads are pruned.
            if not manifest_target:
                continue
            repopath = os.path.join(namespace.path, 'repos/')
            for repo in self.backend.list_objects(repopath):
                kept_manifests = self._prune(repo, manifest_target)
                # mark/sweep manifest blobs
                layers = set()
                for manifest in kept_manifests:
                    if manifest.isdir:
                        continue
                    layers.update(self._get_layers_from_manifest(
                        namespace.name, manifest.path))
                # NOTE(review): blobs are swept with only this repo's
                # layers marked; this appears to assume one repo per
                # namespace -- TODO confirm against the deployment model.
                blobpath = os.path.join(namespace.path, 'blobs/')
                for blob in self.backend.list_objects(blobpath):
                    if blob.name not in layers:
                        self._prune(blob, upload_target)

    def _get_layers_from_manifest(self, namespace, path):
        # Return every digest a stored tag record keeps alive: the v2
        # manifest blob itself, its config digest, and each layer digest.
        self.log.debug('Get layers %s', path)
        data = self.backend.get_object(path)
        manifest = json.loads(data)
        target = manifest.get(
            'application/vnd.docker.distribution.manifest.v2+json')
        layers = []
        if not target:
            self.log.debug('Unknown manifest %s', path)
            return layers
        layers.append(target)
        data = self.get_blob(namespace, target)
        manifest = json.loads(data)
        layers.append(manifest['config']['digest'])
        for layer in manifest['layers']:
            layers.append(layer['digest'])
        return layers

    def _prune(self, root_obj, target):
        # Recursively delete objects older than ``target``.  A directory
        # is removed only when all of its children were removed.  The
        # surviving objects are returned to the caller.
        kept = []
        if root_obj.isdir:
            for obj in self.backend.list_objects(root_obj.path):
                kept.extend(self._prune(obj, target))
        if not kept and root_obj.ctime < target:
            self.log.debug('Prune %s', root_obj.path)
            self.backend.delete_object(root_obj.path)
        else:
            self.log.debug('Keep %s', root_obj.path)
            kept.append(root_obj)
        return kept
import os
import tempfile
from . import storageutils
DISK_CHUNK_SIZE = 64 * 1024
class FilesystemDriver(storageutils.StorageDriver):
    """Local-filesystem implementation of the storage driver API.

    All object paths are rooted at the configured 'root' directory.
    """
    def __init__(self, conf):
        self.root = conf['root']

    def list_objects(self, path):
        """Return ObjectInfo entries one level below ``path``."""
        path = os.path.join(self.root, path)
        if not os.path.isdir(path):
            return []
        ret = []
        for f in os.listdir(path):
            obj_path = os.path.join(path, f)
            ret.append(storageutils.ObjectInfo(
                obj_path, f, os.stat(obj_path).st_ctime,
                os.path.isdir(obj_path)))
        return ret

    def get_object_size(self, path):
        """Return the size in bytes, or None if the object is missing."""
        path = os.path.join(self.root, path)
        if not os.path.exists(path):
            return None
        return os.stat(path).st_size

    def put_object(self, path, data, uuid=None):
        """Write bytes (or an iterable of byte chunks) to ``path``."""
        path = os.path.join(self.root, path)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'wb') as f:
            if isinstance(data, bytes):
                f.write(data)
            else:
                for chunk in data:
                    f.write(chunk)
            # Flush and fsync so the data is durable before returning.
            f.flush()
            os.fsync(f.fileno())

    def get_object(self, path):
        """Return the full contents of ``path``, or None if missing."""
        path = os.path.join(self.root, path)
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            return f.read()

    def stream_object(self, path):
        """Return (size, chunk-generator), or (None, None) if missing."""
        path = os.path.join(self.root, path)
        if not os.path.exists(path):
            return None, None
        f = open(path, 'rb', buffering=DISK_CHUNK_SIZE)
        try:
            size = os.fstat(f.fileno()).st_size
        except OSError:
            f.close()
            raise

        def data_iter(f=f):
            with f:
                yield b''  # will get discarded; see note below
                yield from iter(lambda: f.read(DISK_CHUNK_SIZE), b'')

        ret = data_iter()

        # This looks a little funny, because it is. We're going to discard the
        # empty bytes added at the start, but that's not the important part.
        # We want to ensure that
        #
        # 1. the generator has started executing and
        # 2. it left off *inside the with block*
        #
        # This ensures that when the generator gets cleaned up (either because
        # everything went according to plan and the generator exited cleanly
        # *or* there was an error which eventually raised a GeneratorExit),
        # the file we opened will get closed.
        next(ret)

        return size, ret

    def delete_object(self, path):
        """Delete a file, or remove an (empty) directory."""
        path = os.path.join(self.root, path)
        if os.path.exists(path):
            if os.path.isdir(path):
                # Only empty directories can be removed this way.
                os.rmdir(path)
            else:
                os.unlink(path)

    def move_object(self, src_path, dst_path, uuid=None):
        """Atomically rename ``src_path`` to ``dst_path``."""
        src_path = os.path.join(self.root, src_path)
        dst_path = os.path.join(self.root, dst_path)
        os.makedirs(os.path.dirname(dst_path), exist_ok=True)
        os.rename(src_path, dst_path)

    def cat_objects(self, path, chunks, uuid=None):
        """Concatenate chunk files into ``path`` and delete the chunks."""
        path = os.path.join(self.root, path)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        # We write to a temporary file in the same directory as the destiation
        # file to ensure that we can rename it atomically once fully written.
        # This is important because there may be multiple concurrent writes to
        # the same object and due to client behavior we cannot return until
        # at least one write is completed. To facilitate this we ensure each
        # write happens completely then make that safe with atomic renames.
        with tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
                                         delete=False) as outf:
            for chunk in chunks:
                chunk_path = os.path.join(self.root, chunk['path'])
                with open(chunk_path, 'rb') as inf:
                    while True:
                        d = inf.read(4096)
                        if not d:
                            break
                        outf.write(d)
            outf.flush()
            os.fsync(outf.fileno())
        os.rename(outf.name, path)
        for chunk in chunks:
            chunk_path = os.path.join(self.root, chunk['path'])
            os.unlink(chunk_path)


Driver = FilesystemDriver
from collections import OrderedDict
import codecs
import os
from sphinx import addnodes
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from sphinx.domains import Domain, ObjType
from sphinx.errors import SphinxError
from sphinx.roles import XRefRole
from sphinx.directives import ObjectDescription
from sphinx.util import logging
from sphinx.util.nodes import make_refnode
from docutils import nodes
import yaml
logger = logging.getLogger(__name__)
class ZuulSafeLoader(yaml.SafeLoader):
    """A YAML SafeLoader that tolerates Zuul's ``!encrypted/*`` tags.

    Encrypted secret values are irrelevant for documentation purposes,
    so the multi-constructor just returns the underlying sequence
    instead of failing on the unknown tag.
    """
    def __init__(self, *args, **kwargs):
        super(ZuulSafeLoader, self).__init__(*args, **kwargs)
        self.add_multi_constructor('!encrypted/', self.construct_encrypted)

    @classmethod
    def construct_encrypted(cls, loader, tag_suffix, node):
        return loader.construct_sequence(node)
class ProjectTemplate(object):
    """A ``project-template`` stanza parsed from Zuul configuration.

    Records, per pipeline, the list of job names the template adds.
    """

    def __init__(self, conf):
        self.name = conf['name']
        self.description = conf.get('description', '')
        self.pipelines = OrderedDict()
        self.parse(conf)

    def parse(self, conf):
        """Collect pipeline -> job-name lists from the raw config dict."""
        # Any dict-valued key carrying a 'jobs' entry is a pipeline.
        for key in sorted(conf.keys()):
            section = conf[key]
            if not isinstance(section, dict) or 'jobs' not in section:
                continue
            job_names = []
            for entry in section['jobs']:
                # A job entry is either a bare name or a one-key dict
                # of job name -> variant data; keep only the name.
                if isinstance(entry, dict):
                    entry = list(entry.keys())[0]
                job_names.append(entry)
            if job_names:
                self.pipelines[key] = job_names
class Layout(object):
    """Aggregated Zuul configuration parsed from zuul.yaml / zuul.d."""

    def __init__(self):
        self.jobs = []               # raw job dicts, in parse order
        self.project_templates = []  # ProjectTemplate instances
class ZuulDirective(Directive):
    """Base class for Zuul documentation directives.

    Provides lazy discovery and parsing of the project's Zuul
    configuration (zuul.yaml / .zuul.yaml / zuul.d / .zuul.d) and of
    its Ansible roles, caching the results in the Sphinx environment's
    domain data so the work happens at most once per build.
    """
    has_content = True

    def find_zuul_yaml(self):
        """Walk upward from the current document looking for Zuul config.

        Returns the path of the first match; raises SphinxError if the
        filesystem root is reached without finding one.
        """
        root = self.state.document.settings.env.relfn2path('.')[1]
        while root:
            for fn in ['zuul.yaml', '.zuul.yaml', 'zuul.d', '.zuul.d']:
                path = os.path.join(root, fn)
                if os.path.exists(path):
                    return path
            parent = os.path.split(root)[0]
            if parent == root:
                # os.path.split('/') yields '/' again; without this
                # guard an absolute path would loop forever.
                break
            root = parent
        raise SphinxError(
            "Unable to find zuul config in zuul.yaml, .zuul.yaml,"
            " zuul.d or .zuul.d")

    def parse_zuul_yaml(self, path):
        """Parse a single zuul.yaml file into a Layout."""
        with open(path) as f:
            data = yaml.load(f, Loader=ZuulSafeLoader)
        layout = Layout()
        for obj in data:
            if 'job' in obj:
                layout.jobs.append(obj['job'])
            if 'project-template' in obj:
                layout.project_templates.append(
                    ProjectTemplate(obj['project-template']))
        return layout

    def parse_zuul_d(self, path):
        """Parse a zuul.d directory tree into a Layout."""
        layout = Layout()
        # zuul.d configs are recursively loaded in zuul so we recursively
        # load them here too.
        for (dirpath, dirnames, filenames) in os.walk(path):
            # Zuul parses things in order, we do too.
            dirnames.sort()
            filenames.sort()
            for conf in filter(lambda x: x.endswith('.yaml'), filenames):
                conf_path = os.path.join(dirpath, conf)
                with open(conf_path) as f:
                    data = yaml.load(f, Loader=ZuulSafeLoader)
                if data is None:
                    # Interpolate the path into the message; it was
                    # previously passed as a stray second argument.
                    raise SphinxError(
                        "File %s in Zuul dir is empty" % conf_path)
                for obj in data:
                    if 'job' in obj:
                        layout.jobs.append(obj['job'])
                    if 'project-template' in obj:
                        layout.project_templates.append(
                            ProjectTemplate(obj['project-template']))
        return layout

    def _parse_zuul_layout(self):
        """Parse and cache the Zuul layout in the domain data (once)."""
        env = self.state.document.settings.env
        if not env.domaindata['zuul']['layout']:
            path = self.find_zuul_yaml()
            if path.endswith('zuul.d'):
                layout = self.parse_zuul_d(path)
            else:
                layout = self.parse_zuul_yaml(path)
            env.domaindata['zuul']['layout_path'] = path
            env.domaindata['zuul']['layout'] = layout

    @property
    def zuul_layout(self):
        """The cached Layout for this build."""
        self._parse_zuul_layout()
        env = self.state.document.settings.env
        return env.domaindata['zuul']['layout']

    @property
    def zuul_layout_path(self):
        """Path of the Zuul config the cached layout was parsed from."""
        self._parse_zuul_layout()
        env = self.state.document.settings.env
        return env.domaindata['zuul']['layout_path']

    def generate_zuul_job_content(self, name):
        """Return reST lines documenting every variant of job *name*."""
        lines = []
        for job in self.zuul_layout.jobs:
            if job['name'] == name:
                lines.append('.. zuul:job:: %s' % name)
                if 'branches' in job:
                    branches = job['branches']
                    if not isinstance(branches, list):
                        branches = [branches]
                    variant = ', '.join(branches)
                    lines.append(' :variant: %s' % variant)
                lines.append('')
                for text in job.get('description', '').split('\n'):
                    lines.append(' ' + text)
                lines.append('')
        return lines

    def generate_zuul_project_template_content(self, name):
        """Return reST lines documenting the project-template *name*."""
        lines = []
        for template in self.zuul_layout.project_templates:
            if template.name == name:
                lines.append('.. zuul:project_template:: %s' % name)
                lines.append('')
                for text in template.description.split('\n'):
                    lines.append(' ' + text)
                for pipeline, jobs in template.pipelines.items():
                    lines.append('')
                    lines.append(' **'+pipeline+'**')
                    for job in jobs:
                        lines.append(' * :zuul:xjob:`' + job + '`')
                lines.append('')
        return lines

    def find_zuul_roles(self):
        """Discover documented roles; cache role name -> README path."""
        env = self.state.document.settings.env
        _root = os.path.dirname(self.zuul_layout_path)
        root_roledir = os.path.join(_root, 'roles')
        role_dirs = []
        if os.path.isdir(root_roledir):
            role_dirs = [root_roledir]
        if env.config.zuul_role_paths:
            role_dirs.extend(env.config.zuul_role_paths)
        roles = env.domaindata['zuul']['role_paths']
        for d in role_dirs:
            for p in os.listdir(d):
                if not os.path.isdir(os.path.join(d, p)):
                    continue
                if p in ('__pycache__',):
                    continue
                role_readme = os.path.join(d, p, 'README.rst')
                if os.path.exists(role_readme):
                    roles[p] = role_readme
                else:
                    msg = "Missing role documentation: %s" % role_readme
                    if env.config.zuul_autoroles_warn_missing:
                        logger.warning(msg)
                    else:
                        logger.debug(msg)

    @property
    def zuul_role_paths(self):
        """Mapping of role name -> README.rst path, computed lazily."""
        env = self.state.document.settings.env
        roles = env.domaindata['zuul']['role_paths']
        if roles is None:
            roles = {}
            env.domaindata['zuul']['role_paths'] = roles
            self.find_zuul_roles()
        return roles

    def generate_zuul_role_content(self, name):
        """Return reST lines embedding the role's README under a header."""
        lines = []
        lines.append('.. zuul:role:: %s' % name)
        lines.append('')
        role_readme = self.zuul_role_paths[name]
        with codecs.open(role_readme, encoding='utf-8') as f:
            role_lines = f.read().split('\n')
        for text in role_lines:
            lines.append(' ' + text)
        return lines
class ZuulObjectDescription(ZuulDirective, ObjectDescription):
    """Common base for directives that describe a named Zuul object.

    Handles the nested "attribute path" bookkeeping and registers each
    object both as a link target in the domain's object table and as an
    entry in the general index.
    """
    # Human-readable names used in index entries; objtypes not listed
    # here fall back to the objtype string itself.
    object_names = {
        'attr': 'attribute',
        'var': 'variable',
        'jobvar': 'job variable',
        'rolevar': 'role variable',
        'path': 'path',
    }
    # Separator between nested path components; ZuulPathDirective
    # overrides this with '/'.
    separator = '.'

    def get_path(self):
        # Canonical (lowercased) ancestor names used for target ids.
        return self.env.ref_context.get('zuul:attr_path', [])

    def get_display_path(self):
        # Ancestor names as rendered (these may carry [] / {} suffixes).
        return self.env.ref_context.get('zuul:display_attr_path', [])

    @property
    def parent_pathname(self):
        return self.separator.join(self.get_display_path())

    @property
    def full_pathname(self):
        name = self.names[-1].lower()
        return self.separator.join(self.get_path() + [name])

    def add_target_and_index(self, name, sig, signode):
        """Register *name* as a link target and add an index entry."""
        targetname = self.objtype + '-' + self.full_pathname
        if targetname not in self.state.document.ids:
            signode['names'].append(targetname)
            signode['ids'].append(targetname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)
            objects = self.env.domaindata['zuul']['objects']
            if targetname in objects:
                # The same fully-qualified object is documented twice.
                self.state_machine.reporter.warning(
                    'duplicate object description of %s, ' % targetname +
                    'other instance in ' +
                    self.env.doc2path(objects[targetname][0]) +
                    ', use :noindex: for one of them',
                    line=self.lineno)
            objects[targetname] = (self.env.docname, self.objtype)
        objname = self.object_names.get(self.objtype, self.objtype)
        if self.parent_pathname:
            indextext = '%s (%s of %s)' % (name, objname,
                                           self.parent_pathname)
        else:
            indextext = '%s (%s)' % (name, objname)
        self.indexnode['entries'].append(('single', indextext,
                                          targetname, '', None))
######################################################################
#
# Object description directives
#
class ZuulJobDirective(ZuulObjectDescription):
    """Describe a Zuul job (``.. zuul:job::``)."""

    option_spec = {
        'variant': lambda x: x,
    }

    def before_content(self):
        # Record this job on the attribute path so nested directives
        # (e.g. job variables) are qualified by the job name.
        self.env.ref_context.setdefault('zuul:attr_path', []).append(
            self.names[-1])

    def after_content(self):
        stack = self.env.ref_context.get('zuul:attr_path')
        if stack:
            stack.pop()

    def handle_signature(self, sig, signode):
        signode += addnodes.desc_name(sig, sig)
        return sig
class ZuulProjectTemplateDirective(ZuulObjectDescription):
    """Describe a project template (``.. zuul:project_template::``)."""

    def before_content(self):
        # Qualify any nested markup with this template's name.
        self.env.ref_context.setdefault('zuul:attr_path', []).append(
            self.names[-1])

    def after_content(self):
        stack = self.env.ref_context.get('zuul:attr_path')
        if stack:
            stack.pop()

    def handle_signature(self, sig, signode):
        signode += addnodes.desc_name(sig, sig)
        return sig
class ZuulRoleDirective(ZuulObjectDescription):
    """Describe an Ansible role (``.. zuul:role::``)."""

    def before_content(self):
        # Qualify any nested markup (e.g. role variables) by role name.
        self.env.ref_context.setdefault('zuul:attr_path', []).append(
            self.names[-1])

    def after_content(self):
        stack = self.env.ref_context.get('zuul:attr_path')
        if stack:
            stack.pop()

    def handle_signature(self, sig, signode):
        signode += addnodes.desc_name(sig, sig)
        return sig
class ZuulAttrDirective(ZuulObjectDescription):
    """Describe a configuration attribute (``.. zuul:attr::``).

    Renders a multi-line signature: the dotted ancestor path plus the
    attribute name, then optional Default / Example / Type lines taken
    from the directive options.
    """
    has_content = True
    option_spec = {
        'required': lambda x: x,
        'default': lambda x: x,
        'noindex': lambda x: x,
        'example': lambda x: x,
        'type': lambda x: x,
    }
    def before_content(self):
        # Push onto both the canonical and display paths so nested
        # attributes are fully qualified.
        path = self.env.ref_context.setdefault('zuul:attr_path', [])
        path.append(self.names[-1])
        path = self.env.ref_context.setdefault('zuul:display_attr_path', [])
        path.append(self.names[-1])
    def after_content(self):
        path = self.env.ref_context.get('zuul:attr_path')
        if path:
            path.pop()
        path = self.env.ref_context.get('zuul:display_attr_path')
        if path:
            path.pop()
    def handle_signature(self, sig, signode):
        """Build the (possibly multi-line) signature node for *sig*."""
        path = self.get_display_path()
        signode['is_multiline'] = True
        # First line: dotted ancestor path + name (+ "(required)").
        line = addnodes.desc_signature_line()
        line['add_permalink'] = True
        for x in path:
            line += addnodes.desc_addname(x + '.', x + '.')
        line += addnodes.desc_name(sig, sig)
        if 'required' in self.options:
            line += addnodes.desc_annotation(' (required)', ' (required)')
        signode += line
        # Each remaining option contributes its own signature line.
        if 'default' in self.options:
            line = addnodes.desc_signature_line()
            line += addnodes.desc_type('Default: ', 'Default: ')
            line += nodes.literal(self.options['default'],
                                  self.options['default'])
            signode += line
        if 'example' in self.options:
            line = addnodes.desc_signature_line()
            line += addnodes.desc_type('Example: ', 'Example: ')
            line += nodes.literal(self.options['example'],
                                  self.options['example'])
            signode += line
        if 'type' in self.options:
            line = addnodes.desc_signature_line()
            line += addnodes.desc_type('Type: ', 'Type: ')
            line += nodes.emphasis(self.options['type'],
                                   self.options['type'])
            signode += line
        return sig
class ZuulValueDirective(ZuulObjectDescription):
    """Describe one permitted value of an attribute (``.. zuul:value::``)."""
    has_content = True
    def handle_signature(self, sig, signode):
        signode += addnodes.desc_name(sig, sig)
        return sig
class ZuulVarDirective(ZuulObjectDescription):
    """Describe a job/role variable (``.. zuul:var::``).

    The display path decorates list/dict variables with ``[]`` / ``{}``
    so nested variables render e.g. ``foo{}.bar``.
    """
    has_content = True
    option_spec = {
        'type': lambda x: x,
        'default': lambda x: x,
        'hidden': lambda x: x,
        'noindex': lambda x: x,
    }
    # Suffix appended to the displayed name per declared type.
    type_map = {
        'list': '[]',
        'dict': '{}',
    }
    def get_type_str(self):
        if 'type' in self.options and self.options['type'] in self.type_map:
            return self.type_map[self.options['type']]
        return ''
    def before_content(self):
        # Canonical path uses the bare name; the display path carries
        # the []/{} type suffix.
        path = self.env.ref_context.setdefault('zuul:attr_path', [])
        element = self.names[-1]
        path.append(element)
        path = self.env.ref_context.setdefault('zuul:display_attr_path', [])
        element = self.names[-1] + self.get_type_str()
        path.append(element)
    def after_content(self):
        path = self.env.ref_context.get('zuul:attr_path')
        if path:
            path.pop()
        path = self.env.ref_context.get('zuul:display_attr_path')
        if path:
            path.pop()
    def handle_signature(self, sig, signode):
        """Build the signature; hidden variables render no nodes."""
        if 'hidden' in self.options:
            return sig
        path = self.get_display_path()
        signode['is_multiline'] = True
        line = addnodes.desc_signature_line()
        line['add_permalink'] = True
        for x in path:
            line += addnodes.desc_addname(x + '.', x + '.')
        line += addnodes.desc_name(sig, sig)
        # NOTE(review): 'required' is not declared in option_spec above,
        # so this branch appears unreachable via directive options --
        # confirm intent before relying on it.
        if 'required' in self.options:
            line += addnodes.desc_annotation(' (required)', ' (required)')
        signode += line
        if 'default' in self.options:
            line = addnodes.desc_signature_line()
            line += addnodes.desc_type('Default: ', 'Default: ')
            line += nodes.literal(self.options['default'],
                                  self.options['default'])
            signode += line
        if 'type' in self.options:
            line = addnodes.desc_signature_line()
            line += addnodes.desc_type('Type: ', 'Type: ')
            line += nodes.emphasis(self.options['type'],
                                   self.options['type'])
            signode += line
        return sig
class ZuulJobVarDirective(ZuulVarDirective):
    """Variable accepted by a job; identical to ``zuul:var`` apart from
    its object type and index label."""
    pass
class ZuulRoleVarDirective(ZuulVarDirective):
    """Variable accepted by a role; identical to ``zuul:var`` apart from
    its object type and index label."""
    pass
class ZuulStatDirective(ZuulObjectDescription):
    """Describe a statsd metric (``.. zuul:stat::``)."""
    has_content = True
    option_spec = {
        'type': lambda x: x,
        'hidden': lambda x: x,
        'noindex': lambda x: x,
    }
    def before_content(self):
        # Stats use identical canonical and display paths.
        path = self.env.ref_context.setdefault('zuul:attr_path', [])
        element = self.names[-1]
        path.append(element)
        path = self.env.ref_context.setdefault('zuul:display_attr_path', [])
        element = self.names[-1]
        path.append(element)
    def after_content(self):
        path = self.env.ref_context.get('zuul:attr_path')
        if path:
            path.pop()
        path = self.env.ref_context.get('zuul:display_attr_path')
        if path:
            path.pop()
    def handle_signature(self, sig, signode):
        """Render ``ancestor.path.name (type)``; hidden stats render
        nothing."""
        if 'hidden' in self.options:
            return sig
        path = self.get_display_path()
        for x in path:
            signode += addnodes.desc_addname(x + '.', x + '.')
        signode += addnodes.desc_name(sig, sig)
        if 'type' in self.options:
            t = ' (%s)' % self.options['type']
            signode += addnodes.desc_annotation(t, t)
        return sig
class ZuulPathDirective(ZuulObjectDescription):
    """Describe a filesystem path (``.. zuul:path::``).

    Paths nest with '/' rather than '.', and the signature may carry
    Type / Example lines plus an "(ephemeral)" annotation.
    """
    has_content = True
    option_spec = {
        'ephemeral': lambda x: x,
        'noindex': lambda x: x,
        'example': lambda x: x,
        'type': lambda x: x,
    }
    # Nest with '/' so targets look like real paths.
    separator = '/'
    def before_content(self):
        path = self.env.ref_context.setdefault('zuul:attr_path', [])
        path.append(self.names[-1])
        path = self.env.ref_context.setdefault('zuul:display_attr_path', [])
        path.append(self.names[-1])
    def after_content(self):
        path = self.env.ref_context.get('zuul:attr_path')
        if path:
            path.pop()
        path = self.env.ref_context.get('zuul:display_attr_path')
        if path:
            path.pop()
    def handle_signature(self, sig, signode):
        """Build a multi-line signature rooted at '/'."""
        path = self.get_display_path()
        signode['is_multiline'] = True
        line = addnodes.desc_signature_line()
        line['add_permalink'] = True
        line += addnodes.desc_addname('/', '/')
        for x in path:
            line += addnodes.desc_addname(x + '/', x + '/')
        # The signature itself may contain slashes; only its last
        # component is the "name", the rest join the addname prefix.
        for x in sig.split('/')[:-1]:
            line += addnodes.desc_addname(x + '/', x + '/')
        last = sig.split('/')[-1]
        line += addnodes.desc_name(last, last)
        signode += line
        if 'type' in self.options:
            line = addnodes.desc_signature_line()
            line += addnodes.desc_type('Type: ', 'Type: ')
            line += nodes.emphasis(self.options['type'],
                                   self.options['type'])
            signode += line
        elif 'ephemeral' in self.options:
            # No type line: create an empty line to host the ephemeral
            # annotation below.
            line = addnodes.desc_signature_line()
            signode += line
        if 'ephemeral' in self.options:
            # This is appended to the type line, or a new blank line
            # if no type.
            line += addnodes.desc_annotation(' (ephemeral)', ' (ephemeral)')
        if 'example' in self.options:
            line = addnodes.desc_signature_line()
            line += addnodes.desc_type('Example: ', 'Example: ')
            line += nodes.literal(self.options['example'],
                                  self.options['example'])
            signode += line
        return sig
######################################################################
#
# Autodoc directives
#
class ZuulAutoJobDirective(ZuulDirective):
    """Render documentation for the single job named in the content."""

    def run(self):
        job_name = self.content[0]
        rst = self.generate_zuul_job_content(job_name)
        self.state_machine.insert_input(rst, self.zuul_layout_path)
        return []
class ZuulAutoJobsDirective(ZuulDirective):
    """Render documentation for every job defined in the Zuul layout."""

    has_content = False

    def run(self):
        env = self.state.document.settings.env
        seen = set()
        for job in self.zuul_layout.jobs:
            job_name = job['name']
            if job_name in seen:
                continue
            seen.add(job_name)
            rst = self.generate_zuul_job_content(job_name)
            origin = 'Job "%s" included in %s' % \
                (job_name, env.doc2path(env.docname))
            self.state_machine.insert_input(rst, origin)
        return []
class ZuulAutoProjectTemplateDirective(ZuulDirective):
    """Render documentation for the project template named in the content."""

    def run(self):
        template_name = self.content[0]
        rst = self.generate_zuul_project_template_content(template_name)
        self.state_machine.insert_input(rst, self.zuul_layout_path)
        return []
class ZuulAutoProjectTemplatesDirective(ZuulDirective):
    """Render documentation for every project template in the layout."""

    has_content = False

    def run(self):
        env = self.state.document.settings.env
        seen = set()
        for template in self.zuul_layout.project_templates:
            template_name = template.name
            if template_name in seen:
                continue
            seen.add(template_name)
            rst = self.generate_zuul_project_template_content(template_name)
            origin = 'Template "%s" included in %s' % \
                (template_name, env.doc2path(env.docname))
            self.state_machine.insert_input(rst, origin)
        return []
class ZuulAutoRoleDirective(ZuulDirective):
    """Render documentation for the single role named in the content."""

    def run(self):
        role_name = self.content[0]
        rst = self.generate_zuul_role_content(role_name)
        self.state_machine.insert_input(rst, self.zuul_role_paths[role_name])
        return []
class ZuulAutoRolesDirective(ZuulDirective):
    """Render documentation for all discovered roles.

    Role names are processed in reverse-alphabetical order, matching
    the original implementation.
    """

    has_content = False

    def run(self):
        for role_name in sorted(self.zuul_role_paths.keys(), reverse=True):
            rst = self.generate_zuul_role_content(role_name)
            self.state_machine.insert_input(
                rst, self.zuul_role_paths[role_name])
        return []
class ZuulAbbreviatedXRefRole(XRefRole):
    """Cross-reference role whose implicit title shows only the last
    dotted path component."""

    def process_link(self, env, refnode, has_explicit_title, title,
                     target):
        title, target = super(ZuulAbbreviatedXRefRole, self).process_link(
            env, refnode, has_explicit_title, title, target)
        if not has_explicit_title:
            # Keep everything after the final dot (whole title if none).
            title = title.rpartition('.')[2]
        return title, target
class ZuulDomain(Domain):
    """The ``zuul`` Sphinx domain: directives, xref roles, and the
    per-build object/layout caches."""
    name = 'zuul'
    label = 'Zuul'
    directives = {
        # Object description directives
        'job': ZuulJobDirective,
        'project_template': ZuulProjectTemplateDirective,
        'role': ZuulRoleDirective,
        'attr': ZuulAttrDirective,
        'value': ZuulValueDirective,
        'var': ZuulVarDirective,
        'stat': ZuulStatDirective,
        'jobvar': ZuulJobVarDirective,
        'rolevar': ZuulRoleVarDirective,
        'path': ZuulPathDirective,
        # Autodoc directives
        'autojob': ZuulAutoJobDirective,
        'autojobs': ZuulAutoJobsDirective,
        'autoproject_template': ZuulAutoProjectTemplateDirective,
        'autoproject_templates': ZuulAutoProjectTemplatesDirective,
        'autorole': ZuulAutoRoleDirective,
        'autoroles': ZuulAutoRolesDirective,
    }
    roles = {
        # :zuul:xjob: is the quiet variant of :zuul:job: used by
        # generated content; it does not warn on dangling references.
        'job': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                        warn_dangling=True),
        'xjob': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                         warn_dangling=False),
        'project_template':
            XRefRole(innernodeclass=nodes.inline,  # type: ignore
                     warn_dangling=True),
        'role': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                         warn_dangling=True),
        'attr': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                         warn_dangling=True),
        'value': ZuulAbbreviatedXRefRole(
            innernodeclass=nodes.inline,  # type: ignore
            warn_dangling=True),
        'var': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                        warn_dangling=True),
        'stat': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                         warn_dangling=True),
        'jobvar': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                           warn_dangling=True),
        'rolevar': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                            warn_dangling=True),
        'path': XRefRole(innernodeclass=nodes.inline,  # type: ignore
                         warn_dangling=True),
    }
    # Per-build caches filled lazily by ZuulDirective helpers:
    # layout/layout_path cache the parsed config; role_paths maps role
    # name -> README; objects maps targetname -> (docname, objtype).
    initial_data = {
        'layout': None,
        'layout_path': None,
        'role_paths': None,
        'objects': OrderedDict(),
    }  # type: Dict[str, Dict]
    def resolve_xref(self, env, fromdocname, builder, type, target,
                     node, contnode):
        """Resolve a :zuul:<type>:`target` reference, or return None."""
        objects = self.data['objects']
        if type == 'xjob':
            # xjob shares job's target namespace.
            type = 'job'
        name = type + '-' + target
        obj = objects.get(name)
        if obj:
            return make_refnode(builder, fromdocname, obj[0], name,
                                contnode, name)
    def clear_doc(self, docname):
        # Drop all objects registered by a document that is re-read.
        for fullname, (fn, _l) in list(self.data['objects'].items()):
            if fn == docname:
                del self.data['objects'][fullname]
######################################################################
#
# Attribute overview directives
#
# TODO(ianw)
#
# There are many ways this could be improved
# * fancy indentation of nested attrs in the overview
# * (related) stripping of prefixes for nesting
# * something better than a bullet list (table?)
# * add something to attributes so that they can list their child
# attributes automatically. Something like
#
# .. attr:: foo
# :show_overview:
#
# This is the foo option
#
# and then
#
# .. attr-overview::
# :maxdepth: 1
# :prefix: foo
#
# gets automatically inserted for you, and then you should have a
# sensible overview of the sub-options of "foo" inside the
# top-level "foo" documentation
# * figure out if it could be added to TOC
class attroverview(nodes.General, nodes.Element):
    # Placeholder node inserted by AttrOverviewDirective; replaced with
    # a bullet list of attribute links in process_attr_overview.
    pass
class AttrOverviewDirective(Directive):
    """Insert a placeholder later expanded into a list of attr links."""
    # NOTE(review): "option_arguments" is not a docutils Directive class
    # attribute (the standard name is "optional_arguments"); as written
    # it has no effect -- confirm intent before renaming.
    option_arguments = 2
    option_spec = {
        'maxdepth': directives.positive_int,
        'prefix': directives.unchanged
    }
    def run(self):
        # Stash the options on the placeholder node as private
        # attributes; process_attr_overview reads them at resolve time.
        attr = attroverview('')
        if 'maxdepth' in self.options:
            attr._maxdepth = self.options['maxdepth']
        if 'prefix' in self.options:
            attr._prefix = self.options['prefix']
        return [attr]
def process_attr_overview(app, doctree, fromdocname):
    """doctree-resolved handler: expand attroverview placeholders.

    Builds a bullet list of links to every ``attr`` object registered
    for the current document, honoring the node's maxdepth/prefix
    filters.
    """
    objects = app.builder.env.domaindata['zuul']['objects']
    for node in doctree.traverse(attroverview):
        content = []
        l = nodes.bullet_list()
        content.append(l)
        # The "..attr" calls have built up this dictionary, of the format
        #
        # {
        #   attr-foo : (docname, attr),
        #   attr-foo.bar : (docname, attr),
        # }
        #
        # So, in words, we look at all items in this list that have
        # our docname and the attr "type" (second argument) and build
        # them into a bullet list.
        for k,v in objects.items():
            if v[0] == fromdocname and v[1] == 'attr':
                # remove the leading "attr-" for the link name ... the
                # whole thing is the refid however.
                name = k[5:]
                # e.g. if we have foo.bar.baz that's considered 3
                # levels
                if getattr(node, '_maxdepth', None):
                    maxdepth = node._maxdepth
                    if len(name.split('.')) > maxdepth:
                        continue
                if getattr(node, '_prefix', None):
                    prefix = node._prefix
                    if not name.startswith(prefix.strip()):
                        continue
                item = nodes.list_item()
                para = nodes.paragraph()
                refnode = nodes.reference(name, name, internal=True, refid=k)
                para.append(refnode)
                item.append(para)
                l.append(item)
        node.replace_self(content)
def setup(app):
    """Sphinx extension entry point: register config values, the
    attr-overview directive, its resolver, and the zuul domain."""
    app.add_config_value('zuul_role_paths', [], 'html')
    app.add_config_value('zuul_autoroles_warn_missing', True, '')
    app.add_directive('attr-overview', AttrOverviewDirective)
    app.connect('doctree-resolved', process_attr_overview)
    app.add_domain(ZuulDomain)
import collections
import functools
import json
import re
import requests
import urllib.parse
from time import time
from typing import Any, List
def get_zuul_tenants(zuul_api_url: str) -> List[str]:
    """Return the names of all tenants known to a Zuul API.

    Performs a blocking HTTP GET against ``<zuul_api_url>/tenants``.
    """
    zuul_tenants = json.loads(
        requests.get(
            urllib.parse.urljoin(
                zuul_api_url.rstrip('/') + '/', "tenants")).content)
    return list(map(lambda x: x["name"], zuul_tenants))
def get_zuul_status(zuul_status_url):
    """Fetch and decode the JSON status blob from a Zuul status URL."""
    zuul_status = json.loads(
        requests.get(
            zuul_status_url).content)
    return zuul_status
def get_zuul_pipeline_list(zuul_status):
    """Return the names of all pipelines in a Zuul status structure."""
    return [pipeline['name'] for pipeline in zuul_status['pipelines']]
def get_queues_for_pipeline(zuul_status, name):
    """Return the change queues of pipeline *name*, or [] if absent."""
    matches = (pipeline['change_queues']
               for pipeline in zuul_status['pipelines']
               if pipeline['name'] == name)
    return next(matches, [])
def filter_queues(queues, queue_name=None, project_regex=None):
    """Select queues by exact name or by a regex on the name.

    With no filter the input is returned unchanged; with a filter a
    generator over the matching queues is returned (mirroring the
    original contract).
    """
    if queue_name:
        return (queue for queue in queues if queue['name'] == queue_name)
    if project_regex:
        return (queue for queue in queues
                if re.search(project_regex, queue['name']))
    return queues
Change = collections.namedtuple('Change', ['subchange', 'age', 'pipeline'])
def get_changes_age(zuul_status: Any) -> List[Change]:
    """Return a Change for every queued item in every pipeline.

    Ages are computed in milliseconds against the current wall clock.
    """
    now_ms = time() * 1000
    results = []
    for pipeline_name in get_zuul_pipeline_list(zuul_status):
        for queue in get_queues_for_pipeline(zuul_status, pipeline_name):
            for head in queue['heads']:
                for item in head:
                    age_ms = int(now_ms - item['enqueue_time'])
                    results.append(Change(item, age_ms, pipeline_name))
    return results
def filter_long_running_jobs(
        changes: List[Change], max_age: int) -> List[Change]:
    """Return only the changes whose age strictly exceeds *max_age*."""
    return [change for change in changes if change.age > max_age]
def get_max_age(changes: List[Change]) -> int:
    """Return the largest age among *changes*, never less than 0."""
    oldest = 0
    for change in changes:
        oldest = max(oldest, change.age)
    return oldest
def find_long_running_jobs(zuul_status, time_limit):
    """Return the raw change dicts older than *time_limit* milliseconds.

    Each returned dict is tagged in place with the pipeline it was
    found in under the 'pipeline' key.
    """
    old_changes = []
    for entry in get_changes_age(zuul_status):
        if entry.age > time_limit:
            entry.subchange['pipeline'] = entry.pipeline
            old_changes.append(entry.subchange)
    return old_changes
Zuul
====
Zuul is a project gating system.
The latest documentation for Zuul v3 is published at:
https://zuul-ci.org/docs/zuul/
If you are looking for the Edge routing service named Zuul that is
related to Netflix, it can be found here:
https://github.com/Netflix/zuul
If you are looking for the Javascript testing tool named Zuul, it
can be found here:
https://github.com/defunctzombie/zuul
Getting Help
------------
There are two Zuul-related mailing lists:
`zuul-announce <http://lists.zuul-ci.org/cgi-bin/mailman/listinfo/zuul-announce>`_
A low-traffic announcement-only list to which every Zuul operator or
power-user should subscribe.
`zuul-discuss <http://lists.zuul-ci.org/cgi-bin/mailman/listinfo/zuul-discuss>`_
General discussion about Zuul, including questions about how to use
it, and future development.
You will also find Zuul developers on
`Matrix <https://matrix.to/#/#zuul:opendev.org>`_.
Contributing
------------
To browse the latest code, see: https://opendev.org/zuul/zuul
To clone the latest code, use `git clone https://opendev.org/zuul/zuul`
Bugs are handled at: https://storyboard.openstack.org/#!/project/zuul/zuul
Suspected security vulnerabilities are most appreciated if first
reported privately following any of the supported mechanisms
described at https://zuul-ci.org/docs/zuul/user/vulnerabilities.html
Code reviews are handled by gerrit at https://review.opendev.org
After creating a Gerrit account, use `git review` to submit patches.
Example::
# Do your commits
$ git review
# Enter your username if prompted
`Join us on Matrix <https://matrix.to/#/#zuul:opendev.org>`_ to discuss
development or usage.
License
-------
Zuul is free software. Most of Zuul is licensed under the Apache
License, version 2.0. Some parts of Zuul are licensed under the
General Public License, version 3.0. Please see the license headers
at the tops of individual source files.
Python Version Support
----------------------
Zuul requires Python 3. It does not support Python 2.
Since Zuul uses Ansible to drive CI jobs, Zuul can run tests anywhere
Ansible can, including Python 2 environments.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/README.rst | README.rst |
import multiprocessing
import os
import nox
# Project-wide nox defaults: treat undeclared external commands as
# errors, reuse virtualenvs across runs, and make a bare "nox" run the
# unit tests and linters.
nox.options.error_on_external_run = True
nox.options.reuse_existing_virtualenvs = True
nox.options.sessions = ["tests-3", "linters"]
def set_env(session, var, default):
    """Copy *var* from the caller's environment into the nox session,
    falling back to *default* when it is unset."""
    value = os.environ.get(var, default)
    session.env[var] = value
def set_standard_env_vars(session):
    """Apply the standard test environment shared by all sessions."""
    # Capture log/stdout/stderr output and allow a generous per-test
    # timeout; each can still be overridden by the calling environment.
    for var, default in (
            ('OS_LOG_CAPTURE', '1'),
            ('OS_STDERR_CAPTURE', '1'),
            ('OS_STDOUT_CAPTURE', '1'),
            ('OS_TEST_TIMEOUT', '360')):
        set_env(session, var, default)
    session.env['PYTHONWARNINGS'] = ','.join([
        'always::DeprecationWarning:zuul.driver.sql.sqlconnection',
        'always::DeprecationWarning:tests.base',
        'always::DeprecationWarning:tests.unit.test_database',
        'always::DeprecationWarning:zuul.driver.sql.alembic.env',
        'always::DeprecationWarning:zuul.driver.sql.alembic.script',
    ])
    # Set PYTHONTRACEMALLOC to a value greater than 0 in the calling env
    # to get tracebacks of that depth for ResourceWarnings. Disabled by
    # default as this consumes more resources and is slow.
    set_env(session, 'PYTHONTRACEMALLOC', '0')
@nox.session(python='3')
def bindep(session):
    """Check that the required system packages (bindep) are present."""
    set_standard_env_vars(session)
    session.install('bindep')
    session.run('bindep', 'test')
@nox.session(python='3')
def cover(session):
    """Run the test suite under coverage; emit HTML and XML reports."""
    set_standard_env_vars(session)
    # Make stestr launch python via coverage so workers are traced too.
    session.env['PYTHON'] = 'coverage run --source zuul --parallel-mode'
    session.install('-r', 'requirements.txt',
                    '-r', 'test-requirements.txt')
    session.install('-e', '.')
    session.run('stestr', 'run')
    session.run('coverage', 'combine')
    session.run('coverage', 'html', '-d', 'cover')
    session.run('coverage', 'xml', '-o', 'cover/coverage.xml')
@nox.session(python='3')
def docs(session):
    """Build the Sphinx documentation (warnings are treated as errors)."""
    set_standard_env_vars(session)
    session.install('-r', 'doc/requirements.txt',
                    '-r', 'test-requirements.txt')
    session.install('-e', '.')
    session.run('sphinx-build', '-E', '-W', '-d', 'doc/build/doctrees',
                '-b', 'html', 'doc/source/', 'doc/build/html')
@nox.session(python='3')
def linters(session):
    """Run flake8 and validate the web UI's OpenAPI specification."""
    set_standard_env_vars(session)
    session.install('flake8', 'openapi-spec-validator')
    session.run('flake8')
    session.run('openapi-spec-validator', 'web/public/openapi.yaml')
@nox.session(python='3')
def tests(session):
    """Run the unit test suite (default session).

    Builds the web assets and installs managed Ansible first, then
    runs stestr with one worker per available CPU minus one.
    """
    set_standard_env_vars(session)
    session.install('-r', 'requirements.txt',
                    '-r', 'test-requirements.txt')
    session.install('-e', '.')
    # run_always: these must happen even when the virtualenv is reused.
    session.run_always('tools/yarn-build.sh', external=True)
    session.run_always('zuul-manage-ansible', '-v')
    procs = max(int(multiprocessing.cpu_count() - 1), 1)
    session.run('stestr', 'run', '--slowest', f'--concurrency={procs}',
                *session.posargs)
@nox.session(python='3')
def remote(session):
    """Run the remote (live node) test suite under tests/remote."""
    set_standard_env_vars(session)
    session.install('-r', 'requirements.txt',
                    '-r', 'test-requirements.txt')
    session.install('-e', '.')
    session.run_always('zuul-manage-ansible', '-v')
    session.run('stestr', 'run', '--test-path', './tests/remote')
@nox.session(python='3')
def venv(session):
    """Run an arbitrary command inside the project's virtualenv."""
    set_standard_env_vars(session)
    session.install('-r', 'requirements.txt',
                    '-r', 'test-requirements.txt')
    session.install('-e', '.')
    session.run(*session.posargs)
@nox.session(python='3')
def zuul_client(session):
    """Run the zuul-client integration tests (serially)."""
    set_standard_env_vars(session)
    session.install('zuul-client',
                    '-r', 'test-requirements.txt',
                    '-r', 'requirements.txt')
    session.install('-e', '.')
    session.run_always('zuul-manage-ansible', '-v')
    session.run(
        'stestr', 'run', '--concurrency=1',
        '--test-path', './tests/zuul_client')
============
Testing Zuul
============
------------
A Quickstart
------------
This is designed to be enough information for you to run your first tests on
an Ubuntu 20.04 (or later) host.
*Install pip*::
sudo apt-get install python3-pip
More information on pip here: http://www.pip-installer.org/en/latest/
*Use pip to install tox*::
pip install tox
A running zookeeper is required to execute tests, but it also needs to be
configured for TLS and a certificate authority set up to handle socket
authentication. Because of these complexities, it's recommended to use a
helper script to set up these dependencies, as well as a database servers::
sudo apt-get install docker-compose # or podman-compose if preferred
ROOTCMD=sudo tools/test-setup-docker.sh
.. note:: Installing and building javascript is not required, but tests that
depend on the javascript assets having been built will be skipped
if you don't.
*Install javascript tools*::
tools/install-js-tools.sh
*Install javascript dependencies*::
pushd web
yarn install
popd
*Build javascript assets*::
pushd web
yarn build
popd
Run The Tests
-------------
*Navigate to the project's root directory and execute*::
tox
Note: completing this command may take a long time (depends on system resources)
also, you might not see any output until tox is complete.
Information about tox can be found here: http://testrun.org/tox/latest/
Run The Tests in One Environment
--------------------------------
Tox will run your entire test suite in the environments specified in the project tox.ini::
[tox]
envlist = <list of available environments>
To run the test suite in just one of the environments in envlist execute::
tox -e <env>
so for example, *run the test suite in py35*::
tox -e py35
Run One Test
------------
To run individual tests with tox::
tox -e <env> -- path.to.module.Class.test
For example, to *run a single Zuul test*::
tox -e py35 -- tests.unit.test_scheduler.TestScheduler.test_jobs_executed
To *run one test in the foreground* (after previously having run tox
to set up the virtualenv)::
.tox/py35/bin/stestr run tests.unit.test_scheduler.TestScheduler.test_jobs_executed
List Failing Tests
------------------
. .tox/py35/bin/activate
stestr failing --list
Hanging Tests
-------------
The following will run each test in turn and print the name of the
test as it is run::
. .tox/py35/bin/activate
stestr run
You can compare the output of that to::
python -m testtools.run discover --list
Need More Info?
---------------
More information about stestr: http://stestr.readthedocs.io/en/latest/
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/TESTING.rst | TESTING.rst |
:title: Vulnerability Reporting
.. _vulnerability-reporting:
Vulnerability Reporting
=======================
Zuul strives to be as secure as possible, implementing a layered
defense-in-depth approach where any untrusted code is executed and
leveraging well-reviewed popular libraries for its cryptographic
needs. Still, bugs are inevitable and security bugs are no exception
to that rule.
If you've found a bug in Zuul and you suspect it may compromise the
security of some part of the system, we'd appreciate the opportunity
to privately discuss the details before any suspected vulnerability
is made public. There are a couple possible ways you can bring
security bugs to our attention:
Create a Private Story in StoryBoard
------------------------------------
You can create a private story at the following URL:
`<https://storyboard.openstack.org/#!/story/new?force_private=true>`_
Using this particular reporting URL helps prevent you from
forgetting to set the ``Private`` checkbox in the new story UI
before saving. If you're doing this from a normal story creation
workflow instead, please make sure to set this checkbox first.
Enter a short but memorable title for your vulnerability report and
provide risks, concerns or other relevant details in the description
field. Where it lists teams and users that can see this story, add
the ``zuul-security`` team so they'll be able to work on triaging
it. For the initial task, select the project to which this is
specific (e.g., ``zuul/zuul`` or
``zuul/nodepool``) and if it relates to additional
projects you can add another task for each of them making sure to
include a relevant title for each task. When you've included all the
detail and tasks you want, save the new story and then you can
continue commenting on it normally. Please don't remove the
``Private`` setting, and instead wait for one of the zuul-security
reviewers to do this once it's deemed safe.
Report via Encrypted E-mail
---------------------------
If the issue is extremely sensitive or you’re otherwise unable to
use the task tracker directly, please send an E-mail message to one
or more members of the Zuul security team. You’re encouraged to
encrypt messages to their OpenPGP keys, which can be found linked
below and also on the keyserver network with the following
fingerprints:
.. TODO: add some more contacts/keys here
* Jeremy Stanley <[email protected]>:
`key 0x97ae496fc02dec9fc353b2e748f9961143495829
<_static/0x97ae496fc02dec9fc353b2e748f9961143495829.txt>`_
* Tobias Henkel <[email protected]>:
`key 0xfb2ee15b2f0f12662b68ed9603750dec158e5fa2
<_static/0xfb2ee15b2f0f12662b68ed9603750dec158e5fa2.txt>`_
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/vulnerabilities.rst | vulnerabilities.rst |
:title: Project Gating
.. _project_gating:
Project Gating
==============
Traditionally, many software development projects merge changes from
developers into the repository, and then identify regressions
resulting from those changes (perhaps by running a test suite with a
continuous integration system), followed by more patches to fix those
bugs. When the mainline of development is broken, it can be very
frustrating for developers and can cause lost productivity,
particularly so when the number of contributors or contributions is
large.
The process of gating attempts to prevent changes that introduce
regressions from being merged. This keeps the mainline of development
open and working for all developers, and only when a change is
confirmed to work without disruption is it merged.
Many projects practice an informal method of gating where developers
with mainline commit access ensure that a test suite runs before
merging a change. With more developers, more changes, and more
comprehensive test suites, that process does not scale very well, and
is not the best use of a developer's time. Zuul can help automate
this process, with a particular emphasis on ensuring large numbers of
changes are tested correctly.
Testing in parallel
-------------------
A particular focus of Zuul is ensuring correctly ordered testing of
changes in parallel. A gating system should always test each change
applied to the tip of the branch exactly as it is going to be merged.
A simple way to do that would be to test one change at a time, and
merge it only if it passes tests. That works very well, but if
changes take a long time to test, developers may have to wait a long
time for their changes to make it into the repository. With some
projects, it may take hours to test changes, and it is easy for
developers to create changes at a rate faster than they can be tested
and merged.
Zuul's :value:`dependent pipeline manager<pipeline.manager.dependent>`
allows for parallel execution of test jobs for gating while ensuring
changes are tested correctly, exactly as if they had been tested one
at a time. It does this by performing speculative execution of test
jobs; it assumes that all jobs will succeed and tests them in parallel
accordingly. If they do succeed, they can all be merged. However, if
one fails, then changes that were expecting it to succeed are
re-tested without the failed change. In the best case, as many
changes as execution contexts are available may be tested in parallel
and merged at once. In the worst case, changes are tested one at a
time (as each subsequent change fails, changes behind it start again).
For example, if a reviewer approves five changes in rapid succession::
A, B, C, D, E
Zuul queues those changes in the order they were approved, and notes
that each subsequent change depends on the one ahead of it merging:
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
A <- B <- C <- D <- E;
}
Zuul then starts immediately testing all of the changes in parallel.
But in the case of changes that depend on others, it instructs the
test system to include the changes ahead of it, with the assumption
they pass. That means jobs testing change *B* include change *A* as
well::
Jobs for A: merge change A, then test
Jobs for B: merge changes A and B, then test
Jobs for C: merge changes A, B and C, then test
Jobs for D: merge changes A, B, C and D, then test
Jobs for E: merge changes A, B, C, D and E, then test
Hence jobs triggered to test A will only test A and ignore B, C, D:
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
master -> A -> B -> C -> D -> E;
group jobs_for_A {
label = "Merged changes for A";
master -> A;
}
group ignored_to_test_A {
label = "Ignored changes";
color = "lightgray";
B -> C -> D -> E;
}
}
The jobs for E would include the whole dependency chain: A, B, C, D, and E.
E will be tested assuming A, B, C, and D passed:
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
group jobs_for_E {
label = "Merged changes for E";
master -> A -> B -> C -> D -> E;
}
}
If changes *A* and *B* pass tests (green), and *C*, *D*, and *E* fail (red):
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
A [color = lightgreen];
B [color = lightgreen];
C [color = pink];
D [color = pink];
E [color = pink];
master <- A <- B <- C <- D <- E;
}
Zuul will merge change *A* followed by change *B*, leaving this queue:
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
C [color = pink];
D [color = pink];
E [color = pink];
C <- D <- E;
}
Since *D* was dependent on *C*, it is not clear whether *D*'s failure is the
result of a defect in *D* or *C*:
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
C [color = pink];
D [label = "D\n?"];
E [label = "E\n?"];
C <- D <- E;
}
Since *C* failed, Zuul will report its failure and drop *C* from the queue,
keeping *D* and *E*:
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
D [label = "D\n?"];
E [label = "E\n?"];
D <- E;
}
This queue is the same as if two new changes had just arrived, so Zuul
starts the process again testing *D* against the tip of the branch, and
*E* against *D*:
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
master -> D -> E;
group jobs_for_D {
label = "Merged changes for D";
master -> D;
}
group ignored_to_test_D {
label = "Skip";
color = "lightgray";
E;
}
}
.. blockdiag::
blockdiag foo {
node_width = 40;
span_width = 40;
group jobs_for_E {
label = "Merged changes for E";
master -> D -> E;
}
}
Cross Project Testing
---------------------
When your projects are closely coupled together, you want to make sure
changes entering the gate are going to be tested with the version of
other projects currently enqueued in the gate (since they will
eventually be merged and might introduce breaking features).
Such relationships can be defined in Zuul configuration by placing
projects in a shared queue within a dependent pipeline. Whenever
changes for any project enter a pipeline with such a shared queue,
they are tested together, such that the commits for the changes ahead
in the queue are automatically present in the jobs for the changes
behind them. See :ref:`project` for more details.
A given dependent pipeline may have as many shared change queues as
necessary, so groups of related projects may share a change queue
without interfering with unrelated projects.
:value:`Independent pipelines <pipeline.manager.independent>` do
not use shared change queues, however, they may still be used to test
changes across projects using cross-project dependencies.
.. _dependencies:
Cross-Project Dependencies
--------------------------
Zuul permits users to specify dependencies across projects. Using a
special footer, users may specify that a change depends on another
change in any repository known to Zuul. In Gerrit based projects
this footer needs to be added to the git commit message. In GitHub
based projects this footer must be added to the pull request description.
Zuul's cross-project dependencies behave like a directed acyclic graph
(DAG), like git itself, to indicate a one-way dependency relationship
between changes in different git repositories. Change A may depend on
B, but B may not depend on A.
To use them, include ``Depends-On: <change-url>`` in the footer of a
commit message or pull request. For example, a change which depends
on a GitHub pull request (PR #4) might have the following footer::
Depends-On: https://github.com/example/test/pull/4
.. note::
   For GitHub the ``Depends-On:`` footer must be in the *Pull Request*
description, which is separate and often different to the commit
message (i.e. the text submitted with ``git commit``). This is in
contrast to Gerrit where the change description is always the
commit message.
A change which depends on a Gerrit change (change number 3)::
Depends-On: https://review.example.com/3
Changes may depend on changes in any other project, even projects not
on the same system (i.e., a Gerrit change may depend on a GitHub pull
request).
.. note::
An older syntax of specifying dependencies using Gerrit change-ids
is still supported, however it is deprecated and will be removed in
a future version.
Dependent Pipeline
~~~~~~~~~~~~~~~~~~
When Zuul sees changes with cross-project dependencies, it serializes
them in the usual manner when enqueuing them into a pipeline. This
means that if change A depends on B, then when they are added to a
dependent pipeline, B will appear first and A will follow:
.. blockdiag::
:align: center
blockdiag crd {
orientation = portrait
span_width = 30
class greendot [
label = "",
shape = circle,
color = green,
width = 20, height = 20
]
A_status [ class = greendot ]
B_status [ class = greendot ]
B_status -- A_status
'Change B\nURL: .../4' <- 'Change A\nDepends-On: .../4'
}
If tests for B fail, both B and A will be removed from the pipeline, and
it will not be possible for A to merge until B does.
.. note::
If changes with cross-project dependencies do not share a change
queue then Zuul is unable to enqueue them together, and the first
will be required to merge before the second can be enqueued. If the
second change is approved before the first is merged, Zuul can't act
on the approval and won't automatically enqueue the second change,
requiring a new approval event to enqueue it after the first change
merges.
Independent Pipeline
~~~~~~~~~~~~~~~~~~~~
When changes are enqueued into an independent pipeline, all of the
related dependencies (both normal git-dependencies that come from
parent commits as well as cross-project dependencies) appear in a
dependency graph, as in a dependent pipeline. This means that even in
an independent pipeline, your change will be tested with its
dependencies. Changes that were previously unable to be fully tested
until a related change landed in a different repository may now be
tested together from the start.
All of the changes are still independent (you will note that the whole
pipeline does not share a graph as in a dependent pipeline), but for
each change tested, all of its dependencies are visually connected to
it, and they are used to construct the git repositories that Zuul uses
when testing.
When looking at this graph on the status page, you will note that the
dependencies show up as grey dots, while the actual change tested shows
up as red or green (depending on the jobs results):
.. blockdiag::
:align: center
blockdiag crdgrey {
orientation = portrait
span_width = 30
class dot [
label = "",
shape = circle,
width = 20, height = 20
]
A_status [class = "dot", color = green]
B_status [class = "dot", color = grey]
B_status -- A_status
"Change B\nURL: .../4" <- "Change A\nDepends-On: .../4"
}
This is to indicate that the grey changes are only there to establish
dependencies. Even if one of the dependencies is also being tested, it
will show up as a grey dot when used as a dependency, but separately and
additionally will appear as its own red or green dot for its test.
Multiple Changes
~~~~~~~~~~~~~~~~
A change may list more than one dependency by simply adding more
``Depends-On:`` lines to the commit message footer. It is possible
for a change in project A to depend on a change in project B and a
change in project C.
.. blockdiag::
:align: center
blockdiag crdmultichanges {
orientation = portrait
span_width = 30
class greendot [
label = "",
shape = circle,
color = green,
width = 20, height = 20
]
C_status [ class = "greendot" ]
B_status [ class = "greendot" ]
A_status [ class = "greendot" ]
C_status -- B_status -- A_status
A [ label = "Repo A\nDepends-On: .../3\nDepends-On: .../4" ]
group {
orientation = portrait
label = "Dependencies"
color = "lightgray"
B [ label = "Repo B\nURL: .../3" ]
C [ label = "Repo C\nURL: .../4" ]
}
B, C <- A
}
Cycles
~~~~~~
Zuul supports cycles that are created by use of cross-project dependencies.
However this feature is opt-in and can be configured on the queue.
See :attr:`queue.allow-circular-dependencies` for information on how to
configure this.
.. _global_repo_state:
Global Repo State
~~~~~~~~~~~~~~~~~
If a git repository is used by at least one job for a queue item, then
Zuul will freeze the repo state (i.e., branch heads and tags) and use
that same state for every job run for that queue item. Not every job
will get a git repo checkout of every repo, but for any repo that is
checked out, it will have the same state. Because of this, authors
can be sure that jobs running on the same queue item have a consistent
view of all involved git repos, even if one job starts running much
later than another.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/gating.rst | gating.rst |
:title: Project Configuration
.. _project-configuration:
Project Configuration
=====================
The following sections describe the main part of Zuul's configuration.
All of what follows is found within files inside of the repositories
that Zuul manages.
Security Contexts
-----------------
When a system administrator configures Zuul to operate on a project,
they specify one of two security contexts for that project. A
*config-project* is one which is primarily tasked with holding
configuration information and job content for Zuul. Jobs which are
defined in a config-project are run with elevated privileges, and all
Zuul configuration items are available for use. Base jobs (that is,
jobs without a parent) may only be defined in config-projects. It is
expected that changes to config-projects will undergo careful scrutiny
before being merged.
An *untrusted-project* is a project whose primary focus is not to
operate Zuul, but rather it is one of the projects being tested or
deployed. The Zuul configuration language available to these projects
is somewhat restricted (as detailed in individual sections below), and
jobs defined in these projects run in a restricted execution
environment since they may be operating on changes which have not yet
undergone review.
Configuration Loading
---------------------
When Zuul starts, it examines all of the git repositories which are
specified by the system administrator in :ref:`tenant-config` and
searches for files in the root of each repository. Zuul looks first
for a file named ``zuul.yaml`` or a directory named ``zuul.d``, and if
they are not found, ``.zuul.yaml`` or ``.zuul.d`` (with a leading
dot). In the case of an :term:`untrusted-project`, the configuration
from every branch is included, however, in the case of a
:term:`config-project`, only a single branch is examined.
The config project branch can be configured with the tenant configuration
:attr:`tenant.config-projects.<project>.load-branch` attribute.
When a change is proposed to one of these files in an
untrusted-project, the configuration proposed in the change is merged
into the running configuration so that any changes to Zuul's
configuration are self-testing as part of that change. If there is a
configuration error, no jobs will be run and the error will be
reported by any applicable pipelines. In the case of a change to a
config-project, the new configuration is parsed and examined for
errors, but the new configuration is not used in testing the change.
This is because configuration in config-projects is able to access
elevated privileges and should always be reviewed before being merged.
As soon as a change containing a Zuul configuration change merges to
any Zuul-managed repository, the new configuration takes effect
immediately.
.. _regex:
Regular Expressions
-------------------
Many options accept literal strings or regular expressions. In these
cases, the regular expression matching starts at the beginning of the
string as if there were an implicit ``^`` at the start of the regular
expression. To match at an arbitrary position, prepend ``.*`` to the
regular expression.
Zuul uses the `RE2 library <https://github.com/google/re2/wiki/Syntax>`_
which has a restricted regular expression syntax compared to PCRE.
.. _encryption:
Encryption
----------
Zuul supports storing encrypted data directly in the git repositories
of projects it operates on. If you have a job which requires private
information in order to run (e.g., credentials to interact with a
third-party service) those credentials can be stored along with the
job definition.
Each project in Zuul has its own automatically generated RSA keypair
which can be used by anyone to encrypt a secret and only Zuul is able
to decrypt it. Zuul serves each project's public key using its
built-in webserver. They can be fetched at the path
``/api/tenant/<tenant>/key/<project>.pub`` where ``<project>`` is the
canonical name of a project and ``<tenant>`` is the name of a tenant
with that project.
Zuul currently supports one encryption scheme, PKCS#1 with OAEP, which
can not store secrets longer than 3760 bits (derived from the key
length of 4096 bits minus 336 bits of overhead). The padding used by
this scheme ensures that someone examining the encrypted data can not
determine the length of the plaintext version of the data, except to
know that it is not longer than 3760 bits (or some multiple thereof).
In the config files themselves, Zuul uses an extensible method of
specifying the encryption scheme used for a secret so that other
schemes may be added later. To specify a secret, use the
``!encrypted/pkcs1-oaep`` YAML tag along with the base64 encoded
value. For example:
.. code-block:: yaml
- secret:
name: test_secret
data:
password: !encrypted/pkcs1-oaep |
BFhtdnm8uXx7kn79RFL/zJywmzLkT1GY78P3bOtp4WghUFWobkifSu7ZpaV4NeO0s71YUsi
...
To support secrets longer than 3760 bits, the value after the
encryption tag may be a list rather than a scalar. For example:
.. code-block:: yaml
- secret:
name: long_secret
data:
password: !encrypted/pkcs1-oaep
- er1UXNOD3OqtsRJaP0Wvaqiqx0ZY2zzRt6V9vqIsRaz1R5C4/AEtIad/DERZHwk3Nk+KV
...
- HdWDS9lCBaBJnhMsm/O9tpzCq+GKRELpRzUwVgU5k822uBwhZemeSrUOLQ8hQ7q/vVHln
...
The `zuul-client utility <https://zuul-ci.org/docs/zuul-client/>`_ provides a
simple way to encrypt secrets for a Zuul project:
.. program-output:: zuul-client encrypt --help
.. _configuration-items:
Configuration Items
-------------------
The ``zuul.yaml`` and ``.zuul.yaml`` configuration files are
YAML-formatted and are structured as a series of items, each of which
is referenced below.
In the case of a ``zuul.d`` (or ``.zuul.d``) directory, Zuul recurses
the directory and extends the configuration using all the .yaml files
in the sorted path order. For example, to keep a job's variants in a
separate file, it needs to be loaded after the main entries, for
example using number prefixes in file names::
* zuul.d/pipelines.yaml
* zuul.d/projects.yaml
* zuul.d/01_jobs.yaml
* zuul.d/02_jobs-variants.yaml
Note subdirectories are traversed. Any subdirectories with a
``.zuul.ignore`` file will be pruned and ignored (this facilitates
keeping playbooks or roles in the config directory, if required).
Below are references to the different configuration items you may use within
the YAML files:
.. toctree::
:maxdepth: 1
config/pipeline
config/job
config/project
config/queue
config/secret
config/nodeset
config/semaphore
config/pragma
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/project-config.rst | project-config.rst |
.. _glossary:
Glossary
========
.. glossary::
:sorted:
abstract job
A job which cannot be run directly, and is only intended to
serve as a parent on top of which other jobs are constructed.
artifact
A file or set of files created by a build and archived for
reuse. The term is usually in reference to primary build
outputs such as a release package or rendered documentation,
but can also mean files produced as a byproduct such as logs.
base job
A job with no parent. A base job may only be defined in a
:term:`config-project`. Multiple base jobs may be defined, but
each tenant has a single default job which will be used as the
parent of any job which does not specify one explicitly.
build
Any run of a job. Every build is assigned a globally unique
identifier which is used when coordinating between Zuul's
component services, and for purposes such as addressing log
streams and results in the status API and Web dashboard. The
context for a build comes not only from its job definition,
but also from the pipeline into which it is scheduled.
buildset
A collection of builds which share a common context. All
builds in a buildset have the same triggering event and change
identifier.
change
A specific state of a Git repository. Changes can represent a
change revision/pull request from a code review system, a
remote branch tip, a tag, or any other sort of Git ref. A
change can also come with additional dependency context,
either implicit from its commit history or explicit through
the use of cross-project dependency declarations (for example
in a commit message or pull request summary, the exact
mechanism varies by source connection driver).
child job
A job which inherits values such as playbooks and variables
from a parent job. All jobs are implicitly child jobs, since
they inherit from at least a base job whether they declare it
as a parent or not.
check
By convention, the name of a pipeline which performs pre-merge
tests. Such a pipeline might be triggered by creating a new
change or pull request. It may run with changes which have not
yet seen any human review, so care must be taken in selecting
the kinds of jobs to run, and what resources will be available
to them in order to avoid misuse of the system or credential
compromise. It usually has an :value:`independent
<pipeline.manager.independent>` pipeline manager since the final
sequence of changes to merge is not generally known at the time
of upload.
config-project
One of two types of projects which may be specified by the
administrator in the tenant config file. A config-project is
primarily tasked with holding configuration information and job
content for Zuul. Jobs which are defined in a config-project
are run with elevated privileges, and all Zuul configuration
items are available for use. It is expected that changes to
config-projects will undergo careful scrutiny before being
merged.
connection
A coupling of a triggering and reporting driver with
credentials and location information for a specific source of
events, whether that's a code review platform, a generic Git
hosting site or an emitting protocol such as SMTP or SQL.
cross-project dependency
An explicit declaration that a change depends on another
change, which need not be in the same Git repository or even
accessible via the same connection. Zuul is expected to
incorporate any cross-project dependencies into the context
for the change declaring that dependency relationship.
deploy
By convention, the name of a continuous-deployment pipeline.
Such a pipeline typically interacts with production systems
rather than ephemeral test nodes. By triggering on merge events
the results of deployment can be reported back to the
      originating change. The :value:`serial
      <pipeline.manager.serial>` pipeline manager is recommended if
      multiple repositories are involved and only some jobs (based on
file matchers) will be run for each change. If a single repo is
involved and all deployment jobs run on every change merged,
then :value:`supercedent <pipeline.manager.supercedent>` may be
a better fit.
executor
The component of Zuul responsible for executing a sandboxed
Ansible process in order to produce a build. Some builds may
run entirely in the executor's provided workspace if the job
is suitably constructed, or it may require the executor to
connect to remote nodes for more complex and risky operations.
final job
A job which no other jobs are allowed to use as a parent, for
example in order to prevent the list of tasks they run from
being altered by potential child jobs.
gate
By convention, the name of a pipeline which performs project
gating. Such a pipeline might be triggered by a core team
member approving a change or pull request. It should have a
:value:`dependent <pipeline.manager.dependent>` pipeline manager
so that it can combine and sequence changes as they are
approved.
inventory
The set of hosts and variable assignments Zuul provides to
Ansible, forming the context for a build.
job
A collection of Ansible playbooks, variables, filtering
conditions and other metadata defining a set of actions which
should be taken when invoked under the intended circumstances.
Jobs are anonymous sets of sequenced actions, which when
executed in the context of a pipeline, result in a build.
job dependency
A declared reliance in one job on the completion of builds for
one or more other jobs or provided artifacts those builds may
produce. Jobs may also be conditionally dependent on specific
build results for their dependencies.
job variant
A lightweight modification of another defined job altering
variables and filtering criteria.
merger
The component of Zuul responsible for constructing Git refs
provided to builds based on supplied change contexts from
triggering events. An executor may also be configured to run
a local merger process for increased efficiency.
node
A remote system resource on which Ansible playbooks may be
executed, for strong isolation from the executor's
environment. In Ansible inventory terms, this is a remote
host.
nodeset
An assembly of one or more nodes which, when applied in a job,
are added as host entries to the Ansible inventory for its
builds. Nodes in a nodeset can be given convenient names for
ease of reference in job playbooks.
parent job
A job from which a child job inherits values such as playbooks
and variables. Depending on the type of playbooks and
variables, these may either be merged with or overridden by
the child job. Any job which doesn't specify a parent
inherits from the tenant's base job.
pipeline
A set of triggering, prioritizing, scheduling, and reporting
rules which provide the context for a build.
pipeline manager
The algorithm through which a pipeline manages queuing of
trigger events. Specifically, this determines whether changes
are queued independently, sequenced together in the order
they're approved, or superceded entirely by subsequent events.
project
A unique Git source repository available through a connection
within a tenant. Projects are identified by their connection
or hostname, combined with their repository, so as to avoid
ambiguity when two repositories of the same name are available
through different connections.
project gating
Automatically preventing a proposed change from merging to a
canonical source code repository for a project until it is
able to pass declared tests for that repository. In a project
gating workflow, cues may be taken from its users, but it is
ultimately the gating system which controls merging of changes
and not the users themselves.
project pipeline
The application of jobs to a pipeline. Project pipeline
entries often include filtering and matching rules specifying
the conditions under which a job should result in a build, and
any interdependencies those jobs may have on the build results
and named artifacts provided by other jobs.
project queue
The set of changes sequenced for testing, either explicitly
through dependency relationships, or implicitly from the
chronological ordering of triggering events which enqueued
them. Project queues can be named and shared by multiple
projects, ensuring sequential merging of changes across those
projects.
project template
A named mapping of jobs into pipelines, for application to one
or more projects. This construct provides a convenient means
of reusing the same sets of jobs in the same pipelines across
multiple projects.
promote
By convention, the name of a pipeline which uploads previously
built artifacts. These artifacts should be constructed in a
:term:`gate` pipeline and uploaded to a temporary location.
When all of the jobs in the gate pipeline succeed, the change
will be merged and may then be enqueued into a promote pipeline.
Jobs running in this pipeline do so with the understanding that
since the change merged as it was tested in the gate, any
artifacts created at that time are now safe to promote to
production. It is a good choice to use a :value:`supercedent
<pipeline.manager.supercedent>` pipeline manager so that if many
changes merge in rapid sequence, Zuul may skip promoting all but
the latest artifact to production.
provided artifact
A named artifact which builds of a job are expected to
produce, for purposes of dependency declarations in other
jobs. Multiple jobs may provide equivalent artifacts with the
same name, allowing these relationships to be defined
independent of the specific jobs which provide them.
post
By convention, the name of a pipeline which runs after a branch
is updated. By triggering on a branch update (rather than a
merge) event, jobs in this pipeline may run with the final git
state after the merge (including any merge commits generated by
the upstream code review system). This is important when
building some artifacts in order that the exact commit ids are
present in the git repo. The downside to this approach is that
jobs in this pipeline run without any connection to the
underlying changes which created the commits. If only the
latest updates to a branch matter, then the :value:`supercedent
<pipeline.manager.supercedent>` pipeline manager is recommended;
otherwise :value:`independent <pipeline.manager.independent>`
may be a better choice. See also :term:`tag` and
:term:`release`.
release
By convention, the name of a pipeline which runs after a
release-formatted tag is updated. Other than the matching ref,
this is typically constructed the same as a :term:`post`
pipeline. See also :term:`tag`.
reporter
A reporter is a :ref:`pipeline attribute <reporters>` which
describes the action performed when an item is dequeued after
its jobs complete. Reporters are implemented by :ref:`drivers`
so their actions may be quite varied. For example, a reporter
might leave feedback in a remote system on a proposed change,
send email, or store information in a database.
required artifact
An artifact provided by one or more jobs, on which execution
of the job requiring it depends.
required project
A project whose source code is required by the job. Jobs
implicitly require the project associated with the event
which triggered their build, but additional projects can be
specified explicitly as well. Zuul supplies merge commits
representing the speculative future states of all required
projects for a build.
scheduler
The component of Zuul which coordinates source and reporting
connections as well as requests for nodes, mergers and
executors for builds triggered by pipeline definitions in the
tenant configuration.
speculative execution
A term borrowed from microprocessor design, the idea that
sequenced operations can be performed in parallel by
predicting their possible outcomes and then discarding any
logical branches which turn out not to be true. Zuul uses
optimistic prediction to assume all builds for a change will
succeed, and then proceeds to run parallel builds for other
changes which would follow it in sequence. If a change enters
a failing state (at least one of its voting builds indicates a
failure result), then Zuul resets testing for all subsequent
queue items to no longer include it in their respective
contexts.
tag
By convention, the name of a pipeline which runs after a tag is
updated. Other than the matching ref, this is typically
constructed the same as a :term:`post` pipeline. See also
:term:`release`.
tenant
A set of projects on which Zuul should operate. Configuration
is not shared between tenants, but the same projects from the
same connections may appear in more than one tenant and the
same events may even enqueue the same changes in pipelines for
more than one tenant. Zuul's HTTP API methods and Web
dashboard are scoped per tenant, in order to support distinct
tenant-specific authentication and authorization.
trigger
A (typically external) event which Zuul may rely on as a cue
to enqueue a change into a pipeline.
trusted execution context
Playbooks defined in a :term:`config-project` run in the
*trusted* execution context. The trusted execution context may
have access to extra directories within the bubblewrap container
if the operator has configured these.
untrusted execution context
Playbooks defined in an :term:`untrusted-project` run in the
*untrusted* execution context.
untrusted-project
One of two types of projects which may be specified by the
administrator in the tenant config file. An untrusted-project
is one whose primary focus is not to operate Zuul, but rather it
is one of the projects being tested or deployed. The Zuul
configuration language available to these projects is somewhat
restricted, and jobs defined in these projects run in a
restricted execution environment since they may be operating on
changes which have not yet undergone review.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/glossary.rst | glossary.rst |
:title: Tenant Configuration
.. _tenant-config:
Tenant Configuration
====================
After ``zuul.conf`` is configured, Zuul component servers will be able
to start, but a tenant configuration is required in order for Zuul to
perform any actions. The tenant configuration file specifies upon
which projects Zuul should operate. These repositories are grouped
into tenants. The configuration of each tenant is separate from the
rest (no pipelines, jobs, etc are shared between them).
A project may appear in more than one tenant; this may be useful if
you wish to use common job definitions across multiple tenants.
Actions normally available only to the Zuul operator can also be performed by
specific users via Zuul's REST API if admin rules are listed for the tenant. Authorization rules
are also defined in the tenant configuration file.
The tenant configuration file is specified by the
:attr:`scheduler.tenant_config` setting in ``zuul.conf``. It is a
YAML file which, like other Zuul configuration files, is a list of
configuration objects, though only a few types of objects (described
below) are supported.
Alternatively the :attr:`scheduler.tenant_config_script`
can be the path to an executable that will be executed and its stdout
used as the tenant configuration. The executable must return a valid
tenant YAML formatted output.
Tenant configuration is checked for updates any time a scheduler is
started, and changes to it are read automatically. If the tenant
configuration is altered during operation, you can signal a scheduler
to read and apply the updated state in order to avoid restarting. See
the section on :ref:`reconfiguration` for instructions. Ideally,
tenant configuration deployment via configuration management should
also be made to trigger a smart-reconfigure once the file is replaced.
Tenant
------
A tenant is a collection of projects which share a Zuul
configuration. Some examples of tenant definitions are:
.. code-block:: yaml
- tenant:
name: my-tenant
max-nodes-per-job: 5
exclude-unprotected-branches: false
source:
gerrit:
config-projects:
- common-config
- shared-jobs:
include: job
untrusted-projects:
- zuul/zuul-jobs:
shadow: common-config
- project1
- project2:
exclude-unprotected-branches: true
.. code-block:: yaml
- tenant:
name: my-tenant
admin-rules:
- acl1
- acl2
source:
gerrit:
config-projects:
- common-config
untrusted-projects:
- exclude:
- job
- semaphore
- project
- project-template
- nodeset
- secret
projects:
- project1
- project2:
exclude-unprotected-branches: true
.. attr:: tenant
The following attributes are supported:
.. attr:: name
:required:
The name of the tenant. This may appear in URLs, paths, and
monitoring fields, and so should be restricted to URL friendly
characters (ASCII letters, numbers, hyphen and underscore) and
you should avoid changing it unless necessary.
.. attr:: source
:required:
A dictionary of sources to consult for projects. A tenant may
contain projects from multiple sources; each of those sources
must be listed here, along with the projects it supports. The
name of a :ref:`connection<connections>` is used as the
dictionary key (e.g. ``gerrit`` in the example above), and the
value is a further dictionary containing the keys below.
The next two attributes, **config-projects** and
**untrusted-projects** provide the bulk of the information for
tenant configuration. They list all of the projects upon which
Zuul will act.
The order of the projects listed in a tenant is important. A job
which is defined in one project may not be redefined in another
project; therefore, once a job appears in one project, a project
listed later will be unable to define a job with that name.
Further, some aspects of project configuration (such as the merge
mode) may only be set on the first appearance of a project
definition.
Zuul loads the configuration from all **config-projects** in the
order listed, followed by all **untrusted-projects** in order.
.. attr:: config-projects
A list of projects to be treated as :term:`config projects
<config-project>` in this tenant. The jobs in a config project
are trusted, which means they run with extra privileges, do not
have their configuration dynamically loaded for proposed
changes, and Zuul config files are only searched for in the
``master`` branch.
The items in the list follow the same format described in
**untrusted-projects**.
.. attr:: <project>
The config-projects have an additional config option that
may be specified optionally.
.. attr:: load-branch
:default: master
Define which branch is loaded from a config project. By
default config projects load Zuul configuration only
from the master branch.
.. attr:: untrusted-projects
A list of projects to be treated as untrusted in this tenant.
An :term:`untrusted-project` is the typical project operated on
by Zuul. Their jobs run in a more restrictive environment, they
may not define pipelines, their configuration dynamically
changes in response to proposed changes, and Zuul will read
configuration files in all of their branches.
.. attr:: <project>
The items in the list may either be simple string values of
the project names, or a dictionary with the project name as
key and the following values:
.. attr:: include
Normally Zuul will load all of the :ref:`configuration-items`
appropriate for the type of project (config or untrusted)
in question. However, if you only want to load some
items, the **include** attribute can be used to specify
that *only* the specified items should be loaded.
Supplied as a string, or a list of strings.
The following **configuration items** are recognized:
* pipeline
* job
* semaphore
* project
* project-template
* nodeset
* secret
.. attr:: exclude
A list of **configuration items** that should not be loaded.
.. attr:: shadow
Normally, only one project in Zuul may contain
definitions for a given job. If a project earlier in the
configuration defines a job which a later project
redefines, the later definition is considered an error and
is not permitted. The **shadow** attribute of a project
indicates that job definitions in this project which
conflict with the named projects should be ignored, and
those in the named project should be used instead. The
named projects must still appear earlier in the
configuration. In the example above, if a job definition
appears in both the ``common-config`` and ``zuul-jobs``
projects, the definition in ``common-config`` will be
used.
.. attr:: exclude-unprotected-branches
Define if unprotected branches should be processed.
Defaults to the tenant wide setting of
exclude-unprotected-branches. This currently only affects
GitHub and GitLab projects.
.. attr:: include-branches
A list of regexes matching branches which should be
processed. If omitted, all branches are included.
Operates after *exclude-unprotected-branches* and so may
be used to further reduce the set of branches (but not
increase it).
It has priority over *exclude-branches*.
.. attr:: exclude-branches
A list of regexes matching branches which should not be
processed. If omitted, all branches are included.
Operates after *exclude-unprotected-branches* and so may
be used to further reduce the set of branches (but not
increase it).
It will not exclude a branch which already matched
*include-branches*.
.. attr:: always-dynamic-branches
A list of regular expressions matching branches which
should be treated as if every change newly proposes
dynamic Zuul configuration. In other words, the only time
Zuul will realize any configuration related to these
branches is during the time it is running jobs for a
proposed change.
This is potentially useful for situations with large
numbers of rarely used feature branches, but comes at the
cost of a significant reduction in Zuul features for these
branches.
Every regular expression listed here will also implicitly
be included in *exclude-branches*, therefore Zuul will not
load any static in-repo configuration from this branch.
These branches will not be available for use in overriding
checkouts of repos, nor will they be included in the git
repos that Zuul prepares for *required-projects* (unless
there is a change in the dependency tree for this branch).
In particular, this means that the only jobs which can be
specified for these branches are pre-merge and gating jobs
(such as :term:`check` and :term:`gate`). No post-merge
or periodic jobs will run for these branches.
Using this setting also incurs additional processing for
each change submitted for these branches as Zuul must
recalculate the configuration layout it uses for such a
change as if it included a change to a ``zuul.yaml`` file,
even if the change does not alter the configuration.
With all these caveats in mind, this can be useful for
repos with large numbers of rarely used branches as it
allows Zuul to omit their configuration in most
circumstances and only calculate the configuration of a
single additional branch when it is used.
.. attr:: implied-branch-matchers
This is a boolean, which, if set, may be used to enable
(``true``) or disable (``false``) the addition of implied
branch matchers to job and project-template definitions.
Normally Zuul decides whether to add these based on
heuristics described in :attr:`job.branches`. This
attribute overrides that behavior.
This can be useful if branch settings for this project may
produce an unpredictable number of branches to load from.
Setting this value explicitly here can avoid unexpected
behavior changes as branches are added or removed from the
load set.
The :attr:`pragma.implied-branch-matchers` pragma will
override the setting here if present.
Note that if a job contains an explicit branch matcher, it
will be used regardless of the value supplied here.
.. attr:: extra-config-paths
Normally Zuul loads in-repo configuration from the first
of these paths:
* zuul.yaml
* zuul.d/*
* .zuul.yaml
* .zuul.d/*
If this option is supplied then, after the normal process
completes, Zuul will also load any configuration found in
the files or paths supplied here. This can be a string or
a list. If a list of multiple items, Zuul will load
configuration from *all* of the items in the list (it will
not stop at the first extra configuration found).
Directories should be listed with a trailing ``/``. Example:
.. code-block:: yaml
extra-config-paths:
- zuul-extra.yaml
- zuul-extra.d/
This feature may be useful to allow a project that
primarily holds shared jobs or roles to include additional
in-repo configuration for its own testing (which may not
be relevant to other users of the project).
.. attr:: <project-group>
The items in the list are dictionaries with the following
attributes. A **configuration items** definition is applied
to the list of projects.
.. attr:: include
A list of **configuration items** that should be loaded.
.. attr:: exclude
A list of **configuration items** that should not be loaded.
.. attr:: projects
A list of **project** items.
.. attr:: max-nodes-per-job
:default: 5
The maximum number of nodes a job can request. A value of
'-1' removes the limit.
.. attr:: max-job-timeout
:default: 10800
The maximum timeout for jobs. A value of '-1' removes the limit.
.. attr:: exclude-unprotected-branches
:default: false
When using a branch and pull model on a shared repository
there are usually one or more protected branches which are gated
and a dynamic number of personal/feature branches which are the
source for the pull requests. These branches can potentially
include broken Zuul config and therefore break the global tenant
wide configuration. In order to deal with this Zuul's operations
can be limited to the protected branches which are gated. This
is a tenant wide setting and can be overridden per project.
This currently only affects GitHub and GitLab projects.
.. attr:: default-parent
:default: base
If a job is defined without an explicit :attr:`job.parent`
attribute, this job will be configured as the job's parent.
This allows an administrator to configure a default base job to
implement local policies such as node setup and artifact
publishing.
.. attr:: default-ansible-version
Default Ansible version to use for jobs that don't specify a version.
See :attr:`job.ansible-version` for details.
.. attr:: allowed-triggers
:default: all connections
The list of connections a tenant can trigger from. When set, this setting
can be used to restrict what connections a tenant can use as trigger.
Without this setting, the tenant can use any connection as a trigger.
.. attr:: allowed-reporters
:default: all connections
The list of connections a tenant can report to. When set, this setting
can be used to restrict what connections a tenant can use as reporter.
Without this setting, the tenant can report to any connection.
.. attr:: allowed-labels
:default: []
The list of labels (as strings or :ref:`regular expressions <regex>`)
a tenant can use in a job's nodeset. When set, this setting can
be used to restrict what labels a tenant can use. Without this
setting, the tenant can use any labels.
.. attr:: disallowed-labels
:default: []
The list of labels (as strings or :ref:`regular expressions <regex>`)
a tenant is forbidden to use in a job's nodeset. When set, this
setting can be used to restrict what labels a tenant can use.
Without this setting, the tenant can use any labels permitted by
:attr:`tenant.allowed-labels`. This check is applied after the
check for `allowed-labels` and may therefore be used to further
restrict the set of permitted labels.
.. attr:: web-root
If this tenant has a whitelabeled installation of zuul-web, set
its externally visible URL here (e.g.,
``https://tenant.example.com/``). This will override the
:attr:`web.root` setting when constructing URLs for this tenant.
.. attr:: admin-rules
A list of authorization rules to be checked in order to grant
administrative access to the tenant through Zuul's REST API and
web interface.
At least one rule in the list must match for the user to be allowed to
execute privileged actions. A matching rule will also allow the user
access to the tenant in general (i.e., the rule does not need to be
duplicated in `access-rules`).
More information on tenant-scoped actions can be found in
:ref:`authentication`.
.. attr:: access-rules
A list of authorization rules to be checked in order to grant
read access to the tenant through Zuul's REST API and web
interface.
If no rules are listed, then anonymous access to the tenant is
permitted. If any rules are present then at least one rule in
the list must match for the user to be allowed to access the
tenant.
More information on tenant-scoped actions can be found in
:ref:`authentication`.
.. attr:: authentication-realm
Each authenticator defined in Zuul's configuration is associated to a realm.
When authenticating through Zuul's Web User Interface under this tenant, the
Web UI will redirect the user to this realm's authentication service. The
authenticator must be of the type ``OpenIDConnect``.
.. note::
Defining a default realm for a tenant will not invalidate
access tokens issued from other configured realms. This is
intended so that an operator can issue an overriding access
token manually. If this is an issue, it is advised to add
finer filtering to admin rules, for example, filtering by the
``iss`` claim (generally equal to the issuer ID).
.. attr:: semaphores
A list of names of :attr:`global-semaphore` objects to allow
jobs in this tenant to access.
.. _global_semaphore:
Global Semaphore
----------------
Semaphores are normally defined in in-repo configuration (see
:ref:`semaphore`), however to support use-cases where semaphores are
used to represent constrained global resources that may be used by
multiple Zuul tenants, semaphores may be defined within the main
tenant configuration file.
In order for a job to use a global semaphore, the semaphore must first
be defined in the tenant configuration file with
:attr:`global-semaphore` and then added to each tenant which should
have access to it with :attr:`tenant.semaphores`. Once that is done,
Zuul jobs may use that semaphore in the same way they would use a
normal tenant-scoped semaphore.
If any tenant which is granted access to a global semaphore also has a
tenant-scoped semaphore defined with the same name, that definition
will be treated as a configuration error and subsequently ignored in
favor of the global semaphore.
An example definition looks similar to the normal semaphore object:
.. code-block:: yaml
- global-semaphore:
name: global-semaphore-foo
max: 5
.. attr:: global-semaphore
The following attributes are available:
.. attr:: name
:required:
The name of the semaphore, referenced by jobs.
.. attr:: max
:default: 1
The maximum number of running jobs which can use this semaphore.
.. _authz_rule_definition:
Authorization Rule
------------------
An authorization rule is a set of conditions the claims of a user's
JWT must match in order to be allowed to perform actions at a tenant's
level.
When an authorization rule is included in the tenant's `admin-rules`,
the protected actions available are **autohold**, **enqueue**,
**dequeue** and **promote**.
.. note::
Rules can be overridden by the ``zuul.admin`` claim in a token if it matches
an authenticator configuration where `allow_authz_override` is set to true.
See :ref:`authentication` for more details.
Below are some examples of how authorization rules can be defined:
.. code-block:: yaml
- authorization-rule:
name: affiliate_or_admin
conditions:
- resources_access:
account:
roles: "affiliate"
iss: external_institution
- resources_access.account.roles: "admin"
- authorization-rule:
name: alice_or_bob
conditions:
- zuul_uid: alice
- zuul_uid: bob
Zuul previously used ``admin-rule`` for these definitions. That form
is still permitted for backwards compatibility, but is deprecated and
will be removed in a future version of Zuul.
.. attr:: authorization-rule
The following attributes are supported:
.. attr:: name
:required:
The name of the rule, so that it can be referenced in the ``admin-rules``
attribute of a tenant's definition. It must be unique.
.. attr:: conditions
:required:
This is the list of conditions that define a rule. A JWT must match **at
least one** of the conditions for the rule to apply. A condition is a
dictionary where keys are claims. **All** the associated values must
match the claims in the user's token; in other words the condition dictionary
must be a "sub-dictionary" of the user's JWT.
Zuul's authorization engine will adapt matching tests depending on the
nature of the claim in the token, e.g.:
* if the claim is a JSON list, check that the condition value is in the
claim
* if the claim is a string, check that the condition value is equal to
the claim's value
The claim names can also be written in the XPath format for clarity: the
condition
.. code-block:: yaml
resources_access:
account:
roles: "affiliate"
is equivalent to the condition
.. code-block:: yaml
resources_access.account.roles: "affiliate"
The special ``zuul_uid`` claim refers to the ``uid_claim`` setting in an
authenticator's configuration. By default it refers to the ``sub`` claim
of a token. For more details see the :ref:`authentication`.
Under the above example, the following token would match rules
``affiliate_or_admin`` and ``alice_or_bob``:
.. code-block:: javascript
{
'iss': 'external_institution',
'aud': 'my_zuul_deployment',
'exp': 1234567890,
'iat': 1234556780,
'sub': 'alice',
'resources_access': {
'account': {
'roles': ['affiliate', 'other_role']
}
},
}
And this token would only match rule ``affiliate_or_admin``:
.. code-block:: javascript
{
'iss': 'some_other_institution',
'aud': 'my_zuul_deployment',
'exp': 1234567890,
'sub': 'carol',
'iat': 1234556780,
'resources_access': {
'account': {
'roles': ['admin', 'other_role']
}
},
}
Authorization Rule Templating
-----------------------------
The special word "{tenant.name}" can be used in conditions' values. It will be automatically
substituted with the relevant tenant's name when evaluating authorizations for a given
set of claims. For example, consider the following rule:
.. code-block:: yaml
- authorization-rule:
name: tenant_in_groups
conditions:
- groups: "{tenant.name}"
If applied to the following tenants:
.. code-block:: yaml
- tenant:
name: tenant-one
admin-rules:
- tenant_in_groups
- tenant:
name: tenant-two
admin-rules:
- tenant_in_groups
Then this set of claims will be allowed to perform protected actions on **tenant-one**:
.. code-block:: javascript
{
'iss': 'some_other_institution',
'aud': 'my_zuul_deployment',
'exp': 1234567890,
'sub': 'carol',
'iat': 1234556780,
'groups': ['tenant-one', 'some-other-group'],
}
And this set of claims will be allowed to perform protected actions on **tenant-one**
and **tenant-two**:
.. code-block:: javascript
{
'iss': 'some_other_institution',
'aud': 'my_zuul_deployment',
'exp': 1234567890,
'sub': 'carol',
'iat': 1234556780,
'groups': ['tenant-one', 'tenant-two'],
}
API Root
--------
Most actions in zuul-web, zuul-client, and the REST API are understood
to be within the context of a specific tenant and therefore the
authorization rules specified by that tenant apply. When zuul-web is
deployed in a multi-tenant scenario (the default), there are a few
extra actions or API methods which are outside of the context of an
individual tenant (for example, listing the tenants or observing the
state of Zuul system components). To control access to these methods,
an `api-root` object can be used.
At most one `api-root` object may appear in the tenant configuration
file. If more than one appears, it is an error. If there is no
`api-root` object, then anonymous read-only access to the tenant list
and other root-level API methods is assumed.
The ``/api/info`` endpoint is never protected by Zuul since it
supplies the authentication information needed by the web UI.
API root access is not a pre-requisite to access tenant-specific URLs.
.. attr:: api-root
The following attributes are supported:
.. attr:: authentication-realm
Each authenticator defined in Zuul's configuration is associated
to a realm. When authenticating through Zuul's Web User
Interface at the multi-tenant root, the Web UI will redirect the
user to this realm's authentication service. The authenticator
must be of the type ``OpenIDConnect``.
.. note::
Defining a default realm for the root API will not invalidate
access tokens issued from other configured realms. This is
intended so that an operator can issue an overriding access
token manually. If this is an issue, it is advised to add
finer filtering to admin rules, for example, filtering by the
``iss`` claim (generally equal to the issuer ID).
.. attr:: access-rules
A list of authorization rules to be checked in order to grant
read access to the top-level (i.e., non-tenant-specific) portion
of Zuul's REST API and web interface.
If no rules are listed, then anonymous access to top-level
methods is permitted. If any rules are present then at least
one rule in the list must match for the user to be allowed
access.
More information on tenant-scoped actions can be found in
:ref:`authentication`.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/tenants.rst | tenants.rst |
Troubleshooting
---------------
In addition to inspecting :ref:`service debug logs <operation>`, some
advanced troubleshooting options are provided below. These are
generally very low-level and are not normally required.
Thread Dumps and Profiling
==========================
If you send a SIGUSR2 to one of the daemon processes, it will dump a
stack trace for each running thread into its debug log. It is written
under the log bucket ``zuul.stack_dump``. This is useful for tracking
down deadlock or otherwise slow threads::
sudo kill -USR2 `cat /var/run/zuul/executor.pid`
view /var/log/zuul/executor-debug.log +/zuul.stack_dump
When `yappi <https://code.google.com/p/yappi/>`_ (Yet Another Python
Profiler) is available, additional functions' and threads' stats are
emitted as well. The first SIGUSR2 will enable yappi, on the second
SIGUSR2 it dumps the information collected, resets all yappi state and
stops profiling. This is to minimize the impact of yappi on a running
system.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/troubleshooting.rst | troubleshooting.rst |
Zuul - A Project Gating System
==============================
Zuul is a program that drives continuous integration, delivery, and
deployment systems with a focus on project gating and interrelated
projects.
If you are looking for the Edge routing service named Zuul that is
related to Netflix, it can be found here:
https://github.com/Netflix/zuul
If you are looking for the Javascript testing tool named Zuul, it
can be found here:
https://github.com/defunctzombie/zuul
How To Use This Manual
----------------------
If you have access to a Zuul system operated by someone else, then you
may be interested in :ref:`about-zuul` and the following reference
sections: :ref:`project-configuration` and :ref:`job-content`.
If you would like to learn how to run Zuul, try the :ref:`quick-start`.
If you are or will be responsible for installing and operating a Zuul
System, the remainder of the sections will be useful.
.. toctree::
:includehidden:
:maxdepth: 1
about
tutorials/quick-start
project-config
job-content
admin
rest-api
howtos/index
developer/index
governance
vulnerabilities
releasenotes
glossary
Indices and tables
------------------
* :ref:`genindex`
* :ref:`search`
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/index.rst | index.rst |
:title: Component Overview
Component Overview
==================
.. _components:
Zuul is a distributed system consisting of several components, each of
which is described below.
.. graphviz::
:align: center
graph {
node [shape=box]
Database [fontcolor=grey]
Executor [href="#executor"]
Finger [href="#finger-gateway"]
Gerrit [fontcolor=grey]
Merger [href="#merger"]
Statsd [shape=ellipse fontcolor=grey]
Scheduler [href="#scheduler"]
Zookeeper [shape=ellipse]
Nodepool
GitHub [fontcolor=grey]
Web [href="#web-server"]
Executor -- Statsd
Executor -- "Job Node"
Web -- Database
Web -- GitHub
Web -- Zookeeper
Web -- Executor
Finger -- Executor
Scheduler -- Database;
Scheduler -- Gerrit;
Scheduler -- Zookeeper;
Zookeeper -- Executor;
Zookeeper -- Finger;
Zookeeper -- Merger
Zookeeper -- Nodepool;
Scheduler -- GitHub;
Scheduler -- Statsd;
}
.. contents::
:depth: 1
:local:
:backlinks: none
Each of the Zuul processes may run on the same host, or different
hosts.
Zuul requires an external ZooKeeper cluster running at least ZooKeeper
version 3.5.1, and all Zuul and Nodepool components need to be able to
connect to the hosts in that cluster on a TLS-encrypted TCP port,
typically 2281.
Both the Nodepool launchers and Zuul executors need to be able to
communicate with the hosts which Nodepool provides. If these are on
private networks, the executors will need to be able to route traffic
to them.
Only Zuul fingergw and Zuul web need to be publicly accessible;
executors never do. Executors should be accessible on TCP port 7900 by
fingergw and web.
A database is required and configured in the ``database`` section of
``/etc/zuul/zuul.conf``. Both Zuul scheduler and Zuul web will need
access to it.
If statsd is enabled, the executors and schedulers need to be able to
emit data to statsd. Statsd can be configured to run on each host and
forward data, or services may emit to a centralized statsd collector.
Statsd listens on UDP port 8125 by default.
A minimal Zuul system may consist of a :ref:`scheduler` and
:ref:`executor` both running on the same host. Larger installations
should consider running multiple schedulers, executors and mergers,
with each component running on a dedicated host.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/components.rst | components.rst |
Installation
============
External Dependencies
---------------------
Zuul interacts with several other systems described below.
Nodepool
~~~~~~~~
In order to run all but the simplest jobs, Zuul uses a companion
program `Nodepool <https://opendev.org/zuul/nodepool>`__ to supply the
nodes (whether dynamic cloud instances or static hardware) used by
jobs. Before starting Zuul, ensure you have Nodepool installed and
any images you require built.
Zuul must be able to log into the nodes provisioned by Nodepool with a
given username and SSH private key. Executors should also be able to
talk to nodes on TCP port 19885 for log streaming; see
:ref:`nodepool_console_streaming`.
ZooKeeper
~~~~~~~~~
.. TODO: SpamapS any zookeeper config recommendations?
Zuul and Nodepool use ZooKeeper to communicate internally among their
components, and also to communicate with each other. You can run a
simple single-node ZooKeeper instance, or a multi-node cluster.
Ensure that all Zuul and Nodepool hosts have access to the cluster.
Zuul stores all possible state within ZooKeeper so that it can be
effectively shared and coordinated between instances of its component
services. Most of this is ephemeral and can be recreated or is of low
value if lost, but a clustered deployment will provide improved
continuity and resilience in the event of an incident adversely
impacting a ZooKeeper server.
Zuul's keystore (project-specific keys for asymmetric encryption of
job secrets and SSH access) is also stored in ZooKeeper, and unlike
the other data it **cannot be recreated** if lost. As such,
periodic :ref:`export and backup <backup>` of these keys is strongly
recommended.
.. _ansible-installation-options:
Executor Deployment
-------------------
The Zuul executor requires Ansible to run jobs. There are two
approaches that can be used to install Ansible for Zuul.
First you may set ``manage_ansible`` to True in the executor config. If you
do this Zuul will install all supported Ansible versions on zuul-executor
startup. These installations end up in Zuul's state dir,
``/var/lib/zuul/ansible-bin`` if unchanged.
The second option is to use ``zuul-manage-ansible`` to install the supported
Ansible versions. By default this will install Ansible to
``zuul_install_prefix/lib/zuul/ansible``. This method is preferable to the
first because it speeds up zuul-executor start time and allows you to
preinstall ansible in containers (avoids problems with bind mounted zuul
state dirs).
.. program-output:: zuul-manage-ansible -h
In both cases if using a non default path you will want to set
``ansible_root`` in the executor config file.
.. _web-deployment-options:
Web Deployment
--------------
The ``zuul-web`` service provides a web dashboard, a REST API and a websocket
log streaming service as a single holistic web application. For production use
it is recommended to run it behind a reverse proxy, such as Apache or Nginx.
The ``zuul-web`` service is entirely self-contained and can be run
with minimal configuration, however, more advanced users may desire to
do one or more of the following:
White Label
Serve the dashboard of an individual tenant at the root of its own domain.
https://zuul.openstack.org is an example of a Zuul dashboard that has been
white labeled for the ``openstack`` tenant of its Zuul.
Static Offload
Shift the duties of serving static files, such as HTML, Javascript, CSS or
images to the reverse proxy server.
Static External
Serve the static files from a completely separate location that does not
support programmatic rewrite rules such as a Swift Object Store.
Sub-URL
 Serve a Zuul dashboard from a location below the root URL as part of
 presenting integration with another application.
https://softwarefactory-project.io/zuul/ is an example of a Zuul dashboard
that is being served from a Sub-URL.
Most deployments shouldn't need these, so the following discussion
will assume that the ``zuul-web`` service is exposed via a reverse
proxy. Where rewrite rule examples are given, they will be given with
Apache syntax, but any other reverse proxy should work just fine.
Reverse Proxy
~~~~~~~~~~~~~
Using Apache as the reverse proxy requires the ``mod_proxy``,
``mod_proxy_http`` and ``mod_proxy_wstunnel`` modules to be installed
and enabled.
All of the cases require a rewrite rule for the websocket streaming, so the
simplest reverse-proxy case is::
RewriteEngine on
RewriteRule ^/api/tenant/(.*)/console-stream ws://localhost:9000/api/tenant/$1/console-stream [P]
RewriteRule ^/(.*)$ http://localhost:9000/$1 [P]
This is the recommended configuration unless one of the following
features is required.
Static Offload
~~~~~~~~~~~~~~
To have the reverse proxy serve the static html/javascript assets
instead of proxying them to the REST layer, enable the ``mod_rewrite``
Apache module, register the location where you unpacked the web
application as the document root and add rewrite rules::
<Directory /usr/share/zuul>
Require all granted
</Directory>
Alias / /usr/share/zuul/
<Location />
RewriteEngine on
RewriteBase /
# Rewrite api to the zuul-web endpoint
RewriteRule api/tenant/(.*)/console-stream ws://localhost:9000/api/tenant/$1/console-stream [P,L]
RewriteRule api/(.*)$ http://localhost:9000/api/$1 [P,L]
# Backward compatible rewrite
RewriteRule t/(.*)/(.*).html(.*) /t/$1/$2$3 [R=301,L,NE]
# Don't rewrite files or directories
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule . /index.html [L]
</Location>
Sub directory serving
~~~~~~~~~~~~~~~~~~~~~
The web application needs to be rebuilt to update the internal location of
the static files. Set the homepage setting in the package.json to an
absolute path or url. For example, to deploy the web interface through a
'/zuul/' sub directory:
.. note::
The web dashboard source code and package.json are located in the ``web``
directory. All the yarn commands need to be executed from the ``web``
directory.
.. code-block:: bash
sed -e 's#"homepage": "/"#"homepage": "/zuul/"#' -i package.json
yarn build
Then assuming the web application is unpacked in /usr/share/zuul,
enable the ``mod_rewrite`` Apache module and add the following rewrite
rules::
<Directory /usr/share/zuul>
Require all granted
</Directory>
Alias /zuul /usr/share/zuul/
<Location /zuul>
RewriteEngine on
RewriteBase /zuul
# Rewrite api to the zuul-web endpoint
RewriteRule api/tenant/(.*)/console-stream ws://localhost:9000/api/tenant/$1/console-stream [P,L]
RewriteRule api/(.*)$ http://localhost:9000/api/$1 [P,L]
# Backward compatible rewrite
RewriteRule t/(.*)/(.*).html(.*) /t/$1/$2$3 [R=301,L,NE]
# Don't rewrite files or directories
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule . /zuul/index.html [L]
</Location>
White Labeled Tenant
~~~~~~~~~~~~~~~~~~~~
Running a white-labeled tenant is similar to the offload case, but adds a
rule to ensure connection webhooks don't try to get put into the tenant scope.
.. note::
It's possible to do white-labeling without static offload, but it
is more complex with no benefit.
Enable the ``mod_rewrite`` Apache module, and assuming the Zuul tenant
name is ``example``, the rewrite rules are::
<Directory /usr/share/zuul>
Require all granted
</Directory>
Alias / /usr/share/zuul/
<Location />
RewriteEngine on
RewriteBase /
# Rewrite api to the zuul-web endpoint
RewriteRule api/connection/(.*)$ http://localhost:9000/api/connection/$1 [P,L]
RewriteRule api/console-stream ws://localhost:9000/api/tenant/example/console-stream [P,L]
RewriteRule api/(.*)$ http://localhost:9000/api/tenant/example/$1 [P,L]
# Backward compatible rewrite
RewriteRule t/(.*)/(.*).html(.*) /t/$1/$2$3 [R=301,L,NE]
# Don't rewrite files or directories
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule . /index.html [L]
</Location>
Static External
~~~~~~~~~~~~~~~
.. note::
Hosting the Zuul dashboard on an external static location that does
not support dynamic url rewrite rules only works for white-labeled
deployments.
In order to serve the zuul dashboard code from an external static location,
``REACT_APP_ZUUL_API`` must be set at javascript build time:
.. code-block:: bash
REACT_APP_ZUUL_API='http://zuul-web.example.com' yarn build
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/installation.rst | installation.rst |
:title: Authenticated Actions
.. _authentication:
Authenticated Access
====================
Access to Zuul's REST API and web interface can optionally be
restricted. By default, anonymous read access to any tenant is
permitted. Optionally, some administrative actions may also be
enabled and restricted to authorized users. Additionally, individual
tenants or the entire system may have read-level access restricted
to authorized users.
The supported administrative actions are **autohold**,
**enqueue/enqueue-ref**, **dequeue/dequeue-ref** and
**promote**. These are similar to the ones available through
Zuul's CLI.
The protected endpoints require a bearer token, passed to Zuul Web
Server as the **Authorization** header of the request. The token and
this workflow follow the JWT standard as established in this `RFC
<https://tools.ietf.org/html/rfc7519>`_.
Important Security Considerations
---------------------------------
Anybody with a valid administrative token can perform privileged
actions exposed through the REST API. Furthermore revoking tokens,
especially when manually issued, is not trivial.
As a mitigation, tokens should be generated with a short time to
live, like 10 minutes or less. If the token contains authorization information
(see the ``zuul.admin`` claim below), it should be generated with as small a scope
as possible (one tenant only) to reduce the surface of attack should the
token be compromised.
Exposing administration tasks can impact build results (dequeue-ing buildsets),
and pose potential resource problems with Nodepool if the ``autohold`` feature
is abused, leading to a significant number of nodes remaining in "hold" state for
extended periods of time. As always, "with great power comes great responsibility"
and tokens should be handed over with discernment.
Configuration
-------------
.. important:: In order to use restricted commands in the zuul command
line interface, at least one HS256 authenticator should
be configured.
To enable tenant-scoped access to privileged actions or restrict
read-level access, see the Zuul Web Server component's section.
To set access rules for a tenant, see :ref:`the documentation about tenant
definition <authz_rule_definition>`.
Most of the time, only one authenticator will be needed in Zuul's configuration;
namely the configuration matching a third party identity provider service like
dex, auth0, keycloak or others. It can be useful however to add another
authenticator similar to this one:
.. code-block:: ini
[auth zuul_operator]
driver=HS256
allow_authz_override=true
realm=zuul.example.com
client_id=zuul.example.com
issuer_id=zuul_operator
secret=exampleSecret
With such an authenticator, a Zuul operator can use Zuul's CLI to
issue tokens overriding a tenant's access rules if need
be. A user can then use these tokens with Zuul's CLI to perform protected actions
on a tenant temporarily, without having to modify a tenant's access rules.
.. _jwt-format:
JWT Format
----------
Zuul can consume JWTs with the following minimal set of claims:
.. code-block:: javascript
{
'iss': 'jwt_provider',
'aud': 'my_zuul_deployment',
'exp': 1234567890,
'iat': 1234556780,
'sub': 'alice'
}
* **iss** is the issuer of the token. It can be used to filter
Identity Providers.
* **aud**, as the intended audience, is usually the client id as registered on
the Identity Provider.
* **exp** is the token's expiry timestamp.
* **iat** is the token's date of issuance timestamp.
* **sub** is the default, unique identifier of the user.
JWTs can be extended arbitrarily with other claims. Zuul however can look for a
specific **zuul** claim, if the ``allow_authz_override`` option was set to True
in the authenticator's configuration. This claim has the following format:
.. code-block:: javascript
{
'zuul': {
'admin': ['tenant-one', 'tenant-two']
}
}
The **admin** field is a list of tenants on which the token's bearer is granted
the right to perform privileged actions.
Manually Generating a JWT
-------------------------
An operator can generate a JWT by using the settings of a configured authenticator
in ``zuul.conf``.
For example, in Python, and for an authenticator using the ``HS256`` algorithm:
.. code-block:: python
>>> import jwt
>>> import time
>>> jwt.encode({'sub': 'user1',
'iss': <issuer_id>,
'aud': <client_id>,
'iat': int(time.time()),
'exp': int(time.time()) + 300,
'zuul': {
'admin': ['tenant-one']
}
}, <secret>, algorithm='HS256')
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ6dXVsIjp7ImFkbWluIjpbInRlbmFudC1vbmUiXX0sInN1YiI6InZlbmttYW4iLCJpc3MiOiJtYW51YWwiLCJleHAiOjE1NjAzNTQxOTcuMTg5NzIyLCJpYXQiOjE1NjAzNTM4OTcuMTg5NzIxLCJhdWQiOiJ6dXVsIn0.Qqb-ANmYv8slNUVSqjCJDL8HlH9L7nnLtLU2HBGzQJk'
Online resources like https://jwt.io are also available to generate, decode and
debug JWTs.
Debugging
---------
If problems appear:
* Make sure your configuration is correct, especially callback URIs.
* More information can be found in Zuul's web service logs.
* From the user's side, activating the web console in the browser can be helpful
to debug API calls.
Interfacing with Other Systems
------------------------------
Here are some how-tos to help administrators enable OpenID Connect
authentication in Zuul and Zuul's Web UI.
.. toctree::
:maxdepth: 1
howtos/openid-with-google
howtos/openid-with-keycloak
howtos/openid-with-microsoft
tutorials/keycloak
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/authentication.rst | authentication.rst |
Upgrading
=========
Rolling Upgrades
----------------
If more than one of each Zuul component is present in a system, then
Zuul may be upgraded without downtime by performing a rolling
upgrade. During a rolling upgrade, components are stopped and started
one at a time until all components are upgraded. If there is a
behavior change during an upgrade, Zuul will typically wait until all
components are upgraded before changing behavior, but in some cases
when it is deemed safe, new behaviors may start to appear as soon as
the first component is upgraded. Be sure not to begin using or rely
on new behaviors until all components are upgraded.
Unless specified in the release notes, there is no specific order for
which components should be upgraded first, but the following order is
likely to produce the least disruption and delay the use of new
behaviors until closer to the end of the process:
* Gracefully restart executors (one at a time, or as many as a
system's over-allocation of resources will allow).
* Gracefully restart mergers.
* Restart schedulers.
* Restart web and finger gateways.
Skipping Versions
-----------------
Zuul versions are specified as `major.minor.micro`. In general,
skipping minor or micro versions during upgrades is considered safe.
Skipping major versions is not recommended, as backwards compatibility
code for older systems may be removed during a major upgrade. This
means that, for example, an upgrade from 5.x.y to 7.0.0 should include
at least an upgrade to 6.4.0 (the latest 6.x release) before
proceeding to 7.0.0.
If skipping major versions is required, then a rolling upgrade is not
possible, and Zuul should be completely stopped, and the ``zuul-admin
delete-state`` command should be run before restarting on the new
version.
Some versions may have unique upgrade requirements. See release notes
for additional information about specific version upgrades.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/upgrading.rst | upgrading.rst |
Zuul Project Governance
=======================
The governance of the Zuul project is described below. Changes to
this page should remain open for a week and require more positive than
negative Code-Review votes from the Maintainers before merging. They
should only be approved by the Project Lead.
Zuul Maintainers
----------------
The Zuul project is self-governed.
Decisions regarding the project are made by the Zuul Maintainers.
They are a team of people who are familiar with the project as a whole
and act as stewards of the project. They have the right to approve or
reject proposed changes to the codebase, as well as make other
decisions regarding the project.
The Maintainers are expected to be familiar with the source code,
design, operation, and usage of all parts of the Zuul project. When
acting in their capacity as Maintainers, they are expected to consider
not only their own needs, but those of the entire community.
Changes to the code, documentation, website, and other project
resources held in version control repositories are reviewed and
approved by the Maintainers. In general, approval is sought from at
least two maintainers before merging a change, but fewer or more
reviews may be warranted depending on the change. Factors to consider
when reviewing are the complexity of the change, whether it is in
accordance with the project design, and whether additional project
participants with subject matter expertise should review the change.
Maintainers may also reject changes outright, but this is expected to
be used sparingly in favor of (or in the process of) redirecting
effort toward efforts which can achieve consensus.
The purpose of this review process is two-fold: first, to ensure that
changes to the project meet sufficiently high standards so that they
improve the project, contribute to furthering its goals, and do not
introduce regressive behavior or make the project more difficult to
support in the future. Secondly, and just as important, the process
also ensures that contributors are aware of the changes to the
project. In a distributed environment, reviews are an important part
of our collaborative process.
Project decisions other than those involving on-line review are
discussed on the project mailing list. Anyone is welcome and
encouraged to participate in these discussions so that input from the
broader community is received. As the authority, Maintainers should
strive to achieve consensus on any such decisions.
Changes to the membership of the Maintainers are decided by consensus
of the existing maintainers, however, due to their sensitivity, these
discussions should occur via private communication among the
maintainers under the direction of the Project Lead.
A large group of Maintainers is important for the health of the
project, therefore contributors are encouraged to become involved in
all facets of maintenance of the project as part of the process of
becoming a Maintainer. Existing Maintainers are expected to encourage
new members. There are no artificial limits of the number of
Maintainers. The Project Lead will assist any contributor who wishes
for guidance in becoming a Maintainer.
Current Zuul Maintainers:
====================== ===================================
Name Matrix ID
====================== ===================================
Clark Boylan Clark (@clarkb:matrix.org)
Felix Edel felixedel (@felixedel:matrix.org)
Ian Wienand ianw (@iwienand:matrix.org)
James E. Blair corvus (@jim:acmegating.com)
Jens Harbott
Jeremy Stanley fungi (@fungicide:matrix.org)
Monty Taylor mordred (@mordred:inaugust.com)
Simon Westphahl swest (@westphahl:matrix.org)
Tobias Henkel tobiash (@tobias.henkel:matrix.org)
Tristan Cacqueray tristanC (@tristanc_:matrix.org)
====================== ===================================
Zuul Project Lead
-----------------
The Maintainers elect a Project Lead to articulate the overall
direction of the project and promote consistency among the different
areas and aspects of the project. The Project Lead does not have
extra rights beyond those of the Maintainers, but does have extra
responsibilities. The Project Lead must pay particular attention to
the overall design and direction of the project, ensure that
Maintainers and other contributors are familiar with that design, and
facilitate achieving consensus on difficult issues.
If the project is unable to achieve consensus on an issue, the Project
Lead may poll the Maintainers on the issue, and in the case of a tie,
the vote of the Project Lead will be the tie-breaker.
The Project Lead is elected to a term of one year. The election
process shall be a Condorcet election and the candidates shall be
self-nominated from among the existing Maintainers.
The Project Lead is James E. Blair (term expires 2024-01-16).
Zuul-Jobs Maintainers
---------------------
The zuul-jobs and zuul-base-jobs repositories contain a standard
library of reusable job components which are designed to be used in a
wide variety of situations.
Changes to these repositories require consideration of the various
environments in which the jobs may be used as well as policies which
promote the consistency and stability of the components therein, but
not necessarily the full scope of Zuul development. To that end,
approval rights for changes to these repositories are granted to both
the Zuul Maintainers and an additional group known as the Zuul-Jobs
Maintainers. The reviewing processes are identical to the rest of the
project; membership changes to the Zuul-Jobs Maintainers group are
undertaken with consensus of the wider Zuul-Jobs group (not merely the
Zuul Maintainers).
Current Zuul-Jobs Maintainers (in addition to Zuul Maintainers):
====================== ==========================
Name Matrix ID
====================== ==========================
Andreas Jaeger
Mohammed Naser
Albin Vass avass (@avass:vassast.org)
====================== ==========================
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/governance.rst | governance.rst |
:title: Operation
.. _operation:
Operation
=========
You can run any zuul process with the **-f** option to make it not
daemonize and stay in the foreground, logging to your terminal. It's a
good idea at first to check for issues with your configuration.
There's also a **-d** option to engage verbose debug logging, but be
careful in busy deployments as this can generate very large logs.
To start, simply run::
zuul-scheduler
Before Zuul can run any jobs, it needs to load its configuration, most
of which is in the git repositories that Zuul operates on. Start an
executor to allow zuul to do that::
zuul-executor
Zuul should now be able to read its configuration from the configured
repo and process any jobs defined therein.
Scheduler
---------
Operation
~~~~~~~~~
To start the scheduler, run ``zuul-scheduler``. To stop it, run
``zuul-scheduler stop``.
.. _reconfiguration:
Reconfiguration
~~~~~~~~~~~~~~~
Most of Zuul's configuration is automatically updated as changes to
the repositories which contain it are merged. However, Zuul must be
explicitly notified of changes to the tenant config file, since it is
not read from a git repository. Zuul supports two kinds of reconfigurations.
The full reconfiguration refetches and reloads the configuration of
all tenants. To do so, run ``zuul-scheduler full-reconfigure``. For
example this can be used to fix eventual configuration inconsistencies
after connection problems with the code hosting system.
To perform the same actions as a full reconfiguration but for a single
tenant, use ``zuul-scheduler tenant-reconfigure TENANT`` (where
``TENANT`` is the name of the tenant to reconfigure).
The smart reconfiguration reloads only the tenants that changed their
configuration in the tenant config file. To do so, run
``zuul-scheduler smart-reconfigure``. In multi tenant systems this can
be much faster than the full reconfiguration so it is recommended to
use the smart reconfiguration after changing the tenant configuration
file.
The ``tenant-reconfigure`` and ``smart-reconfigure`` commands should
only be run on a single scheduler. Other schedulers will see any
changes to the configuration stored in ZooKeeper and automatically
update their configuration in the background without interrupting
processing.
.. _backup:
Backup and Restoration
~~~~~~~~~~~~~~~~~~~~~~
While all of Zuul's component services are designed to be run in a
resilient active-active clustered deployment, a good disaster recovery
plan should include backing up critical data. At a minimum, the
randomly-generated project keys used for encryption of job secrets and
SSH access should be backed up, as they **cannot be recreated** if
lost. Zuul stores these keys in a keystore in ZooKeeper which is
inconvenient to back up directly, but provides an administrative tool
to :ref:`export <export-keys>` these keys to and :ref:`import
<import-keys>` them from a local directory.
It's highly recommended to set up periodic automation for dumping such
an export to a secure location (for example, on the filesystem of each
Zuul Scheduler) for use in a disaster where all ZooKeeper content is
lost. You may also consider configuring a safe remote backup of these
files with the tool of your choice, but be aware that they are
potentially sensitive since anyone who gains access to them could
decrypt job secrets or access protected systems which have been
instructed to trust those keys.
Note that the exported keys are symmetrically encrypted with the same
:ref:`keystore.password <keystore-password>` which is used for
encrypting and decrypting the copy of them in ZooKeeper, because it's
the encrypted versions of the keys which are exported and imported.
Someone with access to the keys would also need a copy of the
keystore.password from Zuul's configuration, so for security-sensitive
environments you may not want to back them up together. Conversely, if
you lose the keystore.password then you also lose the use of the
project keys in the keystore and any exports, so you will likely want
to make sure you keep a secured copy of it somewhere as well in the
event your server configuration is lost.
Merger
------
Operation
~~~~~~~~~
To start the merger, run ``zuul-merger``.
In order to stop the merger under normal circumstances, it is
best to pause and wait for all currently running tasks to finish
before stopping it. To do so run ``zuul-merger pause``.
To stop the merger, run ``zuul-merger stop``. This will wait for any
currently running merge task to complete before exiting. As a result
this is always a graceful way to stop the merger.
``zuul-merger graceful`` is an alias for ``zuul-merger stop`` to make
this consistent with the executor.
Executor
--------
Operation
~~~~~~~~~
To start the executor, run ``zuul-executor``.
There are several commands which can be run to control the executor's
behavior once it is running.
To pause the executor and prevent it from running new jobs you can
run ``zuul-executor pause``.
To cause the executor to stop accepting new jobs and exit when all running
jobs have finished you can run ``zuul-executor graceful``. Under most
circumstances this will be the best way to stop Zuul.
To stop the executor immediately, run ``zuul-executor stop``. Jobs that were
running on the stopped executor will be rescheduled on other executors.
The executor normally responds to a ``SIGTERM`` signal in the same way
as the ``graceful`` command, however you can change this behavior to match
``stop`` with the :attr:`executor.sigterm_method` setting.
To enable or disable running Ansible in verbose mode (with the
``-vvv`` argument to ansible-playbook) run ``zuul-executor verbose``
and ``zuul-executor unverbose``.
.. _ansible-and-python-3:
Ansible and Python 3
~~~~~~~~~~~~~~~~~~~~
As noted above, the executor runs Ansible playbooks against the remote
node(s) allocated for the job. Since part of executing playbooks on
remote hosts is running Python scripts on them, Ansible needs to know
what Python interpreter to use on the remote host. With older
distributions, ``/usr/bin/python2`` was a generally sensible choice.
However, over time a heterogeneous Python ecosystem has evolved where
older distributions may only provide Python 2, most provide a mixed
2/3 environment and newer distributions may only provide Python 3 (and
then others like RHEL8 may even have separate "system" Python versions
to add to confusion!).
Ansible's ``ansible_python_interpreter`` variable configures the path
to the remote Python interpreter to use during playbook execution.
This value is set by Zuul from the ``python-path`` specified for the
node by Nodepool; see the `nodepool configuration documentation
<https://zuul-ci.org/docs/nodepool/configuration.html>`__.
This defaults to ``auto``, where Ansible will automatically discover
the interpreter available on the remote host. However, this setting
only became available in Ansible >=2.8, so Zuul will translate
``auto`` into the old default of ``/usr/bin/python2`` when configured
to use older Ansible versions.
Thus for modern Python 3-only hosts no further configuration is needed
when using Ansible >=2.8 (e.g. Fedora, Bionic onwards). If using
earlier Ansible versions you may need to explicitly set the
``python-path`` if ``/usr/bin/python2`` is not available on the node.
Ansible roles/modules which include Python code are generally Python 3
safe now, but there is still a small possibility of incompatibility.
See also the Ansible `Python 3 support page
<https://docs.ansible.com/ansible/latest/reference_appendices/python_3_support.html>`__.
.. _nodepool_console_streaming:
Log streaming
~~~~~~~~~~~~~
The log streaming service enables Zuul to show the live status of
long-running ``shell`` or ``command`` tasks. The server side is setup
by the ``zuul_console:`` task built-in to Zuul's Ansible installation.
The executor requires the ability to communicate with this server on
the job nodes via port ``19885`` for this to work.
The log streaming service spools command output via files on the job
node in the format ``/tmp/console-<uuid>-<task_id>-<host>.log``. By
default, it will clean these files up automatically.
Occasionally, a streaming file may be left if a job is interrupted.
These may be safely removed after a short period of inactivity with a
command such as
.. code-block:: shell
find /tmp -maxdepth 1 -name 'console-*-*-<host>.log' -mtime +2 -delete
If the executor is unable to reach port ``19885`` (for example due to
firewall rules), or the ``zuul_console`` daemon can not be run for
some other reason, the command to clean these spool files will not be
processed and they may be left behind; on an ephemeral node this is
not usually a problem, but on a static node these files will persist.
In this situation, Zuul can be instructed not to create any spool
files for ``shell`` and ``command`` tasks via setting
``zuul_console_disabled: True`` (usually via a global host variable in
inventory). Live streaming of ``shell`` and ``command`` calls will of
course be unavailable in this case, but no spool files will be
created.
For Kubernetes-based job nodes the connection from the executor to the
``zuul_console`` daemon is established by using ``kubectl port-forward``
to forward a local port to the appropriate port on the pod containing
the job node. If the Kubernetes user is not bound to a role that has
authorization for port-forwarding, this will prevent connection to
the ``zuul_console`` daemon.
Web Server
----------
Operation
~~~~~~~~~
To start the web server, run ``zuul-web``. To stop it, kill the
PID which was saved in the pidfile specified in the configuration.
Finger Gateway
--------------
Operation
~~~~~~~~~
To start the finger gateway, run ``zuul-fingergw``. To stop it, kill the
PID which was saved in the pidfile specified in the configuration.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/operation.rst | operation.rst |
:title: Zuul Concepts
Zuul Concepts
=============
Zuul is organized around the concept of a *pipeline*. In Zuul, a
pipeline encompasses a workflow process which can be applied to one or
more projects. For instance, a "check" pipeline might describe the
actions which should cause newly proposed changes to projects to be
tested. A "gate" pipeline might implement :ref:`project_gating` to
automate merging changes to projects only if their tests pass. A
"post" pipeline might update published documentation for a project
when changes land.
The names "check", "gate", and "post" are arbitrary -- these are not
concepts built into Zuul, but rather they are just a few common
examples of workflow processes that can be described to Zuul and
implemented as pipelines.
Once a pipeline has been defined, any number of projects may be
associated with it, each one specifying what jobs should be run for
that project in a given pipeline.
Pipelines have associated *triggers* which are descriptions of events
which should cause something to be enqueued into a pipeline. For
example, when a patchset is uploaded to Gerrit, a Gerrit
*patchset-created* event is emitted. A pipeline configured to trigger
on *patchset-created* events would then enqueue the associated change
when Zuul receives that event. If there are jobs associated with that
project and pipeline, they will be run. In addition to Gerrit, other
triggers are available, including GitHub, timer, and zuul. See
:ref:`drivers` for a full list of available triggers.
Once all of the jobs for an item in a pipeline have been run, the
pipeline's *reporters* are responsible for reporting the results of
all of the jobs. Continuing the example from earlier, if a pipeline
were configured with a Gerrit reporter, it would leave a review
comment on the change and set any approval votes that are configured.
There are several reporting phases available; each of which may be
configured with any number of reporters. See :ref:`drivers` for a
full list of available reporters.
The items enqueued into a pipeline are each associated with a
`git ref <https://git-scm.com/book/en/v2/Git-Internals-Git-References>`_.
That ref may point to a proposed change, or it may be the tip of a
branch or a tag. The triggering event determines the ref, and whether
it represents a proposed or merged commit. Zuul prepares the ref for
an item before running jobs. In the case of a proposed change, that
means speculatively merging the change into its target branch. This
means that any jobs that operate on the change will run with the git
repo in the state it will be in after the change merges (which may be
substantially different than the git repo state of the change itself
since the repo may have merged other changes since the change was
originally authored). Items in a pipeline may depend on other items,
and if they do, all of their dependent changes will be included in the
git repo state that Zuul prepares. Jobs may also specify that they
require additional git repos, and if so, the repo state (as of the
time when the item was enqueued in the pipeline) for those repos will
also be included. For more detail on this process, see
:ref:`project_gating`, :ref:`dependencies`, and
:ref:`global_repo_state`.
The configuration for nearly everything described above is held in
files inside of the git repos upon which Zuul operates. Zuul's
configuration is global, but distributed. Jobs which are defined in
one project might be used in another project while pipelines are
available to all projects. When Zuul starts, it reads its
configuration from all of the projects it knows about, and when
changes to its configuration are proposed, those changes may take
effect temporarily as part of the proposed change, or immediately
after the change merges, depending on the type of project in which the
change appears.
Jobs specify the type and quantity of nodes which they require.
Before executing each job, Zuul will contact its companion program,
Nodepool, to supply them. Nodepool may be configured to supply static
nodes or contact cloud providers to create or delete nodes as
necessary. The types of nodes available to Zuul are determined by the
Nodepool administrator.
The executable contents of jobs themselves are Ansible playbooks.
Ansible's support for orchestrating tasks on remote nodes is
particularly suited to Zuul's support for multi-node testing. Ansible
is also easy to use for simple tasks (such as executing a shell
script) or sophisticated deployment scenarios. When Zuul runs
Ansible, it attempts to do so in a manner most similar to the way that
Ansible might be used to orchestrate remote systems. Ansible itself
is run on the :ref:`executor <executor>` and acts remotely upon the test
nodes supplied to a job. This facilitates continuous delivery by making it
possible to use the same Ansible playbooks in testing and production.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/concepts.rst | concepts.rst |
:title: Job Content
.. _job-content:
Job Content
===========
Zuul jobs are implemented as Ansible playbooks. Zuul prepares the
repositories used for a job, installs any required Ansible roles, and
then executes the job's playbooks. Any setup or artifact collection
required is the responsibility of the job itself. While this flexible
arrangement allows for almost any kind of job to be run by Zuul,
batteries are included. Zuul has a standard library of jobs upon
which to build.
Working Directory
-----------------
Before starting each job, the Zuul executor creates a directory to
hold all of the content related to the job. This includes some
directories which are used by Zuul to configure and run Ansible and
may not be accessible, as well as a directory tree, under ``work/``,
that is readable and writable by the job. The hierarchy is:
**work/**
The working directory of the job.
**work/src/**
Contains the prepared git repositories for the job.
**work/logs/**
Where the Ansible log for the job is written; your job
may place other logs here as well.
Git Repositories
----------------
The git repositories in ``work/src`` contain the repositories for all
of the projects specified in the ``required-projects`` section of the
job, plus the project associated with the queue item if it isn't
already in that list. In the case of a proposed change, that change
and all of the changes ahead of it in the pipeline queue will already
be merged into their respective repositories and target branches. The
change's project will have the change's branch checked out, as will
all of the other projects, if that branch exists (otherwise, a
fallback or default branch will be used). If your job needs to
operate on multiple branches, simply checkout the appropriate branches
of these git repos to ensure that the job results reflect the proposed
future state that Zuul is testing, and all dependencies are present.
The git repositories will have a remote ``origin`` with refs pointing
to the previous change in the speculative state. This means that e.g.
a ``git diff origin/<branch>..<branch>`` will show the changes being
tested. Note that the ``origin`` URL is set to a bogus value
(``file:///dev/null``) and can not be used for updating the repository
state; the local repositories are guaranteed to be up to date.
The repositories will be placed on the filesystem in directories
corresponding with the canonical hostname of their source connection.
For example::
work/src/git.example.com/project1
work/src/github.com/project2
Is the layout that would be present for a job which included project1
from the connection associated to git.example.com and project2 from
GitHub. This helps avoid collisions between projects with the same
name, and some language environments, such as Go, expect repositories
in this format.
Note that these git repositories are located on the executor; in order
to be useful to most kinds of jobs, they will need to be present on
the test nodes. The ``base`` job in the standard library (see
`zuul-base-jobs documentation`_ for details) contains a
pre-playbook which copies the repositories to all of the job's nodes.
It is recommended to always inherit from this base job to ensure that
behavior.
.. _zuul-base-jobs documentation: https://zuul-ci.org/docs/zuul-base-jobs/jobs.html#job-base
.. TODO: document src (and logs?) directory
.. _user_jobs_variable_inheritance:
Variables
---------
There are several sources of variables which are available to Ansible:
variables defined in jobs, secrets, and site-wide variables. The
order of precedence is:
#. :ref:`Site-wide variables <user_jobs_sitewide_variables>`
#. :ref:`Job extra variables <user_jobs_job_extra_variables>`
#. :ref:`Secrets <user_jobs_secrets>`
#. :ref:`Job variables <user_jobs_job_variables>`
#. :ref:`Project variables <user_jobs_project_variables>`
#. :ref:`Parent job results <user_jobs_parent_results>`
Meaning that a site-wide variable with the same name as any other will
override its value, and similarly, secrets override job variables of
the same name which override data returned from parent jobs. Each of
the sources is described below.
.. _user_jobs_sitewide_variables:
Site-wide Variables
~~~~~~~~~~~~~~~~~~~
The Zuul administrator may define variables which will be available to
all jobs running in the system. These are statically defined and may
not be altered by jobs. See the :ref:`Administrator's Guide
<admin_sitewide_variables>` for information on how a site
administrator may define these variables.
.. _user_jobs_job_extra_variables:
Job Extra Variables
~~~~~~~~~~~~~~~~~~~
Any extra variables in the job definition (using the :attr:`job.extra-vars`
attribute) are available to Ansible but not added into the inventory file.
.. _user_jobs_secrets:
Secrets
~~~~~~~
:ref:`Secrets <secret>` also appear as variables available to Ansible.
Unlike job variables, these are not added to the inventory file (so
that the inventory file may be kept for debugging purposes without
revealing secrets). But they are still available to Ansible as normal
variables. Because secrets are groups of variables, they will appear
as a dictionary structure in templates, with the dictionary itself
being the name of the secret, and its members the individual items in
the secret. For example, a secret defined as:
.. code-block:: yaml
- secret:
name: credentials
data:
username: foo
password: bar
Might be used in a template as::
{{ credentials.username }} {{ credentials.password }}
Secrets are only available to playbooks associated with the job
definition which uses the secret; they are not available to playbooks
associated with child jobs or job variants.
.. _user_jobs_job_variables:
Job Variables
~~~~~~~~~~~~~
Any variables specified in the job definition (using the
:attr:`job.vars` attribute) are available as Ansible host variables.
They are added to the ``vars`` section of the inventory file under the
``all`` hosts group, so they are available to all hosts. Simply refer
to them by the name specified in the job's ``vars`` section.
.. _user_jobs_project_variables:
Project Variables
~~~~~~~~~~~~~~~~~
Any variables specified in the project definition (using the
:attr:`project.vars` attribute) are available to jobs as Ansible host
variables in the same way as :ref:`job variables
<user_jobs_job_variables>`. Variables set in a ``project-template``
are merged into the project variables when the template is included by
a project.
.. code-block:: yaml
- project-template:
name: sample-template
description: Description
vars:
var_from_template: foo
post:
jobs:
- template_job
release:
jobs:
- template_job
- project:
name: Sample project
description: Description
templates:
- sample-template
vars:
var_for_all_jobs: value
check:
jobs:
- job1
- job2:
vars:
var_for_all_jobs: override
.. _user_jobs_parent_results:
Parent Job Results
~~~~~~~~~~~~~~~~~~
A job may return data to Zuul for later use by jobs which depend on
it. For details, see :ref:`return_values`.
Zuul Variables
--------------
Zuul supplies not only the variables specified by the job definition
to Ansible, but also some variables from Zuul itself.
When a pipeline is triggered by an action, it enqueues items which may
vary based on the pipeline's configuration. For example, when a new
change is created, that change may be enqueued into the pipeline,
while a tag may be enqueued into the pipeline when it is pushed.
Information about these items is available to jobs. All of the items
enqueued in a pipeline are git references, and therefore share some
attributes in common. But other attributes may vary based on the type
of item.
.. var:: zuul
All items provide the following information as Ansible variables
under the ``zuul`` key:
.. var:: artifacts
:type: list
If the job has a :attr:`job.requires` attribute, and Zuul has
found changes ahead of this change in the pipeline with matching
:attr:`job.provides` attributes, then information about any
:ref:`artifacts returned <return_artifacts>` from those jobs
will appear here.
This value is a list of dictionaries with the following format:
.. var:: project
The name of the project which supplied this artifact.
.. var:: change
The change number which supplied this artifact.
.. var:: patchset
The patchset of the change.
.. var:: job
The name of the job which produced the artifact.
.. var:: name
The name of the artifact (as supplied to :ref:`return_artifacts`).
.. var:: url
The URL of the artifact (as supplied to :ref:`return_artifacts`).
.. var:: metadata
The metadata of the artifact (as supplied to :ref:`return_artifacts`).
.. var:: build
The UUID of the build. A build is a single execution of a job.
When an item is enqueued into a pipeline, this usually results
in one build of each job configured for that item's project.
However, items may be re-enqueued in which case another build
may run. In dependent pipelines, the same job may run multiple
times for the same item as circumstances change ahead in the
queue. Each time a job is run, for whatever reason, it is
accompanied with a new unique id.
.. var:: buildset
The build set UUID. When Zuul runs jobs for an item, the
collection of those jobs is known as a buildset. If the
configuration of items ahead in a dependent pipeline changes,
Zuul creates a new buildset and restarts all of the jobs.
.. var:: child_jobs
A list of the first level dependent jobs to be run after this job
has finished successfully.
.. var:: ref
The git ref of the item. This will be the full path (e.g.,
`refs/heads/master` or `refs/changes/...`).
.. var:: override_checkout
If the job was configured to override the branch or tag checked
out, this will contain the specified value. Otherwise, this
variable will be undefined.
.. var:: pipeline
The name of the pipeline in which the job is being run.
.. var:: post_review
:type: bool
Whether the current job is running in a post-review pipeline or not.
.. var:: job
The name of the job being run.
.. var:: event_id
The UUID of the event that triggered this execution. This is mainly
useful for debugging purposes.
.. var:: voting
A boolean indicating whether the job is voting.
.. var:: attempts
An integer count of how many attempts have been made to run this
job for the current buildset. If there are pre-run failures or network
connectivity issues then previous attempts may have been cancelled,
and this value will be greater than 1.
.. var:: max_attempts
The number of attempts that will be made for this job when
encountering an error in a pre-playbook before it is reported as failed.
This value is taken from :attr:`job.attempts`.
.. var:: ansible_version
The version of the Ansible community package release used for executing
the job.
.. var:: project
The item's project. This is a data structure with the following
fields:
.. var:: name
The name of the project, excluding hostname. E.g., `org/project`.
.. var:: short_name
The name of the project, excluding directories or
organizations. E.g., `project`.
.. var:: canonical_hostname
The canonical hostname where the project lives. E.g.,
`git.example.com`.
.. var:: canonical_name
The full canonical name of the project including hostname.
E.g., `git.example.com/org/project`.
.. var:: src_dir
The path to the source code relative to the work dir. E.g.,
`src/git.example.com/org/project`.
.. var:: projects
:type: dict
A dictionary of all projects prepared by Zuul for the item. It
includes, at least, the item's own project. It also includes
the projects of any items this item depends on, as well as the
projects that appear in :attr:`job.required-projects`.
This is a dictionary of dictionaries. Each value has a key of
the `canonical_name`, then each entry consists of:
.. var:: name
The name of the project, excluding hostname. E.g., `org/project`.
.. var:: short_name
The name of the project, excluding directories or
organizations. E.g., `project`.
.. var:: canonical_hostname
The canonical hostname where the project lives. E.g.,
`git.example.com`.
.. var:: canonical_name
The full canonical name of the project including hostname.
E.g., `git.example.com/org/project`.
.. var:: src_dir
The path to the source code, relative to the work dir. E.g.,
`src/git.example.com/org/project`.
.. var:: required
A boolean indicating whether this project appears in the
:attr:`job.required-projects` list for this job.
.. var:: checkout
The branch or tag that Zuul checked out for this project.
This may be influenced by the branch or tag associated with
the item as well as the job configuration.
.. var:: checkout_description
A human-readable description of why Zuul chose this
particular branch or tag to be checked out. This is intended
as a debugging aid in the case of complex jobs. The specific
text is not defined and is subject to change.
.. var:: commit
The hex SHA of the commit checked out. This commit may
appear in the upstream repository, or if it is the result of a
speculative merge, it may only exist during the run of this
job.
For example, to access the source directory of a single known
project, you might use::
{{ zuul.projects['git.example.com/org/project'].src_dir }}
To iterate over the project list, you might write a task
something like::
- name: Sample project iteration
debug:
msg: "Project {{ item.name }} is at {{ item.src_dir }}"
with_items: {{ zuul.projects.values() | list }}
.. var:: playbook_context
:type: dict
This dictionary contains information about the execution of each
playbook in the job. This may be useful for understanding
exactly what playbooks and roles Zuul executed.
All paths herein are located under the root of the build
directory (note that is one level higher than the workspace
directory accessible to jobs on the executor).
.. var:: playbook_projects
:type: dict
A dictionary of projects that have been checked out for
playbook execution. When used in the trusted execution
context, these will contain only merged commits in upstream
repositories. In the case of the untrusted context, they may
contain speculatively merged code.
The key is the path and each value is another dictionary with
the following keys:
.. var:: canonical_name
The canonical name of the repository.
.. var:: checkout
The branch or tag checked out.
.. var:: commit
The hex SHA of the commit checked out. As above, this
commit may or may not exist in the upstream repository
depending on whether it was the result of a speculative
merge.
.. var:: playbooks
:type: list
An ordered list of playbooks executed for the job. Each item
is a dictionary with the following keys:
.. var:: path
The path to the playbook.
.. var:: roles
:type: list
Information about the roles available to the playbook.
The actual `role path` supplied to Ansible is the
concatenation of the ``role_path`` entry in each of the
following dictionaries. The rest of the information
describes what is in the role path.
In order to deal with the many possible role layouts and
aliases, each element in the role path gets its own
directory. Depending on the contents and alias
configuration for that role repo, a symlink is added to
one of the repo checkouts in
:var:`zuul.playbook_context.playbook_projects` so that the
role may be supplied to Ansible with the correct name.
.. var:: checkout
The branch or tag checked out.
.. var:: checkout_description
A human-readable description of why Zuul chose this
particular branch or tag to be checked out. This is
intended as a debugging aid in the case of complex
jobs. The specific text is not defined and is subject
to change.
.. var:: link_name
The name of the symbolic link.
.. var:: link_target
The target of the symbolic link.
.. var:: role_path
The role path passed to Ansible.
.. var:: tenant
The name of the current Zuul tenant.
.. var:: timeout
The job timeout, in seconds.
.. var:: post_timeout
The post-run playbook timeout, in seconds.
.. var:: jobtags
A list of tags associated with the job. Not to be confused with
git tags, these are simply free-form text fields that can be
used by the job for reporting or classification purposes.
.. var:: items
:type: list
.. note::
``zuul.items`` conflicts with the ``items()`` builtin so the
variable can only be accessed with python dictionary like syntax,
e.g: ``zuul['items']``
A list of dictionaries, each representing an item being tested
with this change with the format:
.. var:: project
The item's project. This is a data structure with the
following fields:
.. var:: name
The name of the project, excluding hostname. E.g.,
`org/project`.
.. var:: short_name
The name of the project, excluding directories or
organizations. E.g., `project`.
.. var:: canonical_hostname
The canonical hostname where the project lives. E.g.,
`git.example.com`.
.. var:: canonical_name
The full canonical name of the project including hostname.
E.g., `git.example.com/org/project`.
.. var:: src_dir
The path to the source code on the remote host, relative
to the home dir of the remote user.
E.g., `src/git.example.com/org/project`.
.. var:: branch
The target branch of the change (without the `refs/heads/` prefix).
.. var:: bundle_id
The id of the bundle if the change is in a circular dependency cycle.
.. var:: change
The identifier for the change.
.. var:: change_url
The URL to the source location of the given change.
E.g., `https://review.example.org/#/c/123456/` or
`https://github.com/example/example/pull/1234`.
.. var:: patchset
The patchset identifier for the change. If a change is
revised, this will have a different value.
.. var:: resources
:type: dict
A job using container build resources has access to a resources
variable that describes them. Resources is a dictionary of group keys,
each value consists of:
.. var:: namespace
The resource's namespace name.
.. var:: context
The kube config context name.
.. var:: pod
The name of the pod when the label defines a kubectl connection.
Project or namespace resources might be used in a template as:
.. code-block:: yaml
- hosts: localhost
tasks:
- name: Create a k8s resource
k8s_raw:
state: present
context: "{{ zuul.resources['node-name'].context }}"
namespace: "{{ zuul.resources['node-name'].namespace }}"
Kubectl resources might be used in a template as:
.. code-block:: yaml
- hosts: localhost
tasks:
- name: Copy src repos to the pod
command: >
oc rsync -q --progress=false
{{ zuul.executor.src_root }}/
{{ zuul.resources['node-name'].pod }}:src/
no_log: true
.. var:: zuul_success
Post run playbook(s) will be passed this variable to indicate if the run
phase of the job was successful or not. This variable is meant to be used
with the `bool` filter.
.. code-block:: yaml
tasks:
- shell: echo example
when: zuul_success | bool
.. var:: zuul_will_retry
Post run and cleanup playbook(s) will be passed this variable to indicate
if the job will be retried. This variable is meant to be used with the
`bool` filter.
.. code-block:: yaml
tasks:
- shell: echo example
when: zuul_will_retry | bool
.. var:: nodepool
Information about each host from Nodepool is supplied in the
`nodepool` host variable. Availability of values varies based on
the node and the driver that supplied it. Values may be ``null``
if they are not applicable.
.. var:: label
The nodepool label of this node.
.. var:: az
The availability zone in which this node was placed.
.. var:: cloud
The name of the cloud in which this node was created.
.. var:: provider
The name of the nodepool provider of this node.
.. var:: region
The name of the nodepool provider's region.
.. var:: host_id
The cloud's host identification for this node's hypervisor.
.. var:: external_id
The cloud's identifier for this node.
.. var:: slot
If the node supports running multiple jobs on the node, a unique
numeric ID for the subdivision of the node assigned to this job.
This may be used to avoid build directory collisions.
.. var:: interface_ip
The best IP address to use to contact the node as determined by
the cloud provider and nodepool.
.. var:: public_ipv4
A public IPv4 address of the node.
.. var:: private_ipv4
A private IPv4 address of the node.
.. var:: public_ipv6
A public IPv6 address of the node.
.. var:: private_ipv6
A private IPv6 address of the node.
Change Items
~~~~~~~~~~~~
A change to the repository. Most often, this will be a git reference
which has not yet been merged into the repository (e.g., a gerrit
change or a GitHub pull request). The following additional variables
are available:
.. var:: zuul
:hidden:
.. var:: branch
The target branch of the change (without the `refs/heads/` prefix).
.. var:: change
The identifier for the change.
.. var:: patchset
The patchset identifier for the change. If a change is revised,
this will have a different value.
.. var:: change_url
The URL to the source location of the given change.
E.g., `https://review.example.org/#/c/123456/` or
`https://github.com/example/example/pull/1234`.
.. var:: message
The commit or pull request message of the change base64 encoded. Use the
`b64decode` filter in ansible when working with it.
.. warning:: This variable is deprecated and will be removed in
a future version. Use :var:`zuul.change_message`
instead.
.. var:: change_message
The commit or pull request message of the change. When Zuul
runs Ansible, this variable is tagged with the ``!unsafe`` YAML
tag so that Ansible will not interpolate values into it. Note,
however, that the `inventory.yaml` file placed in the build's
      workspace for debugging and inspection purposes does not include
the ``!unsafe`` tag.
.. var:: commit_id
The git sha of the change. This may be the commit sha of the
current patchset revision or the tip of a pull request branch
depending on the source. Because of Zuul's speculative merge
process, this commit may not even appear in the prepared git
repos, so it should not be relied upon for git operations in
jobs. It is included here to support interfacing with systems
that identify a change by the commit.
Branch Items
~~~~~~~~~~~~
This represents a branch tip. This item may have been enqueued
because the branch was updated (via a change having merged, or a
direct push). Or it may have been enqueued by a timer for the purpose
of verifying the current condition of the branch. The following
additional variables are available:
.. var:: zuul
:hidden:
.. var:: branch
The name of the item's branch (without the `refs/heads/`
prefix).
.. var:: oldrev
If the item was enqueued as the result of a change merging or
being pushed to the branch, the git sha of the old revision will
be included here. Otherwise, this variable will be undefined.
.. var:: newrev
If the item was enqueued as the result of a change merging or
being pushed to the branch, the git sha of the new revision will
be included here. Otherwise, this variable will be undefined.
.. var:: commit_id
The git sha of the branch. Identical to ``newrev`` or
``oldrev`` if defined.
Tag Items
~~~~~~~~~
This represents a git tag. The item may have been enqueued because a
tag was created or deleted. The following additional variables are
available:
.. var:: zuul
:hidden:
.. var:: tag
The name of the item's tag (without the `refs/tags/` prefix).
.. var:: oldrev
If the item was enqueued as the result of a tag being deleted,
the previous git sha of the tag will be included here. If the
tag was created, this variable will be undefined.
.. var:: newrev
If the item was enqueued as the result of a tag being created,
the new git sha of the tag will be included here. If the tag
was deleted, this variable will be undefined.
.. var:: commit_id
      The git sha of the tag.  Identical to ``newrev`` or
      ``oldrev`` if defined.
Ref Items
~~~~~~~~~
This represents a git reference that is neither a change, branch, nor
tag.  Note that all items include a `ref` attribute which may be used
to identify the ref. The following additional variables are
available:
.. var:: zuul
:hidden:
.. var:: oldrev
If the item was enqueued as the result of a ref being deleted,
the previous git sha of the ref will be included here. If the
ref was created, this variable will be undefined.
.. var:: newrev
If the item was enqueued as the result of a ref being created,
the new git sha of the ref will be included here. If the ref
was deleted, this variable will be undefined.
.. var:: commit_id
      The git sha of the ref.  Identical to ``newrev`` or
      ``oldrev`` if defined.
Working Directory
~~~~~~~~~~~~~~~~~
Additionally, some information about the working directory and the
executor running the job is available:
.. var:: zuul
:hidden:
.. var:: executor
A number of values related to the executor running the job are
available:
.. var:: hostname
The hostname of the executor.
.. var:: src_root
The path to the source directory.
.. var:: log_root
The path to the logs directory.
.. var:: work_root
The path to the working directory.
.. var:: inventory_file
The path to the inventory. This variable is needed for jobs running
without a nodeset since Ansible doesn't set it for localhost; see
this `porting guide
<https://docs.ansible.com/ansible/latest/porting_guides/porting_guide_2.4.html#inventory>`_.
The inventory file is only readable by jobs running in a
:term:`trusted execution context`.
SSH Keys
--------
Zuul starts each job with an SSH agent running and at least one key
added to that agent. Generally you won't need to be aware of this
since Ansible will use this when performing any tasks on remote nodes.
However, under some circumstances you may want to interact with the
agent. For example, you may wish to add a key provided as a secret to
the job in order to access a specific host, or you may want to, in a
pre-playbook, replace the key used to log into the assigned nodes in
order to further protect it from being abused by untrusted job
content.
A description of each of the keys added to the SSH agent follows.
Nodepool Key
~~~~~~~~~~~~
This key is supplied by the system administrator. It is expected to
be accepted by every node supplied by Nodepool and is generally the
key that will be used by Zuul when running jobs. Because of the
potential for an unrelated job to add an arbitrary host to the Ansible
inventory which might accept this key (e.g., a node for another job,
or a static host), the use of the `add-build-sshkey
<https://zuul-ci.org/docs/zuul-jobs/general-roles.html#role-add-build-sshkey>`_
role is recommended.
Project Key
~~~~~~~~~~~
Each project in Zuul has its own SSH keypair. This key is added to
the SSH agent for all jobs running in a post-review pipeline. If a
system administrator trusts that project, they can add the project's
public key to systems to allow post-review jobs to access those
systems. The systems may be added to the inventory using the
``add_host`` Ansible module, or they may be supplied by static nodes
in Nodepool.
Zuul serves each project's public SSH key using its built-in
webserver. They can be fetched at the path
``/api/tenant/<tenant>/project-ssh-key/<project>.pub`` where
``<project>`` is the canonical name of a project and ``<tenant>`` is
the name of a tenant with that project.
.. _return_values:
Return Values
-------------
A job may return some values to Zuul to affect its behavior and for
use by dependent jobs. To return a value, use the ``zuul_return``
Ansible module in a job playbook.
For example:
.. code-block:: yaml
tasks:
- zuul_return:
data:
foo: bar
Will return the dictionary ``{'foo': 'bar'}`` to Zuul.
Optionally, if you have a large supply of data to return, you may specify the
path to a JSON-formatted file with that data. For example:
.. code-block:: yaml
tasks:
- zuul_return:
file: /path/to/data.json
Normally returned data are provided to dependent jobs in the inventory
file, which may end up in the log archive of a job. In the case where
sensitive data must be provided to dependent jobs, the ``secret_data``
attribute may be used instead, and the data will be provided via the
same mechanism as job secrets, where the data are not written to disk
in the work directory. Care must still be taken to avoid displaying
or storing sensitive data within the job. For example:
.. code-block:: yaml
tasks:
- zuul_return:
secret_data:
password: foobar
.. TODO: xref to section describing formatting
Any values other than those in the ``zuul`` hierarchy will be supplied
as Ansible variables to dependent jobs. These variables have less
precedence than any other type of variable in Zuul, so be sure their
names are not shared by any job variables. If more than one parent
job returns the same variable, the value from the later job in the job
graph will take precedence.
The values in the ``zuul`` hierarchy are special variables that influence the
behavior of zuul itself. The following paragraphs describe the currently
supported special variables and their meaning.
Returning the log url
~~~~~~~~~~~~~~~~~~~~~
To set the log URL for a build, use *zuul_return* to set the
**zuul.log_url** value. For example:
.. code-block:: yaml
tasks:
- zuul_return:
data:
zuul:
log_url: http://logs.example.com/path/to/build/logs
.. _return_artifacts:
Returning artifact URLs
~~~~~~~~~~~~~~~~~~~~~~~
If a build produces artifacts, any number of URLs may be returned to
Zuul and stored in the SQL database. These will then be available via
the web interface and subsequent jobs.
To provide artifact URLs for a build, use *zuul_return* to set keys
under the :var:`zuul.artifacts` dictionary. For example:
.. code-block:: yaml
tasks:
- zuul_return:
data:
zuul:
artifacts:
- name: tarball
url: http://example.com/path/to/package.tar.gz
metadata:
version: 3.0
- name: docs
url: build/docs/
If the value of **url** is a relative URL, it will be combined with
the **zuul.log_url** value if set to create an absolute URL. The
**metadata** key is optional; if it is provided, it must be a
dictionary; its keys and values may be anything.
If *zuul_return* is invoked multiple times (e.g., via multiple
playbooks), then the elements of :var:`zuul.artifacts` from each
invocation will be appended.
.. _skipping child jobs:
Skipping dependent jobs
~~~~~~~~~~~~~~~~~~~~~~~
.. note::
In the following section the use of 'child jobs' refers to dependent jobs
configured by `job.dependencies` and should not be confused with jobs
that inherit from a parent job.
To skip a dependent job for the current build, use *zuul_return* to set the
:var:`zuul.child_jobs` value. For example:
.. code-block:: yaml
tasks:
- zuul_return:
data:
zuul:
child_jobs:
- dependent_jobA
- dependent_jobC
Will tell zuul to only run the dependent_jobA and dependent_jobC for pre-configured
dependent jobs. If dependent_jobB was configured, it would now be marked as SKIPPED. If
zuul.child_jobs is empty, all jobs will be marked as SKIPPED. Invalid dependent jobs
are stripped and ignored, if only invalid jobs are listed it is the same as
providing an empty list to zuul.child_jobs.
Leaving warnings
~~~~~~~~~~~~~~~~
A job can leave warnings that will be appended to the comment zuul leaves on
the change. Use *zuul_return* to add a list of warnings. For example:
.. code-block:: yaml
tasks:
- zuul_return:
data:
zuul:
warnings:
- This warning will be posted on the change.
If *zuul_return* is invoked multiple times (e.g., via multiple
playbooks), then the elements of **zuul.warnings** from each
invocation will be appended.
Leaving file comments
~~~~~~~~~~~~~~~~~~~~~
To instruct the reporters to leave line comments on files in the
change, set the **zuul.file_comments** value. For example:
.. code-block:: yaml
tasks:
- zuul_return:
data:
zuul:
file_comments:
path/to/file.py:
- line: 42
message: "Line too long"
level: info
- line: 82
message: "Line too short"
- line: 119
message: "This block is indented too far."
level: warning
range:
start_line: 117
start_character: 0
end_line: 119
end_character: 37
Not all reporters currently support line comments (or all of the
features of line comments); in these cases, reporters will simply
ignore this data. The ``level`` is optional, but if provided must
be one of ``info``, ``warning``, ``error``.
Zuul will attempt to automatically translate the supplied line numbers
to the corresponding lines in the original change as written (they may
differ due to other changes which may have merged since the change was
written). If this produces erroneous results for a job, the behavior
may be disabled by setting the
**zuul.disable_file_comment_line_mapping** variable to ``true`` in
*zuul_return*.
If *zuul_return* is invoked multiple times (e.g., via multiple playbooks), then
the elements of `zuul.file_comments` from each invocation will be appended.
Pausing the job
~~~~~~~~~~~~~~~
A job can be paused after the run phase by notifying zuul during the run phase.
In this case the dependent jobs can start and the prior job stays paused until
all dependent jobs are finished. This for example can be useful to start
a docker registry in a job that will be used by the dependent job.
To indicate that the job should be paused use *zuul_return* to
set the **zuul.pause** value.
You still can at the same time supply any arbitrary data to the dependent jobs.
For example:
.. code-block:: yaml
tasks:
- zuul_return:
data:
zuul:
pause: true
registry_ip_address: "{{ hostvars[groups.all[0]].ansible_host }}"
Skipping retries
~~~~~~~~~~~~~~~~
It's possible to skip the retry caused by a failure in ``pre-run``
by setting **zuul.retry** to ``false``.
For example the following would skip retrying the build:
.. code-block:: yaml
tasks:
- zuul_return:
data:
zuul:
retry: false
.. _build_status:
Build Status
------------
A job build may have the following status:
**SUCCESS**
Nominal job execution.
**FAILURE**
Job executed correctly, but exited with a failure.
**RETRY**
The ``pre-run`` playbook failed and the job will be retried.
**RETRY_LIMIT**
The ``pre-run`` playbook failed more than the maximum number of
retry ``attempts``.
**POST_FAILURE**
The ``post-run`` playbook failed.
**SKIPPED**
One of the build dependencies failed and this job was not executed.
**NODE_FAILURE**
  The test instance provider was unable to fulfill the nodeset request.
This can happen if Nodepool is unable to provide the requested node(s)
for the request.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/job-content.rst | job-content.rst |
:title: About Zuul
.. _about-zuul:
About Zuul
==========
Zuul is a Project Gating System. That's like a CI or CD system, but
the focus is on testing the future state of code repositories.
A gating system doesn't just test a proposed change; it tests the
proposed future state of multiple branches and repositories with any
number of in-flight changes and their dependencies. And the same
playbooks used to test software can also be used to deploy it.
Zuul itself is a service which listens to events from various code
review systems, executes jobs based on those events, and reports the
results back to the code review system. The primary interface for
Zuul is the code review system (or systems) so that it fits seamlessly
into developer workflows, and a web interface is available for
inspecting the current status and browsing build results.
The best way to run Zuul is with a single installation serving as many
projects or groups as possible. It is a multi-tenant application that
is able to provide as much or as little separation between projects as
desired.
Zuul works with a wide range of code review systems, and can work with
multiple systems (including integrating projects on different systems)
simultaneously. See :ref:`drivers` for a complete list.
Zuul uses a separate component called `Nodepool`_ to provide the
resources to run jobs. Nodepool works with several cloud providers
as well as statically defined nodes (again, simultaneously).
Because Zuul is designed from the ground up to run jobs in a
multi-node environment (whether those nodes are bare metal machines,
VMs, Kubernetes clusters, or containers), Zuul's job definition
language needs to support orchestrating tasks on multiple nodes. Zuul
uses Ansible for this. Ansible is well-known and easy to learn and
use. Some existing Ansible playbooks and roles may be able to be used
directly with Zuul (but some restrictions apply, so not all will).
However, knowledge or use of Ansible is not required for Zuul -- it is
quite simple for Zuul's embedded Ansible to run any shell script or
any other program. Zuul's library of standard jobs even includes a
job that will run a specified shell script, so it's possible to use
Zuul without writing any Ansible at all.
Zuul is an open source project developed and maintained by a community
of users. We welcome your `support and contribution
<https://zuul-ci.org/community.html>`__.
.. toctree::
:hidden:
concepts
gating
.. _Nodepool: https://zuul-ci.org/docs/nodepool/
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/about.rst | about.rst |
:title: Tracing
.. _tracing:
Tracing
=======
Zuul includes support for `distributed tracing`_ as described by the
OpenTelemetry project. This allows operators (and potentially users)
to visualize the progress of events and queue items through the
various Zuul components as an aid to debugging.
OpenTelemetry defines several observability signals such as traces,
metrics, and logs. Zuul uses other systems for metrics and logs; only
traces are exported via OpenTelemetry.
Zuul supports the OpenTelemetry Protocol (OTLP) for exporting traces.
Many observability systems support receiving traces via OTLP
(including Jaeger tracing).
Configuration
-------------
Related configuration is in the :attr:`tracing` section of ``zuul.conf``.
Tutorial
--------
Here is a tutorial that shows how to enable tracing with Zuul and Jaeger.
.. toctree::
:maxdepth: 1
tutorials/tracing
_`distributed tracing`: https://opentelemetry.io/docs/concepts/observability-primer/#distributed-traces
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/tracing.rst | tracing.rst |
:title: Configuration
Configuration
=============
All Zuul processes read the ``/etc/zuul/zuul.conf`` file (an alternate
location may be supplied on the command line) which uses an INI file
syntax. Each component may have its own configuration file, though
you may find it simpler to use the same file for all components.
Zuul will interpolate environment variables starting with the
``ZUUL_`` prefix given in the config file escaped as python string
expansion. ``foo=%(ZUUL_HOME)s`` will set the value of ``foo`` to the
same value as the environment variable named ``ZUUL_HOME``.
An example ``zuul.conf``:
.. code-block:: ini
[zookeeper]
hosts=zk1.example.com,zk2.example.com,zk3.example.com
[database]
dburi=<URI>
[keystore]
password=MY_SECRET_PASSWORD
[web]
status_url=https://zuul.example.com/status
[scheduler]
log_config=/etc/zuul/scheduler-logging.yaml
Common Options
--------------
The following sections of ``zuul.conf`` are used by all Zuul components:
Statsd
~~~~~~
.. attr:: statsd
Information about the optional statsd server. If the ``statsd``
python module is installed and this section is configured,
statistics will be reported to statsd. See :ref:`statsd` for more
information.
.. attr:: server
Hostname or IP address of the statsd server.
.. attr:: port
:default: 8125
The UDP port on which the statsd server is listening.
.. attr:: prefix
If present, this will be prefixed to all of the keys before
transmitting to the statsd server.
Tracing
~~~~~~~
.. attr:: tracing
Information about the optional OpenTelemetry tracing configuration.
See :ref:`tracing` for more information.
.. attr:: enabled
:required:
To enable tracing, set this value to ``true``. This is the only
required parameter in order to export to a collector running
locally.
.. attr:: protocol
:default: grpc
The OTLP wire protocol to use.
.. value:: grpc
Use gRPC (the default).
.. value:: http/protobuf
Use HTTP with protobuf encoding.
.. attr:: endpoint
The endpoint to use. The default is protocol specific, but
defaults to localhost in all cases.
.. attr:: service_name
:default: zuul
The service name may be specified here. Multiple Zuul
installations should use different values.
.. attr:: tls_cert
The path to the PEM encoded certificate file. Used only by
:value:`tracing.protocol.grpc`.
.. attr:: tls_key
The path to the PEM encoded key file. Used only by
:value:`tracing.protocol.grpc`.
.. attr:: tls_ca
The path to the PEM encoded CA certificate file. Used only by
:value:`tracing.protocol.grpc`.
.. attr:: certificate_file
The path to the PEM encoded certificate file used to verify the
endpoint. Used only by :value:`tracing.protocol.http/protobuf`.
.. attr:: insecure
Whether to allow an insecure connection. Used only by
:value:`tracing.protocol.grpc`.
.. attr:: timeout
:default: 10000
The timeout for outgoing data in milliseconds.
.. attr:: compression
The compression algorithm to use. Available values depend on
the protocol and endpoint. The only universally supported value
is ``gzip``.
ZooKeeper
~~~~~~~~~
.. attr:: zookeeper
Client connection information for ZooKeeper. TLS is required.
.. attr:: hosts
:required:
A list of zookeeper hosts for Zuul to use when communicating
with Nodepool.
.. attr:: tls_cert
:required:
The path to the PEM encoded certificate file.
.. attr:: tls_key
:required:
The path to the PEM encoded key file.
.. attr:: tls_ca
:required:
The path to the PEM encoded CA certificate file.
.. attr:: session_timeout
:default: 10.0
The ZooKeeper session timeout, in seconds.
.. _database:
Database
~~~~~~~~
.. attr:: database
.. attr:: dburi
:required:
Database connection information in the form of a URI understood
by SQLAlchemy. See `The SQLAlchemy manual
<https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_
for more information.
The driver will automatically set up the database creating and managing
the necessary tables. Therefore the provided user should have sufficient
permissions to manage the database. For example:
.. code-block:: sql
GRANT ALL ON my_database TO 'my_user'@'%';
.. attr:: pool_recycle
:default: 1
Tune the pool_recycle value. See `The SQLAlchemy manual on pooling
<http://docs.sqlalchemy.org/en/latest/core/pooling.html#setting-pool-recycle>`_
for more information.
.. attr:: table_prefix
:default: ''
The string to prefix the table names. This makes it possible to run
several zuul deployments against the same database. This can be useful
if you rely on external databases which are not under your control.
The default is to have no prefix.
.. _scheduler:
Scheduler
---------
The scheduler is the primary component of Zuul. The scheduler is a
scalable component; one or more schedulers must be running at all
times for Zuul to be operational. It receives events from any
connections to remote systems which have been configured, enqueues
items into pipelines, distributes jobs to executors, and reports
results.
The scheduler must be able to connect to the ZooKeeper cluster shared
by Zuul and Nodepool in order to request nodes. It does not need to
connect directly to the nodes themselves, however -- that function is
handled by the Executors.
It must also be able to connect to any services for which connections
are configured (Gerrit, GitHub, etc).
The following sections of ``zuul.conf`` are used by the scheduler:
.. attr:: web
.. attr:: root
:required:
The root URL of the web service (e.g.,
``https://zuul.example.com/``).
See :attr:`tenant.web-root` for additional options for
whitelabeled tenant configuration.
.. attr:: status_url
URL that will be posted in Zuul comments made to changes when
starting jobs for a change.
.. TODO: is this effectively required?
.. attr:: keystore
.. _keystore-password:
.. attr:: password
:required:
Encryption password for private data stored in Zookeeper.
.. attr:: scheduler
.. attr:: command_socket
:default: /var/lib/zuul/scheduler.socket
Path to command socket file for the scheduler process.
.. attr:: tenant_config
Path to :ref:`tenant-config` file. This attribute
is exclusive with :attr:`scheduler.tenant_config_script`.
.. attr:: tenant_config_script
Path to a script to execute and load the tenant
config from. This attribute is exclusive with
:attr:`scheduler.tenant_config`.
.. attr:: default_ansible_version
      Default ansible version to use for jobs that don't specify a version.
See :attr:`job.ansible-version` for details.
.. attr:: log_config
Path to log config file.
.. attr:: pidfile
:default: /var/run/zuul/scheduler.pid
Path to PID lock file.
.. attr:: relative_priority
:default: False
A boolean which indicates whether the scheduler should supply
relative priority information for node requests.
In all cases, each pipeline may specify a precedence value which
is used by Nodepool to satisfy requests from higher-precedence
pipelines first. If ``relative_priority`` is set to ``True``,
then Zuul will additionally group items in the same pipeline by
pipeline queue and weight each request by its position in that
project's group. A request for the first change in a given
queue will have the highest relative priority, and the second
change a lower relative priority. The first change of each
queue in a pipeline has the same relative priority, regardless
of the order of submission or how many other changes are in the
pipeline. This can be used to make node allocations complete
faster for projects with fewer changes in a system dominated by
projects with more changes.
After the first 10 changes, the relative priority becomes more
coarse (batching groups of 10 changes at the same priority).
      Likewise, after 100 changes they are batched in groups of 100.
      This is to avoid causing additional load with unnecessary
priority changes if queues are long.
If this value is ``False`` (the default), then node requests are
sorted by pipeline precedence followed by the order in which
they were submitted. If this is ``True``, they are sorted by
pipeline precedence, followed by relative priority, and finally
the order in which they were submitted.
.. attr:: default_hold_expiration
:default: max_hold_expiration
The default value for held node expiration if not supplied. This
will default to the value of ``max_hold_expiration`` if not changed,
or if it is set to a higher value than the max.
.. attr:: max_hold_expiration
:default: 0
Maximum number of seconds any nodes held for an autohold request
will remain available. A value of 0 disables this, and the nodes
will remain held until the autohold request is manually deleted.
If a value higher than ``max_hold_expiration`` is supplied during
hold request creation, it will be lowered to this value.
.. attr:: prometheus_port
Set a TCP port to start the prometheus metrics client.
.. attr:: prometheus_addr
:default: 0.0.0.0
The IPv4 addr to listen for prometheus metrics poll.
To use IPv6, python>3.8 is required `issue24209 <https://bugs.python.org/issue24209>`_.
Merger
------
Mergers are an optional Zuul service; they are not required for Zuul
to operate, but some high volume sites may benefit from running them.
Zuul performs quite a lot of git operations in the course of its work.
Each change that is to be tested must be speculatively merged with the
current state of its target branch to ensure that it can merge, and to
ensure that the tests that Zuul perform accurately represent the
outcome of merging the change. Because Zuul's configuration is stored
in the git repos it interacts with, and is dynamically evaluated, Zuul
often needs to perform a speculative merge in order to determine
whether it needs to perform any further actions.
All of these git operations add up, and while Zuul executors can also
perform them, large numbers may impact their ability to run jobs.
Therefore, administrators may wish to run standalone mergers in order
to reduce the load on executors.
Mergers need to be able to connect to the ZooKeeper cluster as well as
any services for which connections are configured (Gerrit, GitHub,
etc).
The following section of ``zuul.conf`` is used by the merger:
.. attr:: merger
.. attr:: command_socket
:default: /var/lib/zuul/merger.socket
Path to command socket file for the merger process.
.. attr:: git_dir
:default: /var/lib/zuul/merger-git
Directory in which Zuul should clone git repositories.
.. attr:: git_http_low_speed_limit
:default: 1000
      If the HTTP transfer speed is less than git_http_low_speed_limit for
      longer than git_http_low_speed_time, the transfer is aborted.
Value in bytes, setting to 0 will disable.
.. attr:: git_http_low_speed_time
:default: 30
      If the HTTP transfer speed is less than git_http_low_speed_limit for
      longer than git_http_low_speed_time, the transfer is aborted.
Value in seconds, setting to 0 will disable.
.. attr:: git_timeout
:default: 300
Timeout for git clone and fetch operations. This can be useful when
dealing with large repos. Note that large timeouts can increase startup
and reconfiguration times if repos are not cached so be cautious when
increasing this value.
Value in seconds.
.. attr:: git_user_email
Value to pass to `git config user.email
<https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup>`_.
.. attr:: git_user_name
Value to pass to `git config user.name
<https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup>`_.
.. attr:: log_config
Path to log config file for the merger process.
.. attr:: pidfile
:default: /var/run/zuul/merger.pid
Path to PID lock file for the merger process.
.. attr:: prometheus_port
Set a TCP port to start the prometheus metrics client.
.. attr:: prometheus_addr
:default: 0.0.0.0
The IPv4 addr to listen for prometheus metrics poll.
To use IPv6, python>3.8 is required `issue24209 <https://bugs.python.org/issue24209>`_.
.. _executor:
Executor
--------
Executors are responsible for running jobs. At the start of each job,
an executor prepares an environment in which to run Ansible which
contains all of the git repositories specified by the job with all
dependent changes merged into their appropriate branches. The branch
corresponding to the proposed change will be checked out (in all
projects, if it exists). Any roles specified by the job will also be
present (also with dependent changes merged, if appropriate) and added
to the Ansible role path. The executor also prepares an Ansible
inventory file with all of the nodes requested by the job.
The executor also contains a merger. This is used by the executor to
prepare the git repositories used by jobs, but is also available to
perform any tasks normally performed by standalone mergers. Because
the executor performs both roles, small Zuul installations may not
need to run standalone mergers.
Executors need to be able to connect to the ZooKeeper cluster, any
services for which connections are configured (Gerrit, GitHub, etc),
as well as directly to the hosts which Nodepool provides.
Trusted and Untrusted Playbooks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The executor runs playbooks in one of two execution contexts depending
on whether the project containing the playbook is a
:term:`config-project` or an :term:`untrusted-project`. If the
playbook is in a config project, the executor runs the playbook in the
*trusted* execution context, otherwise, it is run in the *untrusted*
execution context.
Both execution contexts use `bubblewrap`_ to create a namespace to
ensure that playbook executions are isolated and are unable to access
files outside of a restricted environment. The administrator may
configure additional local directories on the executor to be made
available to the restricted environment.
.. _bubblewrap: https://github.com/projectatomic/bubblewrap
.. _executor_security:
Security Considerations
~~~~~~~~~~~~~~~~~~~~~~~
Bubblewrap restricts access to files outside of the build environment
in both execution contexts. Operators may allow either read-only or
read-write access to additional paths in either the `trusted` context
or both contexts with additional options described below. Be careful
when adding additional paths, and consider that any `trusted` or
`untrusted` (as appropriate) playbook will have access to these paths.
If executors are configured to use WinRM certificates, these must be
made available to the bubblewrap environment in order for Ansible to
use them. This invariably makes them accessible to any playbook in
that execution context. Operators may want to consider only supplying
WinRM credentials to trusted playbooks and installing per-build
certificates in a pre-playbook; or using Ansible's experimental SSH
support instead of WinRM.
Local code execution is permitted on the executor, so if a
vulnerability in bubblewrap or the kernel allows for an escape from
the restricted environment, users may be able to escalate their
privileges and obtain access to any data or secrets available to the
executor.
Playbooks which run on the executor will have the same network access
as the executor itself. This should be kept in mind when considering
IP-based network access control within an organization. Zuul's
internal communication is via ZooKeeper which is authenticated and
secured by TLS certificates, so as long as these certificates are not
made available to jobs, users should not be able to access or disrupt
Zuul's internal communications. However, statsd is an unauthenticated
protocol, so a malicious user could emit false statsd information.
If the Zuul executor is running in a cloud environment with a network
metadata service, users may be able to access that service. If it
supplies credentials, they may be able to obtain those credentials and
access cloud resources. Operators should ensure that in these
environments, the executors are configured with appropriately
restricted IAM profiles.
Configuration
~~~~~~~~~~~~~
The following sections of ``zuul.conf`` are used by the executor:
.. attr:: executor
.. attr:: command_socket
:default: /var/lib/zuul/executor.socket
Path to command socket file for the executor process.
.. attr:: finger_port
:default: 7900
Port to use for finger log streamer.
.. attr:: state_dir
:default: /var/lib/zuul
Path to directory in which Zuul should save its state.
.. attr:: git_dir
:default: /var/lib/zuul/executor-git
Directory that Zuul should clone local git repositories to. The
executor keeps a local copy of every git repository it works
with to speed operations and perform speculative merging.
This should be on the same filesystem as
:attr:`executor.job_dir` so that when git repos are cloned into
the job workspaces, they can be hard-linked to the local git
cache.
.. attr:: job_dir
:default: /var/lib/zuul/builds
Directory that Zuul should use to hold temporary job directories.
When each job is run, a new entry will be created under this
directory to hold the configuration and scratch workspace for
that job. It will be deleted at the end of the job (unless the
`--keep-jobdir` command line option is specified).
This should be on the same filesystem as :attr:`executor.git_dir`
so that when git repos are cloned into the job workspaces, they
can be hard-linked to the local git cache.
.. attr:: log_config
Path to log config file for the executor process.
.. attr:: pidfile
:default: /var/run/zuul/executor.pid
Path to PID lock file for the executor process.
.. attr:: private_key_file
:default: ~/.ssh/id_rsa
SSH private key file to be used when logging into worker nodes.
.. note:: If you use an RSA key, ensure it is encoded in the PEM
format (use the ``-t rsa -m PEM`` arguments to
`ssh-keygen`).
.. attr:: default_username
:default: zuul
Username to use when logging into worker nodes, if none is
supplied by Nodepool.
.. attr:: winrm_cert_key_file
:default: ~/.winrm/winrm_client_cert.key
The private key file of the client certificate to use for winrm
connections to Windows nodes.
.. attr:: winrm_cert_pem_file
:default: ~/.winrm/winrm_client_cert.pem
The certificate file of the client certificate to use for winrm
connections to Windows nodes.
.. note:: Currently certificate verification is disabled when
connecting to Windows nodes via winrm.
.. attr:: winrm_operation_timeout_sec
:default: None. The Ansible default of 20 is used in this case.
The timeout for WinRM operations.
.. attr:: winrm_read_timeout_sec
:default: None. The Ansible default of 30 is used in this case.
The timeout for WinRM read. Increase this if there are intermittent
network issues and read timeout errors keep occurring.
.. _admin_sitewide_variables:
.. attr:: variables
Path to an Ansible variables file to supply site-wide variables.
This should be a YAML-formatted file consisting of a single
dictionary. The contents will be made available to all jobs as
Ansible variables. These variables take precedence over all
other forms (job variables and secrets). Care should be taken
when naming these variables to avoid potential collisions with
those used by jobs. Prefixing variable names with a
site-specific identifier is recommended. The default is not to
add any site-wide variables. See the :ref:`User's Guide
<user_jobs_sitewide_variables>` for more information.
.. attr:: manage_ansible
:default: True
Specifies whether the zuul-executor should install the supported ansible
versions during startup or not. If this is ``True`` the zuul-executor
will install the ansible versions into :attr:`executor.ansible_root`.
It is recommended to set this to ``False`` and manually install Ansible
after the Zuul installation by running ``zuul-manage-ansible``. This has
the advantage that possible errors during Ansible installation can be
spotted earlier. Furthermore, containerized deployments of Zuul in
particular benefit from predictable Ansible versions.
.. attr:: ansible_root
:default: <state_dir>/ansible-bin
Specifies where the zuul-executor should look for its supported ansible
installations. By default it looks in the following directories and uses
the first which it can find.
* ``<zuul_install_dir>/lib/zuul/ansible``
* ``<ansible_root>``
The ``ansible_root`` setting allows you to override the second location
which is also used for installation if ``manage_ansible`` is ``True``.
.. attr:: ansible_setup_timeout
:default: 60
Timeout of the ansible setup playbook in seconds that runs before
the first playbook of the job.
.. attr:: disk_limit_per_job
:default: 250
This integer is the maximum number of megabytes that any one job
is allowed to consume on disk while it is running. If a job's
scratch space has more than this much space consumed, it will be
aborted. Set to -1 to disable the limit.
.. attr:: trusted_ro_paths
List of paths, separated by ``:`` to read-only bind mount into
trusted bubblewrap contexts.
.. attr:: trusted_rw_paths
List of paths, separated by ``:`` to read-write bind mount into
trusted bubblewrap contexts.
.. attr:: untrusted_ro_paths
List of paths, separated by ``:`` to read-only bind mount into
untrusted bubblewrap contexts.
.. attr:: untrusted_rw_paths
List of paths, separated by ``:`` to read-write bind mount into
untrusted bubblewrap contexts.
.. attr:: load_multiplier
:default: 2.5
When an executor host gets too busy, the system may suffer
timeouts and other ill effects. The executor will stop accepting
more than 1 job at a time until load has lowered below a safe
level. This level is determined by multiplying the number of
CPU's by `load_multiplier`.
So for example, if the system has 2 CPUs, and load_multiplier
is 2.5, the safe load for the system is 5.00. Any time the
system load average is over 5.00, the executor will quit
accepting multiple jobs at one time.
The executor will observe system load and determine whether
to accept more jobs every 30 seconds.
.. attr:: max_starting_builds
:default: None
An executor is accepting up to as many starting builds as defined by the
:attr:`executor.load_multiplier` on systems with more than four CPU cores,
and up to twice as many on systems with four or fewer CPU cores. For
example, on a system with two CPUs: 2 * 2.5 * 2 - up to ten starting
builds may run on such executor; on systems with eight CPUs: 2.5 * 8 - up
to twenty starting builds may run on such executor.
On systems with high CPU/vCPU count an executor may accept too many
starting builds. This can be overwritten using this option providing a
fixed number of maximum starting builds on an executor.
.. attr:: min_avail_hdd
:default: 5.0
This is the minimum percentage of HDD storage available for the
:attr:`executor.state_dir` directory. The executor will stop accepting
more than 1 job at a time until more HDD storage is available. The
available HDD percentage is calculated from the total available
disk space divided by the total real storage capacity multiplied by
100.
.. attr:: min_avail_mem
:default: 5.0
This is the minimum percentage of system RAM available. The
executor will stop accepting more than 1 job at a time until
more memory is available. The available memory percentage is
calculated from the total available memory divided by the
total real memory multiplied by 100. Buffers and cache are
considered available in the calculation.
.. attr:: hostname
:default: hostname of the server
The executor needs to know its hostname under which it is reachable by
zuul-web. Otherwise live console log streaming doesn't work. In most cases
this is automatically detected correctly, but when running in environments
where the hostname cannot be determined correctly it can be overridden
here.
.. attr:: paused_on_start
:default: false
Whether the executor should start in a paused mode. Such executor will not
accept tasks until it is unpaused.
.. attr:: zone
:default: None
Name of the nodepool executor-zone to exclusively execute all jobs that
have nodes with the specified executor-zone attribute. As an example,
it is possible for nodepool nodes to exist in a cloud without publicly
accessible IP addresses. By adding an executor to a zone, nodepool nodes
could be configured to use private IP addresses.
To enable this in nodepool, you'll use the node-attributes setting in a
provider pool. For example:
.. code-block:: yaml
pools:
- name: main
node-attributes:
executor-zone: vpn
.. attr:: allow_unzoned
:default: False
If :attr:`executor.zone` is set, the executor by default only processes
jobs with nodes of that specific zone; jobs whose nodes have no zone at
all are not accepted. Enabling ``allow_unzoned`` lets the executor also
take jobs with nodes without a zone.
.. attr:: merge_jobs
:default: True
To disable global merge job, set it to false. This is useful for zoned
executors that are running on slow network where you don't want them to
perform merge operations for any events. The executor will still perform
the merge operations required for the build they are executing.
.. attr:: sigterm_method
:default: graceful
Determines how the executor responds to a ``SIGTERM`` signal.
.. value:: graceful
Stop accepting new jobs and wait for all running jobs to
complete before exiting.
.. value:: stop
Abort all running jobs and exit as soon as possible.
.. attr:: prometheus_port
Set a TCP port to start the prometheus metrics client.
.. attr:: prometheus_addr
:default: 0.0.0.0
The IPv4 addr to listen for prometheus metrics poll.
To use IPv6, python>3.8 is required `issue24209 <https://bugs.python.org/issue24209>`_.
.. attr:: keystore
.. attr:: password
:required:
Encryption password for private data stored in Zookeeper.
.. attr:: merger
.. attr:: git_user_email
Value to pass to `git config user.email
<https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup>`_.
.. attr:: git_user_name
Value to pass to `git config user.name
<https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup>`_.
.. attr:: prometheus_port
Set a TCP port to start the prometheus metrics client.
.. attr:: prometheus_addr
:default: 0.0.0.0
The IPv4 addr to listen for prometheus metrics poll.
To use IPv6, python>3.8 is required `issue24209 <https://bugs.python.org/issue24209>`_.
.. attr:: ansible_callback "<name>"
To whitelist the Ansible callback ``<name>``. Any attributes found in this section
will be added to the ``callback_<name>`` section in ansible.cfg.
An example of what configuring the builtin mail callback would look like.
The configuration in zuul.conf.
.. code-block:: ini
[ansible_callback "mail"]
to = [email protected]
sender = [email protected]
Would generate the following in ansible.cfg:
.. code-block:: ini
[defaults]
callback_whitelist = mail
[callback_mail]
to = [email protected]
sender = [email protected]
.. _web-server:
Web Server
----------
.. TODO: Turn REST API into a link to swagger docs when we grow them
The Zuul web server serves as the single process handling all HTTP
interactions with Zuul. This includes the websocket interface for live
log streaming, the REST API and the html/javascript dashboard. All three are
served as a holistic web application. For information on additional supported
deployment schemes, see :ref:`web-deployment-options`.
Web servers need to be able to connect to the ZooKeeper cluster and
the SQL database. If a GitHub, Gitlab, or Pagure connection is
configured, they need to be reachable so they may receive
notifications.
In addition to the common configuration sections, the following
sections of ``zuul.conf`` are used by the web server:
.. attr:: web
.. attr:: command_socket
:default: /var/lib/zuul/web.socket
Path to command socket file for the web process.
.. attr:: listen_address
:default: 127.0.0.1
IP address or domain name on which to listen.
.. attr:: log_config
Path to log config file for the web server process.
.. attr:: pidfile
:default: /var/run/zuul/web.pid
Path to PID lock file for the web server process.
.. attr:: port
:default: 9000
Port to use for web server process.
.. attr:: websocket_url
Base URL on which the websocket service is exposed, if different
than the base URL of the web app.
.. attr:: stats_url
Base URL from which statistics emitted via statsd can be queried.
.. attr:: stats_type
:default: graphite
Type of server hosting the statistics information. Currently only
'graphite' is supported by the dashboard.
.. attr:: static_path
:default: zuul/web/static
Path containing the static web assets.
.. attr:: static_cache_expiry
:default: 3600
The Cache-Control max-age response header value for static files served
by the zuul-web. Set to 0 during development to disable Cache-Control.
.. attr:: zone
The zone in which zuul-web is deployed. This is only needed if
there are executors with different zones in the environment and
not all executors are directly addressable from zuul-web. The
parameter specifies the zone where the executors are directly
addressable. Live log streaming will go directly to the executors
of the same zone and be routed to a finger gateway of the target
zone if the zones are different.
In a mixed system (with zoned and unzoned executors) there may
also be zoned and unzoned zuul-web services. Omit the zone
parameter for any unzoned zuul-web servers.
If this is used the finger gateways should be configured accordingly.
.. attr:: keystore
.. attr:: password
:required:
Encryption password for private data stored in Zookeeper.
Authentication
~~~~~~~~~~~~~~
A user can be granted access to protected REST API endpoints by providing a
valid JWT (JSON Web Token) as a bearer token when querying the API endpoints.
JWTs are signed and therefore Zuul must be configured so that signatures can be
verified. More information about the JWT standard can be found on the `IETF's
RFC page <https://tools.ietf.org/html/rfc7519>`_.
This optional section of ``zuul.conf``, if present, will activate the
protected endpoints and configure JWT validation:
.. attr:: auth <authenticator name>
.. attr:: driver
The signing algorithm to use. Accepted values are ``HS256``, ``RS256``,
``RS256withJWKS`` or ``OpenIDConnect``. See below for driver-specific
configuration options.
.. attr:: allow_authz_override
:default: false
Allow a JWT to override predefined access rules. See the section on
:ref:`JWT contents <jwt-format>` for more details on how to grant access
to tenants with a JWT.
.. attr:: realm
The authentication realm.
.. attr:: default
:default: false
If set to ``true``, use this realm as the default authentication realm
when handling HTTP authentication errors.
.. attr:: client_id
The expected value of the "aud" claim in the JWT. This is required for
validation.
.. attr:: issuer_id
The expected value of the "iss" claim in the JWT. This is required for
validation.
.. attr:: uid_claim
:default: sub
The JWT claim that Zuul will use as a unique identifier for the bearer of
a token. This is "sub" by default, as it is usually the purpose of this
claim in a JWT. This identifier is used in audit logs.
.. attr:: max_validity_time
Optional value to ensure a JWT cannot be valid for more than this amount
of time in seconds. This is useful if the Zuul operator has no control
over the service issuing JWTs, and the tokens are too long-lived.
.. attr:: skew
:default: 0
Optional integer value to compensate for skew between Zuul's and the
JWT emitter's respective clocks. Use a negative value if Zuul's clock
is running behind.
This section can be repeated as needed with different authenticators, allowing
access to privileged API actions from several JWT issuers.
Driver-specific attributes
..........................
HS256
,,,,,
This is a symmetrical encryption algorithm that only requires a shared secret
between the JWT issuer and the JWT consumer (ie Zuul). This driver should be
used in test deployments, or in deployments where JWTs may be issued manually
to users.
.. note:: At least one HS256 authenticator should be configured in order to use admin commands with the Zuul command line interface.
.. attr:: secret
:noindex:
The shared secret used to sign JWTs and validate signatures.
RS256
,,,,,
This is an asymmetrical encryption algorithm that requires an RSA key pair. Only
the public key is needed by Zuul for signature validation.
.. attr:: public_key
The path to the public key of the RSA key pair. It must be readable by Zuul.
.. attr:: private_key
Optional. The path to the private key of the RSA key pair. It must be
readable by Zuul.
RS256withJWKS
,,,,,,,,,,,,,
.. warning::
This driver is deprecated, use ``OpenIDConnect`` instead.
Some Identity Providers use key sets (also known as **JWKS**), therefore the key to
use when verifying the Authentication Token's signatures cannot be known in
advance; the key's id is stored in the JWT's header and the key must then be
found in the remote key set.
The key set is usually available at a specific URL that can be found in the
"well-known" configuration of an OpenID Connect Identity Provider.
.. attr:: keys_url
The URL where the Identity Provider's key set can be found. For example, for
Google's OAuth service: https://www.googleapis.com/oauth2/v3/certs
OpenIDConnect
,,,,,,,,,,,,,
Use a third-party Identity Provider implementing the OpenID Connect protocol.
The issuer ID should be a URI, from which the "well-known" configuration URI
of the Identity Provider can be inferred. This is intended to be used for
authentication on Zuul's web user interface.
.. attr:: scope
:default: openid profile
The scope(s) to use when requesting access to a user's details. This attribute
can be multivalued (values must be separated by a space). Most OpenID Connect
Identity Providers support the default scopes "openid profile". A full list
of supported scopes can be found in the well-known configuration of the
Identity Provider under the key "scopes_supported".
.. attr:: keys_url
Optional. The URL where the Identity Provider's key set can be found.
For example, for Google's OAuth service: https://www.googleapis.com/oauth2/v3/certs
The well-known configuration of the Identity Provider should provide this URL
under the key "jwks_uri", therefore this attribute is usually not necessary.
Some providers may not conform to the JWT specification and further
configuration may be necessary. In these cases, the following
additional values may be used:
.. attr:: authority
:default: issuer_id
If the authority in the token response is not the same as the
issuer_id in the request, it may be explicitly set here.
.. attr:: audience
:default: client_id
If the audience in the token response is not the same as the
client_id in the request, it may be explicitly set here.
.. attr:: load_user_info
:default: true
If the web UI should skip accessing the "UserInfo" endpoint and
instead rely only on the information returned in the token, set
this to ``false``.
Client
------
Zuul's command line client may be configured to make calls to Zuul's web
server. The client will then look for a ``zuul.conf`` file with a ``webclient``
section to set up the connection over HTTP.
.. note:: At least one authenticator must be configured in Zuul for admin commands to be enabled in the client.
.. attr:: webclient
.. attr:: url
The root URL of Zuul's web server.
.. attr:: verify_ssl
:default: true
Enforce SSL verification when sending requests over to Zuul's web server.
This should only be disabled when working with test servers.
Finger Gateway
--------------
The Zuul finger gateway listens on the standard finger port (79) for
finger requests specifying a build UUID for which it should stream log
results. The gateway will determine which executor is currently running that
build and query that executor for the log stream.
This is intended to be used with the standard finger command line client.
For example::
finger [email protected]
The above would stream the logs for the build identified by `UUID`.
Finger gateway servers need to be able to connect to the ZooKeeper
cluster, as well as the console streaming port on the executors
(usually 7900).
Finger gateways are optional. They may be run for either or both of
the following purposes:
* Allowing end-users to connect to the finger port to stream logs.
* Providing an accessible log streaming port for remote zoned
executors which are otherwise inaccessible.
In this case, log streaming requests from finger gateways or
zuul-web will route to the executors via finger gateways in the same
zone.
In addition to the common configuration sections, the following
sections of ``zuul.conf`` are used by the finger gateway:
.. attr:: fingergw
.. attr:: command_socket
:default: /var/lib/zuul/fingergw.socket
Path to command socket file for the finger gateway process.
.. attr:: listen_address
:default: all addresses
IP address or domain name on which to listen.
.. attr:: log_config
Path to log config file for the finger gateway process.
.. attr:: pidfile
:default: /var/run/zuul/fingergw.pid
Path to PID lock file for the finger gateway process.
.. attr:: port
:default: 79
Port to use for the finger gateway. Note that since command line
finger clients cannot usually specify the port, leaving this set to
the default value is highly recommended.
.. attr:: user
User ID for the zuul-fingergw process. In normal operation as a
daemon, the finger gateway should be started as the ``root``
user, but if this option is set, it will drop privileges to this
user during startup. It is recommended to set this option to an
unprivileged user.
.. attr:: hostname
:default: hostname of the server
When running finger gateways in a multi-zone configuration, the
gateway needs to know its hostname under which it is reachable
by zuul-web. Otherwise live console log streaming doesn't
work. In most cases this is automatically detected
correctly, but when running in environments where the hostname
cannot be determined correctly it can be overridden here.
.. attr:: zone
The zone where the finger gateway is located. This is only needed for
live log streaming if the zuul deployment is spread over multiple
zones without the ability to directly connect to all executors from
zuul-web. See :attr:`executor.zone` for further information.
In a mixed system (with zoned and unzoned executors) there may
also be zoned and unzoned finger gateway services. Omit the zone
parameter for any unzoned finger gateway servers.
If the Zuul installation spans an untrusted network (for example, if
there are remote executor zones), it may be necessary to use TLS
between the components that handle log streaming (zuul-executor,
zuul-fingergw, and zuul-web). If so, set the following options.
Note that this section is also read by zuul-web in order to load a
client certificate to use when connecting to a finger gateway which
requires TLS, and it is also read by zuul-executor to load a server
certificate for its console streaming port.
If any of these are present, all three certificate options must be
provided.
.. attr:: tls_cert
The path to the PEM encoded certificate file.
.. attr:: tls_key
The path to the PEM encoded key file.
.. attr:: tls_ca
The path to the PEM encoded CA certificate file.
.. attr:: tls_verify_hostnames
:default: true
In the case of a private CA it may be both safe and convenient
to disable hostname checks. However, if the certificates are
issued by a public CA, hostname verification should be enabled.
.. attr:: tls_client_only
:default: false
In order to provide a finger gateway which can reach remote
finger gateways and executors which use TLS, but does not itself
serve end-users via TLS (i.e., it runs within a protected
network and users access it directly via the finger port), set
this to ``true`` and the finger gateway will not listen on TLS,
but will still use the supplied certificate to make remote TLS
connections.
.. _connections:
Connections
===========
Most of Zuul's configuration is contained in the git repositories upon
which Zuul operates, however, some configuration outside of git
repositories is still required to bootstrap the system. This includes
information on connections between Zuul and other systems, as well as
identifying the projects Zuul uses.
In order to interact with external systems, Zuul must have a
*connection* to that system configured. Zuul includes a number of
:ref:`drivers <drivers>`, each of which implements the functionality
necessary to connect to a system. Each connection in Zuul is
associated with a driver.
To configure a connection in Zuul, select a unique name for the
connection and add a section to ``zuul.conf`` with the form
``[connection NAME]``. For example, a connection to a gerrit server
may appear as:
.. code-block:: ini
[connection mygerritserver]
driver=gerrit
server=review.example.com
Zuul needs to use a single connection to look up information about
changes hosted by a given system. When it looks up changes, it will
do so using the first connection it finds that matches the server name
it's looking for. It's generally best to use only a single connection
for a given server, however, if you need more than one (for example,
to satisfy unique reporting requirements) be sure to list the primary
connection first as that is what Zuul will use to look up all changes
for that server.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/configuration.rst | configuration.rst |
:title: Zuul Admin Client
Zuul Admin Client
=================
Zuul includes a simple command line client that may be used to affect Zuul's
behavior while running.
.. note:: For operations related to normal workflow like enqueue, dequeue, autohold and promote, the `zuul-client` CLI should be used instead.
Configuration
-------------
The client uses the same zuul.conf file as the server, and will look
for it in the same locations if not specified on the command line.
Usage
-----
The general options that apply to all subcommands are:
.. program-output:: zuul-admin --help
The following subcommands are supported:
tenant-conf-check
^^^^^^^^^^^^^^^^^
.. program-output:: zuul-admin tenant-conf-check --help
Example::
zuul-admin tenant-conf-check
This command validates the tenant configuration schema. It exits with
'-1' if errors are detected.
create-auth-token
^^^^^^^^^^^^^^^^^
.. note:: This command is only available if an authenticator is configured in
``zuul.conf``. Furthermore the authenticator's configuration must
include a signing secret.
.. program-output:: zuul-admin create-auth-token --help
Example::
zuul-admin create-auth-token --auth-config zuul-operator --user alice --tenant tenantA --expires-in 1800
The return value is the value of the ``Authorization`` header the user must set
when querying a protected endpoint on Zuul's REST API.
Example::
bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJodHRwOi8vbWFuYWdlc2Yuc2ZyZG90ZXN0aW5zdGFuY2Uub3JnIiwienV1bC50ZW5hbnRzIjp7ImxvY2FsIjoiKiJ9LCJleHAiOjE1Mzc0MTcxOTguMzc3NTQ0fQ.DLbKx1J84wV4Vm7sv3zw9Bw9-WuIka7WkPQxGDAHz7s
.. _export-keys:
export-keys
^^^^^^^^^^^
.. program-output:: zuul-admin export-keys --help
Example::
zuul-admin export-keys /var/backup/zuul-keys.json
.. _import-keys:
import-keys
^^^^^^^^^^^
.. program-output:: zuul-admin import-keys --help
Example::
zuul-admin import-keys /var/backup/zuul-keys.json
copy-keys
^^^^^^^^^
.. program-output:: zuul-admin copy-keys --help
Example::
zuul-admin copy-keys gerrit old_project gerrit new_project
delete-keys
^^^^^^^^^^^
.. program-output:: zuul-admin delete-keys --help
Example::
zuul-admin delete-keys gerrit old_project
delete-state
^^^^^^^^^^^^
.. program-output:: zuul-admin delete-state --help
Example::
zuul-admin delete-state
delete-pipeline-state
^^^^^^^^^^^^^^^^^^^^^
.. program-output:: zuul-admin delete-pipeline-state --help
Example::
zuul-admin delete-pipeline-state tenant pipeline
prune-database
^^^^^^^^^^^^^^
.. program-output:: zuul-admin prune-database --help
Example::
zuul-admin prune-database --older-than 180d
Deprecated commands
-------------------
The following commands are deprecated in the zuul-admin CLI, and thus may not be entirely supported in Zuul's current version.
They will be removed in a future release of Zuul. They can still be performed via the `zuul-client` CLI.
Please refer to `zuul-client's documentation <https://zuul-ci.org/docs/zuul-client/>`__
for more details.
In order to run these commands, the ``webclient`` section is required in the configuration file.
It is also possible to run the client without a configuration file, by using the
``--zuul-url`` option to specify the base URL of the Zuul web server.
Autohold
^^^^^^^^
.. program-output:: zuul-admin autohold --help
Example::
zuul-admin autohold --tenant openstack --project example_project --job example_job --reason "reason text" --count 1
Autohold Delete
^^^^^^^^^^^^^^^
.. program-output:: zuul-admin autohold-delete --help
Example::
zuul-admin autohold-delete --id 0000000123
Autohold Info
^^^^^^^^^^^^^
.. program-output:: zuul-admin autohold-info --help
Example::
zuul-admin autohold-info --id 0000000123
Autohold List
^^^^^^^^^^^^^
.. program-output:: zuul-admin autohold-list --help
Example::
zuul-admin autohold-list --tenant openstack
Dequeue
^^^^^^^
.. program-output:: zuul-admin dequeue --help
Examples::
zuul-admin dequeue --tenant openstack --pipeline check --project example_project --change 5,1
zuul-admin dequeue --tenant openstack --pipeline periodic --project example_project --ref refs/heads/master
Enqueue
^^^^^^^
.. program-output:: zuul-admin enqueue --help
Example::
zuul-admin enqueue --tenant openstack --trigger gerrit --pipeline check --project example_project --change 12345,1
Note that the format of change id is <number>,<patchset>.
Enqueue-ref
^^^^^^^^^^^
.. program-output:: zuul-admin enqueue-ref --help
This command is provided to manually simulate a trigger from an
external source. It can be useful for testing or replaying a trigger
that is difficult or impossible to recreate at the source. The
arguments to ``enqueue-ref`` will vary depending on the source and
type of trigger. Some familiarity with the arguments emitted by
``gerrit`` `update hooks
<https://gerrit-review.googlesource.com/admin/projects/plugins/hooks>`__
such as ``patchset-created`` and ``ref-updated`` is recommended. Some
examples of common operations are provided below.
Manual enqueue examples
***********************
It is common to have a ``release`` pipeline that listens for new tags
coming from ``gerrit`` and performs a range of code packaging jobs.
If there is an unexpected issue in the release jobs, the same tag can
not be recreated in ``gerrit`` and the user must either tag a new
release or request a manual re-triggering of the jobs. To re-trigger
the jobs, pass the failed tag as the ``ref`` argument and set
``newrev`` to the change associated with the tag in the project
repository (i.e. what you see from ``git show X.Y.Z``)::
zuul-admin enqueue-ref --tenant openstack --trigger gerrit --pipeline release --project openstack/example_project --ref refs/tags/X.Y.Z --newrev abc123..
The command can also be used to asynchronously trigger a job in a
``periodic`` pipeline that would usually be run at a specific time by
the ``timer`` driver. For example, the following command would
trigger the ``periodic`` jobs against the current ``master`` branch
top-of-tree for a project::
zuul-admin enqueue-ref --tenant openstack --trigger timer --pipeline periodic --project openstack/example_project --ref refs/heads/master
Another common pipeline is a ``post`` queue listening for ``gerrit``
merge results. Triggering here is slightly more complicated as you
wish to recreate the full ``ref-updated`` event from ``gerrit``. For
a new commit on ``master``, the gerrit ``ref-updated`` trigger
expresses "reset ``refs/heads/master`` for the project from ``oldrev``
to ``newrev``" (``newrev`` being the committed change). Thus to
replay the event, you could ``git log`` in the project and take the
current ``HEAD`` and the prior change, then enqueue the event::
NEW_REF=$(git rev-parse HEAD)
OLD_REF=$(git rev-parse HEAD~1)
zuul-admin enqueue-ref --tenant openstack --trigger gerrit --pipeline post --project openstack/example_project --ref refs/heads/master --newrev $NEW_REF --oldrev $OLD_REF
Note that zero values for ``oldrev`` and ``newrev`` can indicate
branch creation and deletion; the source code is the best reference
for these more advanced operations.
Promote
^^^^^^^
.. program-output:: zuul-admin promote --help
Example::
zuul-admin promote --tenant openstack --pipeline gate --changes 12345,1 13336,3
Note that the format of changes id is <number>,<patchset>.
The promote action is used to reorder the changes in a pipeline, by
putting the provided changes at the top of the queue.
The most common use case for the promote action is the need to merge
an urgent fix when the gate pipeline has several patches queued
ahead. This is especially needed if there is concern that one or more
changes ahead in the queue may fail, thus increasing the time to land
for the fix; or concern that the fix may not pass validation if
applied on top of the current patch queue in the gate.
Any items in a dependent pipeline which have had items ahead of them
changed will have their jobs canceled and restarted based on the new
ordering.
If items in independent pipelines are promoted, no jobs will be
restarted, but their change queues within the pipeline will be
re-ordered so that they will be processed first and their node request
priorities will increase.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/client.rst | client.rst |
:title: Monitoring
Monitoring
==========
.. _statsd:
Statsd reporting
----------------
Zuul comes with support for the statsd protocol. When enabled and configured
(see below), the Zuul scheduler will emit raw metrics to a statsd receiver,
which in turn lets you generate nice graphs.
Configuration
~~~~~~~~~~~~~
Statsd support uses the ``statsd`` python module. Note that support
is optional and Zuul will start without the statsd python module
present.
Configuration is in the :attr:`statsd` section of ``zuul.conf``.
Metrics
~~~~~~~
These metrics are emitted by the Zuul :ref:`scheduler`:
.. stat:: zuul.event.<driver>.<type>
:type: counter
Zuul will report counters for each type of event it receives from
each of its configured drivers.
.. stat:: zuul.connection.<connection>
Holds metrics specific to connections. This hierarchy includes:
.. stat:: cache.data_size_compressed
:type: gauge
The number of bytes stored in ZooKeeper for all items in this
connection's change cache.
.. stat:: cache.data_size_uncompressed
:type: gauge
The number of bytes required for the change cache (the
decompressed value of ``data_size_compressed``).
.. stat:: zuul.tenant.<tenant>.event_enqueue_processing_time
:type: timer
A timer metric reporting the time from when the scheduler receives
a trigger event from a driver until the corresponding item is
enqueued in a pipeline. This measures the performance of the
scheduler in dispatching events.
.. stat:: zuul.tenant.<tenant>.event_enqueue_time
:type: timer
A timer metric reporting the time from when a trigger event was
received from the remote system to when the corresponding item is
enqueued in a pipeline. This includes
:stat:`zuul.tenant.<tenant>.event_enqueue_processing_time` and any
driver-specific pre-processing of the event.
.. stat:: zuul.tenant.<tenant>.management_events
:type: gauge
The size of the tenant's management event queue.
.. stat:: zuul.tenant.<tenant>.reconfiguration_time
:type: timer
A timer metric reporting the time taken to reconfigure a tenant.
This is performed by one scheduler after a tenant reconfiguration
event is received. During this time, all processing of that
tenant's pipelines are halted. This measures that time.
Once the first scheduler completes a tenant reconfiguration, other
schedulers may update their layout in the background without
interrupting processing. That is not reported in this metric.
.. stat:: zuul.tenant.<tenant>.trigger_events
:type: gauge
The size of the tenant's trigger event queue.
.. stat:: zuul.tenant.<tenant>.pipeline
Holds metrics specific to jobs. This hierarchy includes:
.. stat:: <pipeline>
A set of metrics for each pipeline named as defined in the Zuul
config.
.. stat:: event_enqueue_time
:type: timer
The time elapsed from when a trigger event was received from
the remote system to when the corresponding item is enqueued
in a pipeline.
.. stat:: merge_request_time
:type: timer
The amount of time spent waiting for the initial merge
operation(s). This will always include a request to a Zuul
merger to speculatively merge the change, but it may also
include a second request submitted in parallel to identify
the files altered by the change. Includes
:stat:`zuul.tenant.<tenant>.pipeline.<pipeline>.merger_merge_op_time`
and
:stat:`zuul.tenant.<tenant>.pipeline.<pipeline>.merger_files_changes_op_time`.
.. stat:: merger_merge_op_time
:type: timer
The amount of time the merger spent performing a merge
operation. This does not include any of the round-trip time
from the scheduler to the merger, or any other merge
operations.
.. stat:: merger_files_changes_op_time
:type: timer
The amount of time the merger spent performing a files-changes
operation to detect changed files (this is sometimes
performed if the source does not provide this information).
This does not include any of the round-trip time from the
scheduler to the merger, or any other merge operations.
.. stat:: layout_generation_time
:type: timer
The amount of time spent generating a dynamic configuration layout.
.. stat:: job_freeze_time
:type: timer
The amount of time spent freezing the inheritance hierarchy
and parameters of a job.
.. stat:: repo_state_time
:type: timer
The amount of time waiting for a secondary Zuul merger
operation to collect additional information about the repo
state of required projects. Includes
:stat:`zuul.tenant.<tenant>.pipeline.<pipeline>.merger_repo_state_op_time`.
.. stat:: merger_repo_state_op_time
:type: timer
The amount of time the merger spent performing a repo state
operation to collect additional information about the repo
state of required projects. This does not include any of the
round-trip time from the scheduler to the merger, or any
other merge operations.
.. stat:: node_request_time
:type: timer
The amount of time spent waiting for each node request to be
fulfilled.
.. stat:: job_wait_time
:type: timer
How long a job waited for an executor to start running it
after the build was requested.
.. stat:: event_job_time
:type: timer
The total amount of time elapsed from when a trigger event
was received from the remote system until the item's first
job is run. This is only emitted once per queue item, even
if its buildset is reset due to a speculative execution
failure.
.. stat:: all_jobs
:type: counter
Number of jobs triggered by the pipeline.
.. stat:: current_changes
:type: gauge
The number of items currently being processed by this
pipeline.
.. stat:: handling
:type: timer
The total time taken to refresh and process the pipeline.
This is emitted every time a scheduler examines a pipeline
regardless of whether it takes any actions.
.. stat:: event_process
:type: timer
The time taken to process the event queues for the pipeline.
This is emitted only if there are events to process.
.. stat:: process
:type: timer
The time taken to process the pipeline. This is emitted only
if there were events to process.
.. stat:: data_size_compressed
:type: gauge
The number of bytes stored in ZooKeeper to represent the
serialized state of the pipeline.
.. stat:: data_size_uncompressed
:type: gauge
The number of bytes required to represent the serialized
state of the pipeline (the decompressed value of
``data_size_compressed``).
.. stat:: project
This hierarchy holds more specific metrics for each project
participating in the pipeline.
.. stat:: <canonical_hostname>
The canonical hostname for the triggering project.
Embedded ``.`` characters will be translated to ``_``.
.. stat:: <project>
The name of the triggering project. Embedded ``/`` or
``.`` characters will be translated to ``_``.
.. stat:: <branch>
The name of the triggering branch. Embedded ``/`` or
``.`` characters will be translated to ``_``.
.. stat:: job
Subtree detailing per-project job statistics:
.. stat:: <jobname>
The triggered job name.
.. stat:: <result>
:type: counter, timer
A counter for each type of result (e.g., ``SUCCESS`` or
``FAILURE``, ``ERROR``, etc.) for the job. If the
result is ``SUCCESS`` or ``FAILURE``, Zuul will
additionally report the duration of the build as a
timer.
.. stat:: wait_time
:type: timer
How long the job waited for an executor to
start running it after the build was requested.
.. stat:: current_changes
:type: gauge
The number of items of this project currently being
processed by this pipeline.
.. stat:: resident_time
:type: timer
A timer metric reporting how long each item for this
project has been in the pipeline.
.. stat:: total_changes
:type: counter
The number of changes for this project processed by the
pipeline since Zuul started.
.. stat:: read_time
:type: timer
The time spent reading data from ZooKeeper during a single
pipeline processing run.
.. stat:: read_znodes
:type: gauge
The number of ZNodes read from ZooKeeper during a single
pipeline processing run.
.. stat:: read_objects
:type: gauge
The number of Zuul data model objects read from ZooKeeper
during a single pipeline processing run.
.. stat:: read_bytes
:type: gauge
The amount of data read from ZooKeeper during a single
pipeline processing run.
.. stat:: refresh
:type: timer
The time taken to refresh the state from ZooKeeper.
.. stat:: resident_time
:type: timer
A timer metric reporting how long each item has been in the
pipeline.
.. stat:: total_changes
:type: counter
The number of changes processed by the pipeline since Zuul
started.
.. stat:: trigger_events
:type: gauge
The size of the pipeline's trigger event queue.
.. stat:: result_events
:type: gauge
The size of the pipeline's result event queue.
.. stat:: management_events
:type: gauge
The size of the pipeline's management event queue.
.. stat:: write_time
:type: timer
The time spent writing data to ZooKeeper during a single
pipeline processing run.
.. stat:: write_znodes
:type: gauge
The number of ZNodes written to ZooKeeper during a single
pipeline processing run.
.. stat:: write_objects
:type: gauge
The number of Zuul data model objects written to ZooKeeper
during a single pipeline processing run.
.. stat:: write_bytes
:type: gauge
The amount of data written to ZooKeeper during a single
pipeline processing run.
.. stat:: zuul.executor.<executor>
Holds metrics emitted by individual executors. The ``<executor>``
component of the key will be replaced with the hostname of the
executor.
.. stat:: merger.<result>
:type: counter
Incremented to represent the status of a Zuul executor's merger
operations. ``<result>`` can be either ``SUCCESS`` or ``FAILURE``.
A failed merge operation which would be accounted for as a ``FAILURE``
is what ends up being returned by Zuul as a ``MERGE_CONFLICT``.
.. stat:: builds
:type: counter
Incremented each time the executor starts a build.
.. stat:: starting_builds
:type: gauge, timer
The number of builds starting on this executor and a timer containing
how long jobs were in this state. These are builds which have not yet
begun their first pre-playbook.
The timer requires careful interpretation because it
aggregates all jobs. It can be useful when aggregated over a longer
period of time (maybe a day) where fast rising graphs could indicate e.g.
IO problems of the machines the executors are running on. But it has to
be noted that a rising graph also can indicate a higher usage of complex
jobs using more required projects. Also comparing several executors might
give insight if the graphs differ a lot from each other. Typically the
jobs are equally distributed over all executors (in the same zone when
using the zone feature) and as such the starting jobs timers (aggregated
over a large enough interval) should not differ much.
.. stat:: running_builds
:type: gauge
The number of builds currently running on this executor. This
includes starting builds.
.. stat:: paused_builds
:type: gauge
The number of currently paused builds on this executor.
.. stat:: phase
Subtree detailing per-phase execution statistics:
.. stat:: <phase>
``<phase>`` represents a phase in the execution of a job.
This can be an *internal* phase (such as ``setup`` or ``cleanup``) as
well as *job* phases such as ``pre``, ``run`` or ``post``.
.. stat:: <result>
:type: counter
A counter for each type of result.
These results do not, by themselves, determine the status of a build
but are indicators of the exit status provided by Ansible for the
execution of a particular phase.
Example of possible counters for each phase are: ``RESULT_NORMAL``,
``RESULT_TIMED_OUT``, ``RESULT_UNREACHABLE``, ``RESULT_ABORTED``.
.. stat:: load_average
:type: gauge
The one-minute load average of this executor, multiplied by 100.
.. stat:: pause
:type: gauge
Indicates if the executor is paused. 1 means paused else 0.
.. stat:: pct_used_hdd
:type: gauge
The used disk on this executor, as a percentage multiplied by 100.
.. stat:: pct_used_ram
:type: gauge
The used RAM (excluding buffers and cache) on this executor, as
a percentage multiplied by 100.
.. stat:: pct_used_ram_cgroup
:type: gauge
The used RAM (excluding buffers and cache) on this executor allowed by
the cgroup, as percentage multiplied by 100.
.. stat:: zuul.nodepool.requests
Holds metrics related to Zuul requests and responses from Nodepool.
States are one of:
*requested*
Node request submitted by Zuul to Nodepool
*canceled*
Node request was canceled by Zuul
*failed*
Nodepool failed to fulfill a node request
*fulfilled*
Nodes were assigned by Nodepool
.. stat:: <state>
:type: timer
Records the elapsed time from request to completion for states
`failed` and `fulfilled`. For example,
``zuul.nodepool.requests.fulfilled.mean`` will give the average
time for all fulfilled requests within each ``statsd`` flush
interval.
A lower value for `fulfilled` requests is better. Ideally,
there will be no `failed` requests.
.. stat:: <state>.total
:type: counter
Incremented when nodes are assigned or removed as described in
the states above.
.. stat:: <state>.size.<size>
:type: counter, timer
Increments for the node count of each request. For example, a
request for 3 nodes would use the key
``zuul.nodepool.requests.requested.size.3``; fulfillment of 3
node requests can be tracked with
``zuul.nodepool.requests.fulfilled.size.3``.
The timer is implemented for ``fulfilled`` and ``failed``
requests. For example, the timer
``zuul.nodepool.requests.failed.size.3.mean`` gives the average
time of 3-node failed requests within the ``statsd`` flush
interval. A lower value for `fulfilled` requests is better.
Ideally, there will be no `failed` requests.
.. stat:: <state>.label.<label>
:type: counter, timer
Increments for the label of each request. For example, requests
for `centos7` nodes could be tracked with
``zuul.nodepool.requests.requested.label.centos7``.
The timer is implemented for ``fulfilled`` and ``failed``
requests. For example, the timer
``zuul.nodepool.requests.fulfilled.label.centos7.mean`` gives
the average time of ``centos7`` fulfilled requests within the
``statsd`` flush interval. A lower value for `fulfilled`
requests is better. Ideally, there will be no `failed`
requests.
.. stat:: zuul.nodepool
.. stat:: current_requests
:type: gauge
The number of outstanding nodepool requests from Zuul. Ideally
this will be at zero, meaning all requests are fulfilled.
Persistently high values indicate more testing node resources
would be helpful.
.. stat:: tenant.<tenant>.current_requests
:type: gauge
The number of outstanding nodepool requests from Zuul drilled down by
<tenant>. If a tenant for a node request cannot be determined, it is
reported as ``unknown``. This relates to
``zuul.nodepool.current_requests``.
.. stat:: zuul.nodepool.resources
Holds metrics about resource usage by tenant or project if resources
of nodes are reported by nodepool.
.. stat:: in_use
Holds metrics about resources currently in use by a build.
.. stat:: tenant
Holds resource usage metrics by tenant.
.. stat:: <tenant>.<resource>
:type: counter, gauge
Counter with the summed usage by tenant as <resource> seconds and
gauge with the currently in use resources by tenant.
.. stat:: project
Holds resource usage metrics by project.
.. stat:: <project>.<resource>
:type: counter, gauge
Counter with the summed usage by project as <resource> seconds and
gauge with the currently used resources by project.
.. stat:: total
Holds metrics about resources allocated in total. This includes
resources that are currently in use, allocated but not yet in use, and
scheduled to be deleted.
.. stat:: tenant
Holds resource usage metrics by tenant.
.. stat:: <tenant>.<resource>
:type: gauge
Gauge with the currently used resources by tenant.
.. stat:: zuul.mergers
Holds metrics related to Zuul mergers.
.. stat:: online
:type: gauge
The number of Zuul merger processes online.
.. stat:: jobs_running
:type: gauge
The number of merge jobs running.
.. stat:: jobs_queued
:type: gauge
The number of merge jobs waiting for a merger. This should
ideally be zero; persistent higher values indicate more merger
resources would be useful.
.. stat:: zuul.executors
Holds metrics related to unzoned executors.
This is a copy of :stat:`zuul.executors.unzoned`. It does not
include information about zoned executors.
.. warning:: The metrics at this location are deprecated and will
be removed in a future version. Please begin using
:stat:`zuul.executors.unzoned` instead.
.. stat:: online
:type: gauge
The number of Zuul executor processes online.
.. stat:: accepting
:type: gauge
The number of Zuul executor processes accepting new jobs.
.. stat:: jobs_running
:type: gauge
The number of executor jobs running.
.. stat:: jobs_queued
:type: gauge
The number of jobs allocated nodes, but queued waiting for an
executor to run on. This should ideally be at zero; persistent
higher values indicate more executor resources would be useful.
.. stat:: unzoned
Holds metrics related to unzoned executors.
.. stat:: online
:type: gauge
The number of unzoned Zuul executor processes online.
.. stat:: accepting
:type: gauge
The number of unzoned Zuul executor processes accepting new
jobs.
.. stat:: jobs_running
:type: gauge
The number of unzoned executor jobs running.
.. stat:: jobs_queued
:type: gauge
The number of jobs allocated nodes, but queued waiting for an
unzoned executor to run on. This should ideally be at zero;
persistent higher values indicate more executor resources
would be useful.
.. stat:: zone
Holds metrics related to zoned executors.
.. stat:: <zone>.online
:type: gauge
The number of Zuul executor processes online in this zone.
.. stat:: <zone>.accepting
:type: gauge
The number of Zuul executor processes accepting new jobs in
this zone.
.. stat:: <zone>.jobs_running
:type: gauge
The number of executor jobs running in this zone.
.. stat:: <zone>.jobs_queued
:type: gauge
The number of jobs allocated nodes, but queued waiting for an
executor in this zone to run on. This should ideally be at
zero; persistent higher values indicate more executor
resources would be useful.
.. stat:: zuul.scheduler
Holds metrics related to the Zuul scheduler.
.. stat:: eventqueues
Holds metrics about the event queue lengths in the Zuul scheduler.
.. stat:: management
:type: gauge
The size of the current reconfiguration event queue.
.. stat:: connection.<connection-name>
:type: gauge
The size of the current connection event queue.
.. stat:: run_handler
:type: timer
A timer metric reporting the time taken for one scheduler run
handler iteration.
.. stat:: time_query
:type: timer
Each time the scheduler performs a query against the SQL
database in order to determine an estimated time for a job, it
emits this timer of the duration of the query. Note this is a
performance metric of how long the SQL query takes; it is not
the estimated time value itself.
.. stat:: zuul.web
Holds metrics related to the Zuul web component.
.. stat:: server.<hostname>
Holds metrics from a specific zuul-web server.
.. stat:: threadpool
Metrics related to the web server thread pool.
.. stat:: idle
:type: gauge
The number of idle workers.
.. stat:: queue
:type: gauge
The number of requests queued for workers.
.. stat:: streamers
:type: gauge
The number of log streamers currently in operation.
As an example, given a job named `myjob` in `mytenant` triggered by a
change to `myproject` on the `master` branch in the `gate` pipeline
which took 40 seconds to build, the Zuul scheduler will emit the
following statsd events:
* ``zuul.tenant.mytenant.pipeline.gate.project.example_com.myproject.master.job.myjob.SUCCESS`` +1
* ``zuul.tenant.mytenant.pipeline.gate.project.example_com.myproject.master.job.myjob.SUCCESS`` 40 seconds
* ``zuul.tenant.mytenant.pipeline.gate.all_jobs`` +1
Prometheus monitoring
---------------------
Zuul comes with support to start a prometheus_ metric server to be added as
prometheus's target.
.. _prometheus: https://prometheus.io/docs/introduction/overview/
Configuration
~~~~~~~~~~~~~
To enable the service, set the ``prometheus_port`` in a service section of
``zuul.conf``. For example setting :attr:`scheduler.prometheus_port` to 9091
starts an HTTP server to expose metrics to a Prometheus service at:
http://scheduler:9091/metrics
Metrics
~~~~~~~
These metrics are exposed by default:
.. stat:: process_virtual_memory_bytes
:type: gauge
.. stat:: process_resident_memory_bytes
:type: gauge
.. stat:: process_open_fds
:type: gauge
.. stat:: process_start_time_seconds
:type: gauge
.. stat:: process_cpu_seconds_total
:type: counter
On web servers the following additional metrics are exposed:
.. stat:: web_threadpool_idle
:type: gauge
The number of idle workers in the thread pool.
.. stat:: web_threadpool_queue
:type: gauge
The number of requests queued for thread pool workers.
.. stat:: web_streamers
:type: gauge
The number of log streamers currently in operation.
.. _prometheus_liveness:
Liveness Probes
~~~~~~~~~~~~~~~
The Prometheus server also supports liveness and ready probes at the
following URIs:
.. path:: health/live
Returns 200 as long as the process is running.
.. path:: health/ready
Returns 200 if the process is in `RUNNING` or `PAUSED` states.
Otherwise, returns 503. Note that 503 is returned for
`INITIALIZED`, so this may be used to determine when a component
has completely finished loading configuration.
.. path:: health/status
This always returns 200, but includes the component status as the
text body of the response.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/monitoring.rst | monitoring.rst |
.. _pipeline:
Pipeline
========
A pipeline describes a workflow operation in Zuul. It associates jobs
for a given project with triggering and reporting events.
Its flexible configuration allows for characterizing any number of
workflows, and by specifying each as a named configuration, makes it
easy to apply similar workflow operations to projects or groups of
projects.
By way of example, one of the primary uses of Zuul is to perform
project gating. To do so, one can create a :term:`gate` pipeline
which tells Zuul that when a certain event (such as approval by a code
reviewer) occurs, the corresponding change or pull request should be
enqueued into the pipeline. When that happens, the jobs which have
been configured to run for that project in the gate pipeline are run,
and when they complete, the pipeline reports the results to the user.
Pipeline configuration items may only appear in :term:`config-projects
<config-project>`.
Generally, a Zuul administrator would define a small number of
pipelines which represent the workflow processes used in their
environment. Each project can then be added to the available
pipelines as appropriate.
Here is an example :term:`check` pipeline, which runs whenever a new
patchset is created in Gerrit. If the associated jobs all report
success, the pipeline reports back to Gerrit with ``Verified`` vote of
+1, or if at least one of them fails, a -1:
.. code-block:: yaml
- pipeline:
name: check
manager: independent
trigger:
my_gerrit:
- event: patchset-created
success:
my_gerrit:
Verified: 1
failure:
my_gerrit:
Verified: -1
.. TODO: See TODO for more annotated examples of common pipeline configurations.
.. attr:: pipeline
The attributes available on a pipeline are as follows (all are
optional unless otherwise specified):
.. attr:: name
:required:
This is used later in the project definition to indicate what jobs
should be run for events in the pipeline.
.. attr:: manager
:required:
There are several schemes for managing pipelines. The following
table summarizes their features; each is described in detail
below.
=========== ============================= ============ ===== ============= =========
Manager Use Case Dependencies Merge Shared Queues Window
=========== ============================= ============ ===== ============= =========
Independent :term:`check`, :term:`post` No No No Unlimited
Dependent :term:`gate` Yes Yes Yes Variable
Serial :term:`deploy` No No Yes 1
Supercedent :term:`post`, :term:`promote` No No Project-ref 1
=========== ============================= ============ ===== ============= =========
.. value:: independent
Every event in this pipeline should be treated as independent
of other events in the pipeline. This is appropriate when
the order of events in the pipeline doesn't matter because
the results of the actions this pipeline performs can not
affect other events in the pipeline. For example, when a
change is first uploaded for review, you may want to run
tests on that change to provide early feedback to reviewers.
At the end of the tests, the change is not going to be
merged, so it is safe to run these tests in parallel without
regard to any other changes in the pipeline. They are
independent.
Another type of pipeline that is independent is a post-merge
pipeline. In that case, the changes have already merged, so
the results can not affect any other events in the pipeline.
.. value:: dependent
The dependent pipeline manager is designed for gating. It
ensures that every change is tested exactly as it is going to
be merged into the repository. An ideal gating system would
test one change at a time, applied to the tip of the
repository, and only if that change passed tests would it be
merged. Then the next change in line would be tested the
same way. In order to achieve parallel testing of changes,
the dependent pipeline manager performs speculative execution
on changes. It orders changes based on their entry into the
pipeline. It begins testing all changes in parallel,
assuming that each change ahead in the pipeline will pass its
tests. If they all succeed, all the changes can be tested
and merged in parallel. If a change near the front of the
pipeline fails its tests, each change behind it ignores
whatever tests have been completed and are tested again
without the change in front. This way gate tests may run in
parallel but still be tested correctly, exactly as they will
appear in the repository when merged.
For more detail on the theory and operation of Zuul's
dependent pipeline manager, see: :doc:`/gating`.
.. value:: serial
This pipeline manager supports shared queues (like dependent
pipelines) but only one item in each shared queue is
processed at a time.
This may be useful for post-merge pipelines which perform
partial production deployments (i.e., there are jobs with
file matchers which only deploy to affected parts of the
system). In such a case it is important for every change to
be processed, but they must still be processed one at a time
in order to ensure that the production system is not
inadvertently regressed. Support for shared queues ensures
that if multiple projects are involved deployment runs still
execute sequentially.
.. value:: supercedent
This is like an independent pipeline, in that every item is
distinct, except that items are grouped by project and ref,
and only one item for each project-ref is processed at a
time. If more than one additional item is enqueued for the
project-ref, previously enqueued items which have not started
processing are removed.
In other words, this pipeline manager will only run jobs for
the most recent item enqueued for a given project-ref.
This may be useful for post-merge pipelines which perform
artifact builds where only the latest version is of use. In
these cases, build resources can be conserved by avoiding
building intermediate versions.
.. note:: Since this pipeline filters intermediate buildsets
using it in combination with file filters on jobs
is dangerous. In this case jobs of in-between
buildsets can be unexpectedly skipped entirely. If
file filters are needed the ``independent`` or
``serial`` pipeline managers should be used.
.. attr:: post-review
:default: false
This is a boolean which indicates that this pipeline executes
code that has been reviewed. Some jobs perform actions which
should not be permitted with unreviewed code. When this value
is ``false`` those jobs will not be permitted to run in the
pipeline. If a pipeline is designed only to be used after
changes are reviewed or merged, set this value to ``true`` to
permit such jobs.
For more information, see :ref:`secret` and
:attr:`job.post-review`.
.. attr:: description
This field may be used to provide a textual description of the
pipeline. It may appear in the status page or in documentation.
.. attr:: variant-description
:default: branch name
This field may be used to provide a textual description of the
variant. It may appear in the status page or in documentation.
.. attr:: success-message
:default: Build successful.
The introductory text in reports when all the voting jobs are
successful.
.. attr:: failure-message
:default: Build failed.
The introductory text in reports when at least one voting job
fails.
.. attr:: start-message
:default: Starting {pipeline.name} jobs.
The introductory text in reports when jobs are started.
Three replacement fields are available ``status_url``, ``pipeline`` and
``change``.
.. attr:: enqueue-message
The introductory text in reports when an item is enqueued.
Empty by default.
.. attr:: merge-conflict-message
:default: Merge failed.
The introductory text in the message reported when a change
fails to merge with the current state of the repository.
Defaults to "Merge failed."
.. attr:: no-jobs-message
The introductory text in reports when an item is dequeued
without running any jobs. Empty by default.
.. attr:: dequeue-message
:default: Build canceled.
The introductory text in reports when an item is dequeued.
The dequeue message only applies if the item was dequeued without
a result.
.. attr:: footer-message
Supplies additional information after test results. Useful for
adding information about the CI system such as debugging and
contact details.
.. attr:: trigger
At least one trigger source must be supplied for each pipeline.
Triggers are not exclusive -- matching events may be placed in
multiple pipelines, and they will behave independently in each
of the pipelines they match.
Triggers are loaded from their connection name. The driver type
of the connection will dictate which options are available. See
:ref:`drivers`.
.. attr:: require
If this section is present, it establishes prerequisites for
any kind of item entering the Pipeline. Regardless of how the
item is to be enqueued (via any trigger or automatic dependency
resolution), the conditions specified here must be met or the
item will not be enqueued. These requirements may vary
depending on the source of the item being enqueued.
Requirements are loaded from their connection name. The driver
type of the connection will dictate which options are available.
See :ref:`drivers`.
.. attr:: reject
If this section is present, it establishes prerequisites that
can block an item from being enqueued. It can be considered a
negative version of :attr:`pipeline.require`.
Requirements are loaded from their connection name. The driver
type of the connection will dictate which options are available.
See :ref:`drivers`.
.. attr:: allow-other-connections
:default: true
If this is set to `false` then any change enqueued into the
pipeline (whether it is enqueued to run jobs or merely as a
dependency) must be from one of the connections specified in the
pipeline configuration (this includes any trigger, reporter, or
source requirement). When used in conjunction with
:attr:`pipeline.require`, this can ensure that pipeline
requirements are exhaustive.
.. attr:: supercedes
The name of a pipeline, or a list of names, that this pipeline
supercedes. When a change is enqueued in this pipeline, it will
be removed from the pipelines listed here. For example, a
:term:`gate` pipeline may supercede a :term:`check` pipeline so
that test resources are not spent running near-duplicate jobs
simultaneously.
.. attr:: dequeue-on-new-patchset
:default: true
Normally, if a new patchset is uploaded to a change that is in a
pipeline, the existing entry in the pipeline will be removed
(with jobs canceled and any dependent changes that can no longer
merge as well). To suppress this behavior (and allow jobs to
continue running), set this to ``false``.
.. attr:: ignore-dependencies
:default: false
In any kind of pipeline (dependent or independent), Zuul will
attempt to enqueue all dependencies ahead of the current change
so that they are tested together (independent pipelines report
the results of each change regardless of the results of changes
ahead). To ignore dependencies completely in an independent
pipeline, set this to ``true``. This option is ignored by
dependent pipelines.
.. attr:: precedence
:default: normal
Indicates how the build scheduler should prioritize jobs for
different pipelines. Each pipeline may have one precedence,
jobs for pipelines with a higher precedence will be run before
ones with lower. The value should be one of ``high``,
``normal``, or ``low``. Default: ``normal``.
.. _reporters:
The following options configure :term:`reporters <reporter>`.
Reporters are complementary to triggers; where a trigger is an
event on a connection which causes Zuul to enqueue an item, a
reporter is the action performed on a connection when an item is
dequeued after its jobs complete. The actual syntax for a reporter
is defined by the driver which implements it. See :ref:`drivers`
for more information.
.. attr:: success
Describes where Zuul should report to if all the jobs complete
successfully. This section is optional; if it is omitted, Zuul
will run jobs and do nothing on success -- it will not report at
all. If the section is present, the listed :term:`reporters
<reporter>` will be asked to report on the jobs. The reporters
are listed by their connection name. The options available
depend on the driver for the supplied connection.
.. attr:: failure
These reporters describe what Zuul should do if at least one job
fails.
.. attr:: merge-conflict
These reporters describe what Zuul should do if it is unable to
merge the patchset into the current state of the target
branch. If no merge-conflict reporters are listed then the
``failure`` reporters will be used.
.. attr:: config-error
These reporters describe what Zuul should do if it encounters a
configuration error while trying to enqueue the item. If no
config-error reporters are listed then the ``failure`` reporters
will be used.
.. attr:: enqueue
These reporters describe what Zuul should do when an item is
enqueued into the pipeline. This may be used to indicate to a
system or user that Zuul is aware of the triggering event even
though it has not evaluated whether any jobs will run.
.. attr:: start
These reporters describe what Zuul should do when jobs start
running for an item in the pipeline. This can be used, for
example, to reset a previously reported result.
.. attr:: no-jobs
These reporters describe what Zuul should do when an item is
dequeued from a pipeline without running any jobs. This may be
used to indicate to a system or user that the pipeline is not
relevant for a change.
.. attr:: disabled
These reporters describe what Zuul should do when a pipeline is
disabled. See ``disable-after-consecutive-failures``.
.. attr:: dequeue
These reporters describe what Zuul should do if an item is
dequeued. The dequeue reporters will only apply, if the item
was dequeued without a result.
The following options can be used to alter Zuul's behavior to
mitigate situations in which jobs are failing frequently (perhaps
due to a problem with an external dependency, or unusually high
non-deterministic test failures).
.. attr:: disable-after-consecutive-failures
If set, a pipeline can enter a *disabled* state if too many
changes in a row fail. When this value is exceeded the pipeline
will stop reporting to any of the **success**, **failure** or
**merge-conflict** reporters and instead only report to the
**disabled** reporters. (No **start** reports are made when a
pipeline is disabled).
.. attr:: window
:default: 20
Dependent pipeline managers only. Zuul can rate limit dependent
pipelines in a manner similar to TCP flow control. Jobs are
only started for items in the queue if they are within the
actionable window for the pipeline. The initial length of this
window is configurable with this value. The value given should
be a positive integer value. A value of ``0`` disables rate
limiting on the :value:`dependent pipeline manager
<pipeline.manager.dependent>`.
.. attr:: window-floor
:default: 3
Dependent pipeline managers only. This is the minimum value for
the window described above. Should be a positive non zero
integer value.
.. attr:: window-increase-type
:default: linear
Dependent pipeline managers only. This value describes how the
window should grow when changes are successfully merged by zuul.
.. value:: linear
Indicates that **window-increase-factor** should be added to
the previous window value.
.. value:: exponential
Indicates that **window-increase-factor** should be
multiplied against the previous window value and the result
will become the window size.
.. attr:: window-increase-factor
:default: 1
Dependent pipeline managers only. The value to be added or
multiplied against the previous window value to determine the
new window after successful change merges.
.. attr:: window-decrease-type
:default: exponential
Dependent pipeline managers only. This value describes how the
window should shrink when changes are not able to be merged by
Zuul.
.. value:: linear
Indicates that **window-decrease-factor** should be
subtracted from the previous window value.
.. value:: exponential
Indicates that **window-decrease-factor** should be divided
against the previous window value and the result will become
the window size.
.. attr:: window-decrease-factor
:default: 2
:value:`Dependent pipeline managers
<pipeline.manager.dependent>` only. The value to be subtracted
or divided against the previous window value to determine the
new window after unsuccessful change merges.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/config/pipeline.rst | pipeline.rst |
.. _nodeset:
Nodeset
=======
A Nodeset is a named collection of nodes for use by a job. Jobs may
specify what nodes they require individually, however, by defining
groups of node types once and referring to them by name, job
configuration may be simplified.
Nodesets, like most configuration items, are unique within a tenant,
though a nodeset may be defined on multiple branches of the same
project as long as the contents are the same. This is to aid in
branch maintenance, so that creating a new branch based on an existing
branch will not immediately produce a configuration error.
.. code-block:: yaml
- nodeset:
name: nodeset1
nodes:
- name: controller
label: controller-label
- name: compute1
label: compute-label
- name:
- compute2
- web
label: compute-label
groups:
- name: ceph-osd
nodes:
- controller
- name: ceph-monitor
nodes:
- controller
- compute1
- compute2
- name: ceph-web
nodes:
- web
Nodesets may also be used to express that Zuul should use the first of
multiple alternative node configurations to run a job. When a Nodeset
specifies a list of :attr:`nodeset.alternatives`, Zuul will request the
first Nodeset in the series, and if allocation fails for any reason,
Zuul will re-attempt the request with the subsequent Nodeset and so
on. The first Nodeset which is successfully supplied by Nodepool will
be used to run the job. An example of such a configuration follows.
.. code-block:: yaml
- nodeset:
name: fast-nodeset
nodes:
- label: fast-label
name: controller
- nodeset:
name: slow-nodeset
nodes:
- label: slow-label
name: controller
- nodeset:
name: fast-or-slow
alternatives:
- fast-nodeset
- slow-nodeset
In the above example, a job that requested the `fast-or-slow` nodeset
would receive `fast-label` nodes if a provider was able to supply
them, otherwise it would receive `slow-label` nodes. A Nodeset may
specify nodes and groups, or alternative nodesets, but not both.
.. attr:: nodeset
A Nodeset requires two attributes:
.. attr:: name
:required:
The name of the Nodeset, to be referenced by a :ref:`job`.
This is required when defining a standalone Nodeset in Zuul.
When defining an in-line anonymous nodeset within a job
definition, this attribute should be omitted.
.. attr:: nodes
This attribute is required unless `alternatives` is supplied.
A list of node definitions, each of which has the following format:
.. attr:: name
:required:
The name of the node. This will appear in the Ansible inventory
for the job.
This can also be specified as a list of strings. If so, then the list of hosts in
the Ansible inventory will share a common ansible_host address.
.. attr:: label
:required:
The Nodepool label for the node. Zuul will request a node with
this label.
.. attr:: groups
Additional groups can be defined which are accessible from the ansible
playbooks.
.. attr:: name
:required:
The name of the group to be referenced by an ansible playbook.
.. attr:: nodes
:required:
The nodes that shall be part of the group. This is specified as a list
of strings.
.. attr:: alternatives
:type: list
A list of alternative nodesets for which requests should be
attempted in series. The first request which succeeds will be
used for the job.
The items in the list may be either strings, in which case they
refer to other Nodesets within the layout, or they may be a
dictionary which is a nested anonymous Nodeset definition. The
two types (strings or nested definitions) may be mixed.
An alternative Nodeset definition may in turn refer to other
alternative nodeset definitions. In this case, the tree of
definitions will be flattened in a breadth-first manner to
create the ordered list of alternatives.
A Nodeset which specifies alternatives may not also specify
nodes or groups (this attribute is exclusive with
:attr:`nodeset.nodes` and :attr:`nodeset.groups`).
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/config/nodeset.rst | nodeset.rst |
.. _semaphore:
Semaphore
=========
Semaphores can be used to restrict the number of certain jobs which
are running at the same time. This may be useful for jobs which
access shared or limited resources. A semaphore has a value which
represents the maximum number of jobs which use that semaphore at the
same time.
Semaphores, like most configuration items, are unique within a tenant,
though a semaphore may be defined on multiple branches of the same
project as long as the value is the same. This is to aid in branch
maintenance, so that creating a new branch based on an existing branch
will not immediately produce a configuration error.
Zuul also supports global semaphores (see :ref:`global_semaphore`)
which may only be created by the Zuul administrator, but can be used
to coordinate resources across multiple tenants.
Semaphores are never subject to dynamic reconfiguration. If the value
of a semaphore is changed, it will take effect only when the change
where it is updated is merged. However, Zuul will attempt to validate
the configuration of semaphores in proposed updates, even if they
aren't used.
An example usage of semaphores follows:
.. code-block:: yaml
- semaphore:
name: semaphore-foo
max: 5
- semaphore:
name: semaphore-bar
max: 3
.. attr:: semaphore
The following attributes are available:
.. attr:: name
:required:
The name of the semaphore, referenced by jobs.
.. attr:: max
:default: 1
The maximum number of running jobs which can use this semaphore.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/config/semaphore.rst | semaphore.rst |
.. _project:
Project
=======
A project corresponds to a source code repository with which Zuul is
configured to interact. The main responsibility of the project
configuration item is to specify which jobs should run in which
pipelines for a given project. Within each project definition, a
section for each :ref:`pipeline <pipeline>` may appear. This
project-pipeline definition is what determines how a project
participates in a pipeline.
Multiple project definitions may appear for the same project (for
example, in a central :term:`config projects <config-project>` as well
as in a repo's own ``.zuul.yaml``). In this case, all of the project
definitions for the relevant branch are combined (the jobs listed in
all of the matching definitions will be run). If a project definition
appears in a :term:`config-project`, it will apply to all branches of
the project. If it appears in a branch of an
:term:`untrusted-project` it will only apply to changes on that
branch. In the case of an item which does not have a branch (for
example, a tag), all of the project definitions will be combined.
Consider the following project definition::
- project:
name: yoyodyne
queue: integrated
check:
jobs:
- check-syntax
- unit-tests
gate:
jobs:
- unit-tests
- integration-tests
The project has two project-pipeline stanzas, one for the ``check``
pipeline, and one for ``gate``. Each specifies which jobs should run
when a change for that project enters the respective pipeline -- when
a change enters ``check``, the ``check-syntax`` and ``unit-tests`` jobs
are run.
Pipelines which use the dependent pipeline manager (e.g., the ``gate``
example shown earlier) maintain separate queues for groups of
projects. When Zuul serializes a set of changes which represent
future potential project states, it must know about all of the
projects within Zuul which may have an effect on the outcome of the
jobs it runs. If project *A* uses project *B* as a library, then Zuul
must be told about that relationship so that it knows to serialize
changes to A and B together, so that it does not merge a change to B
while it is testing a change to A.
Zuul could simply assume that all projects are related, or even infer
relationships by which projects a job indicates it uses, however, in a
large system that would become unwieldy very quickly, and
unnecessarily delay changes to unrelated projects. To allow for
flexibility in the construction of groups of related projects, the
change queues used by dependent pipeline managers are specified
manually. To group two or more related projects into a shared queue
for a dependent pipeline, set the ``queue`` parameter to the same
value for those projects.
The ``gate`` project-pipeline definition above specifies that this
project participates in the ``integrated`` shared queue for that
pipeline.
.. attr:: project
The following attributes may appear in a project:
.. attr:: name
The name of the project. If Zuul is configured with two or more
unique projects with the same name, the canonical hostname for
the project should be included (e.g., `git.example.com/foo`).
This can also be a regex. In this case the regex must start with ``^``
and match the full project name following the same rule as name without
regex. If not given it is implicitly derived from the project where this
is defined.
.. attr:: templates
A list of :ref:`project-template` references; the
project-pipeline definitions of each Project Template will be
applied to this project. If more than one template includes
jobs for a given pipeline, they will be combined, as will any
jobs specified in project-pipeline definitions on the project
itself.
.. attr:: default-branch
:default: master
The name of a branch that Zuul should check out in jobs if no
better match is found. Typically Zuul will check out the branch
which matches the change under test, or if a job has specified
an :attr:`job.override-checkout`, it will check that out.
However, if there is no matching or override branch, then Zuul
will checkout the default branch.
Each project may only have one ``default-branch`` therefore Zuul
will use the first value that it encounters for a given project
(regardless of in which branch the definition appears). It may
not appear in a :ref:`project-template` definition.
.. attr:: merge-mode
:default: (driver specific)
The merge mode which is used by Git for this project. Be sure
this matches what the remote system which performs merges (i.e.,
Gerrit). The requested merge mode will also be used by the
GitHub and GitLab drivers when performing merges.
The default is :value:`project.merge-mode.merge` for all drivers
except Gerrit, where the default is
:value:`project.merge-mode.merge-resolve`.
Each project may only have one ``merge-mode`` therefore Zuul
will use the first value that it encounters for a given project
(regardless of in which branch the definition appears). It may
not appear in a :ref:`project-template` definition.
It must be one of the following values:
.. value:: merge
Uses the default git merge strategy (recursive). This maps to
the merge mode ``merge`` in GitHub and GitLab.
.. value:: merge-resolve
Uses the resolve git merge strategy. This is a very
conservative merge strategy which most closely matches the
behavior of Gerrit. This maps to the merge mode ``merge`` in
GitHub and GitLab.
.. value:: cherry-pick
Cherry-picks each change onto the branch rather than
performing any merges. This is not supported by GitHub and GitLab.
.. value:: squash-merge
Squash merges each change onto the branch. This maps to the
merge mode ``squash`` in GitHub and GitLab.
.. value:: rebase
Rebases the changes onto the branch. This is only supported
by GitHub and maps to the ``rebase`` merge mode (but
does not alter committer information in the way that GitHub
does in the repos that Zuul prepares for jobs).
.. attr:: vars
:default: None
A dictionary of variables to be made available for all jobs in
all pipelines of this project. For more information see
:ref:`variable inheritance <user_jobs_variable_inheritance>`.
.. attr:: queue
This specifies the
name of the shared queue this project is in. Any projects
which interact with each other in tests should be part of the
same shared queue in order to ensure that they don't merge
changes which break the others. This is a free-form string;
just set the same value for each group of projects.
The name can refer to the name of a :attr:`queue` which allows
further configuration of the queue.
Each pipeline for a project can only belong to one queue,
therefore Zuul will use the first value that it encounters.
It need not appear in the first instance of a :attr:`project`
stanza; it may appear in secondary instances or even in a
:ref:`project-template` definition.
.. note:: This attribute is not evaluated speculatively and
its setting shall be merged to be effective.
.. attr:: <pipeline>
Each pipeline that the project participates in should have an
entry in the project. The value for this key should be a
dictionary with the following format:
.. attr:: jobs
:required:
A list of jobs that should be run when items for this project
are enqueued into the pipeline. Each item of this list may
be a string, in which case it is treated as a job name, or it
may be a dictionary, in which case it is treated as a job
variant local to this project and pipeline. In that case,
the format of the dictionary is the same as the top level
:attr:`job` definition. Any attributes set on the job here
will override previous versions of the job.
.. attr:: debug
If this is set to `true`, Zuul will include debugging
information in reports it makes about items in the pipeline.
This should not normally be set, but in situations were it is
difficult to determine why Zuul did or did not run a certain
job, the additional information this provides may help.
.. attr:: fail-fast
:default: false
If this is set to `true`, Zuul will report a build failure
immediately and abort all still running builds. This can be used
to save resources in resource constrained environments at the cost
of potentially requiring multiple attempts if more than one problem
is present.
Once this is defined it cannot be overridden afterwards. So this
can be forced to a specific value by e.g. defining it in a config
repo.
.. _project-template:
Project Template
================
A Project Template defines one or more project-pipeline definitions
which can be re-used by multiple projects.
A Project Template uses the same syntax as a :ref:`project`
definition, however, in the case of a template, the
:attr:`project.name` attribute does not refer to the name of a
project, but rather names the template so that it can be referenced in
a :ref:`project` definition.
Because Project Templates may be used outside of the projects where
they are defined, they honor the implied branch :ref:`pragmas <pragma>`
(unlike Projects). The same heuristics described in
:attr:`job.branches` that determine what implied branches a :ref:`job`
will receive apply to Project Templates (with the exception that it is
not possible to explicitly set a branch matcher on a Project Template).
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/config/project.rst | project.rst |
.. _job:
Job
===
A job is a unit of work performed by Zuul on an item enqueued into a
pipeline. Items may run any number of jobs (which may depend on each
other). Each job is an invocation of an Ansible playbook with a
specific inventory of hosts. The actual tasks that are run by the job
appear in the playbook for that job while the attributes that appear in the
Zuul configuration specify information about when, where, and how the
job should be run.
Jobs in Zuul support inheritance. Any job may specify a single parent
job, and any attributes not set on the child job are collected from
the parent job. In this way, a configuration structure may be built
starting with very basic jobs which describe characteristics that all
jobs on the system should have, progressing through stages of
specialization before arriving at a particular job. A job may inherit
from any other job in any project (however, if the other job is marked
as :attr:`job.final`, jobs may not inherit from it). Generally,
attributes on child jobs will override (or completely replace)
attributes on the parent, however some attributes are combined. See
the documentation for individual attributes for these exceptions.
A job with no parent is called a *base job* and may only be defined in
a :term:`config-project`. Every other job must have a parent, and so
ultimately, all jobs must have an inheritance path which terminates at
a base job. Each tenant has a default parent job which will be used
if no explicit parent is specified.
Multiple job definitions with the same name are called variants.
These may have different selection criteria which indicate to Zuul
that, for instance, the job should behave differently on a different
git branch. Unlike inheritance, all job variants must be defined in
the same project. Some attributes of jobs marked :attr:`job.final`
may not be overridden.
When Zuul decides to run a job, it performs a process known as
freezing the job. Because any number of job variants may be
applicable, Zuul collects all of the matching variants and applies
them in the order they appeared in the configuration. The resulting
frozen job is built from attributes gathered from all of the
matching variants. In this way, exactly what is run is dependent on
the pipeline, project, branch, and content of the item.
In addition to the job's main playbook, each job may specify one or
more pre- and post-playbooks. These are run, in order, before and
after (respectively) the main playbook. They may be used to set up
and tear down resources needed by the main playbook. When combined
with inheritance, they provide powerful tools for job construction. A
job only has a single main playbook, and when inheriting from a
parent, the child's main playbook overrides (or replaces) the
parent's. However, the pre- and post-playbooks are appended and
prepended in a nesting fashion. So if a parent job and child job both
specified pre and post playbooks, the sequence of playbooks run would
be:
* parent pre-run playbook
* child pre-run playbook
* child playbook
* child post-run playbook
* parent post-run playbook
* parent cleanup-run playbook
Further inheritance would nest even deeper.
Here is an example of two job definitions:
.. code-block:: yaml
- job:
name: base
pre-run: copy-git-repos
post-run: copy-logs
- job:
name: run-tests
parent: base
nodeset:
nodes:
- name: test-node
label: fedora
.. attr:: job
The following attributes are available on a job; all are optional
unless otherwise specified:
.. attr:: name
:required:
The name of the job. By default, Zuul looks for a playbook with
this name to use as the main playbook for the job. This name is
also referenced later in a project pipeline configuration.
.. TODO: figure out how to link the parent default to tenant.default.parent
.. attr:: parent
:default: Tenant default-parent
Specifies a job to inherit from. The parent job can be defined
in this or any other project. Any attributes not specified on a
job will be collected from its parent. If no value is supplied
here, the job specified by :attr:`tenant.default-parent` will be
used. If **parent** is set to ``null`` (which is only valid in
a :term:`config-project`), this is a :term:`base job`.
.. attr:: description
A textual description of the job. Not currently used directly
by Zuul, but it is used by the zuul-sphinx extension to Sphinx
to auto-document Zuul jobs (in which case it is interpreted as
ReStructuredText).
.. attr:: final
:default: false
To prevent other jobs from inheriting from this job, and also to
prevent changing execution-related attributes when this job is
specified in a project's pipeline, set this attribute to
``true``.
.. warning::
It is possible to circumvent the use of `final` in an
:term:`untrusted-project` by creating a change which
`Depends-On` a change which alters `final`. This limitation
does not apply to jobs in a :term:`config-project`.
.. attr:: protected
:default: false
When set to ``true`` only jobs defined in the same project may inherit
from this job. This includes changing execution-related attributes when
this job is specified in a project's pipeline. Once this is set to
``true`` it cannot be reset to ``false``.
.. warning::
It is possible to circumvent the use of `protected` in an
:term:`untrusted-project` by creating a change which
`Depends-On` a change which alters `protected`. This
limitation does not apply to jobs in a
:term:`config-project`.
.. attr:: abstract
:default: false
To indicate a job is not intended to be run directly, but
instead must be inherited from, set this attribute to ``true``.
Once this is set to ``true`` in a job it cannot be reset to
``false`` within the same job by other variants; however jobs
which inherit from it can (and by default do) reset it to
``false``.
.. warning::
It is possible to circumvent the use of `abstract` in an
:term:`untrusted-project` by creating a change which
`Depends-On` a change which alters `abstract`. This
limitation does not apply to jobs in a
:term:`config-project`.
.. attr:: intermediate
:default: false
An intermediate job must be inherited by an abstract job; it can
not be inherited by a final job. All ``intermediate`` jobs
*must* also be ``abstract``; a configuration error will be
raised if not.
Once this is set to ``true`` in a job it cannot be reset to
``false`` within the same job by other variants; however jobs
which inherit from it can (and by default do) reset it to
``false``.
For example, you may define a base abstract job `foo` and create
two abstract jobs that inherit from `foo` called
`foo-production` and `foo-development`. If it would be an error
to accidentally inherit from the base job `foo` instead of
choosing one of the two variants, `foo` could be marked as
``intermediate``.
.. attr:: success-message
:default: SUCCESS
Normally when a job succeeds, the string ``SUCCESS`` is reported
as the result for the job. If set, this option may be used to
supply a different string.
.. attr:: failure-message
:default: FAILURE
Normally when a job fails, the string ``FAILURE`` is reported as
the result for the job. If set, this option may be used to
supply a different string.
.. attr:: hold-following-changes
:default: false
In a dependent pipeline, this option may be used to indicate
that no jobs should start on any items which depend on the
current item until this job has completed successfully. This
may be used to conserve build resources, at the expense of
inhibiting the parallelization which speeds the processing of
items in a dependent pipeline.
.. attr:: voting
:default: true
Indicates whether the result of this job should be used in
determining the overall result of the item.
.. attr:: semaphore
A deprecated alias of :attr:`job.semaphores`.
.. attr:: semaphores
The name of a :ref:`semaphore` (or list of them) or
:ref:`global_semaphore` which should be acquired and released
when the job begins and ends. If the semaphore is at maximum
capacity, then Zuul will wait until it can be acquired before
starting the job. The format is either a string, a dictionary,
or a list of either of those in the case of multiple
semaphores. If it's a string it references a semaphore using the
default value for :attr:`job.semaphores.resources-first`.
Also the name of a semaphore can be any string (without being
previously defined via `semaphore` directive). In this case
an implicit semaphore is created with capacity max=1.
If multiple semaphores are requested, the job will not start
until all have been acquired, and Zuul will wait until all are
available before acquiring any.
When inheriting jobs or applying variants, the list of
semaphores is extended (semaphores specified in a job definition
are added to any supplied by their parents).
.. attr:: name
:required:
The name of the referenced semaphore
.. attr:: resources-first
:default: False
By default a semaphore is acquired before the resources are
requested. However in some cases the user may want to run
cheap jobs as quickly as possible in a consecutive manner. In
this case `resources-first` can be enabled to request the
resources before locking the semaphore. This can lead to some
amount of blocked resources while waiting for the semaphore
so this should be used with caution.
.. attr:: tags
Metadata about this job. Tags are units of information attached
to the job; they do not affect Zuul's behavior, but they can be
used within the job to characterize the job. For example, a job
which tests a certain subsystem could be tagged with the name of
that subsystem, and if the job's results are reported into a
database, then the results of all jobs affecting that subsystem
could be queried. This attribute is specified as a list of
strings, and when inheriting jobs or applying variants, tags
accumulate in a set, so the result is always a set of all the
tags from all the jobs and variants used in constructing the
frozen job, with no duplication.
.. attr:: provides
A list of free-form strings which identifies resources provided
by this job which may be used by other jobs for other changes
using the :attr:`job.requires` attribute.
When inheriting jobs or applying variants, the list of
`provides` is extended (`provides` specified in a job definition
are added to any supplied by their parents).
.. attr:: requires
A list of free-form strings which identify resources which may
be provided by other jobs for other changes (via the
:attr:`job.provides` attribute) that are used by this job.
When Zuul encounters a job with a `requires` attribute, it
searches for those values in the `provides` attributes of any
jobs associated with any queue items ahead of the current
change. In this way, if a change uses either git dependencies
or a `Depends-On` header to indicate a dependency on another
change, Zuul will be able to determine that the parent change
affects the run-time environment of the child change. If such a
relationship is found, the job with `requires` will not start
until all of the jobs with matching `provides` have completed or
paused. Additionally, the :ref:`artifacts <return_artifacts>`
returned by the `provides` jobs will be made available to the
`requires` job.
When inheriting jobs or applying variants, the list of
`requires` is extended (`requires` specified in a job definition
are added to any supplied by their parents).
For example, a job which produces a builder container image in
one project that is then consumed by a container image build job
in another project might look like this:
.. code-block:: yaml
- job:
name: build-builder-image
provides: images
- job:
name: build-final-image
requires: images
- project:
name: builder-project
check:
jobs:
- build-builder-image
- project:
name: final-project
check:
jobs:
- build-final-image
.. attr:: secrets
A list of secrets which may be used by the job. A
:ref:`secret` is a named collection of private information
defined separately in the configuration. The secrets that
appear here must be defined in the same project as this job
definition.
Each item in the list may be supplied either as a string,
in which case it references the name of a :ref:`secret` definition,
or as a dict. If an element in this list is given as a dict, it
may have the following fields:
.. attr:: name
:required:
The name to use for the Ansible variable into which the secret
content will be placed.
.. attr:: secret
:required:
The name to use to find the secret's definition in the
configuration.
.. attr:: pass-to-parent
:default: false
A boolean indicating that this secret should be made
available to playbooks in parent jobs. Use caution when
setting this value -- parent jobs may be in different
projects with different security standards. Setting this to
true makes the secret available to those playbooks and
therefore subject to intentional or accidental exposure.
For example:
.. code-block:: yaml
- secret:
name: important-secret
data:
key: encrypted-secret-key-data
- job:
name: amazing-job
secrets:
- name: ssh_key
secret: important-secret
will result in the following being passed as a variable to the playbooks
in ``amazing-job``:
.. code-block:: yaml
ssh_key:
key: decrypted-secret-key-data
.. attr:: nodeset
The nodes which should be supplied to the job. This parameter
may be supplied either as a string, in which case it references
a :ref:`nodeset` definition which appears elsewhere in the
configuration, or a dictionary, in which case it is interpreted
in the same way as a Nodeset definition, though the top-level
nodeset ``name`` attribute should be omitted (in essence, it is
an anonymous Nodeset definition unique to this job; the nodes
themselves still require names). See the :ref:`nodeset`
reference for the syntax to use in that case.
If a job has an empty (or no) :ref:`nodeset` definition, it will
still run and is able to perform limited actions within the Zuul
executor sandbox. Note so-called "executor-only" jobs run with
an empty inventory, and hence Ansible's *implicit localhost*.
This means an executor-only playbook must be written to match
``localhost`` directly; i.e.
.. code-block:: yaml
- hosts: localhost
tasks:
...
not with ``hosts: all`` (as this does not match the implicit
localhost and the playbook will not run). There are also
caveats around things like enumerating the magic variable
``hostvars`` in this situation. For more information see the
Ansible `implicit localhost documentation
<https://docs.ansible.com/ansible/latest/inventory/implicit_localhost.html>`__.
A useful example of executor-only jobs is saving resources by
directly utilising the prior results from testing a committed
change. For example, a review which updates documentation
source files would generally test validity by building a
documentation tree. When this change is committed, the
pre-built output can be copied in an executor-only job directly
to the publishing location in a post-commit *promote* pipeline;
avoiding having to use a node to rebuild the documentation for
final publishing.
.. attr:: override-checkout
When Zuul runs jobs for a proposed change, it normally checks
out the branch associated with that change on every project
present in the job. If jobs are running on a ref (such as a
branch tip or tag), then that ref is normally checked out. This
attribute is used to override that behavior and indicate that
this job should, regardless of the branch for the queue item,
use the indicated ref (i.e., branch or tag) instead. This can
be used, for example, to run a previous version of the software
(from a stable maintenance branch) under test even if the change
being tested applies to a different branch (this is only likely
to be useful if there is some cross-branch interaction with some
component of the system being tested). See also the
project-specific :attr:`job.required-projects.override-checkout`
attribute to apply this behavior to a subset of a job's
projects.
This value is also used to help select which variants of a job
to run. If ``override-checkout`` is set, then Zuul will use
this value instead of the branch of the item being tested when
collecting jobs to run.
.. attr:: timeout
The time in seconds that the job should be allowed to run before
it is automatically aborted and failure is reported. If no
timeout is supplied, the job may run indefinitely. Supplying a
timeout is highly recommended.
This timeout only applies to the pre-run and run playbooks in a
job.
.. attr:: post-timeout
The time in seconds that each post playbook should be allowed to run
before it is automatically aborted and failure is reported. If no
post-timeout is supplied, the job may run indefinitely. Supplying a
post-timeout is highly recommended.
The post-timeout is handled separately from the above timeout because
the post playbooks are typically where you will copy jobs logs.
In the event of the pre-run or run playbooks timing out we want to
do our best to copy the job logs in the post-run playbooks.
.. attr:: attempts
:default: 3
When Zuul encounters an error running a job's pre-run playbook,
Zuul will stop and restart the job. Errors during the main or
post-run playbook phase of a job are not affected by this
parameter (they are reported immediately). This parameter
controls the number of attempts to make before an error is
reported.
.. attr:: pre-run
The name of a playbook or list of playbooks to run before the
main body of a job. Values are either a string describing the
full path to the playbook in the repo where the job is defined,
or a dictionary described below.
When a job inherits from a parent, the child's pre-run playbooks
are run after the parent's. See :ref:`job` for more
information.
If the value is a dictionary, the following attributes are
available:
.. attr:: name
The path to the playbook relative to the root of the repo.
.. attr:: semaphore
The name of a :ref:`semaphore` (or list of them) or
:ref:`global_semaphore` which should be acquired and released
when the playbook begins and ends. If the semaphore is at
maximum capacity, then Zuul will wait until it can be
acquired before starting the playbook. The format is either a
string, or a list of strings.
If multiple semaphores are requested, the playbook will not
start until all have been acquired, and Zuul will wait until
all are available before acquiring any. The time spent
waiting for pre-run playbook semaphores is counted against
the :attr:`job.timeout`.
None of the semaphores specified for a playbook may also be
specified in the same job.
.. attr:: post-run
The name of a playbook or list of playbooks to run after the
main body of a job. Values are either a string describing the
full path to the playbook in the repo where the job is defined,
or a dictionary described below.
When a job inherits from a parent, the child's post-run playbooks
are run before the parent's. See :ref:`job` for more
information.
If the value is a dictionary, the following attributes are
available:
.. attr:: name
The path to the playbook relative to the root of the repo.
.. attr:: semaphore
The name of a :ref:`semaphore` (or list of them) or
:ref:`global_semaphore` which should be acquired and released
when the playbook begins and ends. If the semaphore is at
maximum capacity, then Zuul will wait until it can be
acquired before starting the playbook. The format is either a
string, or a list of strings.
If multiple semaphores are requested, the playbook will not
start until all have been acquired, and Zuul will wait until
all are available before acquiring any. The time spent
waiting for post-run playbook semaphores is counted against
the :attr:`job.post-timeout`.
None of the semaphores specified for a playbook may also be
specified in the same job.
.. attr:: cleanup-run
The name of a playbook or list of playbooks to run after job
execution. Values are either a string describing the full path
to the playbook in the repo where the job is defined, or a
dictionary described below.
The cleanup phase is performed regardless of the job's result,
even when the job is canceled. Cleanup results are not taken
into account when reporting the job result.
When a job inherits from a parent, the child's cleanup-run playbooks
are run before the parent's. See :ref:`job` for more
information.
There is a hard-coded five minute timeout for cleanup playbooks.
If the value is a dictionary, the following attributes are
available:
.. attr:: name
The path to the playbook relative to the root of the repo.
.. attr:: semaphore
The name of a :ref:`semaphore` (or list of them) or
:ref:`global_semaphore` which should be acquired and released
when the playbook begins and ends. If the semaphore is at
maximum capacity, then Zuul will wait until it can be
acquired before starting the playbook. The format is either a
string, or a list of strings.
If multiple semaphores are requested, the playbook will not
start until all have been acquired, and Zuul will wait until
all are available before acquiring any. The time spent
waiting for cleanup-run playbook semaphores is counted against
the cleanup phase timeout.
None of the semaphores specified for a playbook may also be
specified in the same job.
.. attr:: run
The name of a playbook or list of playbooks for this job. If it
is not supplied, the parent's playbook will be used (and
likewise up the inheritance chain). Values are either a string
describing the full path to the playbook in the repo where the
job is defined, or a dictionary described below.
If the value is a dictionary, the following attributes are
available:
.. attr:: name
The path to the playbook relative to the root of the repo.
.. attr:: semaphore
The name of a :ref:`semaphore` (or list of them) or
:ref:`global_semaphore` which should be acquired and released
when the playbook begins and ends. If the semaphore is at
maximum capacity, then Zuul will wait until it can be
acquired before starting the playbook. The format is either a
string, or a list of strings.
If multiple semaphores are requested, the playbook will not
start until all have been acquired, and Zuul will wait until
all are available before acquiring any. The time spent
waiting for run playbook semaphores is counted against
the :attr:`job.timeout`.
None of the semaphores specified for a playbook may also be
specified in the same job.
Example:
.. code-block:: yaml
run: playbooks/job-playbook.yaml
Or:
.. code-block:: yaml
run:
- name: playbooks/job-playbook.yaml
semaphores: playbook-semaphore
.. attr:: ansible-split-streams
:default: False
Keep stdout/stderr of command and shell tasks separate (the Ansible
default behavior) instead of merging stdout and stderr.
Since version 3, Zuul has combined the stdout and stderr streams
in Ansible command tasks, but will soon switch to using the
normal Ansible behavior. In an upcoming release of Zuul, this
default will change to `True`, and in a later release, this
option will be removed altogether.
This option may be used in the interim to verify playbook
compatibility and facilitate upgrading to the new behavior.
.. attr:: ansible-version
The ansible version to use for all playbooks of the job. This can be
defined at the following layers of configuration where the first match
takes precedence:
* :attr:`job.ansible-version`
* :attr:`tenant.default-ansible-version`
* :attr:`scheduler.default_ansible_version`
* Zuul default version
The supported ansible versions are:
.. program-output:: zuul-manage-ansible -l
.. attr:: roles
.. code-block:: yaml
:name: job-roles-example
- job:
name: myjob
roles:
- zuul: myorg/our-roles-project
- zuul: myorg/ansible-role-foo
name: foo
A list of Ansible roles to prepare for the job. Because a job
runs an Ansible playbook, any roles which are used by the job
must be prepared and installed by Zuul before the job begins.
This value is a list of dictionaries, each of which indicates
one of two types of roles: a Galaxy role, which is simply a role
that is installed from Ansible Galaxy, or a Zuul role, which is
a role provided by a project managed by Zuul. Zuul roles are
able to benefit from speculative merging and cross-project
dependencies when used by playbooks in untrusted projects.
Roles are added to the Ansible role path in the order they
appear on the job -- roles earlier in the list will take
precedence over those which follow.
This attribute is not overridden on inheritance or variance;
instead roles are added with each new job or variant. In the
case of job inheritance or variance, the roles used for each of
the playbooks run by the job will be only those which were
cumulatively defined up to that point in the inheritance
hierarchy where that playbook was added. If a child job
inherits from a parent which defines a pre and post playbook,
then the pre and post playbooks it inherits from the parent job
will run only with the roles that were defined on the parent.
If the child adds its own pre and post playbooks, then any roles
added by the child will be available to the child's playbooks.
This is so that a job which inherits from a parent does not
inadvertently alter the behavior of the parent's playbooks by
the addition of conflicting roles. Roles added by a child will
appear before those it inherits from its parent.
If a project used for a Zuul role has branches, the usual
process of selecting which branch should be checked out applies.
See :attr:`job.override-checkout` for a description of that
process and how to override it. As a special case, if the role
project is the project in which this job definition appears,
then the branch in which this definition appears will be used.
In other words, a playbook may not use a role from a different
branch of the same project.
If the job is run on a ref (for example, a branch tip or a tag)
then a different form of the branch selection process is used.
There is no single branch context available for selecting an
appropriate branch of the role's repo to check out, so only the
following are considered: First the ref specified by
:attr:`job.required-projects.override-checkout`, or
:attr:`job.override-checkout`. Then if the role repo is the
playbook repo, that branch is used; otherwise the project's
default branch is selected.
.. warning::
Keep this behavior difference in mind when designing jobs
that run on both branches and tags. If the same job must be
used in both circumstances, ensure that any roles from other
repos used by playbooks in the job originate only in
un-branched repositories. Otherwise different branches of
the role repo may be checked out.
A project which supplies a role may be structured in one of two
configurations: a bare role (in which the role exists at the
root of the project), or a contained role (in which the role
exists within the ``roles/`` directory of the project, perhaps
along with other roles). In the case of a contained role, the
``roles/`` directory of the project is added to the role search
path. In the case of a bare role, the project itself is added
to the role search path. In case the name of the project is not
the name under which the role should be installed (and therefore
referenced from Ansible), the ``name`` attribute may be used to
specify an alternate.
A job automatically has the project in which it is defined added
to the roles path if that project appears to contain a role or
``roles/`` directory. By default, the project is added to the
path under its own name, however, that may be changed by
explicitly listing the project in the roles list in the usual
way.
.. attr:: galaxy
.. warning:: Galaxy roles are not yet implemented.
The name of the role in Ansible Galaxy. If this attribute is
supplied, Zuul will search Ansible Galaxy for a role by this
name and install it. Mutually exclusive with ``zuul``;
either ``galaxy`` or ``zuul`` must be supplied.
.. attr:: zuul
The name of a Zuul project which supplies the role. Mutually
exclusive with ``galaxy``; either ``galaxy`` or ``zuul`` must
be supplied.
.. attr:: name
The installation name of the role. In the case of a bare
role, the role will be made available under this name.
Ignored in the case of a contained role.
.. attr:: required-projects
A list of other projects which are used by this job. Any Zuul
projects specified here will also be checked out by Zuul into
the working directory for the job. Speculative merging and
cross-repo dependencies will be honored. If there is not a
change for the project ahead in the pipeline, its repo state as
of the time the item was enqueued will be frozen and used for
all jobs for a given change (see :ref:`global_repo_state`).
This attribute is not overridden by inheritance; instead it is
the union of all applicable parents and variants (i.e., jobs can
expand but not reduce the set of required projects when they
inherit).
The format for this attribute is either a list of strings or
dictionaries. Strings are interpreted as project names,
dictionaries, if used, may have the following attributes:
.. attr:: name
:required:
The name of the required project.
.. attr:: override-checkout
When Zuul runs jobs for a proposed change, it normally checks
out the branch associated with that change on every project
present in the job. If jobs are running on a ref (such as a
branch tip or tag), then that ref is normally checked out.
This attribute is used to override that behavior and indicate
that this job should, regardless of the branch for the queue
item, use the indicated ref (i.e., branch or tag) instead,
for only this project. See also the
:attr:`job.override-checkout` attribute to apply the same
behavior to all projects in a job.
This value is also used to help select which variants of a
job to run. If ``override-checkout`` is set, then Zuul will
use this value instead of the branch of the item being tested
when collecting any jobs to run which are defined in this
project.
.. attr:: vars
A dictionary of variables to supply to Ansible. When inheriting
from a job (or creating a variant of a job) vars are merged with
previous definitions. This means a variable definition with the
same name will override a previously defined variable, but new
variable names will be added to the set of defined variables.
When running a trusted playbook, the value of variables will be
frozen at the start of the job. Therefore if the value of the
variable is an Ansible Jinja template, it may only reference
values which are known at the start of the job, and its value
will not change. Untrusted playbooks dynamically evaluate
variables and are not limited by this restriction.
Un-frozen versions of all the original job variables are
available tagged with the ``!unsafe`` YAML tag under the
``unsafe_vars`` variable hierarchy. This tag prevents Ansible
from evaluating them as Jinja templates. For example, the job
variable `myvar` would be available under `unsafe_vars.myvar`.
Advanced users may force Ansible to evaluate these values, but
it is not recommended to do so except in the most controlled of
circumstances. They are almost impossible to render safely.
.. attr:: extra-vars
A dictionary of variables to supply to Ansible with higher
precedence than job, host, or group vars. Note, that despite
the name this is not passed to Ansible using the `--extra-vars`
flag.
.. attr:: host-vars
A dictionary of host variables to supply to Ansible. The keys
of this dictionary are node names as defined in a
:ref:`nodeset`, and the values are dictionaries of variables,
just as in :attr:`job.vars`.
.. attr:: group-vars
A dictionary of group variables to supply to Ansible. The keys
of this dictionary are node groups as defined in a
:ref:`nodeset`, and the values are dictionaries of variables,
just as in :attr:`job.vars`.
An example of three kinds of variables:
.. code-block:: yaml
- job:
name: variable-example
nodeset:
nodes:
- name: controller
label: fedora-27
- name: api1
label: centos-7
- name: api2
label: centos-7
groups:
- name: api
nodes:
- api1
- api2
vars:
foo: "this variable is visible to all nodes"
host-vars:
controller:
bar: "this variable is visible only on the controller node"
group-vars:
api:
baz: "this variable is visible on api1 and api2"
.. attr:: dependencies
A list of other jobs upon which this job depends. Zuul will not
start executing this job until all of its dependencies have
completed successfully or have been paused, and if one or more of
them fail, this job will not be run.
The format for this attribute is either a list of strings or
dictionaries. Strings are interpreted as job names,
dictionaries, if used, may have the following attributes:
.. attr:: name
:required:
The name of the required job.
.. attr:: soft
:default: false
A boolean value which indicates whether this job is a *hard*
or *soft* dependency. A *hard* dependency will cause an
error if the specified job is not run. That is, if job B
depends on job A, but job A is not run for any reason (for
example, it contains a file matcher which does not match),
then Zuul will not run any jobs and report an error. A
*soft* dependency will simply be ignored if the dependent job
is not run.
.. attr:: allowed-projects
A list of Zuul projects which may use this job. By default, a
job may be used by any other project known to Zuul, however,
some jobs use resources or perform actions which are not
appropriate for other projects. In these cases, a list of
projects which are allowed to use this job may be supplied. If
this list is not empty, then it must be an exhaustive list of
all projects permitted to use the job. The current project
(where the job is defined) is not automatically included, so if
it should be able to run this job, then it must be explicitly
listed. This setting is ignored by :term:`config projects
<config-project>` -- they may add any job to any project's
pipelines. By default, all projects may use the job.
If a :attr:`job.secrets` is used in a job definition in an
:term:`untrusted-project`, `allowed-projects` is automatically
set to the current project only, and can not be overridden.
However, a :term:`config-project` may still add such a job to
any project's pipeline. Apply caution when doing so as other
projects may be able to expose the source project's secrets.
This attribute is not overridden by inheritance; instead it is
the intersection of all applicable parents and variants (i.e.,
jobs can reduce but not expand the set of allowed projects when
they inherit).
.. warning::
It is possible to circumvent the use of `allowed-projects` in
an :term:`untrusted-project` by creating a change which
`Depends-On` a change which alters `allowed-projects`. This
limitation does not apply to jobs in a
:term:`config-project`, or jobs in an `untrusted-project`
which use a secret.
.. attr:: post-review
:default: false
A boolean value which indicates whether this job may only be
used in pipelines where :attr:`pipeline.post-review` is
``true``. This is automatically set to ``true`` if this job
uses a :ref:`secret` and is defined in a :term:`untrusted-project`.
It may be explicitly set to obtain the same behavior for jobs
defined in :term:`config projects <config-project>`. Once this
is set to ``true`` anywhere in the inheritance hierarchy for a job,
it will remain set for all child jobs and variants (it can not be
set to ``false``).
.. warning::
It is possible to circumvent the use of `post-review` in an
:term:`untrusted-project` by creating a change which
`Depends-On` a change which alters `post-review`. This
limitation does not apply to jobs in a
:term:`config-project`, or jobs in an `untrusted-project`
which use a secret.
.. attr:: branches
A :ref:`regular expression <regex>` (or list of regular
expressions) which describe on what branches a job should run
(or in the case of variants, to alter the behavior of a job for
a certain branch).
This attribute is not inherited in the usual manner. Instead,
it is used to determine whether each variant on which it appears
will be used when running the job.
If none of the defined job variants contain a branches setting which
matches the branch of an item, then that job is not run for the item.
Otherwise, all of the job variants which match that branch are
used when freezing the job. However, if
:attr:`job.override-checkout` or
:attr:`job.required-projects.override-checkout` are set for a
project, Zuul will attempt to use the job variants which match
the values supplied in ``override-checkout`` for jobs defined in
those projects. This can be used to run a job defined in one
project on another project without a matching branch.
If a tag item is enqueued, we look up the branches which contain
the commit referenced by the tag. If any of those branches match a
branch matcher, the matcher is considered to have matched.
Additionally in the case of a tag item, if the expression
matches the full name of the ref (eg, `refs/tags/foo`) then the
job is considered to match. The preceding section still
applies, so the definition must appear in a branch containing
the commit referenced by the tag to be considered, and then the
expression must also match the tag.
This example illustrates a job called *run-tests* which uses a
nodeset based on the current release of an operating system to
perform its tests, except when testing changes to the stable/2.0
branch, in which case it uses an older release:
.. code-block:: yaml
- job:
name: run-tests
nodeset: current-release
- job:
name: run-tests
branches: stable/2.0
nodeset: old-release
In some cases, Zuul uses an implied value for the branch
specifier if none is supplied:
* For a job definition in a :term:`config-project`, no implied
branch specifier is used. If no branch specifier appears, the
job applies to all branches.
* In the case of an :term:`untrusted-project`, if the project
has only one branch, no implied branch specifier is applied to
:ref:`job` definitions. If the project has more than one
branch, the branch containing the job definition is used as an
implied branch specifier.
This allows for the very simple and expected workflow where if a
project defines a job on the ``master`` branch with no branch
specifier, and then creates a new branch based on ``master``,
any changes to that job definition within the new branch only
affect that branch, and likewise, changes to the master branch
only affect it.
See :attr:`pragma.implied-branch-matchers` for how to override
this behavior on a per-file basis. The behavior may also be
configured by a Zuul administrator using
:attr:`tenant.untrusted-projects.<project>.implied-branch-matchers`.
.. attr:: files
This indicates that the job should only run on changes where the
specified files are modified. Unlike **branches**, this value
is subject to inheritance and overriding, so only the final
value is used to determine if the job should run. This is a
:ref:`regular expression <regex>` or list of regular expressions.
.. warning::
File filters will be ignored for refs that don't have any
files. This will be the case for merge commits (e.g. in a post
pipeline) or empty commits created with
``git commit --allow-empty`` (which can be used in order to
run all jobs).
.. attr:: irrelevant-files
This is a negative complement of **files**. It indicates that
the job should run unless *all* of the files changed match this
list. In other words, if the regular expression ``docs/.*`` is
supplied, then this job will not run if the only files changed
are in the docs directory. A :ref:`regular expression <regex>`
or list of regular expressions.
.. warning::
File filters will be ignored for refs that don't have any
files. This will be the case for merge commits (e.g. in a post
pipeline) or empty commits created with
``git commit --allow-empty`` (which can be used in order to
run all jobs).
.. attr:: match-on-config-updates
:default: true
If this is set to ``true`` (the default), then the job's file
matchers are ignored if a change alters the job's configuration.
This means that changes to jobs with file matchers will be
self-testing without requiring that the file matchers include
the Zuul configuration file defining the job.
.. attr:: deduplicate
:default: auto
In the case of a dependency cycle where multiple changes within
the cycle run the same job, this setting indicates whether Zuul
should attempt to deduplicate the job. If it is deduplicated,
then the job will only run for one queue item within the cycle
and other items which run the same job will use the results of
that build.
This setting determines whether Zuul will consider deduplication.
If it is set to ``false``, Zuul will never attempt to
deduplicate the job. If it is set to ``auto`` (the default),
then Zuul will compare the job with other jobs of other queue
items in the dependency cycle, and if they are equivalent and
meet certain project criteria, it will deduplicate them.
The project criteria that Zuul considers under the ``auto``
setting are either:
* The job must specify :attr:`job.required-projects`.
* Or the queue items must be for the same project.
This is because of the following heuristic: if a job specifies
:attr:`job.required-projects`, it is most likely to be one which
operates in the same way regardless of which project the change
under test belongs to, therefore the result of the same job
running on two queue items in the same dependency cycle should
be the same. If a job does not specify
:attr:`job.required-projects` and runs with two different
projects under test, the outcome is likely different for those
two items.
If this is not true for a job (e.g., the job ignores the project
under test and interacts only with external resources)
:attr:`job.deduplicate` may be set to ``true`` to ignore the
heuristic and deduplicate anyway.
.. attr:: workspace-scheme
:default: golang
The scheme to use when placing git repositories in the
workspace.
.. value:: golang
This writes the repository into a directory based on the
canonical hostname and the full name of the repository. For
example::
src/example.com/organization/project
This is the default and, despite the name, is suitable and
recommended for any language.
.. value:: flat
This writes the repository into a directory based only on the
last component of the name. For example::
src/project
In some cases the ``golang`` scheme can produce collisions
(consider the projects `component` and
`component/subcomponent`). In this case it may be preferable
to use the ``flat`` scheme (which would produce repositories
at `component` and `subcomponent`).
Note, however, that this scheme may produce collisions with
`component` and `component/component`.
.. value:: unique
This writes the repository into a directory based on the
organization name and the ``urllib.parse.quote_plus`` formatted
project name. For example::
src/example.com/organization/organization%2Fproject
This scheme will produce unique workspace paths for every repository
and won't cause collisions.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/config/job.rst | job.rst |
.. _pragma:
Pragma
======
The `pragma` item does not behave like the others. It can not be
included or excluded from configuration loading by the administrator,
and does not form part of the final configuration itself. It is used
to alter how the configuration is processed while loading.
A pragma item only affects the current file. The same file in another
branch of the same project will not be affected, nor any other files
or any other projects. The effect is global within that file --
pragma directives may not be set and then unset within the same file.
.. code-block:: yaml
- pragma:
implied-branch-matchers: False
.. attr:: pragma
The pragma item currently supports the following attributes:
.. attr:: implied-branch-matchers
This is a boolean, which, if set, may be used to enable
(``true``) or disable (``false``) the addition of implied branch
matchers to job and project-template definitions. Normally Zuul
decides whether to add these based on heuristics described in
:attr:`job.branches`. This attribute overrides that behavior.
This can be useful if a project has multiple branches, yet the
jobs defined in the master branch should apply to all branches.
The behavior may also be configured by a Zuul administrator
using
:attr:`tenant.untrusted-projects.<project>.implied-branch-matchers`.
This pragma overrides that setting if both are present.
Note that if a job contains an explicit branch matcher, it will
be used regardless of the value supplied here.
.. attr:: implied-branches
This is a list of :ref:`regular expressions <regex>`, just as
:attr:`job.branches`, which may be used to supply the value of
the implied branch matcher for all jobs and project-templates in
a file.
This may be useful if two projects share jobs but have
dissimilar branch names. If, for example, two projects have
stable maintenance branches with dissimilar names, but both
should use the same job variants, this directive may be used to
indicate that all of the jobs defined in the stable branch of
the first project may also be used for the stable branch of the
other. For example:
.. code-block:: yaml
- pragma:
implied-branches:
- stable/foo
- stable/bar
The above code, when added to the ``stable/foo`` branch of a
project would indicate that the job variants described in that
file should not only be used for changes to ``stable/foo``, but
also on changes to ``stable/bar``, which may be in another
project.
Note that if a job contains an explicit branch matcher, it will
be used regardless of the value supplied here.
If this is used in a branch, it should include that branch name
or changes on that branch may be ignored.
Note also that the presence of `implied-branches` does not
automatically set `implied-branch-matchers`. Zuul will still
decide if implied branch matchers are warranted at all, using
the heuristics described in :attr:`job.branches`, and only use
the value supplied here if that is the case. If you want to
declare specific implied branches on, for example, a
:term:`config-project` project (which normally would not use
implied branches), you must set `implied-branch-matchers` as
well.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/config/pragma.rst | pragma.rst |
.. _secret:
Secret
======
A Secret is a collection of private data for use by one or more jobs.
In order to maintain the security of the data, the values are usually
encrypted, however, data which are not sensitive may be provided
unencrypted as well for convenience.
A Secret may only be used by jobs defined within the same project.
Note that they can be used by any branch of that project, so if a
project's branches have different access controls, consider whether
all branches of that project are equally trusted before using secrets.
To use a secret, a :ref:`job` must specify the secret in
:attr:`job.secrets`. With one exception, secrets are bound to the
playbooks associated with the specific job definition where they were
declared. Additional pre or post playbooks which appear in child jobs
will not have access to the secrets, nor will playbooks which override
the main playbook (if any) of the job which declared the secret. This
protects against jobs in other repositories declaring a job with a
secret as a parent and then exposing that secret.
The exception to the above is if the
:attr:`job.secrets.pass-to-parent` attribute is set to true. In that
case, the secret is made available not only to the playbooks in the
current job definition, but to all playbooks in all parent jobs as
well. This allows for jobs which are designed to work with secrets
while leaving it up to child jobs to actually supply the secret. Use
this option with care, as it may allow the authors of parent jobs to
accidentally or intentionally expose secrets.  If a secret with
`pass-to-parent` set in a child job has the same name as a secret
available to a parent job's playbook, the secret in the child job will
not override the parent, instead it will simply not be available to
that playbook (but will remain available to others).
It is possible to use secrets for jobs defined in :term:`config
projects <config-project>` as well as :term:`untrusted projects
<untrusted-project>`, however their use differs slightly. Because
playbooks in a config project which use secrets run in the
:term:`trusted execution context` where proposed changes are not used
in executing jobs, it is safe for those secrets to be used in all
types of pipelines. However, because playbooks defined in an
untrusted project are run in the :term:`untrusted execution context`
where proposed changes are used in job execution, it is dangerous to
allow those secrets to be used in pipelines which are used to execute
proposed but unreviewed changes. By default, pipelines are considered
`pre-review` and will refuse to run jobs which have playbooks that use
secrets in the untrusted execution context (including those subject to
:attr:`job.secrets.pass-to-parent` secrets) in order to protect
against someone proposing a change which exposes a secret. To permit
this (for instance, in a pipeline which only runs after code review),
the :attr:`pipeline.post-review` attribute may be explicitly set to
``true``.
In some cases, it may be desirable to prevent a job which is defined
in a config project from running in a pre-review pipeline (e.g., a job
used to publish an artifact). In these cases, the
:attr:`job.post-review` attribute may be explicitly set to ``true`` to
indicate the job should only run in post-review pipelines.
If a job with secrets is unsafe to be used by other projects, the
:attr:`job.allowed-projects` attribute can be used to restrict the
projects which can invoke that job. If a job with secrets is defined
in an `untrusted-project`, `allowed-projects` is automatically set to
that project only, and can not be overridden (though a
:term:`config-project` may still add the job to any project's pipeline
regardless of this setting; do so with caution as other projects may
expose the source project's secrets).
Secrets, like most configuration items, are unique within a tenant,
though a secret may be defined on multiple branches of the same
project as long as the contents are the same. This is to aid in
branch maintenance, so that creating a new branch based on an existing
branch will not immediately produce a configuration error.
When the values of secrets are passed to Ansible, the ``!unsafe`` YAML
tag is added which prevents them from being evaluated as Jinja
expressions. This is to avoid a situation where a child job might
expose a parent job's secrets via template expansion.
However, if it is known that a given secret value can be trusted, then
this limitation can be worked around by using the following construct
in a playbook:
.. code-block:: yaml
- set_fact:
unsafe_var_eval: "{{ hostvars['localhost'].secretname.var }}"
This will force an explicit template evaluation of the `var` attribute
on the `secretname` secret. The results will be stored in
unsafe_var_eval.
.. attr:: secret
The following attributes must appear on a secret:
.. attr:: name
:required:
The name of the secret, used in a :ref:`job` definition to
request the secret.
.. attr:: data
:required:
A dictionary which will be added to the Ansible variables
available to the job. The values can be any of the normal YAML
data types (strings, integers, dictionaries or lists) or
encrypted strings. See :ref:`encryption` for more information.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/config/secret.rst | secret.rst |
.. _queue:
Queue
=====
Projects that interact with each other should share a ``queue``.
This is especially used in a :value:`dependent <pipeline.manager.dependent>`
pipeline. The :attr:`project.queue` can optionally refer
to a specific :attr:`queue` object that can further configure the
behavior of the queue.
Here is an example ``queue`` configuration.
.. code-block:: yaml
- queue:
name: integrated
per-branch: false
.. attr:: queue
The attributes available on a queue are as follows (all are
optional unless otherwise specified):
.. attr:: name
:required:
This is used later in the project definition to refer to this queue.
.. attr:: per-branch
:default: false
Queues by default define a single queue for all projects and
branches that use it. This is especially important if projects
want to do upgrade tests between different branches in
the :term:`gate`. If a set of projects doesn't have this use case
it can configure the queue to create a shared queue per branch for
all projects. This can be useful for large projects to improve the
throughput of a gate pipeline as this results in shorter queues
and thus less impact when a job fails in the gate. Note that this
means that all projects that should be gated must have aligned branch
names when using per branch queues. Otherwise changes that belong
together end up in different queues.
.. attr:: allow-circular-dependencies
:default: false
Determines whether Zuul is allowed to process circular
dependencies between changes for this queue. All projects that
are part of a dependency cycle must share the same change queue.
If Zuul detects a dependency cycle it will ensure that every
change also includes all other changes that are part of the
cycle. However each change will still be a normal item in the
queue with its own jobs.
Reporting of success will be postponed until all items in the cycle
succeed. In the case of a failure in any of those items the whole cycle
will be dequeued.
An error message will be posted to all items of the cycle if some
items fail to report (e.g. merge failure when some items were already
merged). In this case the target branch(es) might be in a broken state.
In general, circular dependencies are considered to be an
antipattern since they add extra constraints to continuous
deployment systems. Additionally, due to the lack of atomicity
in merge operations in code review systems (this includes
Gerrit, even with submitWholeTopic set), it may be possible for
only part of a cycle to be merged. In that case, manual
interventions (such as reverting a commit, or bypassing gating
to force-merge the remaining commits) may be required.
.. warning:: If the remote system is able to merge the first but
unable to merge the second or later change in a
dependency cycle, then the gating system for a
project may be broken and may require an
intervention to correct.
.. attr:: dependencies-by-topic
:default: false
Determines whether Zuul should query the code review system for
changes under the same topic and treat those as a set of
circular dependencies.
Note that the Gerrit code review system supports a setting
called ``change.submitWholeTopic``, which, when set, will cause
all changes under the same topic to be merged simultaneously.
Zuul automatically observes this setting and treats all changes
to be submitted together as circular dependencies. If this
setting is enabled in gerrit, do not enable
``dependencies-by-topic`` in associated Zuul queues.
Because ``change.submitWholeTopic`` is applied system-wide in
Gerrit, some Zuul users may wish to emulate the behavior for
some projects without enabling it for all of Gerrit. In this
case, setting ``dependencies-by-topic`` will cause Zuul to
approximate the Gerrit behavior only for changes enqueued into
queues where this is set.
This setting requires :attr:`queue.allow-circular-dependencies`
to also be set. All of the caveats noted there continue to
apply.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/config/queue.rst | queue.rst |
:title: Pagure Driver
.. _pagure_driver:
Pagure
======
The Pagure driver supports sources, triggers, and reporters. It can
interact with the public Pagure.io service as well as site-local
installations of Pagure.
Configure Pagure
----------------
The user's API token configured in zuul.conf must have the following
ACL rights:
- "Merge a pull-request" set to on (optional, only for gating)
- "Flag a pull-request" set to on
- "Comment on a pull-request" set to on
- "Modify an existing project" set to on
Each project to be integrated with Zuul needs:
- "Web hook target" set to
http://<zuul-web>/zuul/api/connection/<conn-name>/payload
- "Pull requests" set to on
- "Open metadata access to all" set to off (optional, expected if approval
  is based on a PR metadata tag)
- "Minimum score to merge pull-request" set to the same value as
  the score requirement (optional, expected if a score requirement is
  defined in a pipeline)
Furthermore, the user must be added as project collaborator
(**ticket** access level), to be able to read the project's
webhook token. This token is used to validate webhook's payload. But
if Zuul is configured to merge pull requests then the access level
must be **commit**.
Connection Configuration
------------------------
The supported options in ``zuul.conf`` connections are:
.. attr:: <pagure connection>
.. attr:: driver
:required:
.. value:: pagure
The connection must set ``driver=pagure`` for Pagure connections.
.. attr:: api_token
The user's API token with the ``Modify an existing project`` capability.
.. attr:: server
:default: pagure.io
Hostname of the Pagure server.
.. attr:: canonical_hostname
The canonical hostname associated with the git repos on the
Pagure server. Defaults to the value of :attr:`<pagure
connection>.server`. This is used to identify projects from
this connection by name and in preparing repos on the filesystem
for use by jobs. Note that Zuul will still only communicate
with the Pagure server identified by **server**; this option is
useful if users customarily use a different hostname to clone or
pull git repos so that when Zuul places them in the job's
working directory, they appear under this directory name.
.. attr:: baseurl
:default: https://{server}
Path to the Pagure web and API interface.
.. attr:: cloneurl
:default: https://{baseurl}
Path to the Pagure Git repositories. Used to clone.
.. attr:: app_name
:default: Zuul
Display name that will appear as the application name in front
of each CI status flag.
.. attr:: source_whitelist
:default: ''
A comma separated list of source IP addresses from which webhook
calls are whitelisted. If the source is not whitelisted, then
call payload's signature is verified using the project webhook
token. Admin access to the project is required by Zuul to read
the token. Whitelisting a source of hook calls allows Zuul to
react to events without any authorization. This setting should
not be used in production.
Trigger Configuration
---------------------
Pagure webhook events can be configured as triggers.
A connection name with the Pagure driver can take multiple events with
the following options.
.. attr:: pipeline.trigger.<pagure source>
The dictionary passed to the Pagure pipeline ``trigger`` attribute
supports the following attributes:
.. attr:: event
:required:
The event from Pagure. Supported events are:
.. value:: pg_pull_request
.. value:: pg_pull_request_review
.. value:: pg_push
.. attr:: action
A :value:`pipeline.trigger.<pagure source>.event.pg_pull_request`
event will have associated action(s) to trigger from. The
supported actions are:
.. value:: opened
Pull request opened.
.. value:: changed
Pull request synchronized.
.. value:: closed
Pull request closed.
.. value:: comment
Comment added to pull request.
.. value:: status
Status set on pull request.
.. value:: tagged
Tag metadata set on pull request.
A :value:`pipeline.trigger.<pagure
source>.event.pg_pull_request_review` event will have associated
action(s) to trigger from. The supported actions are:
.. value:: thumbsup
Positive pull request review added.
.. value:: thumbsdown
Negative pull request review added.
.. attr:: comment
This is only used for ``pg_pull_request`` and ``comment`` actions. It
accepts a list of regexes that are searched for in the comment
string. If any of these regexes matches a portion of the comment
string the trigger is matched. ``comment: retrigger`` will
match when comments containing 'retrigger' somewhere in the
comment text are added to a pull request.
.. attr:: status
This is used for ``pg_pull_request`` and ``status`` actions. It
accepts a list of strings each of which matches the user setting
the status, the status context, and the status itself in the
format of ``status``. For example, ``success`` or ``failure``.
.. attr:: tag
This is used for ``pg_pull_request`` and ``tagged`` actions. It
accepts a list of strings and if one of them is part of the
event tags metadata then the trigger is matched.
.. attr:: ref
This is only used for ``pg_push`` events. This field is treated as
a regular expression and multiple refs may be listed. Pagure
always sends full ref name, eg. ``refs/tags/bar`` and this
string is matched against the regular expression.
Reporter Configuration
----------------------
Zuul reports back to Pagure via Pagure API. Available reports include a PR
comment containing the build results, a commit status on start, success and
failure, and a merge of the PR itself. Status name, description, and context
is taken from the pipeline.
.. attr:: pipeline.<reporter>.<pagure source>
To report to Pagure, the dictionaries passed to any of the pipeline
:ref:`reporter<reporters>` attributes support the following
attributes:
.. attr:: status
String value (``pending``, ``success``, ``failure``) that the
reporter should set as the commit status on Pagure.
.. attr:: status-url
:default: web.status_url or the empty string
String value for a link url to set in the Pagure status. Defaults to the
zuul server status_url, or the empty string if that is unset.
.. attr:: comment
:default: true
Boolean value that determines if the reporter should add a
comment to the pipeline status to the Pagure Pull Request. Only
used for Pull Request based items.
.. attr:: merge
:default: false
Boolean value that determines if the reporter should merge the
pull Request. Only used for Pull Request based items.
Requirements Configuration
--------------------------
As described in :attr:`pipeline.require` pipelines may specify that items meet
certain conditions in order to be enqueued into the pipeline. These conditions
vary according to the source of the project in question. To supply
requirements for changes from a Pagure source named ``pagure``, create a
configuration such as the following:
.. code-block:: yaml
pipeline:
require:
pagure:
score: 1
merged: false
status: success
tags:
- gateit
This indicates that changes originating from the Pagure connection
must have a score of *1*, a CI status *success* and not being already merged.
.. attr:: pipeline.require.<pagure source>
The dictionary passed to the Pagure pipeline `require` attribute
supports the following attributes:
.. attr:: score
If present, the minimum score a Pull Request must have reached.
.. attr:: status
If present, the CI status a Pull Request must have.
.. attr:: merged
A boolean value (``true`` or ``false``) that indicates whether
the Pull Request must be merged or not in order to be enqueued.
.. attr:: open
A boolean value (``true`` or ``false``) that indicates whether
the Pull Request must be open or closed in order to be enqueued.
.. attr:: tags
If present, the list of tags a Pull Request must have.
Reference pipelines configuration
---------------------------------
Here is an example of standard pipelines you may want to define:
.. literalinclude:: /examples/pipelines/pagure-reference-pipelines.yaml
:language: yaml
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/pagure.rst | pagure.rst |
:title: SMTP Driver
SMTP
====
The SMTP driver supports reporters only. It is used to send email
when items report.
Connection Configuration
------------------------
.. attr:: <smtp connection>
.. attr:: driver
:required:
.. value:: smtp
The connection must set ``driver=smtp`` for SMTP connections.
.. attr:: server
:default: localhost
SMTP server hostname or address to use.
.. attr:: port
:default: 25
SMTP server port.
.. attr:: default_from
:default: zuul
Who the email should appear to be sent from when emailing the report.
This can be overridden by individual pipelines.
.. attr:: default_to
:default: zuul
Who the report should be emailed to by default.
This can be overridden by individual pipelines.
.. attr:: user
Optional user name used to authenticate to the SMTP server. Used only in
conjunction with a password. If no password is present, this option is
ignored.
.. attr:: password
Optional password used to authenticate to the SMTP server.
.. attr:: use_starttls
:default: false
Issue a STARTTLS request to establish an encrypted channel after having
connected to the SMTP server.
Reporter Configuration
----------------------
A simple email reporter is also available.
A :ref:`connection<connections>` that uses the smtp driver must be supplied to the
reporter. The connection also may specify a default *To* or *From*
address.
Each pipeline can overwrite the ``subject`` or the ``to`` or ``from`` address by
providing alternatives as arguments to the reporter. For example:
.. code-block:: yaml
- pipeline:
name: post-merge
success:
outgoing_smtp:
to: [email protected]
failure:
internal_smtp:
to: [email protected]
from: [email protected]
subject: Change {change} failed
.. attr:: pipeline.<reporter>.<smtp source>
To report via email, the dictionaries passed to any of the pipeline
:ref:`reporter<reporters>` attributes support the following
attributes:
.. attr:: to
The SMTP recipient address for the report. Multiple addresses
may be specified as one value separated by commas.
.. attr:: from
The SMTP sender address for the report.
.. attr:: subject
The Subject of the report email.
.. TODO: document subject string formatting.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/smtp.rst | smtp.rst |
:title: GitLab Driver
.. _gitlab_driver:
GitLab
======
The GitLab driver supports sources, triggers, and reporters. It can
interact with the public GitLab.com service as well as site-local
installations of GitLab.
Configure GitLab
----------------
Zuul needs to interact with projects by:
- receiving events via web-hooks
- performing actions via the API
web-hooks
^^^^^^^^^
Projects to be integrated with Zuul need to send events using webhooks.
This can be enabled at Group level or Project level in "Settings/Webhooks"
- "URL" set to
``http://<zuul-web>/api/connection/<conn-name>/payload``
- "Merge request events" set to "on"
- "Push events" set to "on"
- "Tag push events" set to "on"
- "Comments" set to "on"
- Define a "Secret Token"
API
^^^
| Even though bot users exist: https://docs.gitlab.com/ce/user/project/settings/project_access_tokens.html#project-bot-users
| They are only available at project level.
In order to manage multiple projects using a single connection, Zuul needs a
global access to projects, which can only be achieved by creating a specific
Zuul user. This user counts as a licensed seat.
The API token must be created in user Settings, Access tokens. The Zuul user's
API token configured in zuul.conf must have the following ACL rights: "api".
Connection Configuration
------------------------
The supported options in ``zuul.conf`` connections are:
.. attr:: <gitlab connection>
.. attr:: driver
:required:
.. value:: gitlab
The connection must set ``driver=gitlab`` for GitLab connections.
.. attr:: api_token_name
The user's personal access token name (Used if **cloneurl** is http(s))
Set this parameter if authentication to clone projects is required
.. attr:: api_token
The user's personal access token
.. attr:: webhook_token
The webhook secret token.
.. attr:: server
:default: gitlab.com
Hostname of the GitLab server.
.. attr:: canonical_hostname
The canonical hostname associated with the git repos on the
GitLab server. Defaults to the value of :attr:`<gitlab
connection>.server`. This is used to identify projects from
this connection by name and in preparing repos on the filesystem
for use by jobs. Note that Zuul will still only communicate
with the GitLab server identified by **server**; this option is
useful if users customarily use a different hostname to clone or
pull git repos so that when Zuul places them in the job's
working directory, they appear under this directory name.
.. attr:: baseurl
:default: https://{server}
Path to the GitLab web and API interface.
.. attr:: sshkey
Path to SSH key to use (Used if **cloneurl** is ssh)
.. attr:: cloneurl
:default: {baseurl}
Omit to clone using http(s) or set to ``ssh://git@{server}``.
If **api_token_name** is set and **cloneurl** is either omitted or is
set without credentials, **cloneurl** will be modified to use credentials
as this: ``http(s)://<api_token_name>:<api_token>@<server>``.
If **cloneurl** is defined with credentials, it will be used as is,
without modification from the driver.
.. attr:: keepalive
:default: 60
TCP connection keepalive timeout; ``0`` disables.
.. attr:: disable_connection_pool
:default: false
Connection pooling improves performance and resource usage under
normal circumstances, but in adverse network conditions it can
be problematic. Set this to ``true`` to disable.
Trigger Configuration
---------------------
GitLab webhook events can be configured as triggers.
A connection name with the GitLab driver can take multiple events with
the following options.
.. attr:: pipeline.trigger.<gitlab source>
The dictionary passed to the GitLab pipeline ``trigger`` attribute
supports the following attributes:
.. attr:: event
:required:
The event from GitLab. Supported events are:
.. value:: gl_merge_request
.. value:: gl_push
.. attr:: action
A :value:`pipeline.trigger.<gitlab source>.event.gl_merge_request`
event will have associated action(s) to trigger from. The
supported actions are:
.. value:: opened
Merge request opened.
.. value:: changed
Merge request synchronized.
.. value:: merged
Merge request merged.
.. value:: comment
Comment added to merge request.
.. value:: approved
Merge request approved.
.. value:: unapproved
Merge request unapproved.
.. value:: labeled
Merge request labeled.
.. attr:: comment
This is only used for ``gl_merge_request`` and ``comment`` actions. It
accepts a list of regexes that are searched for in the comment
string. If any of these regexes matches a portion of the comment
string the trigger is matched. ``comment: retrigger`` will
match when comments containing 'retrigger' somewhere in the
comment text are added to a merge request.
.. attr:: labels
This is only used for ``gl_merge_request`` and ``labeled``
actions. It accepts a string or a list of strings that
must have been added for the event to match.
.. attr:: unlabels
This is only used for ``gl_merge_request`` and ``labeled``
actions. It accepts a string or a list of strings that
must have been removed for the event to match.
.. attr:: ref
This is only used for ``gl_push`` events. This field is treated as
a regular expression and multiple refs may be listed. GitLab
always sends full ref name, eg. ``refs/heads/bar`` and this
string is matched against the regular expression.
Reporter Configuration
----------------------
Zuul reports back to GitLab via the API. Available reports include a Merge Request
comment containing the build results. Status name, description, and context
are taken from the pipeline.
.. attr:: pipeline.<reporter>.<gitlab source>
To report to GitLab, the dictionaries passed to any of the pipeline
:ref:`reporter<reporters>` attributes support the following
attributes:
.. attr:: comment
:default: true
Boolean value that determines if the reporter should add a
comment to the pipeline status to the GitLab Merge Request.
.. attr:: approval
Boolean value that determines whether to report *approve* or *unapprove*
into the merge request approval system. To set an approval the Zuul user
must be a *Developer* or *Maintainer* member of the project. If not set,
approval won't be reported.
.. attr:: merge
:default: false
Boolean value that determines if the reporter should merge the
Merge Request. To merge a Merge Request the Zuul user must be a *Developer* or
*Maintainer* project's member. In case of *developer*, the *Allowed to merge*
setting in *protected branches* must be set to *Developers + Maintainers*.
.. attr:: label
A string or list of strings, each representing a label name
which should be added to the merge request.
.. attr:: unlabel
A string or list of strings, each representing a label name
which should be removed from the merge request.
Requirements Configuration
--------------------------
As described in :attr:`pipeline.require` pipelines may specify that items meet
certain conditions in order to be enqueued into the pipeline. These conditions
vary according to the source of the project in question.
.. code-block:: yaml
pipeline:
require:
gitlab:
open: true
This indicates that changes originating from the GitLab connection must be
in the *opened* state (not merged yet).
.. attr:: pipeline.require.<gitlab source>
The dictionary passed to the GitLab pipeline `require` attribute
supports the following attributes:
.. attr:: open
A boolean value (``true`` or ``false``) that indicates whether
the Merge Request must be open in order to be enqueued.
.. attr:: merged
A boolean value (``true`` or ``false``) that indicates whether
the Merge Request must be merged or not in order to be enqueued.
.. attr:: approved
A boolean value (``true`` or ``false``) that indicates whether
the Merge Request must be approved or not in order to be enqueued.
.. attr:: labels
A list of labels a Merge Request must have in order to be enqueued.
Reference pipelines configuration
---------------------------------
Here is an example of standard pipelines you may want to define:
.. literalinclude:: /examples/pipelines/gitlab-reference-pipelines.yaml
:language: yaml
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/gitlab.rst | gitlab.rst |
:title: Timer Driver
Timer
=====
The timer driver supports triggers only. It is used for configuring
pipelines so that jobs run at scheduled times. No connection
configuration is required.
Trigger Configuration
---------------------
Timers don't require a special connection or driver. Instead they can
simply be used by listing ``timer`` as the trigger.
This trigger will run based on a cron-style time specification. It
will enqueue an event into its pipeline for every project and branch
defined in the configuration. Any job associated with the pipeline
will run in response to that event.
Zuul implements the timer using `apscheduler`_. Please check the
`apscheduler documentation`_ for more information about the syntax.
.. attr:: pipeline.trigger.timer
The timer trigger supports the following attributes:
.. attr:: time
:required:
The time specification in cron syntax. Only the 5 part syntax
is supported, not the symbolic names. Example: ``0 0 * * *``
runs at midnight.
An optional 6th part specifies seconds. The optional 7th part specifies
a jitter in seconds. This delays the trigger randomly, limited by
the specified value. Example ``0 0 * * * * 60`` runs at
midnight or randomly up to 60 seconds later. The jitter is
applied individually to each project-branch combination.
.. warning::
Be aware the day-of-week value differs from cron.
The first weekday is Monday (0), and the last is Sunday (6).
.. _apscheduler: https://apscheduler.readthedocs.io/
.. _apscheduler documentation: https://apscheduler.readthedocs.io/en/3.x/modules/triggers/cron.html#module-apscheduler.triggers.cron
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/timer.rst | timer.rst |
.. _drivers:
Drivers
=======
Drivers may support any of the following functions:
* Sources -- hosts git repositories for projects. Zuul can clone git
repos for projects and fetch refs.
* Triggers -- emits events to which Zuul may respond. Triggers are
configured in pipelines to cause changes or other refs to be
enqueued.
* Reporters -- outputs information when a pipeline is finished
processing an item.
Zuul includes the following drivers:
.. toctree::
:maxdepth: 2
gerrit
github
pagure
gitlab
git
mqtt
elasticsearch
smtp
timer
zuul
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/index.rst | index.rst |
:title: Elasticsearch Driver
Elasticsearch
=============
The Elasticsearch driver supports reporters only. The purpose of the driver is
to export build and buildset results to an Elasticsearch index.
If the index does not exist in Elasticsearch then the driver will create it
with an appropriate mapping for static fields.
The driver can add job's variables and any data returned to Zuul
via zuul_return respectively into the `job_vars` and `job_returned_vars` fields
of the exported build doc. Elasticsearch will apply a dynamic data type
detection for those fields.
Elasticsearch supports a number of different datatypes for the fields in a
document. Please refer to its `documentation`_.
The Elasticsearch reporter uses the new ES client, which only supports the
`current version`_ of Elasticsearch. The
reporter has been tested on ES cluster version 7. Lower versions may
work, but we cannot give any guarantee of that.
.. _documentation: https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html
.. _current version: https://www.elastic.co/support/eol
Connection Configuration
------------------------
The connection options for the Elasticsearch driver are:
.. attr:: <Elasticsearch connection>
.. attr:: driver
:required:
.. value:: elasticsearch
The connection must set ``driver=elasticsearch``.
.. attr:: uri
:required:
Database connection information in the form of a comma separated
list of ``host:port``. The information can also include protocol (http/https)
or username and password required to authenticate to the Elasticsearch.
Example:
uri=elasticsearch1.domain:9200,elasticsearch2.domain:9200
or
uri=https://user:password@elasticsearch:9200
where user and password is optional.
.. attr:: use_ssl
:default: true
Turn on SSL. This option is not required, if you set ``https`` in
uri param.
.. attr:: verify_certs
:default: true
Make sure we verify SSL certificates.
.. attr:: ca_certs
:default: ''
Path to CA certs on disk.
.. attr:: client_cert
:default: ''
Path to the PEM formatted SSL client certificate.
.. attr:: client_key
:default: ''
Path to the PEM formatted SSL client key.
Example of driver configuration:
.. code-block:: text
[connection elasticsearch]
driver=elasticsearch
uri=https://managesf.sftests.com:9200
Additional parameters for authenticating to the Elasticsearch server
can be found in the `client`_ class.
.. _client: https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/client/__init__.py
Reporter Configuration
----------------------
This reporter is used to store build results in an Elasticsearch index.
The Elasticsearch reporter does nothing on :attr:`pipeline.start` or
:attr:`pipeline.merge-conflict`; it only acts on
:attr:`pipeline.success` or :attr:`pipeline.failure` reporting stages.
.. attr:: pipeline.<reporter>.<elasticsearch source>
The reporter supports the following attributes:
.. attr:: index
:default: zuul
The Elasticsearch index to be used to index the data. To prevent
any name collisions between Zuul tenants, the tenant name is used as index
name prefix. The real index name will be:
.. code-block::
<index-name>.<tenant-name>-<YYYY>.<MM>.<DD>
The index will be created if it does not exist.
.. attr:: index-vars
:default: false
Boolean value that determines if the reporter should add job's vars
to the exported build doc.
NOTE: The index-vars is not including the secrets.
.. attr:: index-returned-vars
:default: false
Boolean value that determines if the reporter should add zuul_returned
vars to the exported build doc.
For example:
.. code-block:: yaml
- pipeline:
name: check
success:
elasticsearch:
index: 'zuul-index'
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/elasticsearch.rst | elasticsearch.rst |
:title: Git Driver
Git
===
This driver can be used to load Zuul configuration from public Git repositories,
for instance from ``opendev.org/zuul/zuul-jobs`` that is suitable for use by
any Zuul system. It can also be used to trigger jobs from ``ref-updated`` events
in a pipeline.
Connection Configuration
------------------------
The supported options in ``zuul.conf`` connections are:
.. attr:: <git connection>
.. attr:: driver
:required:
.. value:: git
The connection must set ``driver=git`` for Git connections.
.. attr:: baseurl
Path to the base Git URL. Git repos name will be appended to it.
.. attr:: poll_delay
:default: 7200
The delay in seconds of the Git repositories polling loop.
Trigger Configuration
---------------------
.. attr:: pipeline.trigger.<git source>
The dictionary passed to the Git pipeline ``trigger`` attribute
supports the following attributes:
.. attr:: event
:required:
Only ``ref-updated`` is supported.
.. attr:: ref
On ref-updated events, a ref such as ``refs/heads/master`` or
``^refs/tags/.*$``. This field is treated as a regular expression,
and multiple refs may be listed.
.. attr:: ignore-deletes
:default: true
When a ref is deleted, a ref-updated event is emitted with a
newrev of all zeros specified. The ``ignore-deletes`` field is a
boolean value that describes whether or not these newrevs
trigger ref-updated events.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/git.rst | git.rst |
:title: Zuul Driver
Zuul
====
The Zuul driver supports triggers only. It is used for triggering
pipelines based on internal Zuul events.
Trigger Configuration
---------------------
Zuul events don't require a special connection or driver. Instead they
can simply be used by listing ``zuul`` as the trigger.
.. attr:: pipeline.trigger.zuul
The Zuul trigger supports the following attributes:
.. attr:: event
:required:
The event name. Currently supported events:
.. value:: project-change-merged
When Zuul merges a change to a project, it generates this
event for every open change in the project.
.. warning::
Triggering on this event can cause poor performance when
using the GitHub driver with a large number of
installations.
.. value:: parent-change-enqueued
When Zuul enqueues a change into any pipeline, it generates
this event for every child of that change.
.. attr:: pipeline
Only available for ``parent-change-enqueued`` events. This is
the name of the pipeline in which the parent change was
enqueued.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/zuul.rst | zuul.rst |
:title: GitHub Driver
.. _github_driver:
GitHub
======
The GitHub driver supports sources, triggers, and reporters. It can
interact with the public GitHub service as well as site-local
installations of GitHub enterprise.
Configure GitHub
----------------
There are two options currently available. GitHub's project owner can either
manually setup web-hook or install a GitHub Application. In the first case,
the project's owner needs to know the zuul endpoint and the webhook secrets.
Web-Hook
........
To configure a project's `webhook events
<https://developer.github.com/webhooks/creating/>`_:
* Set *Payload URL* to
``http://<zuul-hostname>:<port>/api/connection/<connection-name>/payload``.
* Set *Content Type* to ``application/json``.
Select *Events* you are interested in. See below for the supported events.
You will also need to have a GitHub user created for your zuul:
* Zuul public key needs to be added to the GitHub account
* An api_token needs to be created too, see this `article
<https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_
Then in the zuul.conf, set webhook_token and api_token.
Application
...........
To create a `GitHub application
<https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/registering-github-apps/>`_:
* Go to your organization settings page to create the application, e.g.:
https://github.com/organizations/my-org/settings/apps/new
* Set GitHub App name to "my-org-zuul"
* Set Setup URL to your setup documentation; when users install the application
they are redirected to this URL
* Set Webhook URL to
``http://<zuul-hostname>:<port>/api/connection/<connection-name>/payload``.
* Create a Webhook secret
* Set permissions:
* Repository administration: Read
* Checks: Read & Write
* Repository contents: Read & Write (write to let zuul merge change)
* Issues: Read & Write
* Pull requests: Read & Write
* Commit statuses: Read & Write
* Set events subscription:
* Check run
* Commit comment
* Create
* Push
* Release
* Issue comment
* Issues
* Label
* Pull request
* Pull request review
* Pull request review comment
* Status
* Set Where can this GitHub App be installed to "Any account"
* Create the App
* Generate a Private key in the app settings page
Then in the zuul.conf, set webhook_token, app_id and app_key.
After restarting zuul-scheduler, verify in the 'Advanced' tab that the
Ping payload works (green tick and 200 response)
Users can now install the application using its public page, e.g.:
https://github.com/apps/my-org-zuul
.. note::
GitHub Pull Requests that modify GitHub Actions workflow configuration
files cannot be merged by application credentials (this is any Pull Request
that edits the .github/workflows directory and its contents). These Pull
Requests must be merged by a normal user account. This means that Zuul
will be limited to posting test results and cannot merge these PRs
automatically when they pass testing.
GitHub Actions are still in Beta and this behavior may change.
Connection Configuration
------------------------
There are two forms of operation. Either the Zuul installation can be
configured as a `Github App`_ or it can be configured as a Webhook.
If the `Github App`_ approach is taken, the config settings ``app_id`` and
``app_key`` are required. If the Webhook approach is taken, the ``api_token``
setting is required.
The supported options in ``zuul.conf`` connections are:
.. attr:: <github connection>
.. attr:: driver
:required:
.. value:: github
The connection must set ``driver=github`` for GitHub connections.
.. attr:: app_id
App ID if you are using a *GitHub App*. Can be found under the
**Public Link** on the right hand side labeled **ID**.
.. attr:: app_key
Path to a file containing the secret key Zuul will use to create
tokens for the API interactions. In Github this is known as
**Private key** and must be collected when generated.
.. attr:: api_token
API token for accessing GitHub if Zuul is configured with
Webhooks. See `Creating an access token for command-line use
<https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_.
.. attr:: webhook_token
Required token for validating the webhook event payloads. In
the GitHub App Configuration page, this is called **Webhook
secret**. See `Securing your webhooks
<https://developer.github.com/webhooks/securing/>`_.
.. attr:: sshkey
:default: ~/.ssh/id_rsa
Path to SSH key to use when cloning github repositories if Zuul
is configured with Webhooks.
.. attr:: server
:default: github.com
Hostname of the github install (such as a GitHub Enterprise).
.. attr:: canonical_hostname
The canonical hostname associated with the git repos on the
GitHub server. Defaults to the value of :attr:`<github
connection>.server`. This is used to identify projects from
this connection by name and in preparing repos on the filesystem
for use by jobs. Note that Zuul will still only communicate
with the GitHub server identified by **server**; this option is
useful if users customarily use a different hostname to clone or
pull git repos so that when Zuul places them in the job's
working directory, they appear under this directory name.
.. attr:: verify_ssl
:default: true
Enable or disable ssl verification for GitHub Enterprise. This
is useful for a connection to a test installation.
.. attr:: rate_limit_logging
:default: true
Enable or disable GitHub rate limit logging. If rate limiting is disabled
in GitHub Enterprise this can save some network round trip times.
.. attr:: repo_cache
To configure Zuul to use a GitHub Enterprise `repository cache
<https://docs.github.com/en/[email protected]/admin/enterprise-management/caching-repositories/about-repository-caching>`_
set this value to the hostname of the cache (e.g.,
``europe-ci.github.example.com``). Zuul will fetch commits as
well as determine the global repo state of repositories used in
jobs from this host.
This setting is incompatible with :attr:`<github
connection>.sshkey`.
Because the repository cache may be several minutes behind the
canonical site, enabling this setting automatically sets the
default :attr:`<github connection>.repo_retry_timeout` to 600
seconds. That setting may still be overridden to specify a
different value.
.. attr:: repo_retry_timeout
This setting is only used if :attr:`<github
connection>.repo_cache` is set. It specifies the amount of time
in seconds that Zuul mergers and executors should spend
attempting to fetch git commits which are not available from the
GitHub repository cache host.
When :attr:`<github connection>.repo_cache` is set, this value
defaults to 600 seconds, but it can be overridden. Zuul retries
git fetches every 30 seconds, and this value will be rounded up
to the next highest multiple of 30 seconds.
.. attr:: max_threads_per_installation
:default: 1
The GitHub driver performs event pre-processing in parallel
before forwarding the events (in the correct order) to the
scheduler for processing. By default, this parallel
pre-processing is restricted to a single request for each GitHub
App installation that Zuul uses when interacting with GitHub.
This is to avoid running afoul of GitHub's abuse detection
mechanisms. Some high-traffic installations of GitHub
Enterprise may wish to increase this value to allow more
parallel requests if resources permit. If GitHub Enterprise
resource usage is not a concern, setting this value to ``10`` or
greater may be reasonable.
Trigger Configuration
---------------------
GitHub webhook events can be configured as triggers.
A connection name with the GitHub driver can take multiple events with
the following options.
.. attr:: pipeline.trigger.<github source>
The dictionary passed to the GitHub pipeline ``trigger`` attribute
supports the following attributes:
.. attr:: event
:required:
The event from github. Supported events are:
.. value:: pull_request
.. value:: pull_request_review
.. value:: push
.. value:: check_run
.. attr:: action
A :value:`pipeline.trigger.<github source>.event.pull_request`
event will have associated action(s) to trigger from. The
supported actions are:
.. value:: opened
Pull request opened.
.. value:: changed
Pull request synchronized.
.. value:: closed
Pull request closed.
.. value:: reopened
Pull request reopened.
.. value:: comment
Comment added to pull request.
.. value:: labeled
Label added to pull request.
.. value:: unlabeled
Label removed from pull request.
.. value:: status
Status set on commit. The syntax is ``user:status:value``.
This also can be a regular expression.
A :value:`pipeline.trigger.<github
source>.event.pull_request_review` event will have associated
action(s) to trigger from. The supported actions are:
.. value:: submitted
Pull request review added.
.. value:: dismissed
Pull request review removed.
A :value:`pipeline.trigger.<github source>.event.check_run`
event will have associated action(s) to trigger from. The
supported actions are:
.. value:: requested
A check run is requested.
.. value:: completed
A check run completed.
.. attr:: branch
The branch associated with the event. Example: ``master``. This
field is treated as a regular expression, and multiple branches
may be listed. Used for ``pull_request`` and
``pull_request_review`` events.
.. attr:: comment
This is only used for ``pull_request`` ``comment`` actions. It
accepts a list of regexes that are searched for in the comment
string. If any of these regexes matches a portion of the comment
string the trigger is matched. ``comment: retrigger`` will
match when comments containing 'retrigger' somewhere in the
comment text are added to a pull request.
.. attr:: label
This is only used for ``labeled`` and ``unlabeled``
``pull_request`` actions. It accepts a list of strings each of
which matches the label name in the event literally. ``label:
recheck`` will match a ``labeled`` action when pull request is
labeled with a ``recheck`` label. ``label: 'do not test'`` will
match a ``unlabeled`` action when a label with name ``do not
test`` is removed from the pull request.
.. attr:: state
This is only used for ``pull_request_review`` events. It
accepts a list of strings each of which is matched to the review
state, which can be one of ``approved``, ``comment``, or
``request_changes``.
.. attr:: status
This is used for ``pull-request`` and ``status`` actions. It
accepts a list of strings each of which matches the user setting
the status, the status context, and the status itself in the
format of ``user:context:status``. For example,
``zuul_github_ci_bot:check_pipeline:success``.
.. attr:: check
This is only used for ``check_run`` events. It works similar to
the ``status`` attribute and accepts a list of strings each of
which matches the app requesting or updating the check run, the
check run's name and the conclusion in the format of
``app:name::conclusion``.
To make Zuul properly interact with Github's checks API, each
pipeline that is using the checks API should have at least one
trigger that matches the pipeline's name regardless of the result,
e.g. ``zuul:cool-pipeline:.*``. This will enable the cool-pipeline
to trigger whenever a user requests the ``cool-pipeline`` check
run as part of the ``zuul`` check suite.
Additionally, one could use ``.*:success`` to trigger a pipeline
whenever a successful check run is reported (e.g. useful for
gating).
.. attr:: ref
This is only used for ``push`` events. This field is treated as
a regular expression and multiple refs may be listed. GitHub
always sends full ref name, eg. ``refs/tags/bar`` and this
string is matched against the regular expression.
.. attr:: require-status
.. warning:: This is deprecated and will be removed in a future
version. Use :attr:`pipeline.trigger.<github
source>.require` instead.
This may be used for any event. It requires that a certain kind
of status be present for the PR (the status could be added by
the event in question). It follows the same syntax as
:attr:`pipeline.require.<github source>.status`. For each
specified criteria there must exist a matching status.
This is ignored if the :attr:`pipeline.trigger.<github
source>.require` attribute is present.
.. attr:: require
This may be used for any event. It describes conditions that
must be met by the PR in order for the trigger event to match.
Those conditions may be satisfied by the event in question. It
follows the same syntax as :ref:`github_requirements`.
.. attr:: reject
This may be used for any event and is the mirror of
:attr:`pipeline.trigger.<github source>.require`. It describes
conditions that when met by the PR cause the trigger event not
to match. Those conditions may be satisfied by the event in
question. It follows the same syntax as
:ref:`github_requirements`.
Reporter Configuration
----------------------
Zuul reports back to GitHub via GitHub API. Available reports include a PR
comment containing the build results, a commit status on start, success and
failure, an issue label addition/removal on the PR, and a merge of the PR
itself. Status name, description, and context are taken from the pipeline.
.. attr:: pipeline.<reporter>.<github source>
To report to GitHub, the dictionaries passed to any of the pipeline
:ref:`reporter<reporters>` attributes support the following
attributes:
.. attr:: status
:type: str
:default: None
Report status via the Github `status API
<https://docs.github.com/v3/repos/statuses/>`__. Set to one of
* ``pending``
* ``success``
* ``failure``
This is usually mutually exclusive with a value set in
:attr:`pipeline.<reporter>.<github source>.check`, since this
reports similar results via a different API. This API is older
and results do not show up on the "checks" tab in the Github UI.
It is recommended to use `check` unless you have a specific
reason to use the status API.
.. TODO support role markup in :default: so we can xref
:attr:`web.status_url` below
.. attr:: status-url
:default: link to the build status page
:type: string
URL to set in the Github status.
Defaults to a link to the build status or results page. This
should probably be left blank unless there is a specific reason
to override it.
.. attr:: check
:type: string
Report status via the Github `checks API
<https://docs.github.com/v3/checks/>`__. Set to one of
* ``cancelled``
* ``failure``
* ``in_progress``
* ``neutral``
* ``skipped``
* ``success``
This is usually mutually exclusive with a value set in
:attr:`pipeline.<reporter>.<github source>.status`, since this
reports similar results via a different API.
.. attr:: comment
:default: true
Boolean value that determines if the reporter should add a
comment to the pipeline status to the github pull request. Only
used for Pull Request based items.
.. attr:: review
One of `approve`, `comment`, or `request-changes` that causes the
reporter to submit a review with the specified status on Pull Request
based items. Has no effect on other items.
.. attr:: review-body
Text that will be submitted as the body of the review. Required if review
is set to `comment` or `request-changes`.
.. attr:: merge
:default: false
Boolean value that determines if the reporter should merge the
pull request. Only used for Pull Request based items.
.. attr:: label
List of strings each representing an exact label name which
should be added to the pull request by reporter. Only used for
Pull Request based items.
.. attr:: unlabel
List of strings each representing an exact label name which
should be removed from the pull request by reporter. Only used
for Pull Request based items.
.. _Github App: https://developer.github.com/apps/
.. _github_requirements:
Requirements Configuration
--------------------------
As described in :attr:`pipeline.require` and :attr:`pipeline.reject`,
pipelines may specify that items meet certain conditions in order to
be enqueued into the pipeline. These conditions vary according to the
source of the project in question. To supply requirements for changes
from a GitHub source named ``my-github``, create a configuration such
as the following::
pipeline:
require:
my-github:
review:
- type: approved
This indicates that changes originating from the GitHub connection
named ``my-github`` must have an approved code review in order to be
enqueued into the pipeline.
.. attr:: pipeline.require.<github source>
The dictionary passed to the GitHub pipeline `require` attribute
supports the following attributes:
.. attr:: review
This requires that a certain kind of code review be present for
the pull request (it could be added by the event in question).
It takes several sub-parameters, all of which are optional and
are combined together so that there must be a code review
matching all specified requirements.
.. attr:: username
If present, a code review from this username matches. It is
treated as a regular expression.
.. attr:: email
If present, a code review with this email address matches.
It is treated as a regular expression.
.. attr:: older-than
If present, the code review must be older than this amount of
time to match. Provide a time interval as a number with a
suffix of "w" (weeks), "d" (days), "h" (hours), "m"
(minutes), "s" (seconds). Example ``48h`` or ``2d``.
.. attr:: newer-than
If present, the code review must be newer than this amount of
time to match. Same format as "older-than".
.. attr:: type
If present, the code review must match this type (or types).
.. TODO: what types are valid?
.. attr:: permission
If present, the author of the code review must have this
permission (or permissions) to match. The available values
are ``read``, ``write``, and ``admin``.
.. attr:: open
A boolean value (``true`` or ``false``) that indicates whether
the change must be open or closed in order to be enqueued.
.. attr:: merged
A boolean value (``true`` or ``false``) that indicates whether
the change must be merged or not in order to be enqueued.
.. attr:: current-patchset
A boolean value (``true`` or ``false``) that indicates whether
the item must be associated with the latest commit in the pull
request in order to be enqueued.
.. TODO: this could probably be expanded upon -- under what
circumstances might this happen with github
.. attr:: draft
A boolean value (``true`` or ``false``) that indicates whether
or not the change must be marked as a draft in GitHub in order
to be enqueued.
.. attr:: status
A string value that corresponds with the status of the pull
request. The syntax is ``user:status:value``. This can also
be a regular expression.
Zuul does not differentiate between a status reported via
status API or via checks API (which is also how Github behaves
in terms of branch protection and `status checks`_).
Thus, the status could be reported by a
:attr:`pipeline.<reporter>.<github source>.status` or a
:attr:`pipeline.<reporter>.<github source>.check`.
When a status is reported via the status API, Github will add
a ``[bot]`` to the name of the app that reported the status,
resulting in something like ``user[bot]:status:value``. For a
status reported via the checks API, the app's slug will be
used as is.
.. attr:: label
A string value indicating that the pull request must have the
indicated label (or labels).
.. attr:: pipeline.reject.<github source>
The `reject` attribute is the mirror of the `require` attribute and
is used to specify pull requests which should not be enqueued into
a pipeline. It accepts a dictionary under the connection name and
with the following attributes:
.. attr:: review
This requires that a certain kind of code review be absent for
the pull request (it could be removed by the event in question).
It takes several sub-parameters, all of which are optional and
are combined together so that there must not be a code review
matching all specified requirements.
.. attr:: username
If present, a code review from this username matches. It is
treated as a regular expression.
.. attr:: email
If present, a code review with this email address matches.
It is treated as a regular expression.
.. attr:: older-than
If present, the code review must be older than this amount of
time to match. Provide a time interval as a number with a
suffix of "w" (weeks), "d" (days), "h" (hours), "m"
(minutes), "s" (seconds). Example ``48h`` or ``2d``.
.. attr:: newer-than
If present, the code review must be newer than this amount of
time to match. Same format as "older-than".
.. attr:: type
If present, the code review must match this type (or types).
.. TODO: what types are valid?
.. attr:: permission
If present, the author of the code review must have this
permission (or permissions) to match. The available values
are ``read``, ``write``, and ``admin``.
.. attr:: open
A boolean value (``true`` or ``false``) that indicates whether
the change must be open or closed in order to be rejected.
.. attr:: merged
A boolean value (``true`` or ``false``) that indicates whether
the change must be merged or not in order to be rejected.
.. attr:: current-patchset
A boolean value (``true`` or ``false``) that indicates whether
the item must be associated with the latest commit in the pull
request in order to be rejected.
.. TODO: this could probably be expanded upon -- under what
circumstances might this happen with github
.. attr:: draft
A boolean value (``true`` or ``false``) that indicates whether
or not the change must be marked as a draft in GitHub in order
to be rejected.
.. attr:: status
A string value that corresponds with the status of the pull
request. The syntax is ``user:status:value``. This can also
be a regular expression.
Zuul does not differentiate between a status reported via
status API or via checks API (which is also how Github behaves
in terms of branch protection and `status checks`_).
Thus, the status could be reported by a
:attr:`pipeline.<reporter>.<github source>.status` or a
:attr:`pipeline.<reporter>.<github source>.check`.
When a status is reported via the status API, Github will add
a ``[bot]`` to the name of the app that reported the status,
resulting in something like ``user[bot]:status:value``. For a
status reported via the checks API, the app's slug will be
used as is.
.. attr:: label
A string value indicating that the pull request must not have
the indicated label (or labels).
Reference pipelines configuration
---------------------------------
Branch protection rules
.......................
The rules prevent Pull requests from being merged on defined branches if they
are not met. For instance a branch might require that specific statuses are
marked as ``success`` before allowing the merge of the Pull request.
Zuul provides the attribute tenant.untrusted-projects.exclude-unprotected-branches.
This attribute is by default set to ``false`` but we recommend setting it to
``true`` for the whole tenant. By doing so Zuul will benefit from:
- excluding in-repo development branches used to open Pull requests. This will
prevent Zuul from fetching and reading unnecessary branch data to find Zuul
configuration files.
- reading protection rules configuration from the Github API for a given branch
to define whether a Pull request must enter the gate pipeline. As of now
Zuul only takes into account "Require status checks to pass before merging" and
the checked status checkboxes.
With the use of the reference pipelines below, the Zuul project recommends to
set the minimum following settings:
- attribute tenant.untrusted-projects.exclude-unprotected-branches to ``true``
in the tenant (main.yaml) configuration file.
- on each Github repository, activate the branch protection rules and
configure the name of the protected branches. Furthermore set
"Require status checks to pass before merging" and check the status labels
checkboxes (at least ``<tenant>/check``) that must be marked as success in
order for Zuul to make the Pull request enter the gate pipeline to be merged.
Reference pipelines
...................
Here is an example of standard pipelines you may want to define:
.. literalinclude:: /examples/pipelines/github-reference-pipelines.yaml
:language: yaml
Github Checks API
-----------------
Github provides two distinct methods for reporting results; a "checks"
and a "status" API.
The `checks API`_ provides some additional features compared to the
`status API`_ like file comments and custom actions (e.g. cancel a
running build).
Either can be chosen when configuring Zuul to report for your Github
project. However, there are some considerations to take into account
when choosing the API.
Design decisions
................
The Github checks API defines the concepts of `Check Suites`_ and
`Check Runs`_. *Check suites* are a collection of *check runs* for a
specific commit and summarize a final status.
A priori the check suite appears to be a good mapping for a pipeline
execution in Zuul, where a check run maps to a single job execution
that is part of the pipeline run. Unfortunately, there are a few
problematic restrictions mapping between Github and Zuul concepts.
Github check suites are opaque and the current status, duration and
the overall conclusion are all calculated and set automatically
whenever an included check run is updated. Most importantly, there
can only be one check suite per commit SHA, per app. Thus there is no
facility for Zuul to create multiple check suite results for a
change, e.g. one check suite for each pipeline such as check and gate.
The Github check suite thus does not map well to Zuul's concept of
multiple pipelines for a single change. Since a check suite is unique
and global for the change, it can not be used to flag the status of
arbitrary pipelines. This makes the check suite API insufficient for
recording details that Zuul needs such as "the check pipeline has
passed but the gate pipeline has failed".
Another issue is that Zuul only reports on the results of the whole
pipeline, not individual jobs. Reporting each Zuul job as a separate
check is problematic for a number of reasons.
Zuul often runs the same job for the same change multiple times; for
example in the check and gate pipeline. There is no facility for
these runs to be reported differently in the single check suite for
the Github change.
When configuring branch protection in Github, only a *check run* can
be selected as required status check. This is in conflict with
managing jobs in pipelines with Zuul. For example, to implement
branch protection on GitHub would mean listing each job as a dedicated
check, leading to a check run list that is not kept in sync with the
project's Zuul pipeline configuration. Additionally, you lose some
of Zuul's features like non-voting jobs as Github branch protections
has no concept of a non-voting job.
Thus Zuul can integrate with the checks API, but only at a pipeline
level. Each pipeline execution will map to a check-run result
reported to Github.
Behaviour in Zuul
.................
Reporting
~~~~~~~~~
The Github reporter is able to report both a status
:attr:`pipeline.<reporter>.<github source>.status` or a check
:attr:`pipeline.<reporter>.<github source>.check`. While it's possible to
configure a Github reporter to report both, it's recommended to use only one.
Reporting both might result in duplicated status check entries in the Github
PR (the section below the comments).
Trigger
~~~~~~~
The Github driver is able to trigger on a reported check
(:value:`pipeline.trigger.<github source>.event.check_run`) similar to a
reported status (:value:`pipeline.trigger.<github source>.action.status`).
Requirements
~~~~~~~~~~~~
While trigger and reporter differentiates between status and check, the Github
driver does not differentiate between them when it comes to pipeline
requirements. This is mainly because Github also doesn't differentiate between
both in terms of branch protection and `status checks`_.
Actions / Events
................
Github provides a set of default actions for check suites and check runs.
Those actions are available as buttons in the Github UI. Clicking on those
buttons will emit webhook events which will be handled by Zuul.
These actions are only available on failed check runs / check suites. So
far, a running or successful check suite / check run does not provide any
action from Github side.
Available actions are:
Re-run all checks
Github emits a webhook event with type ``check_suite`` and action
``rerequested`` that is meant to re-run all check-runs contained in this
check suite. Github does not provide the list of check-runs in that case,
so it's up to the Github app what should run.
Re-run failed checks
Github emits a webhook event with type ``check_run`` and action
``rerequested`` for each failed check run contained in this suite.
Re-run
Github emits a webhook event with type ``check_run`` and action
``rerequested`` for the specific check run.
Zuul will handle all events except for the `Re-run all checks` event;
it does not make sense in the Zuul model to trigger all pipelines to
run simultaneously.
These events are unable to be customized in Github. Github will
always report "You have successfully requested ..." despite nothing
listening to the event. Therefore, it might be a solution to handle
the `Re-run all checks` event in Zuul similar to `Re-run failed
checks` just to not do anything while Github makes the user believe an
action was really triggered.
File comments (annotations)
...........................
Check runs can be used to post file comments directly in the files of the PR.
Those are similar to user comments, but must provide some more information.
Zuul jobs can already return file comments via ``zuul_return``
(see: :ref:`return_values`). We can simply use this return value, build the
necessary annotations (how Github calls it) from it and attach them to the
check run.
Custom actions
~~~~~~~~~~~~~~
Check runs can provide some custom actions which will result in additional
buttons being available in the Github UI for this specific check run.
Clicking on such a button will emit a webhook event with type ``check_run``
and action ``requested_action`` and will additionally contain the id/name of
the requested action which we can define when creating the action on the
check run.
We could use these custom actions to provide some "Re-run" action on a
running check run (which might otherwise be stuck in case a check run update
fails) or to abort a check run directly from the Github UI.
Restrictions and Recommendations
................................
Although both the checks API and the status API can be activated for a
Github reporter at the same time, it's not recommended to do so as this might
result in multiple status checks being reported to the PR for the same pipeline
execution (which would result in duplicated entries in the status section below
the comments of a PR).
In case the update on a check run fails (e.g. request timeout when reporting
success or failure to Github), the check run will stay in status "in_progress"
and there will be no way to re-run the check run via the Github UI as the
predefined actions are only available on failed check runs.
Thus, it's recommended to configure a
:value:`pipeline.trigger.<github source>.action.comment` trigger on the
pipeline to still be able to trigger re-run of the stuck check run via e.g.
"recheck".
The check suite will only list check runs that were reported by Zuul. If
the requirements for a certain pipeline are not met and it is not run, the
check run for this pipeline won't be listed in the check suite. However,
this does not affect the required status checks. If the check run is enabled
as required, Github will still show it in the list of required status checks
- even if it didn't run yet - just not in the check suite.
.. _checks API: https://docs.github.com/v3/checks/
.. _status API: https://docs.github.com/v3/repos/statuses/
.. _Check Suites: https://docs.github.com/v3/checks/suites/
.. _Check Runs: https://docs.github.com/v3/checks/runs/
.. _status checks: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-status-checks#types-of-status-checks-on-github
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/github.rst | github.rst |
:title: MQTT Driver
MQTT
====
The MQTT driver supports reporters only. It is used to send MQTT
message when items report.
Message Schema
--------------
An MQTT report uses this schema:
.. attr:: <mqtt schema>
.. attr:: uuid
The item UUID. Each item enqueued into a Zuul pipeline is
assigned a UUID which remains the same even as Zuul's
speculative execution algorithm re-orders pipeline contents.
.. attr:: action
The reporter action name, e.g.: 'start', 'success', 'failure',
'merge-conflict', ...
.. attr:: tenant
The tenant name.
.. attr:: pipeline
The pipeline name.
.. attr:: project
The project name.
.. attr:: branch
The branch name.
.. attr:: change_url
The change url.
.. attr:: message
The report message.
.. attr:: change
The change number.
.. attr:: patchset
The patchset number.
.. attr:: commit_id
The commit id number.
.. attr:: owner
The owner username of the change.
.. attr:: ref
The change reference.
.. attr:: zuul_ref
The internal zuul change reference.
.. attr:: trigger_time
The timestamp when the event was added to the scheduler.
.. attr:: enqueue_time
The timestamp when the event was added to the pipeline.
.. attr:: buildset
The buildset information.
.. value:: uuid
The buildset global uuid.
.. attr:: result
The buildset result
.. attr:: builds
The list of builds.
.. attr:: job_name
The job name.
.. attr:: voting
The job voting status.
.. attr:: uuid
The build uuid (not present in start report).
.. attr:: execute_time
The build execute time.
.. attr:: start_time
The build start time (not present in start report).
.. attr:: end_time
The build end time (not present in start report).
.. attr:: log_url
The build log url (not present in start report).
.. attr:: web_url
The url to the build result page. Not present in start
report.
.. attr:: result
The build results (not present in start report).
.. attr:: artifacts
:type: list
The build artifacts (not present in start report).
This is a list of dictionaries corresponding to the returned artifacts.
.. attr:: name
The name of the artifact.
.. attr:: url
The url of the artifact.
.. attr:: metadata
:type: dict
The metadata of the artifact. This is a dictionary of
arbitrary key values determined by the job.
Here is an example of a start message:
.. code-block:: javascript
{
'action': 'start',
'tenant': 'openstack.org',
'pipeline': 'check',
'project': 'sf-jobs',
'branch': 'master',
'change_url': 'https://gerrit.example.com/r/3',
'message': 'Starting check jobs.',
'trigger_time': '1524801056.2545864',
'enqueue_time': '1524801093.5689457',
'change': '3',
'patchset': '1',
'commit_id': '2db20c7fb26adf9ac9936a9e750ced9b4854a964',
'owner': 'username',
'ref': 'refs/changes/03/3/1',
'zuul_ref': 'Zf8b3d7cd34f54cb396b488226589db8f',
'buildset': {
'uuid': 'f8b3d7cd34f54cb396b488226589db8f',
'builds': [{
'job_name': 'linters',
'voting': True
}],
},
}
Here is an example of a success message:
.. code-block:: javascript
{
'action': 'success',
'tenant': 'openstack.org',
'pipeline': 'check',
'project': 'sf-jobs',
'branch': 'master',
'change_url': 'https://gerrit.example.com/r/3',
'message': 'Build succeeded.',
'trigger_time': '1524801056.2545864',
'enqueue_time': '1524801093.5689457',
'change': '3',
'patchset': '1',
'commit_id': '2db20c7fb26adf9ac9936a9e750ced9b4854a964',
'owner': 'username',
'ref': 'refs/changes/03/3/1',
'zuul_ref': 'Zf8b3d7cd34f54cb396b488226589db8f',
'buildset': {
'uuid': 'f8b3d7cd34f54cb396b488226589db8f',
'builds': [{
'job_name': 'linters',
'voting': True
'uuid': '16e3e55aca984c6c9a50cc3c5b21bb83',
'execute_time': 1524801120.75632954,
'start_time': 1524801179.8557224,
'end_time': 1524801208.928095,
'log_url': 'https://logs.example.com/logs/3/3/1/check/linters/16e3e55/',
'web_url': 'https://tenant.example.com/t/tenant-one/build/16e3e55aca984c6c9a50cc3c5b21bb83/',
'result': 'SUCCESS',
'dependencies': [],
'artifacts': [],
}],
},
}
Connection Configuration
------------------------
.. attr:: <mqtt connection>
.. attr:: driver
:required:
.. value:: mqtt
The connection must set ``driver=mqtt`` for MQTT connections.
.. attr:: server
:default: localhost
MQTT server hostname or address to use.
.. attr:: port
:default: 1883
MQTT server port.
.. attr:: keepalive
:default: 60
Maximum period in seconds allowed between communications with the broker.
.. attr:: user
Set a username for optional broker authentication.
.. attr:: password
Set a password for optional broker authentication.
.. attr:: ca_certs
A string path to the Certificate Authority certificate files to enable
TLS connection.
.. attr:: certfile
A string pointing to the PEM encoded client certificate to
enable client TLS based authentication. This option requires keyfile to
be set too.
.. attr:: keyfile
A string pointing to the PEM encoded client private key to
enable client TLS based authentication. This option requires certfile to
be set too.
.. attr:: ciphers
A string specifying which encryption ciphers are allowable for this
connection. More information in this
`openssl doc <https://www.openssl.org/docs/manmaster/man1/ciphers.html>`_.
Reporter Configuration
----------------------
A :ref:`connection<connections>` that uses the mqtt driver must be supplied to the
reporter. Each pipeline must provide a topic name. For example:
.. code-block:: yaml
- pipeline:
name: check
success:
mqtt:
topic: "{tenant}/zuul/{pipeline}/{project}/{branch}/{change}"
qos: 2
.. attr:: pipeline.<reporter>.<mqtt>
To report via MQTT message, the dictionaries passed to any of the pipeline
:ref:`reporter<reporters>` support the following attributes:
.. attr:: topic
The MQTT topic to publish messages. The topic can be a format string that
can use the following parameters: ``tenant``, ``pipeline``, ``project``,
``branch``, ``change``, ``patchset`` and ``ref``.
MQTT topic can have hierarchy separated by ``/``, more details in this
`doc <https://mosquitto.org/man/mqtt-7.html>`_
.. attr:: qos
:default: 0
The quality of service level to use, it can be 0, 1 or 2. Read more in this
`guide <https://www.hivemq.com/blog/mqtt-essentials-part-6-mqtt-quality-of-service-levels>`_
.. attr:: include-returned-data
:default: false
If set to ``true``, Zuul will include any data returned from the
job via :ref:`return_values`.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/mqtt.rst | mqtt.rst |
:title: Gerrit Driver
Gerrit
======
`Gerrit`_ is a code review system. The Gerrit driver supports
sources, triggers, and reporters.
.. _Gerrit: https://www.gerritcodereview.com/
Zuul will need access to a Gerrit user.
Give that user whatever permissions will be needed on the projects you
want Zuul to report on. For instance, you may want to grant
``Verified +/-1`` and ``Submit`` to the user. Additional categories
or values may be added to Gerrit. Zuul is very flexible and can take
advantage of those.
If ``change.submitWholeTopic`` is configured in Gerrit, Zuul will
honor this by enqueuing changes with the same topic as circular
dependencies. However, it is still necessary to enable circular
dependency support in any pipeline queues where such changes may
appear. See :attr:`queue.allow-circular-dependencies` for information
on how to configure this.
Zuul interacts with Gerrit in up to three ways:
* Receiving trigger events
* Fetching source code
* Reporting results
Trigger events arrive over an event stream, either SSH (via the
``gerrit stream-events`` command) or other protocols such as Kafka, or
AWS Kinesis.
Fetching source code may happen over SSH or HTTP.
Reporting may happen over SSH or HTTP (strongly preferred).
The appropriate connection methods must be configured to satisfy the
interactions Zuul will have with Gerrit. The recommended
configuration is to configure both SSH and HTTP access.
The section below describes common configuration settings. Specific
settings for different connection methods follow.
Connection Configuration
------------------------
The supported options in ``zuul.conf`` connections are:
.. attr:: <gerrit connection>
.. attr:: driver
:required:
.. value:: gerrit
The connection must set ``driver=gerrit`` for Gerrit connections.
.. attr:: server
:required:
Fully qualified domain name of Gerrit server.
.. attr:: canonical_hostname
The canonical hostname associated with the git repos on the
Gerrit server. Defaults to the value of
:attr:`<gerrit connection>.server`. This is used to identify
projects from this connection by name and in preparing repos on
the filesystem for use by jobs. Note that Zuul will still only
communicate with the Gerrit server identified by ``server``;
this option is useful if users customarily use a different
hostname to clone or pull git repos so that when Zuul places
them in the job's working directory, they appear under this
directory name.
.. attr:: baseurl
:default: https://{server}
Path to Gerrit web interface. Omit the trailing ``/``.
.. attr:: gitweb_url_template
:default: {baseurl}/gitweb?p={project.name}.git;a=commitdiff;h={sha}
Url template for links to specific git shas. By default this will
point at Gerrit's built in gitweb but you can customize this value
to point elsewhere (like cgit or github).
The three values available for string interpolation are baseurl
which points back to Gerrit, project and all of its safe attributes,
and sha which is the git sha1.
.. attr:: user
:default: zuul
User name to use when accessing Gerrit.
SSH Configuration
~~~~~~~~~~~~~~~~~
To prepare for SSH access, create an SSH keypair for Zuul to use if
there isn't one already, and create a Gerrit user with that key::
cat ~/id_rsa.pub | ssh -p29418 review.example.com gerrit create-account --ssh-key - --full-name Zuul zuul
.. note:: If you use an RSA key, ensure it is encoded in the PEM
format (use the ``-t rsa -m PEM`` arguments to
`ssh-keygen`).
If using Gerrit 2.7 or later, make sure the user is a member of a group
that is granted the ``Stream Events`` permission, otherwise it will not
be able to invoke the ``gerrit stream-events`` command over SSH.
.. attr:: <gerrit ssh connection>
.. attr:: ssh_server
If SSH access to the Gerrit server should be via a different
hostname than web access, set this value to the hostname to use
for SSH connections.
.. attr:: port
:default: 29418
Gerrit SSH server port.
.. attr:: sshkey
:default: ~zuul/.ssh/id_rsa
Path to SSH key to use when logging into Gerrit.
.. attr:: keepalive
:default: 60
SSH connection keepalive timeout; ``0`` disables.
.. attr:: git_over_ssh
:default: false
This forces git operation over SSH even if the ``password``
attribute is set. This allow REST API access to the Gerrit
server even when git-over-http operation is disabled on the
server.
HTTP Configuration
~~~~~~~~~~~~~~~~~~
.. attr:: <gerrit ssh connection>
.. attr:: password
The HTTP authentication password for the user. This is
optional, but if it is provided, Zuul will report to Gerrit via
HTTP rather than SSH. It is required in order for file and line
comments to reported (the Gerrit SSH API only supports review
messages). Retrieve this password from the ``HTTP Password``
section of the ``Settings`` page in Gerrit.
.. attr:: auth_type
:default: basic
The HTTP authentication mechanism.
.. value:: basic
HTTP Basic authentication; the default for most Gerrit
installations.
.. value:: digest
HTTP Digest authentication; only used in versions of Gerrit
prior to 2.15.
.. value:: form
Zuul will submit a username and password to a form in order
to authenticate.
.. value:: gcloud_service
Only valid when running in Google Cloud. This will use the
default service account to authenticate to Gerrit. Note that
this will only be used for interacting with the Gerrit API;
anonymous HTTP access will be used to access the git
repositories, therefore private repos or draft changes will
not be available.
.. attr:: verify_ssl
:default: true
When using a self-signed certificate, this may be set to
``false`` to disable SSL certificate verification.
Kafka Event Support
~~~~~~~~~~~~~~~~~~~
Zuul includes support for Gerrit's `events-kafka` plugin. This may be
used as an alternative to SSH for receiving trigger events.
Kafka does provide event delivery guarantees, so unlike SSH, if all
Zuul schedulers are unable to communicate with Gerrit or Kafka, they
will eventually receive queued events on reconnection.
All Zuul schedulers will attempt to connect to Kafka brokers. There
are some implications for event delivery:
* All events will be delivered to Zuul at least once. In the case of
a disrupted connection, Zuul may receive duplicate events.
* Events should generally arrive in order, however some events in
rapid succession may be received by Zuul out of order.
.. attr:: <gerrit kafka connection>
.. attr:: kafka_bootstrap_servers
:required:
A comma-separated list of Kafka servers (optionally including
port separated with `:`).
.. attr:: kafka_topic
:default: gerrit
The Kafka topic to which Zuul should subscribe.
.. attr:: kafka_client_id
:default: zuul
The Kafka client ID.
.. attr:: kafka_group_id
:default: zuul
The Kafka group ID.
.. attr:: kafka_tls_cert
Path to TLS certificate to use when connecting to a Kafka broker.
.. attr:: kafka_tls_key
Path to TLS certificate key to use when connecting to a Kafka broker.
.. attr:: kafka_tls_ca
Path to TLS CA certificate to use when connecting to a Kafka broker.
AWS Kinesis Event Support
~~~~~~~~~~~~~~~~~~~~~~~~~
Zuul includes support for Gerrit's `events-aws-kinesis` plugin. This
may be used as an alternative to SSH for receiving trigger events.
Kinesis does provide event delivery guarantees, so unlike SSH, if all
Zuul schedulers are unable to communicate with Gerrit or AWS, they
will eventually receive queued events on reconnection.
All Zuul schedulers will attempt to connect to AWS Kinesis, but only
one scheduler will process a given Kinesis shard at a time. There are
some implications for event delivery:
* All events will be delivered to Zuul at least once. In the case of
a disrupted connection, Zuul may receive duplicate events.
* If a connection is disrupted longer than the Kinesis retention
period for a shard, Zuul may skip to the latest event ignoring all
previous events.
* Because shard processing happens in parallel, events may not arrive
in order.
* If a long period with no events elapses and a connection is
disrupted, it may take Zuul some time to catch up to the latest
events.
.. attr:: <gerrit aws kinesis connection>
.. attr:: aws_kinesis_region
:required:
The AWS region name in which the Kinesis stream is located.
.. attr:: aws_kinesis_stream
:default: gerrit
The AWS Kinesis stream name.
.. attr:: aws_kinesis_access_key
The AWS access key to use.
.. attr:: aws_kinesis_secret_key
The AWS secret key to use.
Trigger Configuration
---------------------
.. attr:: pipeline.trigger.<gerrit source>
The dictionary passed to the Gerrit pipeline ``trigger`` attribute
supports the following attributes:
.. attr:: event
:required:
The event name from gerrit. Examples: ``patchset-created``,
``comment-added``, ``ref-updated``. This field is treated as a
regular expression.
.. attr:: branch
The branch associated with the event. Example: ``master``.
This field is treated as a regular expression, and multiple
branches may be listed.
.. attr:: ref
On ref-updated events, the branch parameter is not used, instead
the ref is provided. Currently Gerrit has the somewhat
idiosyncratic behavior of specifying bare refs for branch names
(e.g., ``master``), but full ref names for other kinds of refs
(e.g., ``refs/tags/foo``). Zuul matches this value exactly
against what Gerrit provides. This field is treated as a
regular expression, and multiple refs may be listed.
.. attr:: ignore-deletes
:default: true
When a branch is deleted, a ref-updated event is emitted with a
newrev of all zeros specified. The ``ignore-deletes`` field is a
boolean value that describes whether or not these newrevs
trigger ref-updated events.
.. attr:: approval
This is only used for ``comment-added`` events. It only matches
if the event has a matching approval associated with it.
Example: ``Code-Review: 2`` matches a ``+2`` vote on the code
review category. Multiple approvals may be listed.
.. attr:: email
This is used for any event. It takes a regex applied on the
performer email, i.e. Gerrit account email address. If you want
to specify several email filters, you must use a YAML list.
Make sure to use non-greedy matchers and to escape dots!
Example: ``email: ^.*?@example\.org$``.
.. attr:: username
This is used for any event. It takes a regex applied on the
performer username, i.e. Gerrit account name. If you want to
specify several username filters, you must use a YAML list.
Make sure to use non-greedy matchers and to escape dots.
Example: ``username: ^zuul$``.
.. attr:: comment
This is only used for ``comment-added`` events. It accepts a
list of regexes that are searched for in the comment string. If
any of these regexes matches a portion of the comment string the
trigger is matched. ``comment: retrigger`` will match when
comments containing ``retrigger`` somewhere in the comment text
are added to a change.
.. attr:: require-approval
.. warning:: This is deprecated and will be removed in a future
version. Use :attr:`pipeline.trigger.<gerrit
source>.require` instead.
This may be used for any event. It requires that a certain kind
of approval be present for the current patchset of the change
(the approval could be added by the event in question). It
follows the same syntax as :attr:`pipeline.require.<gerrit
source>.approval`. For each specified criteria there must exist
a matching approval.
This is ignored if the :attr:`pipeline.trigger.<gerrit
source>.require` attribute is present.
.. attr:: reject-approval
.. warning:: This is deprecated and will be removed in a future
version. Use :attr:`pipeline.trigger.<gerrit
source>.reject` instead.
This takes a list of approvals in the same format as
:attr:`pipeline.trigger.<gerrit source>.require-approval` but
the item will fail to enter the pipeline if there is a matching
approval.
This is ignored if the :attr:`pipeline.trigger.<gerrit
source>.reject` attribute is present.
.. attr:: require
This may be used for any event. It describes conditions that
must be met by the change in order for the trigger event to
match. Those conditions may be satisfied by the event in
question. It follows the same syntax as
:ref:`gerrit_requirements`.
.. attr:: reject
This may be used for any event and is the mirror of
:attr:`pipeline.trigger.<gerrit source>.require`. It describes
conditions that when met by the change cause the trigger event
not to match. Those conditions may be satisfied by the event in
question. It follows the same syntax as
:ref:`gerrit_requirements`.
Reporter Configuration
----------------------
.. attr:: pipeline.reporter.<gerrit reporter>
The dictionary passed to the Gerrit reporter is used to provide label
values to Gerrit. To set the `Verified` label to `1`, add ``verified:
1`` to the dictionary.
The following additional keys are recognized:
.. attr:: submit
:default: False
Set this to ``True`` to submit (merge) the change.
.. attr:: comment
:default: True
If this is true (the default), Zuul will leave review messages
on the change (including job results). Set this to false to
disable this behavior (file and line commands will still be sent
if present).
A :ref:`connection<connections>` that uses the gerrit driver must be
supplied to the trigger.
.. _gerrit_requirements:
Requirements Configuration
--------------------------
As described in :attr:`pipeline.require` and :attr:`pipeline.reject`,
pipelines may specify that items meet certain conditions in order to
be enqueued into the pipeline. These conditions vary according to the
source of the project in question. To supply requirements for changes
from a Gerrit source named ``my-gerrit``, create a configuration such
as the following:
.. code-block:: yaml
pipeline:
require:
my-gerrit:
approval:
- Code-Review: 2
This indicates that changes originating from the Gerrit connection
named ``my-gerrit`` must have a ``Code-Review`` vote of ``+2`` in
order to be enqueued into the pipeline.
.. attr:: pipeline.require.<gerrit source>
The dictionary passed to the Gerrit pipeline `require` attribute
supports the following attributes:
.. attr:: approval
This requires that a certain kind of approval be present for the
current patchset of the change (the approval could be added by
the event in question). Approval is a dictionary or a list of
dictionaries with attributes listed below, all of which are
optional and are combined together so that there must be an approval
matching all specified requirements.
.. attr:: username
If present, an approval from this username is required. It is
treated as a regular expression.
.. attr:: email
If present, an approval with this email address is required. It is
treated as a regular expression.
.. attr:: older-than
If present, the approval must be older than this amount of time
to match. Provide a time interval as a number with a suffix of
"w" (weeks), "d" (days), "h" (hours), "m" (minutes), "s"
(seconds). Example ``48h`` or ``2d``.
.. attr:: newer-than
If present, the approval must be newer than this amount
of time to match. Same format as "older-than".
Any other field is interpreted as a review category and value
pair. For example ``Verified: 1`` would require that the
approval be for a +1 vote in the "Verified" column. The value
may either be a single value or a list: ``Verified: [1, 2]``
would match either a +1 or +2 vote.
.. attr:: open
A boolean value (``true`` or ``false``) that indicates whether
the change must be open or closed in order to be enqueued.
.. attr:: current-patchset
A boolean value (``true`` or ``false``) that indicates whether the
change must be the current patchset in order to be enqueued.
.. attr:: wip
A boolean value (``true`` or ``false``) that indicates whether the
change must be wip or not wip in order to be enqueued.
.. attr:: status
A string value that corresponds with the status of the change
reported by Gerrit.
.. attr:: pipeline.reject.<gerrit source>
The `reject` attribute is the mirror of the `require` attribute. It
also accepts a dictionary under the connection name. This
dictionary supports the following attributes:
.. attr:: approval
This requires that a certain kind of approval not be present for the
current patchset of the change (the approval could be added by
the event in question). Approval is a dictionary or a list of
dictionaries with attributes listed below, all of which are
optional and are combined together so that there must be no approvals
matching all specified requirements.
Example to reject a change with any negative vote:
.. code-block:: yaml
reject:
my-gerrit:
approval:
- Code-Review: [-1, -2]
.. attr:: username
If present, only approvals from this username are matched. It is
treated as a regular expression.
.. attr:: email
If present, only approvals with this email address are matched. It is
treated as a regular expression.
.. attr:: older-than
If present, the approval must be older than this amount of time
to match. Provide a time interval as a number with a suffix of
"w" (weeks), "d" (days), "h" (hours), "m" (minutes), "s"
(seconds). Example ``48h`` or ``2d``.
.. attr:: newer-than
If present, the approval must be newer than this amount
of time to match. Same format as "older-than".
Any other field is interpreted as a review category and value
pair. For example ``Verified: 1`` would require that the
approval be for a +1 vote in the "Verified" column. The value
may either be a single value or a list: ``Verified: [1, 2]``
would match either a +1 or +2 vote.
.. attr:: open
A boolean value (``true`` or ``false``) that indicates whether
the change must be open or closed in order to be rejected.
.. attr:: current-patchset
A boolean value (``true`` or ``false``) that indicates whether the
change must be the current patchset in order to be rejected.
.. attr:: wip
A boolean value (``true`` or ``false``) that indicates whether the
change must be wip or not wip in order to be rejected.
.. attr:: status
A string value that corresponds with the status of the change
reported by Gerrit.
Reference Pipelines Configuration
---------------------------------
Here is an example of standard pipelines you may want to define:
.. literalinclude:: /examples/pipelines/gerrit-reference-pipelines.yaml
:language: yaml
Checks Plugin Support (Deprecated)
------------------------------------
The Gerrit driver has support for Gerrit's `checks` plugin. Due to
the deprecation of the checks plugin in Gerrit, support in Zuul is
also deprecated and likely to be removed in a future version. It is
not recommended for use.
Caveats include (but are not limited to):
* This documentation is brief.
* Access control for the `checks` API in Gerrit depends on a single
global administrative permission, ``administrateCheckers``. This is
required in order to use the `checks` API and can not be restricted
by project. This means that any system using the `checks` API can
interfere with any other.
* Checkers are restricted to a single project. This means that a
system with many projects will require many checkers to be defined
in Gerrit -- one for each project+pipeline.
* No support is provided for attaching checks to tags or commits,
meaning that tag, release, and post pipelines are unable to be used
with the `checks` API and must rely on `stream-events`.
* Sub-checks are not implemented yet, so in order to see the results
of individual jobs on a change, users must either follow the
buildset link, or the pipeline must be configured to leave a
traditional comment.
* Familiarity with the `checks` API is recommended.
* Checkers may not be permanently deleted from Gerrit (only
"soft-deleted" so they no longer apply), so any experiments you
perform on a production system will leave data there forever.
In order to use the `checks` API, you must have HTTP access configured
in `zuul.conf`.
There are two ways to configure a pipeline for the `checks` API:
directly referencing the checker UUID, or referencing its scheme. It
is hoped that once multi-repository checks are supported, an
administrator will be able to configure a single checker in Gerrit for
each Zuul pipeline, and those checkers can apply to all repositories.
If and when that happens, we will be able to reference the checker
UUID directly in Zuul's pipeline configuration. If you only have a
single project, you may find this approach acceptable now.
To use this approach, create a checker named ``zuul:check`` and
configure a pipeline like this:
.. code-block:: yaml
- pipeline:
name: check
manager: independent
trigger:
gerrit:
- event: pending-check
uuid: 'zuul:check'
enqueue:
gerrit:
checks-api:
uuid: 'zuul:check'
state: SCHEDULED
message: 'Change has been enqueued in check'
start:
gerrit:
checks-api:
uuid: 'zuul:check'
state: RUNNING
message: 'Jobs have started running'
no-jobs:
gerrit:
checks-api:
uuid: 'zuul:check'
state: NOT_RELEVANT
message: 'Change has no jobs configured'
success:
gerrit:
checks-api:
uuid: 'zuul:check'
state: SUCCESSFUL
message: 'Change passed all voting jobs'
failure:
gerrit:
checks-api:
uuid: 'zuul:check'
state: FAILED
message: 'Change failed'
For a system with multiple repositories and one or more checkers for
each repository, the `scheme` approach is recommended. To use this,
create a checker for each pipeline in each repository. Give them
names such as ``zuul_check:project1``, ``zuul_gate:project1``,
``zuul_check:project2``, etc. The part before the ``:`` is the
`scheme`. Then create a pipeline like this:
.. code-block:: yaml
- pipeline:
name: check
manager: independent
trigger:
gerrit:
- event: pending-check
scheme: 'zuul_check'
enqueue:
gerrit:
checks-api:
scheme: 'zuul_check'
state: SCHEDULED
message: 'Change has been enqueued in check'
start:
gerrit:
checks-api:
scheme: 'zuul_check'
state: RUNNING
message: 'Jobs have started running'
no-jobs:
gerrit:
checks-api:
scheme: 'zuul_check'
state: NOT_RELEVANT
message: 'Change has no jobs configured'
success:
gerrit:
checks-api:
scheme: 'zuul_check'
state: SUCCESSFUL
message: 'Change passed all voting jobs'
failure:
gerrit:
checks-api:
scheme: 'zuul_check'
state: FAILED
message: 'Change failed'
This will match and report to the appropriate checker for a given
repository based on the scheme you provided.
.. The original design doc may be of use during development:
https://gerrit-review.googlesource.com/c/gerrit/+/214733
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/drivers/gerrit.rst | gerrit.rst |
Optional: Register with an Identity Provider
============================================
By default, there is no public link between your Matrix account and
any identifying information such as your email address. However, you
may wish people to be able to find your Matrix ID by looking up your
email address or phone number. We also have plans to add additional
functionality to our bots if they are able to look up contributors by
email addresses. If you wish to make your account discoverable in
this way, you may perform the following steps to list your account in
one of the public third-party identifier services. Note that these
services are designed to only return results for someone who already
knows your email address or phone number; they take care to ensure
that it is not possible (or nearly so) to "scrape" their data sets to
obtain lists of users.
To get started, open the User Menu and click `All settings`. Under
the `General` section, find `Email addresses`. If you followed the
instructions above, you should already have an email address listed
here. If you don't, enter your address, click `Add`, and follow the
instructions to verify your address. The dialog should look like this
when complete:
.. image:: /images/matrix/id-email-complete.png
:align: center
To make your account discoverable by email, scroll down to the
`Discovery` section.
.. image:: /images/matrix/id-disc.png
:align: center
Read the privacy notice and click the checkbox
next to `Accept`. That will enable the `Continue` button; click that
to proceed.
.. image:: /images/matrix/id-disc-accept.png
:align: center
The `Discovery` section will be replaced with the email address you
registered above.
.. image:: /images/matrix/id-disc-accept.png
:align: center
Click the `Share` button next to the address. The system will send an
email to you, and meanwhile the dialog will show this:
.. image:: /images/matrix/id-disc-verify-wait.png
:align: center
You will receive an email like this:
.. image:: /images/matrix/id-disc-verify-email.png
:align: center
Follow the link in the email to verify it really is you making the
request.
.. image:: /images/matrix/id-disc-verify-success.png
:align: center
Then return to the settings page and click the `Complete` button.
.. image:: /images/matrix/id-disc-verify-wait.png
:align: center
Once everything is finished, the complete button should change to read
`Revoke`.
.. image:: /images/matrix/id-disc-verify-complete.png
:align: center
If you see that, you're all done. If you change your mind and don't
want your account to be discoverable via email, you can click the
`Revoke` button at any time.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/matrix-id.rst | matrix-id.rst |
Configuring Microsoft Authentication
====================================
This document explains how to configure Zuul in order to enable
authentication with Microsoft Login.
Prerequisites
-------------
* The Zuul instance must be able to query Microsoft's OAUTH API servers. This
generally means that the Zuul instance must be able to send and
receive HTTPS data to and from the Internet.
* You must have an Active Directory instance in Azure and the ability
to create an App Registration.
By convention, we will assume Zuul's Web UI's base URL is
``https://zuul.example.com/``.
Creating the App Registration
-----------------------------
Navigate to the Active Directory instance in Azure and select `App
registrations` under ``Manage``. Select ``New registration``. This
will open a dialog to register an application.
Enter a name of your choosing (e.g., ``Zuul``), and select which
account types should have access. Under ``Redirect URI`` select
``Single-page application(SPA)`` and enter
``https://zuul.example.com/auth_callback`` as the redirect URI. Press
the ``Register`` button.
You should now be at the overview of the Zuul App registration. This
page displays several values which will be used later. Record the
``Application (client) ID`` and ``Directory (tenant) ID``. When we need
to construct values including these later, we will refer to them with
all caps (e.g., ``CLIENT_ID`` and ``TENANT_ID`` respectively).
Select ``Authentication`` under ``Manage``. You should see a
``Single-page application`` section with the redirect URI previously
configured during registration; if not, correct that now.
Under ``Implicit grant and hybrid flows`` select both ``Access
tokens`` and ``ID tokens``, then Save.
Back at the Zuul App Registration menu, select ``Expose an API``, then
press ``Set`` and then press ``Save`` to accept the default
Application ID URI (it should look like ``api://CLIENT_ID``).
Press ``Add a scope`` and enter ``zuul`` as the scope name. Enter
``Access zuul`` for both the ``Admin consent display name`` and
``Admin consent description``. Leave ``Who can consent`` set to
``Admins only``, then press ``Add scope``.
Optional: Include Groups Claim
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In order to include group information in the token sent to Zuul,
select ``Token configuration`` under ``Manage`` and then ``Add groups
claim``.
Setting up Zuul
---------------
Edit the ``/etc/zuul/zuul.conf`` to add the microsoft authenticator:
.. code-block:: ini
[auth microsoft]
default=true
driver=OpenIDConnect
realm=zuul.example.com
authority=https://login.microsoftonline.com/TENANT_ID/v2.0
issuer_id=https://sts.windows.net/TENANT_ID/
client_id=CLIENT_ID
scope=openid profile api://CLIENT_ID/zuul
audience=api://CLIENT_ID
load_user_info=false
Restart Zuul services (scheduler, web).
Head to your tenant's status page. If all went well, you should see a
`Sign in` button in the upper right corner of the
page. Congratulations!
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/openid-with-microsoft.rst | openid-with-microsoft.rst |
Chatting with Matrix
====================
The Zuul community uses mailing lists for long-form communication and
Matrix for real-time (or near real-time) chat.
This guide will walk you through getting started with Matrix and how
to use it to join communities like Zuul on IRC.
Familiar with Matrix already and want to jump straight to the room?
Follow this link: `https://matrix.to/#/#zuul:opendev.org <https://matrix.to/#/#zuul:opendev.org>`_
Why Use Matrix?
---------------
Matrix has a number of clients available including feature-rich web,
desktop and mobile clients, as well as integration with the popular
text-based weechat client. This provides plenty of choices based on
your own preference. This guide will focus on using the Element web
client.
Matrix supports persistent presence in "rooms". Once you join a room,
your homeserver will keep you connected to that room and save all of
the messages sent to it, so that if you close your client and return
later, you won't miss anything. You don't need to run your own server
to use Matrix; you are welcome to use the public server at matrix.org.
But if you are a member of an organization that already runs a
homeserver, or you would like to run one yourself, you may do so and
federate with the larger Matrix network. This guide will walk you
through setting up an account on the matrix.org homeserver.
Matrix is an open (in every sense of the word) federated communication
system. Because of this it's possible to bridge the Matrix network to
other networks (including IRC, slack, etc). That makes it the perfect
system to use to communicate with various communities using a single
interface.
Create An Account
-----------------
If you don't already have an account on a Matrix homeserver, go to
https://app.element.io/ to create one, then click `Create Account`.
.. image:: /images/matrix/account-welcome.png
:align: center
You can create an account with an email address or one of the
supported authentication providers.
.. image:: /images/matrix/account-create.png
:align: center
You'll be asked to accept the terms and conditions of the service.
.. image:: /images/matrix/account-accept.png
:align: center
If you are registering an account via email, you will be prompted to
verify your email address.
.. image:: /images/matrix/account-verify.png
:align: center
You will receive an email like this:
.. image:: /images/matrix/account-verify-email.png
:align: center
Once you click the link in the email, your account will be created.
.. image:: /images/matrix/account-success.png
:align: center
You can follow the link to sign in.
.. image:: /images/matrix/account-signin.png
:align: center
Join the #zuul Room
-------------------
Click the plus icon next to `Rooms` on the left of the screen, then
click `Explore public rooms` in the dropdown that appears.
.. image:: /images/matrix/account-rooms-dropdown.png
:align: center
A popup dialog will appear; enter ``#zuul:opendev.org`` into the
search box.
.. image:: /images/matrix/account-rooms-zuul.png
:align: center
It will display `No results for "#zuul:opendev.org"` since the room is
hosted on a federated homeserver, but it's really there. Disregard
that and hit `enter` or click `Join`, and you will join the room.
Go ahead and say hi, introduce yourself, and let us know what you're
working on or any questions you have. Keep in mind that the Zuul
community is world-wide and we may be away from our desks when you
join. Because Matrix keeps a message history, we'll see your message
and you'll see any responses, even if you close your browser and log
in later.
Optional Next Steps
-------------------
The following steps are optional. You don't need to do these just to
hop in with a quick question, but if you plan on spending more than a
brief amount of time interacting with communities in Matrix, they will
improve your experience.
.. toctree::
:maxdepth: 1
matrix-encryption
matrix-id
matrix-irc
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/matrix.rst | matrix.rst |
Optional: Save Encryption Keys
==============================
The Matrix protocol supports end-to-end encryption. We don't have
this enabled for the ``#zuul`` room (there's little point as it's a
public room), but if you start direct chats with other Matrix users,
your communication will be encrypted by default. Since it's
*end-to-end* encryption, that means your encryption keys are stored on
your client, and the server has no way to decrypt those messages. But
that also means that if you sign out of your client or switch
browsers, you will lose your encryption keys along with access to any
old messages that were encrypted with them. To avoid this, you can
back up your keys to the server (in an encrypted form, of course) so
that if you log in from another session, you can restore those keys
and pick up where you left off. To set this up, open the User Menu by
clicking on your name at the top left of the screen.
.. image:: /images/matrix/user-menu.png
:align: center
Click the `Security & privacy` menu item in the dropdown.
.. image:: /images/matrix/user-menu-dropdown.png
:align: center
Click the `Set up` button under `Encryption` / `Secure Backup` in the
dialog that pops up.
.. image:: /images/matrix/user-encryption.png
:align: center
Follow the prompts to back up your encryption keys.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/matrix-encryption.rst | matrix-encryption.rst |
:title: Project Testing Interface
.. _pti:
Project Testing Interface
=========================
The following sections describe an example PTI (Project Testing Interface)
implementation. The goal is to setup a consistent interface for driving tests
and other necessary tasks to successfully configure :ref:`project_gating` within
your organization.
Projects layout
---------------
A proper PTI needs at least two projects:
* org-config: a :term:`config-project` curated by administrators, and
* org-jobs: a :term:`untrusted-project` to hold common jobs.
The projects that are being tested or deployed are also
:term:`untrusted-project`, and for the purpose of this example we will use a
couple of integrated projects named:
* org-server
* org-client
org-config
~~~~~~~~~~
The config project needs careful scrutiny as it defines privileged Zuul
configurations that are shared by all your projects:
:ref:`pipeline` triggers and requirements let you define when a change is
tested and what the conditions for merging code are. Approval from core
members or special labels to indicate a change is good to go are pipelines
configuration.
The base job lets you define how the test environment is validated before
the actual job is executed. The base job also defines how and where the job
artifacts are stored.
More importantly, a config-project may enforce a set of integration jobs to
be executed on behalf of the other projects. A regular (untrusted-project) can
only manage its own configuration, and as part of a PTI implementation, you
want to ensure your projects' changes undergo validation that is defined
globally by your organization.
Because those configuration settings are privileged,
config-project changes are only effective when merged.
org-jobs
~~~~~~~~
Job definition content is not a privileged Zuul setting, and jobs can be
defined in a regular :term:`untrusted-project`.
As a matter of fact, it is recommended to define jobs outside of the config
project so that job updates can be tested before being merged.
In this example, we are using a dedicated org-jobs project.
Projects content
----------------
In this example PTI, the organization requirements are a consistent code style
and an integration test to validate org-client and org-server works according
to a reference implementation.
In the org-jobs project, we define a couple of jobs:
.. code-block:: yaml
# org-jobs/zuul.yaml
- job:
name: org-codestyle
parent: run-test-command
vars:
test_command: $code-style-tool $org-check-argument
# e.g.: linters --max-column 120
- job:
name: org-integration-test
run: integration-tests.yaml
required-projects:
- org-server
- org-client
The integration-tests.yaml playbook needs to implement an integration test
that checks both the server and client code.
In the org-config project, we define a project template:
.. code-block:: yaml
# org-config/zuul.d/pti.yaml
- project-template:
name: org-pti
queue: integrated
check:
jobs:
- org-codestyle
- org-integration-test
gate:
jobs:
- org-codestyle
- org-integration-test
Finally, in the org-config project, we set up the PTI template on both projects:
.. code-block:: yaml
# org-config/zuul.d/projects.yaml
- project:
name: org-server
templates:
- org-pti
- project:
name: org-client
templates:
- org-pti
Usage
-----
With the above layout, the organization projects use a consistent testing
interface.
The org-client or org-server does not need extra settings; all new
contributions must pass the codestyle and integration-test as defined by
the organization admin.
Project tests
~~~~~~~~~~~~~
Projects may add extra jobs on top of the PTI.
For example, the org-client project can add a user interface test:
.. code-block:: yaml
# org-client/.zuul.yaml
- job:
name: org-client-ui-validation
- project:
check:
jobs:
- org-client-ui-validation
gate:
jobs:
- org-client-ui-validation
In this example, a new org-client change will run the PTI's jobs as well as the
org-client-ui-validation job.
Updating PTI test
~~~~~~~~~~~~~~~~~
Once the PTI is in place, if a project needs adjustment,
it can proceed as follows:
First a change on org-jobs is proposed to modify a job. For example, update a
codestyle check using such commit:
.. code-block:: text
# org-jobs/change-url
Update codestyle to enforce CamelCase.
Then, without merging this proposal, it can be tested across the projects using
such commit:
.. code-block:: text
# org-client/change-url
Validate new codestyle.
Depends-On: org-jobs/change-url
Lastly the org-jobs may be enriched with:
.. code-block:: text
# org-jobs/change-url
Update codestyle to enforce CamelCase.
Needed-By: org-client/change-url
.. note:: Extra care is required when updating PTI jobs as they affect all
   the projects. Ideally, the org-jobs project would use an org-jobs-check
   to run PTI job changes on every project.
Cross project gating
--------------------
The org-pti template uses the "integrated" queue to ensure project changes
are gated by the Zuul scheduler. However, the jobs need extra care to properly
test projects as they are prepared by Zuul. For example, the
org-integration-test playbook needs to ensure the client and server are installed
from the zuul src_root.
This is called sibling installation, and it is a critical piece to ensure cross
project gating.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/pti.rst | pti.rst |
.. _howto-zookeeper:
ZooKeeper Administration
========================
This section will cover some basic tasks and recommendations when
setting up ZooKeeper for use with Zuul. A complete tutorial for
ZooKeeper is out of scope for this documentation.
Configuration
-------------
The following general configuration setting in
``/etc/zookeeper/zoo.cfg`` is recommended:
.. code-block::
autopurge.purgeInterval=6
This instructs ZooKeeper to purge old snapshots every 6 hours. This
will avoid filling the disk.
.. _zk-encrypted-connections:
Encrypted Connections
---------------------
Zuul requires its connections to ZooKeeper are TLS encrypted.
ZooKeeper version 3.5.1 or greater is required for TLS support.
ZooKeeper performs hostname validation for all ZooKeeper servers
("quorum members"), therefore each member of the ZooKeeper cluster
should have its own certificate. This does not apply to clients which
may share a certificate.
ZooKeeper performs certificate validation on all connections (server
and client). If you use a private Certificate Authority (CA) (which
is generally recommended and discussed below), then these TLS
certificates not only serve to encrypt traffic, but also to
authenticate and authorize clients to the cluster. Only clients with
certificates authorized by a CA explicitly trusted by your ZooKeeper
installation will be able to connect.
.. note:: The instructions below direct you to sign certificates with
a CA that you create specifically for Zuul's ZooKeeper
cluster. If you use a CA you share with other users in your
organization, any certificate signed by that CA will be able
to connect to your ZooKeeper cluster. In this case, you may
need to take additional steps such as network isolation to
protect your ZooKeeper cluster. These are beyond the scope
of this document.
The ``tools/zk-ca.sh`` script in the Zuul source code repository can
be used to quickly and easily generate self-signed certificates for
all ZooKeeper cluster members and clients.
Make a directory for it to store the certificates and CA data, and run
it once for each client:
.. code-block::
mkdir /etc/zookeeper/ca
tools/zk-ca.sh /etc/zookeeper/ca zookeeper1.example.com
tools/zk-ca.sh /etc/zookeeper/ca zookeeper2.example.com
tools/zk-ca.sh /etc/zookeeper/ca zookeeper3.example.com
Add the following to ``/etc/zookeeper/zoo.cfg``:
.. code-block::
# Necessary for TLS support
serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
# Client TLS configuration
secureClientPort=2281
ssl.keyStore.location=/etc/zookeeper/ca/keystores/zookeeper1.example.com.pem
ssl.trustStore.location=/etc/zookeeper/ca/certs/cacert.pem
# Server TLS configuration
sslQuorum=true
ssl.quorum.keyStore.location=/etc/zookeeper/ca/keystores/zookeeper1.example.com.pem
ssl.quorum.trustStore.location=/etc/zookeeper/ca/certs/cacert.pem
Change the name of the certificate filenames as appropriate for the
host (e.g., ``zookeeper1.example.com.pem``).
In order to disable plaintext connections, ensure that the
``clientPort`` option does not appear in ``zoo.cfg``. Use the new
method of specifying ZooKeeper quorum servers, which looks like this:
.. code-block::
server.1=zookeeper1.example.com:2888:3888
server.2=zookeeper2.example.com:2888:3888
server.3=zookeeper3.example.com:2888:3888
This format normally includes ``;2181`` at the end of each line,
signifying that the server should listen on port 2181 for plaintext
client connections (this is equivalent to the ``clientPort`` option).
Omit it to disable plaintext connections. The earlier addition of
``secureClientPort`` to the config file instructs ZooKeeper to listen
for encrypted connections on port 2281.
Be sure to specify port 2281 rather than the standard 2181 in the
:attr:`zookeeper.hosts` setting in ``zuul.conf``.
Finally, add the :attr:`zookeeper.tls_cert`,
:attr:`zookeeper.tls_key`, and :attr:`zookeeper.tls_ca` options. Your
``zuul.conf`` file should look like:
.. code-block::
[zookeeper]
hosts=zookeeper1.example.com:2281,zookeeper2.example.com:2281,zookeeper3.example.com:2281
tls_cert=/etc/zookeeper/ca/certs/client.pem
tls_key=/etc/zookeeper/ca/keys/clientkey.pem
tls_ca=/etc/zookeeper/ca/certs/cacert.pem
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/zookeeper.rst | zookeeper.rst |
Configuring Google Authentication
=================================
This document explains how to configure Zuul in order to enable authentication
with Google.
Prerequisites
-------------
* The Zuul instance must be able to query Google's OAUTH API servers. This
generally means that the Zuul instance must be able to send and
receive HTTPS data to and from the Internet.
* You must set up a project in `Google's developers console <https://console.developers.google.com/>`_.
Setting up credentials with Google
----------------------------------
In the developers console, choose your project and click `APIs & Services`.
Choose `Credentials` in the menu on the left, then click `Create Credentials`.
Choose `Create OAuth client ID`. You might need to configure a consent screen first.
Create OAuth client ID
......................
Choose `Web application` as Application Type.
In `Authorized JavaScript Origins`, add the base URL of Zuul's Web UI. For example,
if you are running a yarn development server on your computer, it would be
`http://localhost:3000` .
In `Authorized redirect URIs`, write down the base URL of Zuul's Web UI followed
by "/t/<tenant>/auth_callback", for each tenant on which you want to enable
authentication. For example, if you are running a yarn development server on
your computer and want to set up authentication for tenant "local",
write `http://localhost:3000/t/local/auth_callback` .
Click Save. Google will generate a Client ID and a Client secret for your new
credentials; we will only need the Client ID for the rest of this How-To.
Configure Zuul
..............
Edit the ``/etc/zuul/zuul.conf`` to add the google authenticator:
.. code-block:: ini
[auth google_auth]
default=true
driver=OpenIDConnect
realm=my_realm
issuer_id=https://accounts.google.com
client_id=<your Google Client ID>
Restart Zuul services (scheduler, web).
Head to your tenant's status page. If all went well, you should see a "Sign in"
button in the upper right corner of the page. Congratulations!
Further Reading
---------------
This How-To is based on `Google's documentation on their implementation of OpenID Connect <https://developers.google.com/identity/protocols/oauth2/openid-connect>`_.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/openid-with-google.rst | openid-with-google.rst |
Configuring Keycloak Authentication
===================================
This document explains how to configure Zuul and Keycloak in order to enable
authentication in Zuul with Keycloak.
Prerequisites
-------------
* The Zuul instance must be able to query Keycloak over HTTPS.
* Authenticating users must be able to reach Keycloak's web UI.
* Have a realm set up in Keycloak.
`Instructions on how to do so can be found here <https://www.keycloak.org/docs/latest/server_admin/#configuring-realms>`_ .
By convention, we will assume the Keycloak server's FQDN is ``keycloak``, and
Zuul's Web UI's base URL is ``https://zuul/``. We will use the realm ``my_realm``.
Most operations below regarding the configuration of Keycloak can be performed through
Keycloak's admin CLI. The following steps must be performed as an admin on Keycloak's
GUI.
Setting up Keycloak
-------------------
Create a client
...............
Choose the realm ``my_realm``, then click ``clients`` in the Configure panel.
Click ``Create``.
Name your client as you please. We will pick ``zuul`` for this example. Make sure
to fill the following fields:
* Client Protocol: ``openid-connect``
* Access Type: ``public``
* Implicit Flow Enabled: ``ON``
* Valid Redirect URIs: ``https://zuul/*``
* Web Origins: ``https://zuul/``
Click "Save" when done.
Create a client scope
......................
Keycloak maps the client ID to a specific claim, instead of the usual `aud` claim.
We need to configure Keycloak to add our client ID to the `aud` claim by creating
a custom client scope for our client.
Choose the realm ``my_realm``, then click ``client scopes`` in the Configure panel.
Click ``Create``.
Name your scope as you please. We will name it ``zuul_aud`` for this example.
Make sure you fill the following fields:
* Protocol: ``openid-connect``
* Include in Token Scope: ``ON``
Click "Save" when done.
On the Client Scopes page, click on ``zuul_aud`` to configure it; click on
``Mappers`` then ``create``.
Make sure to fill the following:
* Mapper Type: ``Audience``
* Included Client Audience: ``zuul``
* Add to ID token: ``ON``
* Add to access token: ``ON``
Then save.
Finally, go back to the clients list and pick the ``zuul`` client again. Click
on ``Client Scopes``, and add the ``zuul_aud`` scope to the ``Assigned Default
Client Scopes``.
Configuring JWT signing algorithms
..................................
.. note::
Skip this step if you are using a keycloak version prior to 18.0.
Due to current limitations with the pyJWT library, Zuul does not support every default
signing algorithm used by Keycloak.
Go to `my_realm->Settings->Keys`, then choose `rsa-enc-generated` (this should be mapped
to "RSA-OAEP") if available. Then set `enabled` to false and save your changes.
(Optional) Set up a social identity provider
............................................
Keycloak can delegate authentication to predefined social networks. Follow
`these steps to find out how. <https://www.keycloak.org/docs/latest/server_admin/index.html#social-identity-providers>`_
If you don't set up authentication delegation, make sure to create at least one
user in your realm, or allow self-registration. See Keycloak's documentation section
on `user management <https://www.keycloak.org/docs/latest/server_admin/index.html#assembly-managing-users_server_administration_guide>`_
for more details on how to do so.
Setting up Zuul
---------------
Edit the ``/etc/zuul/zuul.conf`` to add the keycloak authenticator:
.. code-block:: ini
[auth keycloak]
default=true
driver=OpenIDConnect
realm=my_realm
issuer_id=https://keycloak/auth/realms/my_realm
client_id=zuul
Restart Zuul services (scheduler, web).
Head to your tenant's status page. If all went well, you should see a "Sign in"
button in the upper right corner of the page. Congratulations!
Further Reading
---------------
This How-To is based on `Keycloak's documentation <https://www.keycloak.org/documentation.html>`_,
specifically `the documentation about clients <https://www.keycloak.org/docs/latest/server_admin/#assembly-managing-clients_server_administration_guide>`_.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/openid-with-keycloak.rst | openid-with-keycloak.rst |
:title: Badges
.. We don't need no stinking badges
.. _badges:
Badges
======
You can embed a badge declaring that your project is gated and therefore by
definition always has a working build. Since there is only one status to
report, it is a simple static file:
.. image:: https://zuul-ci.org/gated.svg
:alt: Zuul: Gated
To use it, simply put ``https://zuul-ci.org/gated.svg`` into an RST or
markdown formatted README file, or use it in an ``<img>`` tag in HTML.
For advanced usage Zuul also supports generating dynamic badges via the
REST api. This can be useful if you want to display the status of e.g. periodic
pipelines of a project. To use it, use a URL like
``https://zuul.opendev.org/api/tenant/zuul/badge?project=zuul/zuul-website&pipeline=post``
instead of the above-mentioned URL. It supports filtering by ``project``,
``pipeline`` and ``branch``.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/badges.rst | badges.rst |
Optional: Join an IRC Room
==========================
The Matrix community maintains bridges to most major IRC networks.
You can use the same Matrix account and client to join IRC channels as
well as Zuul's Matrix Room. You will benefit from the persistent
connection and history features as well. Follow the instructions
below to join an IRC channel. The example below is for the
``#opendev`` channel on OFTC, but the process is similar for other
channels or networks.
Click the plus icon next to `Rooms` on the left of the screen, then
click `Explore public rooms` in the dropdown that appears.
.. image:: /images/matrix/account-rooms-dropdown.png
:align: center
A popup dialog will appear; below the search bar in the dialog, click
the dropdown selector labeled `Matrix rooms (matrix.org)` and change
it to `OFTC rooms (matrix.org)`. Then enter ``#opendev`` into the search
box.
.. image:: /images/matrix/account-rooms-opendev.png
:align: center
It will display `No results for "#opendev"` which is an unfortunate
consequence of one of the anti-spam measures that is necessary on IRC.
Disregard that and hit `enter` or click `Join`, and you will join the
room.
If this is your first time joining an OFTC channel, you will also
receive an invitation to join the `OFTC IRC Bridge status` room.
.. image:: /images/matrix/account-rooms-invite.png
:align: center
Accept the invitation.
.. image:: /images/matrix/account-rooms-accept.png
:align: center
This is a private control channel between you and the system that
operates the OFTC bridge. Here you can perform some IRC commands such
as changing your nickname and setting up nick registration. That is
out of scope for this HOWTO, but advanced IRC users may be interested
in doing so.
You may repeat this procedure for any other IRC channels on the OFTC,
Freenode, or libera.chat networks.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/howtos/matrix-irc.rst | matrix-irc.rst |
Data Model
==========
It all starts with the :py:class:`~zuul.model.Pipeline`. A Pipeline is the
basic organizational structure that everything else hangs off.
.. autoclass:: zuul.model.Pipeline
Pipelines have a configured
:py:class:`~zuul.manager.PipelineManager` which controls how
the :py:class:`Ref <zuul.model.Ref>` objects are enqueued and
processed.
There are currently two,
:py:class:`~zuul.manager.dependent.DependentPipelineManager` and
:py:class:`~zuul.manager.independent.IndependentPipelineManager`
.. autoclass:: zuul.manager.PipelineManager
.. autoclass:: zuul.manager.dependent.DependentPipelineManager
.. autoclass:: zuul.manager.independent.IndependentPipelineManager
A :py:class:`~zuul.model.Pipeline` has one or more
:py:class:`~zuul.model.ChangeQueue` objects.
.. autoclass:: zuul.model.ChangeQueue
A :py:class:`~zuul.model.Job` represents the definition of what to do. A
:py:class:`~zuul.model.Build` represents a single run of a
:py:class:`~zuul.model.Job`. A :py:class:`~zuul.model.JobGraph` is used to
encapsulate the dependencies between one or more :py:class:`~zuul.model.Job`
objects.
.. autoclass:: zuul.model.Job
.. autoclass:: zuul.model.JobGraph
.. autoclass:: zuul.model.Build
The :py:class:`~zuul.manager.base.PipelineManager` enqueues each
:py:class:`Ref <zuul.model.Ref>` into the
:py:class:`~zuul.model.ChangeQueue` in a :py:class:`~zuul.model.QueueItem`.
.. autoclass:: zuul.model.QueueItem
As the Changes are processed, each :py:class:`~zuul.model.Build` is put into
a :py:class:`~zuul.model.BuildSet`
.. autoclass:: zuul.model.BuildSet
Changes
~~~~~~~
.. autoclass:: zuul.model.Change
.. autoclass:: zuul.model.Ref
Filters
~~~~~~~
.. autoclass:: zuul.model.RefFilter
.. autoclass:: zuul.model.EventFilter
Tenants
~~~~~~~
An abide is a collection of tenants.
.. autoclass:: zuul.model.Tenant
.. autoclass:: zuul.model.UnparsedAbideConfig
.. autoclass:: zuul.model.UnparsedConfig
.. autoclass:: zuul.model.ParsedConfig
Other Global Objects
~~~~~~~~~~~~~~~~~~~~
.. autoclass:: zuul.model.Project
.. autoclass:: zuul.model.Layout
.. autoclass:: zuul.model.RepoFiles
.. autoclass:: zuul.model.TriggerEvent
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/developer/datamodel.rst | datamodel.rst |
Ansible Integration
===================
Zuul contains Ansible modules and plugins to control the execution of Ansible
Job content.
Zuul provides realtime build log streaming to end users so that users
can watch long-running jobs in progress.
Streaming job output
--------------------
All jobs run with the :py:mod:`zuul.ansible.base.callback.zuul_stream` callback
plugin enabled, which writes the build log to a file so that the
:py:class:`zuul.lib.log_streamer.LogStreamer` can provide the data on demand
over the finger protocol. Finally, :py:class:`zuul.web.LogStreamHandler`
exposes that log stream over a websocket connection as part of
:py:class:`zuul.web.ZuulWeb`.
.. autoclass:: zuul.ansible.base.callback.zuul_stream.CallbackModule
:members:
.. autoclass:: zuul.lib.log_streamer.LogStreamer
.. autoclass:: zuul.web.LogStreamHandler
.. autoclass:: zuul.web.ZuulWeb
In addition to real-time streaming, Zuul also installs another callback module,
:py:mod:`zuul.ansible.base.callback.zuul_json.CallbackModule` that collects all
of the information about a given run into a json file which is written to the
work dir so that it can be published along with build logs.
.. autoclass:: zuul.ansible.base.callback.zuul_json.CallbackModule
Since the streaming log is by necessity a single text stream, choices
have to be made for readability about what data is shown and what is
not shown. The json log file is intended to allow for a richer more
interactive set of data to be displayed to the user.
.. _zuul_console_streaming:
Capturing live command output
-----------------------------
As jobs may execute long-running shell scripts or other commands,
additional effort is expended to stream ``stdout`` and ``stderr`` of
shell tasks as they happen rather than waiting for the command to
finish.
The global job configuration should run the ``zuul_console`` task as a
very early prerequisite step.
.. automodule:: zuul.ansible.base.library.zuul_console
This will start a daemon that listens on TCP port 19885 on the testing
node. This daemon can be queried to stream back the output of shell
tasks as described below.
Zuul contains a modified version of Ansible's
:ansible:module:`command` module that overrides the default
implementation.
.. automodule:: zuul.ansible.base.library.command
This library will capture the output of the running
command and write it to a temporary file on the host the command is
running on. These files are named in the format
``/tmp/console-<uuid>-<task_id>-<host>.log``
The ``zuul_stream`` callback mentioned above will send a request to
the remote ``zuul_console`` daemon, providing the uuid and task id of
the task it is currently processing. The ``zuul_console`` daemon will
then read the logfile from disk and stream the data back as it
appears, which ``zuul_stream`` will then present as described above.
The ``zuul_stream`` callback will indicate to the ``zuul_console``
daemon when it has finished reading the task, which prompts the remote
side to remove the temporary streaming output files. In some cases,
aborting the Ansible process may not give the ``zuul_stream`` callback
the chance to send this notice, leaking the temporary files. If nodes
are ephemeral this makes little difference, but these files may be
visible on static nodes.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/developer/ansible.rst | ansible.rst |
Zuul Dashboard Javascript
=========================
zuul-web has an html, css and javascript component, `zuul-dashboard`, that
is managed using Javascript toolchains. It is intended to be served by zuul-web
directly from zuul/web/static in the simple case, or to be published to
an alternate static web location, such as an Apache server.
The web dashboard is written in `React`_ and `PatternFly`_ and is
managed by `create-react-app`_ and `yarn`_ which in turn both assume a
functioning and recent `nodejs`_ installation.
.. note::
The web dashboard source code and package.json are located in the ``web``
directory. All the yarn commands need to be executed from the ``web``
directory.
For the impatient who don't want to deal with javascript toolchains
--------------------------------------------------------------------
tl;dr - You have to build stuff with javascript tools.
The best thing would be to get familiar with the tools, there are a lot of
good features available. If you're going to hack on the Javascript, you should
get to know them.
If you don't want to hack on Javascript and just want to run Zuul's tests,
``tox`` has been set up to handle it for you.
If you do not have `yarn`_ installed, ``tox`` will use `nodeenv`_ to install
node into the active python virtualenv, and then will install `yarn`_ into
that virtualenv as well.
yarn dependency management
--------------------------
`yarn`_ manages the javascript dependencies. That means the first step is
getting `yarn`_ installed.
.. code-block:: console
tools/install-js-tools.sh
The ``tools/install-js-tools.sh`` script will add apt or yum repositories and
install `nodejs`_ and `yarn`_ from them. For RPM-based distros it needs to know
which repo description file to download, so it calls out to
``tools/install-js-repos-rpm.sh``.
Once yarn is installed, getting dependencies installed is:
.. code-block:: console
yarn install
The ``yarn.lock`` file contains all of the specific versions that were
installed before. Since this is an application it has been added to the repo.
To add new runtime dependencies:
.. code-block:: console
yarn add awesome-package
To add new build-time dependencies:
.. code-block:: console
yarn add -D awesome-package
To remove dependencies:
.. code-block:: console
yarn remove terrible-package
Adding or removing packages will add the logical dependency to ``package.json``
and will record the version of the package and any of its dependencies that
were installed into ``yarn.lock`` so that other users can simply run
``yarn install`` and get the same environment.
To update a dependency:
.. code-block:: console
yarn add awesome-package
Dependencies are installed into the ``node_modules`` directory. Deleting that
directory and re-running ``yarn install`` should always be safe.
Dealing with yarn.lock merge conflicts
--------------------------------------
Since ``yarn.lock`` is generated, it can create merge conflicts. Resolving
them at the ``yarn.lock`` level is too hard, but `yarn`_ itself is
deterministic. The best procedure for dealing with ``yarn.lock`` merge
conflicts is to first resolve the conflicts, if any, in ``package.json``. Then:
.. code-block:: console
yarn install --force
git add yarn.lock
Which causes yarn to discard the ``yarn.lock`` file, recalculate the
dependencies and write new content.
React Components and Styling
----------------------------
Each page is a React Component. For instance the status.html page code
is ``web/src/pages/status.jsx``. It is usually a good idea not to put
too much markup in those page components and create different
components for this instead. This way, the page component can deal
with the logic like reloading data if needed or evaluating URL
parameters and the child components can deal with the markup. Thus,
you will find a lot of components in the ``web/src/containers``
directory that mainly deal with the markup.
Mapping of pages/urls to components can be found in the route list in
``web/src/routes.js``.
The best way to get started is to check out the libraries that glue
everything together. Those are `React`__, `react-router`_ and
`Redux`_.
.. _React-getting-started: https://reactjs.org/docs/getting-started.html
__ React-getting-started_
For the visual part we are using `PatternFly`_. For a list of available
PatternFly React components, take a look at the `Components`_ section in their
documentation. If a single component is not enough, you could also take a
look at the `Demos`_ sections which provides some more advanced examples
incorporating multiple components and their interaction.
If you are unsure which component you should use for your purpose, you might
want to check out the `Usage and behaviour`_ section in their design guidelines.
There is also a list of available `icons`_ including some recommendations on
when to use which icon. In case you don't find an appropriate icon there, you
could check out the `FontAwesome Free`_ icons, as most of them are included in
PatternFly. To find out if an icon is available, simply try to import it from
the ``@patternfly/react-icons`` package.
For example if you want to use the `address-book`_ icon (which is not listed in
the PatternFly icon list) you can import it via the following statement:
.. code-block:: javascript
import { AddressBookIcon } from '@patternfly/react-icons'
Please note that the spelling of the icon name changes to CamelCase and is
always extended by ``Icon``.
Development
-----------
Building the code can be done with:
.. code-block:: bash
yarn build
zuul-web has a ``static`` route defined which serves files from
``zuul/web/static``. ``yarn build`` will put the build output files
into the ``zuul/web/static`` directory so that zuul-web can serve them.
Development server that handles things like reloading and
hot-updating of code can be started with:
.. code-block:: bash
yarn start
will build the code and launch the dev server on `localhost:3000`. Fake
API responses need to be placed in the ``web/public/api`` directory.
.. code-block:: bash
mkdir public/api/
for route in info status jobs builds; do
curl -o public/api/${route} https://zuul.openstack.org/api/${route}
done
To use an existing Zuul API, set the REACT_APP_ZUUL_API environment
variable:
.. code-block:: bash
# Use openstack zuul's api:
yarn start:openstack
# Use software-factory multi-tenant zuul's api:
yarn start:multi
# Use a custom zuul:
REACT_APP_ZUUL_API="https://zuul.example.com/api/" yarn start
To run eslint tests locally:
.. code-block:: bash
yarn lint
Authentication
~~~~~~~~~~~~~~
The docker-compose file in ``doc/source/examples/keycloak`` can be
used to run a Keycloak server for use with a development build of the
web app. The default values in that file are already set up for the
web app running on localhost. See the Keycloak tutorial for details.
Deploying
---------
The web application is a set of static files and is designed to be served
by zuul-web from its ``static`` route. In order to make sure this works
properly, the javascript build needs to be performed so that the javascript
files are in the ``zuul/web/static`` directory. Because the javascript
build outputs into the ``zuul/web/static`` directory, as long as
``yarn build`` has been done before ``pip install .`` or
``python setup.py sdist``, all the files will be where they need to be.
As long as `yarn`_ is installed, the installation of zuul will run
``yarn build`` appropriately.
.. _yarn: https://yarnpkg.com/en/
.. _nodejs: https://nodejs.org/
.. _webpack: https://webpack.js.org/
.. _devtool: https://webpack.js.org/configuration/devtool/#devtool
.. _nodeenv: https://pypi.org/project/nodeenv
.. _React: https://reactjs.org/
.. _react-router: https://reactrouter.com/web/guides/philosophy
.. _Redux: https://redux.js.org/introduction/core-concepts
.. _PatternFly: https://www.patternfly.org/
.. _create-react-app: https://github.com/facebook/create-react-app/blob/master/packages/react-scripts/template/README.md
.. _Components: https://www.patternfly.org/v4/documentation/react/components/aboutmodal
.. _Demos: https://www.patternfly.org/v4/documentation/react/demos/bannerdemo
.. _Usage and behaviour: https://www.patternfly.org/v4/design-guidelines/usage-and-behavior/about-modal
.. _icons: https://www.patternfly.org/v4/guidelines/icons
.. _FontAwesome Free: https://fontawesome.com/icons?d=gallery&m=free
.. _address-book: https://fontawesome.com/icons/address-book?style=solid
By default, zuul-web provides a Progressive Web Application but does
not run a Service Worker. For deployers who would like to enable one,
set the environment variable
``REACT_APP_ENABLE_SERVICE_WORKER=true`` during installation.
| zuul | /zuul-9.1.0.tar.gz/zuul-9.1.0/doc/source/developer/javascript.rst | javascript.rst |
Subsets and Splits