code
stringlengths 52
7.75k
| docs
stringlengths 1
5.85k
|
---|---|
def startup(self, container, instances=None, map_name=None, **kwargs):
    """
    Start up container instances from a container configuration. Typically this means creating and starting
    containers and their dependencies. Note that not all policy classes necessarily implement this method.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to start. If not specified, will start all instances as specified in the
      configuration (or just one default instance).
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs. Only options controlling policy behavior are considered.
    :return: Return values of created containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    return self.run_actions('startup', container, instances=instances, map_name=map_name, **kwargs)
def shutdown(self, container, instances=None, map_name=None, **kwargs):
    """
    Shut down container instances from a container configuration. Typically this means stopping and removing
    containers. Note that not all policy classes necessarily implement this method.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to remove. If not specified, will remove all instances as specified in the
      configuration (or just one default instance).
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs. Only options controlling policy behavior are considered.
    :return: Return values of removed containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    action_kwargs = dict(kwargs, instances=instances, map_name=map_name)
    return self.run_actions('shutdown', container, **action_kwargs)
def update(self, container, instances=None, map_name=None, **kwargs):
    """
    Updates instances from a container configuration. Typically this means restarting or recreating containers
    based on detected changes in the configuration or environment. Note that not all policy classes necessarily
    implement this method.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to update. If not specified, will update all instances as specified in the
      configuration (or just one default instance).
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs. Only options controlling policy behavior are considered.
    :return: Return values of actions.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    return self.run_actions('update', container, instances=instances, map_name=map_name, **kwargs)
def call(self, action_name, container, instances=None, map_name=None, **kwargs):
    """
    Generic function for running container actions based on a policy.

    :param action_name: Action name.
    :type action_name: unicode | str
    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to run on. If not specified, runs on all instances as specified in the
      configuration (or just one default instance).
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs for the policy method.
    :return: Return values of actions.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    forwarded = dict(kwargs, instances=instances, map_name=map_name)
    return self.run_actions(action_name, container, **forwarded)
def run_script(self, container, instance=None, map_name=None, **kwargs):
    """
    Runs a script or single command in the context of a container. By the default implementation this means
    creating the container along with all of its dependencies, mounting the script path, and running the
    script. The result is recorded in a dictionary per client, before the container is removed. Dependencies
    are not removed. For details, see :meth:`dockermap.map.runner.script.ScriptMixin.run_script`.

    :param container: Container configuration name.
    :type container: unicode | str
    :param instance: Instance name. Optional, if not specified runs the default instance.
    :type instance: unicode | str
    :param map_name: Container map name.
    :type map_name: unicode | str
    :param kwargs: Keyword arguments to the script runner function.
    :return: Return values of the script actions with their log output and exit codes.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    # The single 'instance' is forwarded through the plural 'instances' kwarg of run_actions.
    return self.run_actions('script', container, instances=instance, map_name=map_name, **kwargs)
def signal(self, container, instances=None, map_name=None, **kwargs):
    """
    Sends a signal to a single running container configuration (but possibly multiple instances). If not
    specified with ``signal``, this signal is ``SIGKILL``.

    :param container: Container configuration name.
    :type container: unicode | str
    :param instances: Instance name. Optional, if not specified sends the signal to all configured instances,
      or the default.
    :type instances: unicode | str
    :param map_name: Container map name.
    :type map_name: unicode | str
    :param kwargs: Keyword arguments to the script runner function.
    :return: Return values of actions.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    forwarded = dict(kwargs, instances=instances, map_name=map_name)
    return self.run_actions('signal', container, **forwarded)
def pull_images(self, container, instances=None, map_name=None, **kwargs):
    """
    Pulls images for container configurations along their dependency path.

    :param container: Container configuration name.
    :type container: unicode | str
    :param instances: Not applicable for images.
    :type instances: unicode | str
    :param map_name: Container map name.
    :type map_name: unicode | str
    :param kwargs: Keyword arguments to the script runner function.
    :return: Return values of actions.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    # 'instances' is accepted for signature symmetry with the other policy methods,
    # but deliberately not forwarded - images have no instances.
    return self.run_actions('pull_images', container, map_name=map_name, **kwargs)
def list_persistent_containers(self, map_name=None):
    """
    Lists the names of all persistent containers on the specified map or all maps. Attached containers are
    always considered persistent.

    :param map_name: Container map name. Optional, only returns persistent containers from the specified map.
    :type map_name: unicode | str
    :return: List of container names.
    :rtype: list[unicode | str]
    """
    if map_name:
        extended_maps = [self._maps[map_name].get_extended_map()]
    else:
        extended_maps = [m.get_extended_map() for m in self._maps.values()]
    cname = self.policy_class.cname
    aname = self.policy_class.aname
    names = []
    for c_map in extended_maps:
        map_id = c_map.name
        attached, persistent = c_map.get_persistent_items()
        if c_map.use_attached_parent_name:
            for parent_name, attached_name in attached:
                names.append(aname(map_id, attached_name, parent_name))
        else:
            for item in attached:
                names.append(aname(map_id, item[1]))
        for config_name, instance in persistent:
            names.append(cname(map_id, config_name, instance))
    return names
def rest(f):
    """
    Decorator for simple REST endpoints.

    Functions must return one of these values:

    - a dict to jsonify
    - nothing for an empty 204 response
    - a tuple containing a status code and a dict to jsonify
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        ret = f(*args, **kwargs)
        if ret is None:
            # No payload: empty body with HTTP 204.
            return '', 204
        if isinstance(ret, current_app.response_class):
            # Already a full response object - pass through untouched.
            return ret
        if isinstance(ret, tuple):
            # (code, result_dict | msg_string)
            status, payload = ret[0], ret[1]
            if isinstance(payload, basestring):
                response = jsonify(msg=payload)
            else:
                response = jsonify(**payload)
            response.status_code = status
            return response
        return jsonify(**ret)
    return wrapper
def login(self, action, registry, **kwargs):
    """
    Logs in to a Docker registry.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param registry: Name of the registry server to login to.
    :type registry: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
    :type kwargs: dict
    """
    log.info("Logging into registry %s.", registry)
    auth_config = action.client_config.auth_configs.get(registry)
    if not auth_config:
        raise KeyError("No login information found for registry.", registry)
    log.debug("Registry auth config for %s found.", registry)
    login_kwargs = {'registry': registry}
    login_kwargs.update(auth_config)
    insecure_registry = kwargs.get('insecure_registry')
    if insecure_registry is not None:
        login_kwargs['insecure_registry'] = insecure_registry
    # Explicit kwargs take precedence over the stored auth configuration.
    update_kwargs(login_kwargs, kwargs)
    res = action.client.login(**login_kwargs)
    if res:
        log.debug("User %(username)s logged into %(registry)s.", login_kwargs)
        self._login_registries.add(registry)
    return res
def pull(self, action, image_name, **kwargs):
    """
    Pulls an image for a container configuration.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param image_name: Image name.
    :type image_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
    :type kwargs: dict
    """
    config_id = action.config_id
    repo_name = config_id.config_name
    tag = config_id.instance_name
    registry, __, __image = repo_name.rpartition('/')
    # Only attempt a login for what looks like a registry host (contains a dot),
    # and only once per registry.
    if registry and '.' in registry and registry not in self._login_registries:
        self.login(action, registry, insecure_registry=kwargs.get('insecure_registry'))
    log.info("Pulling image %s:%s.", repo_name, tag)
    res = action.client.pull(repository=repo_name, tag=tag, **kwargs)
    log.debug("Done pulling image %s:%s.", repo_name, tag)
    self._policy.images[action.client_name].refresh_repo(repo_name)
    log.debug("Refreshed image cache for repo %s.", repo_name)
    return res
def parse_networks_output(out):
    """
    Parses the output of the Docker CLI 'docker network ls' and returns it in the format similar to the
    Docker API.

    :param out: CLI output.
    :type out: unicode | str
    :return: Parsed result.
    :rtype: list[dict]
    """
    if not out:
        return []
    # First line is the column header row.
    data_lines = out.splitlines()[1:]
    return [_network_info(line) for line in data_lines]
def parse_volumes_output(out):
    """
    Parses the output of the Docker CLI 'docker volume ls' and returns it in the format similar to the
    Docker API.

    :param out: CLI output.
    :type out: unicode | str
    :return: Parsed result.
    :rtype: list[dict]
    """
    if not out:
        return []
    # First line is the column header row.
    data_lines = out.splitlines()[1:]
    return [_volume_info(line) for line in data_lines]
def parse_inspect_output(out, item_type):
    """
    Parses the output of the Docker CLI 'docker inspect <container>' or 'docker network inspect <network>'.
    Essentially just returns the parsed JSON string, like the Docker API does.

    :param out: CLI output.
    :type out: unicode | str
    :param item_type: Type of the item that has been inspected (e.g. 'container').
    :type item_type: unicode | str
    :return: Parsed result.
    :rtype: dict
    :raise NotFound: If the CLI returned an empty result.
    """
    # The 'encoding' keyword of json.loads was a no-op for str input and was removed in
    # Python 3.9 (it raises TypeError there); json.loads detects encodings of bytes itself.
    parsed = json.loads(out)
    if parsed:
        # 'docker inspect' always returns a JSON array; unwrap the single item.
        return parsed[0]
    raise NotFound("{0} not found.".format(item_type.title()), None)
def parse_images_output(out):
    """
    Parses the output of the Docker CLI 'docker images'. Note this is currently incomplete and only returns
    the ids and tags of images, as the Docker CLI heavily modifies the output for human readability. The
    parent image id is also not available on the CLI, so a full API compatibility is not possible.

    :param out: CLI output.
    :type out: unicode | str
    :return: Parsed result.
    :rtype: list[dict]
    """
    # Skip the header row, split each data row into columns, then group rows by image id
    # so that multiple tags of the same image are summarized into one entry.
    rows = (line.split() for line in out.splitlines()[1:])
    grouped = groupby(sorted(rows, key=_get_image_id), key=_get_image_id)
    return [_summarize_tags(image_id, image_rows) for image_id, image_rows in grouped]
def parse_version_output(out):
    """
    Parses the output of 'docker version --format="{{json .}}"'. Essentially just returns the parsed JSON
    string, like the Docker API does. Fields are slightly different however.

    :param out: CLI output.
    :type out: unicode | str
    :return: Parsed result.
    :rtype: dict
    """
    # The 'encoding' keyword of json.loads was a no-op for str input and was removed in
    # Python 3.9 (it raises TypeError there); json.loads detects encodings of bytes itself.
    parsed = json.loads(out)
    if parsed:
        return parsed.get('Client', {})
    return {}
def parse_top_output(out):
    """
    Parses the output of the Docker CLI 'docker top <container>'. Note that if 'ps' output columns are
    modified and 'args' (for the command) is anywhere but in the last column, this will not parse correctly.
    However, the Docker API produces wrong output in this case as well.

    Returns a dictionary with entries 'Titles' and 'Processes' just like the Docker API would.

    :param out: CLI output.
    :type out: unicode | str
    :return: Parsed result.
    :rtype: dict
    """
    rows = out.splitlines()
    titles = rows[0].split()
    # The command column may contain spaces; cap the number of splits so it stays in one field.
    max_fields = len(titles) - 1
    processes = [row.split(None, max_fields) for row in rows[1:]]
    return {
        'Titles': titles,
        'Processes': processes,
    }
def refresh(self):
    """
    Fetches images and their ids from the client.
    """
    if not self._client:
        return
    images = self._client.images()
    self.clear()
    self._update(images)
    for image in images:
        repo_tags = image.get('RepoTags')
        if not repo_tags:
            continue
        image_id = image['Id']
        self.update({t: image_id for t in repo_tags})
def refresh(self):
    """
    Fetches all current container names from the client, along with their id.
    """
    if not self._client:
        return
    containers = self._client.containers(all=True)
    self.clear()
    for container in containers:
        names = container.get('Names')
        if not names:
            continue
        container_id = container['Id']
        # Docker prefixes container names with '/'; strip it for the cache key.
        self.update({name[1:]: container_id for name in names})
def refresh(self):
    """
    Fetches all current network names from the client, along with their id.
    """
    if not self._client:
        return
    # Fetch before clearing, so a failing client call leaves the cache untouched.
    networks = self._client.networks()
    self.clear()
    self.update({net['Name']: net['Id'] for net in networks})
def refresh(self):
    """
    Fetches all current volume names from the client.
    """
    if not self._client:
        return
    current_volumes = self._client.volumes()['Volumes']
    self.clear()
    # The API returns None instead of an empty list when there are no volumes.
    if current_volumes:
        self.update(vol['Name'] for vol in current_volumes)
def refresh(self, item):
    """
    Forces a refresh of a cached item.

    :param item: Client name.
    :type item: unicode | str
    :return: Refreshed item in the cache.
    :rtype: DockerHostItemCache.item_class
    """
    client = self._clients[item].get_client()
    refreshed = self.item_class(client)
    self[item] = refreshed
    return refreshed
def get_state_actions(self, state, **kwargs):
    """
    For dependent items, inherits the behavior from :class:`dockermap.map.action.resume.ResumeActionGenerator`.
    For the main container, checks if containers exist, and depending on the ``remove_existing_before``
    option either fails or removes them. Otherwise runs the script.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    is_dependency = bool(state.config_flags & ConfigFlags.DEPENDENT)
    if is_dependency or state.config_id.config_type != ItemType.CONTAINER:
        return super(ScriptActionGenerator, self).get_state_actions(state, **kwargs)
    if state.base_state == State.ABSENT:
        pending = []
    else:
        log.debug("Found existing script containers: %s", state.config_id)
        if not self.remove_existing_before:
            # Existing containers are an error unless removal was explicitly requested.
            config_id = state.config_id
            c_name = self._policy.cname(config_id.map_name, config_id.config_name, config_id.instance_name)
            if state.client_name == self._policy.default_client_name:
                error_msg = "Container {0} existed prior to running the script.".format(c_name)
            else:
                error_msg = ("Container {0} existed on client {1} prior to running the "
                             "script.").format(c_name, state.client_name)
            raise ScriptActionException(error_msg)
        if state.base_state == State.RUNNING or state.state_flags & StateFlags.RESTARTING:
            log.debug("Preparing shutdown of existing container: %s", state.config_id)
            pending = [ItemAction(state, DerivedAction.SHUTDOWN_CONTAINER)]
        else:
            log.debug("Preparing removal existing container: %s", state.config_id)
            pending = [ItemAction(state, Action.REMOVE)]
    pending.append(ItemAction(state, ContainerUtilAction.SCRIPT, extra_data=kwargs))
    return pending
def get_persistent_items(self):
    """
    Returns attached container items and container configurations that are marked as persistent. Each
    returned item is in the format ``(config name, instance/attached name)``, where the instance name can
    also be ``None``.

    :return: Lists of attached items.
    :rtype: (list[(unicode | str, unicode | str)], list[unicode | str, unicode | str | NoneType])
    """
    attached_items = []
    for container, config in self:
        for attached_name in config.attaches:
            attached_items.append((container, attached_name))
    persistent_containers = []
    for container, config in self:
        if not config.persistent:
            continue
        # Configurations without explicit instances contribute a single default (None) instance.
        for instance in config.instances or [None]:
            persistent_containers.append((container, instance))
    return attached_items, persistent_containers
def get_image(self, image):
    """
    Generates a tuple of the full image name and tag, that should be used when creating a new container.

    This implementation applies the following rules:

    * If the image name starts with ``/``, the following image name is returned.
    * If ``/`` is found anywhere else in the image name, it is assumed to be a repository-prefixed image and
      returned as it is.
    * Otherwise, if the given container map has a repository prefix set, this is prepended to the image name.
    * In any other case, the image name is not modified.

    Where there is a tag included in the ``image`` name, it is not modified. If it is not, the default tag
    from the container map, or ``latest`` is used.

    :param image: Image name.
    :type image: unicode | str
    :return: Image name, where applicable prefixed with a repository, and tag.
    :rtype: (unicode | str, unicode | str)
    """
    name, __, tag = image.rpartition(':')
    if not name:
        # No ':' in the input - rpartition put the entire string into 'tag'.
        name, tag = tag, ''
    if '/' in name:
        repo_name = name[1:] if name.startswith('/') else name
    else:
        default_prefix = resolve_value(self.repository)
        repo_name = '{0}/{1}'.format(default_prefix, name) if default_prefix else name
    if tag:
        return repo_name, tag
    return repo_name, resolve_value(self.default_tag) or 'latest'
def get_extended(self, config):
    """
    Generates a configuration that includes all inherited values.

    :param config: Container configuration.
    :type config: ContainerConfiguration
    :return: A merged (shallow) copy of all inherited configurations merged with the container configuration.
    :rtype: ContainerConfiguration
    :raise KeyError: If a referenced parent configuration does not exist on the map.
    """
    # Already-extended maps and configurations without parents need no merging.
    if self._extended or not config.extends:
        return config
    merged = ContainerConfiguration()
    for parent_name in config.extends:
        parent_base = self._containers.get(parent_name)
        if not parent_base:
            raise KeyError(parent_name)
        # Parents may themselves extend other configurations; resolve recursively.
        merged.merge_from_obj(self.get_extended(parent_base))
    merged.merge_from_obj(config)
    return merged
def get_extended_map(self):
    """
    Creates a copy of this map which includes all non-abstract configurations in their extended form.

    :return: Copy of this map.
    :rtype: ContainerMap
    """
    extended_copy = self.__class__(self.name)
    extended_copy.update_from_obj(self, copy=True, update_containers=False)
    for config_name, config in self:
        extended_copy._containers[config_name] = self.get_extended(config)
    # Mark the copy so that get_extended() short-circuits on it.
    extended_copy._extended = True
    return extended_copy
def get_state_actions(self, state, **kwargs):
    """
    Creates all missing containers, networks, and volumes.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    if state.base_state != State.ABSENT:
        return None
    item_type = state.config_id.config_type
    if item_type == ItemType.IMAGE:
        return [ItemAction(state, ImageAction.PULL)]
    result = [ItemAction(state, Action.CREATE, extra_data=kwargs)]
    if item_type == ItemType.CONTAINER:
        result.append(ItemAction(state, ContainerUtilAction.CONNECT_ALL))
    return result
def get_state_actions(self, state, **kwargs):
    """
    Generally starts containers that are not running. Attached containers are skipped unless they are
    initial. Attached containers are also prepared with permissions. Where applicable, exec commands are run
    in started instance containers.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    item_type = state.config_id.config_type
    is_present = state.base_state == State.PRESENT
    if item_type == ItemType.VOLUME and is_present and state.state_flags & StateFlags.INITIAL:
        return [
            ItemAction(state, Action.START),
            ItemAction(state, VolumeUtilAction.PREPARE),
        ]
    if item_type == ItemType.CONTAINER and is_present:
        return [
            ItemAction(state, Action.START, extra_data=kwargs),
            ItemAction(state, ContainerUtilAction.EXEC_ALL),
        ]
    return None
def get_state_actions(self, state, **kwargs):
    """
    Restarts instance containers.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    if state.config_id.config_type != ItemType.CONTAINER:
        return None
    if state.base_state == State.ABSENT or state.state_flags & StateFlags.INITIAL:
        return None
    result = [ItemAction(state, DerivedAction.RESTART_CONTAINER, extra_data=kwargs)]
    if self.restart_exec_commands:
        result.append(ItemAction(state, ContainerUtilAction.EXEC_ALL, extra_data=kwargs))
    return result
def get_state_actions(self, state, **kwargs):
    """
    Stops containers that are running. Does not check attached containers. Considers using the pre-configured
    ``stop_signal``.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    is_existing_container = (state.config_id.config_type == ItemType.CONTAINER and
                             state.base_state != State.ABSENT)
    if is_existing_container and not state.state_flags & StateFlags.INITIAL:
        return [ItemAction(state, ContainerUtilAction.SIGNAL_STOP, extra_data=kwargs)]
    return None
def get_state_actions(self, state, **kwargs):
    """
    Removes containers that are stopped. Optionally skips persistent containers. Attached containers are
    skipped by default from removal but can optionally be included.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    config_type = state.config_id.config_type
    if config_type == ItemType.CONTAINER:
        extra_data = kwargs
    else:
        extra_data = None
    if state.base_state == State.PRESENT:
        # NOTE(fix): the original condition parsed as
        # (VOLUME and remove_attached) or (CONTAINER and remove_persistent) or (not PERSISTENT)
        # due to and/or precedence, so any present item without the PERSISTENT flag - including
        # networks - was removed here. Parenthesized to match the shutdown generator's semantics.
        if config_type == ItemType.VOLUME and self.remove_attached:
            return [ItemAction(state, Action.REMOVE, extra_data=extra_data)]
        elif config_type == ItemType.CONTAINER and (
                self.remove_persistent or not state.state_flags & StateFlags.PERSISTENT):
            return [ItemAction(state, Action.REMOVE, extra_data=extra_data)]
        elif config_type == ItemType.NETWORK:
            connected_containers = state.extra_data.get('containers')
            if connected_containers:
                actions = [ItemAction(state, NetworkUtilAction.DISCONNECT_ALL,
                                      {'containers': connected_containers})]
            else:
                actions = []
            actions.append(ItemAction(state, Action.REMOVE, extra_data=kwargs))
            return actions
def get_state_actions(self, state, **kwargs):
    """
    A combination of CreateActionGenerator and StartActionGenerator - creates and starts containers where
    appropriate.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    config_type = state.config_id.config_type
    base_state = state.base_state
    if config_type == ItemType.VOLUME:
        if base_state == State.ABSENT:
            return [
                ItemAction(state, Action.CREATE),
                ItemAction(state, VolumeUtilAction.PREPARE),
            ]
        if base_state == State.PRESENT and state.state_flags & StateFlags.INITIAL:
            return [
                ItemAction(state, Action.START),
                ItemAction(state, VolumeUtilAction.PREPARE),
            ]
    elif config_type == ItemType.CONTAINER:
        if base_state == State.ABSENT:
            return [
                ItemAction(state, DerivedAction.STARTUP_CONTAINER),
                ItemAction(state, ContainerUtilAction.EXEC_ALL),
            ]
        if base_state == State.PRESENT:
            return [
                ItemAction(state, Action.START),
                ItemAction(state, ContainerUtilAction.EXEC_ALL),
            ]
    elif config_type == ItemType.NETWORK:
        return [ItemAction(state, Action.CREATE)]
    elif config_type == ItemType.IMAGE:
        return [ItemAction(state, ImageAction.PULL)]
    return None
def get_state_actions(self, state, **kwargs):
    """
    A combination of StopActionGenerator and RemoveActionGenerator - stops and removes containers where
    appropriate.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    config_type = state.config_id.config_type
    if config_type == ItemType.NETWORK:
        if state.base_state != State.PRESENT:
            return None
        connected = state.extra_data.get('containers')
        if connected:
            # Disconnect everything first; containers may only be known by id.
            names = [c.get('Name', c['Id']) for c in connected]
            result = [ItemAction(state, NetworkUtilAction.DISCONNECT_ALL,
                                 extra_data={'containers': names})]
        else:
            result = []
        result.append(ItemAction(state, Action.REMOVE, extra_data=kwargs))
        return result
    if config_type == ItemType.VOLUME:
        return [ItemAction(state, Action.REMOVE)] if self.remove_attached else None
    if config_type == ItemType.CONTAINER:
        active = (state.base_state == State.RUNNING or
                  state.state_flags & StateFlags.RESTARTING)
        if self.remove_persistent or not state.state_flags & StateFlags.PERSISTENT:
            if active:
                return [ItemAction(state, DerivedAction.SHUTDOWN_CONTAINER)]
            if state.base_state == State.PRESENT:
                return [ItemAction(state, Action.REMOVE)]
        elif active:
            return [ItemAction(state, Action.REMOVE)]
    return None
def get_state_actions(self, state, **kwargs):
    """
    Sends kill signals to running containers.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    is_running_container = (state.config_id.config_type == ItemType.CONTAINER and
                            state.base_state == State.RUNNING)
    if is_running_container:
        return [ItemAction(state, Action.KILL, extra_data=kwargs)]
    return None
def read_version():
    """Read version from curdling/version.py without loading any files"""
    tree = ast.parse(local_file('curdling', 'version.py'))
    finder = VersionFinder()
    finder.visit(tree)
    return finder.version
def parse_requirements(path):
    """
    Rudimentary parser for the `requirements.txt` file

    We just want to separate regular packages from links to pass them to the
    `install_requires` and `dependency_links` params of the `setup()`
    function properly.

    :param path: Path of the requirements file, relative to the project root.
    :return: Tuple of (package specifier list, dependency link list).
    """
    try:
        # Method call instead of map(str.strip, ...): works for both str and unicode lines.
        requirements = [line.strip() for line in local_file(path).splitlines()]
    except IOError:
        raise RuntimeError("Couldn't find the `requirements.txt' file :(")
    links = []
    pkgs = []
    for req in requirements:
        if not req:
            continue
        if 'http:' in req or 'https:' in req:
            links.append(req)
            # Raw string: '\#' and '\-' are invalid escape sequences in a regular string
            # (DeprecationWarning, later SyntaxWarning/SyntaxError).
            name, version = re.findall(r"\#egg=([^\-]+)-(.+$)", req)[0]
            pkgs.append('{0}=={1}'.format(name, version))
        else:
            pkgs.append(req)
    return pkgs, links
def get_distribution_paths(name):
    """Return target paths where the package content should be installed"""
    # sys.version[:3] breaks on Python >= 3.10 (truncates '3.10' to '3.1');
    # build the directory name from version_info instead.
    pyver = 'python{0}.{1}'.format(*sys.version_info[:2])

    paths = {
        'prefix' : '{prefix}',
        'data'   : '{prefix}/lib/{pyver}/site-packages',
        'purelib': '{prefix}/lib/{pyver}/site-packages',
        'platlib': '{prefix}/lib/{pyver}/site-packages',
        'headers': '{prefix}/include/{pyver}/{name}',
        'scripts': '{prefix}/bin',
    }

    # pip uses a similar path as an alternative to the system's (read-only)
    # include directory:
    if hasattr(sys, 'real_prefix'):  # virtualenv
        paths['headers'] = os.path.abspath(
            os.path.join(sys.prefix, 'include', 'site', pyver, name))

    # Replacing vars
    for key, val in paths.items():
        paths[key] = val.format(prefix=PREFIX, name=name, pyver=pyver)
    return paths
def parse_response(response):
    """
    Decodes the JSON response, simply ignoring syntax errors. Therefore it should be used for filtering
    visible output only.

    :param response: Server response as a JSON string.
    :type response: unicode | str
    :return: Decoded object from the JSON string. Returns an empty dictionary if input was invalid.
    :rtype: dict
    """
    text = response.decode('utf-8') if isinstance(response, six.binary_type) else response
    try:
        return json.loads(text)
    except ValueError:
        # Invalid JSON - deliberately swallowed; callers only use this for display filtering.
        return {}
def build(self, tag, add_latest_tag=False, add_tags=None, raise_on_error=True, **kwargs):
    """
    Overrides the superclass `build()` and filters the output. Messages are deferred to `push_log`, whereas
    the final message is checked for a success message. If the latter is found, only the new image id is
    returned.

    :param tag: Tag of the new image to be built. Unlike in the superclass, this is obligatory.
    :type tag: unicode | str
    :param add_latest_tag: In addition to the image ``tag``, tag the image with ``latest``.
    :type add_latest_tag: bool
    :param add_tags: Additional tags. Can also be used as an alternative to ``add_latest_tag``.
    :type add_tags: list[unicode | str]
    :param raise_on_error: Raises errors in the status output as a DockerStatusException. Otherwise only logs
      errors.
    :type raise_on_error: bool
    :param kwargs: See :meth:`docker.client.Client.build`.
    :return: New, generated image id or `None`.
    :rtype: unicode | str
    """
    response = super(DockerClientWrapper, self).build(tag=tag, **kwargs)
    # It is not the kwargs alone that decide if we get a stream, so we have to check.
    if isinstance(response, tuple):
        image_id = response[0]
    else:
        last_log = self._docker_log_stream(response, raise_on_error)
        success_prefix = 'Successfully built '
        if last_log and last_log.startswith(success_prefix):
            image_id = last_log[len(success_prefix):]
        else:
            image_id = None
    if image_id:
        self.add_extra_tags(image_id, tag, add_tags, add_latest_tag)
        return image_id
    return None
def login(self, username, password=None, email=None, registry=None, reauth=False, **kwargs):
    """
    Login to a Docker registry server.

    :param username: User name for login.
    :type username: unicode | str
    :param password: Login password; may be ``None`` if blank.
    :type password: unicode | str
    :param email: Optional; email address for login.
    :type email: unicode | str
    :param registry: Optional registry URL to log in to. Uses the Docker index by default.
    :type registry: unicode | str
    :param reauth: Re-authenticate, even if the login has been successful before.
    :type reauth: bool
    :param kwargs: Additional kwargs to :meth:`docker.client.Client.login`.
    :return: ``True`` if the login has succeeded, or if it has not been necessary as it succeeded before.
      ``False`` otherwise.
    :rtype: bool
    """
    response = super(DockerClientWrapper, self).login(username, password, email, registry,
                                                      reauth=reauth, **kwargs)
    # A fresh login reports a status; a cached one just echoes the user name.
    succeeded = response.get('Status') == 'Login Succeeded'
    return succeeded or response.get('username') == username
def push(self, repository, stream=False, raise_on_error=True, **kwargs):
    """
    Pushes an image repository to the registry.

    :param repository: Name of the repository (can include a tag).
    :type repository: unicode | str
    :param stream: Use the stream output format with additional status information.
    :type stream: bool
    :param raise_on_error: Raises errors in the status output as a DockerStatusException. Otherwise only logs
      errors.
    :type raise_on_error: bool
    :param kwargs: Additional kwargs for :meth:`docker.client.Client.push`.
    :return: ``True`` if the image has been pushed successfully.
    :rtype: bool
    """
    response = super(DockerClientWrapper, self).push(repository, stream=stream, **kwargs)
    if stream:
        status = self._docker_status_stream(response, raise_on_error)
    else:
        # Non-streamed responses pack all status lines into one CRLF-separated string.
        lines = response.split('\r\n') if response else ()
        status = self._docker_status_stream(lines, raise_on_error)
    return status and not status.get('error')
def push_container_logs(self, container):
    """
    Reads the current container logs and passes them to :meth:`~push_log`. Removes a trailing empty
    line and prefixes each log line with the container name.

    :param container: Container name or id.
    :type container: unicode | str
    """
    log_text = self.logs(container).decode('utf-8')
    log_lines = log_text.split('\n')
    # A log ending with a newline produces one empty trailing entry after the split.
    if log_lines and not log_lines[-1]:
        del log_lines[-1]
    for log_line in log_lines:
        self.push_log(LOG_CONTAINER_FORMAT, logging.INFO, container, log_line)
def remove_container(self, container, raise_on_error=True, raise_not_found=False, **kwargs):
    """
    Removes a container. For convenience optionally ignores API errors.

    :param container: Container name or id.
    :type container: unicode | str
    :param raise_on_error: Errors on stop and removal may result from Docker volume problems, that
      may not affect further actions. Such errors are always logged, but only raise an exception if
      this is set to ``True``.
    :type raise_on_error: bool
    :param raise_not_found: Whether to raise 404 errors, i.e. that the container to be removed was
      not found. Default is ``False``.
    :type raise_not_found: bool
    :param kwargs: Additional keyword args for :meth:`docker.client.Client.remove_container`.
    """
    try:
        super(DockerClientWrapper, self).remove_container(container, **kwargs)
    except APIError as e:
        exc_info = sys.exc_info()
        if e.response.status_code == 404:
            # Container is already gone; only re-raise when explicitly requested.
            if raise_not_found:
                six.reraise(*exc_info)
        else:
            self.push_log("Failed to remove container '%s': %s", logging.ERROR, container, e.explanation)
            if raise_on_error:
                six.reraise(*exc_info)
def stop(self, container, raise_on_error=True, **kwargs):
    """
    Stops a container. For convenience optionally ignores API errors.

    :param container: Container name.
    :type container: unicode | str
    :param raise_on_error: Errors on stop and removal may result from Docker volume problems, that
      may not affect further actions. Such errors are always logged, but only raise an exception if
      this is set to ``True``.
    :type raise_on_error: bool
    :param kwargs: Additional keyword args for :meth:`docker.client.Client.stop`.
    """
    try:
        super(DockerClientWrapper, self).stop(container, **kwargs)
    except APIError as e:
        # Always log the failure; let the caller decide whether it is fatal.
        exc_info = sys.exc_info()
        self.push_log("Failed to stop container '%s': %s", logging.ERROR, container, e.explanation)
        if raise_on_error:
            six.reraise(*exc_info)
def copy_resource(self, container, resource, local_filename):
    """
    *Experimental:* Copies a resource from a Docker container to a local tar file. For details, see
    :meth:`docker.client.Client.copy`.

    :param container: Container name or id.
    :type container: unicode | str
    :param resource: Resource inside the container.
    :type resource: unicode | str
    :param local_filename: Local file to store resource into. Will be overwritten if present.
    :type local_filename: unicode | str
    """
    chunks = self.copy(container, resource)
    with open(local_filename, 'wb+') as dest:
        for chunk in chunks:
            dest.write(chunk)
def save_image(self, image, local_filename):
    """
    *Experimental:* Copies an image from Docker to a local tar file. For details, see
    :meth:`docker.client.Client.get_image`.

    :param image: Image name or id.
    :type image: unicode | str
    :param local_filename: Local file to store image into. Will be overwritten if present.
    :type local_filename: unicode | str
    """
    chunks = self.get_image(image)
    with open(local_filename, 'wb+') as dest:
        for chunk in chunks:
            dest.write(chunk)
def tag_check_function(tags):
    """
    Generates a function that checks whether the given image has any of the listed tags.

    :param tags: Tags to check for.
    :type tags: list[unicode | str] | set[unicode | str]
    :return: Function that returns ``True`` if any of the given tags apply to the image, ``False``
      otherwise.
    :rtype: (unicode | str) -> bool
    """
    # Image tags are of the form 'repo:tag'; only the ':tag' suffix is compared.
    wanted_suffixes = [':{0}'.format(tag) for tag in tags]

    def _check_image(image):
        repo_tags = image['RepoTags']
        if not repo_tags:
            return False
        return any(repo_tag.endswith(suffix)
                   for repo_tag in repo_tags
                   for suffix in wanted_suffixes)

    return _check_image
def primary_container_name(names, default=None, strip_trailing_slash=True):
    """
    From the list of names, finds the primary name of the container. Returns the defined default
    value (e.g. the container id or ``None``) in case it cannot find any.

    :param names: List with name and aliases of the container.
    :type names: list[unicode | str]
    :param default: Default value.
    :param strip_trailing_slash: As read directly from the Docker service, every container name
      includes a leading slash. Set this to ``False`` if it is already removed.
    :type strip_trailing_slash: bool
    :return: Primary name of the container.
    :rtype: unicode | str
    """
    # Names that contain another '/' past position 1 are link aliases, not primary names.
    if strip_trailing_slash:
        candidates = [name[1:] for name in names if name.find('/', 2) == -1]
    else:
        candidates = [name for name in names if name.find('/', 2) == -1]
    return candidates[0] if candidates else default
def add_extra_tags(self, image_id, main_tag, extra_tags, add_latest):
    """
    Adds extra tags to an image after de-duplicating tag names.

    :param image_id: Id of the image.
    :type image_id: unicode | str
    :param main_tag: Repo / tag specification that has been used to build the image. If present, the
      tag is removed from further arguments.
    :type main_tag: unicode | str
    :param extra_tags: Additional tags to add to the image.
    :type extra_tags: list | tuple | set | NoneType
    :param add_latest: Whether to add a ``latest`` tag to the image.
    :type add_latest: bool
    :return: Tags that were successfully added.
    :rtype: list
    :raise PartialResultsError: On the first tagging failure; carries the tags added so far.
    """
    repo, __, main_version = main_tag.rpartition(':')
    new_tags = set(extra_tags or ())
    if add_latest:
        new_tags.add('latest')
    # The main tag is already present on the image; do not apply it again.
    new_tags.discard(main_version)
    added_tags = []
    tag_kwargs = {}
    # API versions below DEPRECATED_FORCE_TAG_VERSION still expect the deprecated 'force' argument.
    if str(self.api_version) < DEPRECATED_FORCE_TAG_VERSION:
        tag_kwargs['force'] = True
    if repo and new_tags:
        for new_tag in new_tags:
            try:
                self.tag(image_id, repo, new_tag, **tag_kwargs)
            except:
                raise PartialResultsError(sys.exc_info(), added_tags)
            else:
                added_tags.append(new_tag)
    return added_tags
def push_log(self, info, level, *args, **kwargs):
    """
    Writes logs. To be fully implemented by subclasses.

    :param info: Log message content.
    :type info: unicode | str
    :param level: Logging level.
    :type level: int
    :param args: Positional arguments to pass to logger.
    :param kwargs: Keyword arguments to pass to logger.
    """
    log.log(level, info, *args, **kwargs)
def build_from_context(self, ctx, tag, **kwargs):
    """
    Builds a docker image from the given docker context with a `Dockerfile` file object.

    :param ctx: An instance of :class:`~.context.DockerContext`.
    :type ctx: dockermap.build.context.DockerContext
    :param tag: New image tag.
    :type tag: unicode | str
    :param kwargs: See :meth:`docker.client.Client.build`.
    :return: New, generated image id or `None`.
    :rtype: unicode | str
    """
    return self.build(fileobj=ctx.fileobj, tag=tag, custom_context=True,
                      encoding=ctx.stream_encoding, **kwargs)
def build_from_file(self, dockerfile, tag, **kwargs):
    """
    Builds a docker image from the given :class:`~dockermap.build.dockerfile.DockerFile`. Use this
    as a shortcut to :meth:`build_from_context`, if no extra data is added to the context.

    :param dockerfile: An instance of :class:`~dockermap.build.dockerfile.DockerFile`.
    :type dockerfile: dockermap.build.dockerfile.DockerFile
    :param tag: New image tag.
    :type tag: unicode | str
    :param kwargs: See :meth:`docker.client.Client.build`.
    :return: New, generated image id or ``None``.
    :rtype: unicode | str
    """
    # The context manager finalizes and closes the temporary build context.
    with DockerContext(dockerfile, finalize=True) as ctx:
        return self.build_from_context(ctx, tag, **kwargs)
def cleanup_containers(self, include_initial=False, exclude=None, raise_on_error=False, list_only=False):
    """
    Finds all stopped containers and removes them; by default does not remove containers that have
    never been started.

    :param include_initial: Consider containers that have never been started.
    :type include_initial: bool
    :param exclude: Container names to exclude from the cleanup process.
    :type exclude: collections.Iterable[unicode | str]
    :param raise_on_error: Forward errors raised by the client and cancel the process. By default
      only logs errors.
    :type raise_on_error: bool
    :param list_only: When set to ``True``, only lists containers, but does not actually remove them.
    :type list_only: bool
    :return: List of removed container names, or ``(id, name)`` tuples when ``list_only`` is set.
    :rtype: list
    :raise PartialResultsError: On a removal failure when ``raise_on_error`` is set; carries the
      containers removed so far.
    """
    exclude_names = set(exclude or ())

    def _stopped_containers():
        for container in self.containers(all=True):
            # Keep only primary names (no link aliases, which contain another '/' past position 1),
            # with the leading slash removed. The check must be '== -1', as in
            # primary_container_name(); the previous truthiness test never filtered anything.
            c_names = [name[1:] for name in container['Names'] or () if name.find('/', 2) == -1]
            c_status = container['Status']
            if (((include_initial and c_status == '') or c_status.startswith('Exited') or
                 c_status == 'Dead') and exclude_names.isdisjoint(c_names)):
                c_id = container['Id']
                c_name = primary_container_name(c_names, default=c_id, strip_trailing_slash=False)
                yield c_id, c_name

    stopped_containers = list(_stopped_containers())
    if list_only:
        return stopped_containers
    removed_containers = []
    for cid, cn in stopped_containers:
        try:
            self.remove_container(cn)
        except Exception:
            # remove_container already logs; only abort when requested.
            if raise_on_error:
                raise PartialResultsError(sys.exc_info(), removed_containers)
        else:
            removed_containers.append(cn)
    return removed_containers
def remove_all_containers(self, stop_timeout=10, list_only=False):
    """
    First stops (if necessary) and then removes all containers present on the Docker instance.

    :param stop_timeout: Timeout to stopping each container.
    :type stop_timeout: int
    :param list_only: When set to ``True`` only lists containers, but does not actually stop or
      remove them.
    :type list_only: bool
    :return: A tuple of two lists: Stopped container ids, and removed container ids.
    :rtype: (list[unicode | str], list[unicode | str])
    :raise PartialResultsError: On a stop or removal failure; carries the results so far.
    """
    containers = [(container['Id'], container['Status']) for container in self.containers(all=True)]
    running_containers = [c_id for c_id, status in containers
                          if not (status.startswith('Exited') or status == 'Dead')]
    if list_only:
        return running_containers, [c_id for c_id, __ in containers]
    stopped_containers = []
    for c_id in running_containers:
        try:
            self.stop(c_id, timeout=stop_timeout)
        except Timeout:
            # Docker kills the container after the stop timeout; wait for it to be gone.
            log.warning("Container %s did not stop in time - sent SIGKILL.", c_id)
            try:
                self.wait(c_id, timeout=stop_timeout)
            except Timeout:
                pass
        except:
            raise PartialResultsError(sys.exc_info(), (stopped_containers, []))
        else:
            stopped_containers.append(c_id)
    removed_containers = []
    for c_id, __ in containers:
        try:
            self.remove_container(c_id)
        except:
            raise PartialResultsError(sys.exc_info(), (stopped_containers, removed_containers))
        else:
            removed_containers.append(c_id)
    return stopped_containers, removed_containers
def get_container_names(self):
    """
    Fetches names of all present containers from Docker.

    :return: All container names, with the leading slash removed.
    :rtype: set
    """
    return {name[1:]
            for container in self.containers(all=True)
            for name in container['Names']}
def get_image_tags(self):
    """
    Fetches image labels (repository / tags) from Docker.

    :return: A dictionary, with image name and tags as the key and the image id as value.
    :rtype: dict
    """
    return {tag: image['Id']
            for image in self.images()
            for tag in image['RepoTags']}
def resolve_value(value, types=type_registry):
    """
    Returns the actual value for the given object, if it is a late-resolving object type.
    If not, the value itself is simply returned.

    :param value: Lazy object, registered type in :attr:`type_registry`, or a simple value. In the
      latter case, the value is returned as-is.
    :type value: str | unicode | int | AbstractLazyObject | unknown
    :return: Resolved value.
    """
    if value is None:
        return None
    if isinstance(value, lazy_type):
        return value.get()
    if types:
        # Types not derived from the lazy base class can be registered with a resolver function.
        resolver = types.get(expand_type_name(type(value)))
        if resolver:
            return resolver(value)
    return value
def resolve_deep(values, max_depth=5, types=None):
    """
    Resolves all late-resolving types into their current values to a certain depth in a dictionary
    or list.

    :param values: Values to resolve of any type.
    :param max_depth: Maximum depth to recurse into nested lists, tuples and dictionaries. Below
      that depth values are returned as they are.
    :type max_depth: int
    :param types: Dictionary of types and functions to resolve, that are not registered in
      ``type_registry``.
    :type types: dict[unicode | str, function]
    :return: Resolved values.
    """
    if types:
        all_types = type_registry.copy()
        all_types.update(types)
    else:
        all_types = type_registry

    def _resolve(item, depth):
        next_depth = depth + 1
        resolved = resolve_value(item, all_types)
        if next_depth < max_depth:
            if isinstance(resolved, (list, tuple)):
                return [_resolve(element, next_depth) for element in resolved]
            if isinstance(resolved, dict):
                # Keys are resolved shallowly; only values recurse further.
                return {resolve_value(key, all_types): _resolve(val, next_depth)
                        for key, val in iteritems(resolved)}
        return resolved

    return _resolve(values, -1)
def register_type(resolve_type, resolve_func):
    """
    Registers a type for lazy value resolution. Instances of AbstractLazyObject do not have to be
    registered. The exact type must be provided in ``resolve_type``, not a superclass of it. Types
    registered will be passed through the given function by :func:`resolve_value`.

    :param resolve_type: Type to consider during late value resolution.
    :type resolve_type: type
    :param resolve_func: Function to run for retrieving the original value. It needs to accept
      exactly one argument - the substitute value to resolve to the actual value.
    :type resolve_func: function
    """
    if not isinstance(resolve_type, type):
        raise ValueError("Expected type, got {0}.".format(type(resolve_type).__name__))
    if not callable(resolve_func):
        raise ValueError("Function is not callable.")
    # The registry is keyed on the fully-qualified type name.
    type_registry[expand_type_name(resolve_type)] = resolve_func
def get(self):
    """
    Resolves and returns the object value. Re-uses an existing previous evaluation, if applicable.

    :return: The result of evaluating the object.
    """
    if self._evaluated:
        return self._val
    self._val = self._func(*self._args, **self._kwargs)
    self._evaluated = True
    return self._val
def merge_dependency_paths(item_paths):
    """
    Utility function that merges multiple dependency paths, as far as they share dependencies. Paths
    are evaluated and merged in the incoming order. Later paths that are independent, but share some
    dependencies, are shortened by these dependencies. Paths that are contained in another entirely
    are discarded.

    :param item_paths: List or tuple of items along with their dependency path.
    :type item_paths: collections.Iterable[(Any, list[Any])]
    :return: List of merged or independent paths.
    :rtype: list[(Any, list[Any])]
    """
    merged = []
    for item, path in item_paths:
        superseded_indexes = []
        remaining = path
        remaining_set = set(remaining)
        for idx, (m_item, m_path, m_set) in enumerate(merged):
            if item in m_set:
                # The item itself appears in an earlier path; this path is fully contained.
                remaining = None
                break
            if m_item in remaining_set:
                # An earlier entry is part of this path and will be superseded by it.
                superseded_indexes.append(idx)
            elif m_set & remaining_set:
                # Drop dependencies already covered by the earlier path.
                remaining = [element for element in remaining if element not in m_set]
                remaining_set = set(remaining)
                if not remaining:
                    break
        for idx in reversed(superseded_indexes):
            merged.pop(idx)
        if remaining is not None:
            merged.append((item, remaining, remaining_set))
    return [(entry[0], entry[1]) for entry in merged]
def update_from_dict(self, dct):
    """
    Updates this configuration object from a dictionary.
    See :meth:`ConfigurationObject.update` for details.

    :param dct: Values to update the ConfigurationObject with.
    :type dct: dict
    """
    if not dct:
        return
    all_props = self.__class__.CONFIG_PROPERTIES
    for key, value in six.iteritems(dct):
        if all_props.get(key):
            # Known property: assign through the descriptor so input conversion applies.
            setattr(self, key, value)
        else:
            self.update_default_from_dict(key, value)
def update_from_obj(self, obj, copy=False):
    """
    Updates this configuration object from another.
    See :meth:`ConfigurationObject.update` for details.

    :param obj: Values to update the ConfigurationObject with.
    :type obj: ConfigurationObject
    :param copy: Copies lists and dictionaries.
    :type copy: bool
    """
    obj.clean()
    obj_config = obj._config
    all_props = self.__class__.CONFIG_PROPERTIES
    if not copy:
        # Shared references are acceptable: take over all known keys in one update.
        filtered_dict = {key: value
                         for key, value in six.iteritems(obj_config)
                         if key in all_props}
        self._config.update(filtered_dict)
        self._modified.difference_update(filtered_dict.keys())
        return
    for key, value in six.iteritems(obj_config):
        attr_config = all_props.get(key)
        if not attr_config:
            continue
        attr_type = attr_config.attr_type
        if attr_type:
            if issubclass(attr_type, list):
                self._config[key] = value[:]
            elif attr_type is dict:
                self._config[key] = value.copy()
            # NOTE(review): other non-empty attr_types are not copied here — presumably only
            # list and dict attr_types occur in practice; confirm against CONFIG_PROPERTIES.
        else:
            self._config[key] = value
        self._modified.discard(key)
def merge_from_dict(self, dct, lists_only=False):
    """
    Merges a dictionary into this configuration object.
    See :meth:`ConfigurationObject.merge` for details.

    :param dct: Values to update the ConfigurationObject with.
    :type dct: dict
    :param lists_only: Ignore single-value attributes and update dictionary options.
    :type lists_only: bool
    """
    if not dct:
        return
    self.clean()
    all_props = self.__class__.CONFIG_PROPERTIES
    for key, value in six.iteritems(dct):
        attr_config = all_props.get(key)
        if not attr_config:
            self.merge_default_from_dict(key, value, lists_only=lists_only)
            continue
        attr_type, default, input_func, merge_func = attr_config[:4]
        # Skip non-mergeable properties and values identical to the default.
        if merge_func is False or value == default:
            continue
        if lists_only and not (attr_type and issubclass(attr_type, list)):
            continue
        if input_func:
            value = input_func(value)
        self._merge_value(attr_type, merge_func, key, value)
def merge_from_obj(self, obj, lists_only=False):
    """
    Merges a configuration object into this one.
    See :meth:`ConfigurationObject.merge` for details.

    :param obj: Values to update the ConfigurationObject with.
    :type obj: ConfigurationObject
    :param lists_only: Ignore single-value attributes and update dictionary options.
    :type lists_only: bool
    """
    self.clean()
    obj.clean()
    all_props = self.__class__.CONFIG_PROPERTIES
    for key, value in six.iteritems(obj._config):
        attr_type, default, __, merge_func = all_props[key][:4]
        # Skip non-mergeable properties and values identical to the default.
        if merge_func is False or value == default:
            continue
        if lists_only and not (attr_type and issubclass(attr_type, list)):
            continue
        self._merge_value(attr_type, merge_func, key, value)
def update(self, values, copy_instance=False):
    """
    Updates the configuration with the contents of the given configuration object or dictionary.

    In case of a dictionary, only valid attributes for this class are considered. Existing
    attributes are replaced with the new values. The object is not cleaned before or after, i.e. may
    accept invalid input. In case of an update by object, that object is cleaned before the update,
    so that updated values should be validated. However, already-stored values are not cleaned
    before or after.

    :param values: Dictionary or ConfigurationObject to update this configuration with.
    :type values: dict | ConfigurationObject
    :param copy_instance: Copies lists and dictionaries. Only has an effect if ``values`` is a
      ConfigurationObject.
    :type copy_instance: bool
    """
    if isinstance(values, self.__class__):
        self.update_from_obj(values, copy=copy_instance)
    elif isinstance(values, dict):
        self.update_from_dict(values)
    else:
        raise ValueError("{0} or dictionary expected; found '{1}'.".format(
            self.__class__.__name__, type(values).__name__))
def merge(self, values, lists_only=False):
    """
    Merges list-based attributes into one list including unique elements from both lists. When
    ``lists_only`` is set to ``False``, updates dictionaries and overwrites single-value attributes.
    The resulting configuration is 'clean', i.e. input values converted and validated. If the
    conversion is not possible, a ``ValueError`` is raised.

    :param values: Values to update the ConfigurationObject with.
    :type values: dict | ConfigurationObject
    :param lists_only: Ignore single-value attributes and update dictionary options.
    :type lists_only: bool
    """
    if isinstance(values, self.__class__):
        self.merge_from_obj(values, lists_only=lists_only)
    elif isinstance(values, dict):
        self.merge_from_dict(values, lists_only=lists_only)
    else:
        raise ValueError("{0} or dictionary expected; found '{1}'.".format(
            self.__class__.__name__, type(values).__name__))
def clean(self):
    """
    Cleans the input values of this configuration object.

    Fields that have gotten updated through properties are converted to configuration values that
    match the format needed by functions using them. For example, for list-like values it means that
    input of single strings is transformed into a single-entry list. If this conversion fails, a
    ``ValueError`` is raised.
    """
    all_props = self.__class__.CONFIG_PROPERTIES
    for prop_name in self._modified:
        attr_config = all_props.get(prop_name)
        if attr_config and attr_config.input_func:
            # Run the property's converter over the raw value stored by the setter.
            self._config[prop_name] = attr_config.input_func(self._config[prop_name])
    self._modified.clear()
def as_dict(self):
    """
    Returns a copy of the configuration dictionary. Changes in this should not reflect on the
    original object.

    :return: Configuration dictionary.
    :rtype: dict
    """
    self.clean()
    result = OrderedDict()
    for attr_name, attr_config in six.iteritems(self.__class__.CONFIG_PROPERTIES):
        value = self._config[attr_name]
        attr_type = attr_config.attr_type
        if not attr_type:
            # Untyped attributes are included unless they were never set.
            if value is not NotSet:
                result[attr_name] = value
            continue
        if not value:
            continue
        if issubclass(attr_type, list):
            if issubclass(attr_type, NamedTupleList):
                # Named tuples are converted to plain dicts for serialization.
                result[attr_name] = [item._asdict() for item in value]
            else:
                result[attr_name] = value[:]
        elif attr_type is dict:
            result[attr_name] = dict(value)
    return result
def get_dependencies(self, item):
    """
    Performs a dependency check on the given item.

    :param item: Node to start the dependency check with.
    :return: The result on merged dependencies down the hierarchy.
    """
    def _resolve(node):
        entry = self._deps.get(node)
        if entry is None:
            return self.get_default()
        if entry.dependencies is NotInitialized:
            # Cache the merged result on the entry so each node is resolved only once.
            entry.dependencies = self.merge_dependency(node, _resolve, entry.parent)
        return entry.dependencies

    return _resolve(item)
def get(self, item):
    """
    Returns the direct dependencies or dependents of a single item. Does not follow the entire
    dependency path.

    :param item: Node to return dependencies for.
    :return: Immediate dependencies or dependents.
    """
    entry = self._deps.get(item)
    if entry is None:
        return self.get_default()
    return entry.parent
def merge_dependency(self, item, resolve_parent, parents):
    """
    Merge dependencies of element with further dependencies. First parent dependencies are checked,
    and then immediate dependencies of the current element should be added to the list, but without
    duplicating any entries.

    :param item: Item.
    :param resolve_parent: Function to resolve parent dependencies.
    :type resolve_parent: function
    :type parents: collections.Iterable
    :return: List of recursively resolved dependencies of this container.
    :rtype: list
    :raise CircularDependency: If the current element depends on one found deeper in the hierarchy.
    """
    merged = []
    for parent_key in parents:
        if item == parent_key:
            # Direct self-reference.
            raise CircularDependency(item, True)
        parent_dependencies = resolve_parent(parent_key)
        if item in parent_dependencies:
            # The item occurs again further down the hierarchy.
            raise CircularDependency(item)
        merge_list(merged, parent_dependencies)
    merge_list(merged, parents)
    return merged
def update(self, items):
    """
    Updates the dependencies in the inverse relationship format, i.e. from an iterable or dict that
    is structured as `(item, dependent_items)`. Note that this implementation is only valid for 1:1
    relationships, i.e. that each node has also exactly one dependent. For other cases,
    :class:`~MultiDependencyResolver` should be used.

    :param items: Iterable or dictionary in the format `(item, dependent_items)`.
    :type items: collections.Iterable
    """
    for parent, dependent in _iterate_dependencies(items):
        entry = self._deps[dependent]
        if parent not in entry.parent:
            entry.parent.append(parent)
def update(self, items):
    """
    Updates the dependencies with the given items. Note that this does not reset all
    previously-evaluated and cached nodes.

    :param items: Iterable or dictionary in the format `(dependent_item, dependencies)`.
    :type items: collections.Iterable
    """
    for item, parents in _iterate_dependencies(items):
        # merge_list adds only entries not already present.
        merge_list(self._deps[item].parent, parents)
def update(self, items):
    """
    Updates the dependencies in the inverse relationship format, i.e. from an iterable or dict that
    is structured as `(item, dependent_items)`. The parent element `item` may occur multiple times.

    :param items: Iterable or dictionary in the format `(item, dependent_items)`.
    :type items: collections.Iterable
    """
    for parent, dependents in _iterate_dependencies(items):
        for dependent in dependents:
            entry = self._deps[dependent]
            if parent not in entry.parent:
                entry.parent.append(parent)
def signal_stop(self, action, c_name, **kwargs):
    """
    Stops a container, either using the default client stop method, or sending a custom signal and
    waiting for the container to stop.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param c_name: Container name.
    :type c_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the configuration-based
      values.
    :type kwargs: dict
    """
    client = action.client
    sig = action.config.stop_signal
    stop_kwargs = self.get_container_stop_kwargs(action, c_name, kwargs=kwargs)
    wait_timeout = stop_kwargs.get('timeout', 10)
    if sig and sig != 'SIGTERM' and sig != signal.SIGTERM:
        # A custom stop signal is configured: send it, then wait for the container to exit.
        log.debug("Sending signal %s to the container %s and waiting for stop.", sig, c_name)
        client.kill(c_name, signal=sig)
        client.wait(c_name, timeout=wait_timeout)
    else:
        try:
            client.stop(**stop_kwargs)
        except Timeout:
            # Docker escalates to SIGKILL after the stop timeout; wait for it to take effect.
            log.warning("Container %s did not stop in time - sent SIGKILL.", c_name)
            try:
                client.wait(c_name, timeout=wait_timeout)
            except Timeout:
                pass
def preprocess_matches(input_items):
    """
    Converts, as far as possible, Go filepath.Match patterns into Python regular expression
    patterns. Blank lines are ignored.

    This is a generator of two-element tuples, with the first item as the compiled regular
    expression. Items prefixed with an exclamation mark are considered negative exclusions, i.e.
    exemptions. These have the second tuple item set to ``True``, others to ``False``.

    :param input_items: Input patterns to convert.
    :return: Generator of converted patterns.
    :rtype: collections.Iterable[(__RegEx, bool)]
    """
    for raw_item in input_items:
        stripped = raw_item.strip()
        if not stripped:
            continue
        is_negative = stripped[0] == '!'
        if is_negative:
            match_str = stripped[1:]
            # A bare '!' with nothing following is skipped.
            if not match_str:
                continue
        else:
            match_str = stripped
        # Wrap characters matched by LITERAL_PATTERN into a character class, so the fnmatch
        # translation treats them literally, then compile the translated glob.
        yield re.compile(fnmatch.translate(LITERAL_PATTERN.sub(r'[\g<1>]', match_str))), is_negative
def get_exclusions(path):
    """
    Generates exclusion patterns from a ``.dockerignore`` file located in the given path. Returns
    ``None`` if the file does not exist.

    :param path: Path to look up the ``.dockerignore`` in.
    :type path: unicode | str
    :return: List of patterns, that can be passed into :func:`get_filter_func`.
    :rtype: list[(__RegEx, bool)] | NoneType
    """
    if not os.path.isdir(path):
        return None
    dockerignore_file = os.path.join(path, '.dockerignore')
    if not os.path.isfile(dockerignore_file):
        return None
    # Open in text mode: preprocess_matches compares lines against str literals ('!') and applies
    # str regex substitution, which fails on bytes lines under Python 3 (previously opened 'rb').
    with open(dockerignore_file) as dif:
        return list(preprocess_matches(dif.readlines()))
def get_filter_func(patterns, prefix):
    """
    Provides a filter function that can be used as filter argument on ``tarfile.add``. Generates the
    filter based on the patterns and prefix provided. Patterns should be a list of tuples. Each
    tuple consists of a compiled RegEx pattern and a boolean, indicating if it is an ignore entry or
    a negative exclusion (i.e. an exemption from exclusions). The prefix is used to match relative
    paths inside the tar file, and is removed from every entry passed into the functions.

    Note that all names passed into the returned function must be paths under the provided prefix.
    This condition is not checked!

    :param patterns: List of patterns and negative indicator.
    :type patterns: list[(__RegEx, bool)]
    :param prefix: Prefix to strip from all file names passed in. Leading and trailing path
      separators are removed.
    :type prefix: unicode | str
    :return: tarinfo.TarInfo -> tarinfo.TarInfo | NoneType
    """
    prefix_len = len(prefix.strip(os.path.sep)) + 1
    has_exemptions = any(is_negative for __, is_negative in patterns)
    if has_exemptions:
        def _exclusion_func(tarinfo):
            rel_name = tarinfo.name[prefix_len:]
            excluded = False
            # Patterns apply in order; a later exemption can undo an earlier exclusion.
            for regex, is_negative in patterns:
                if is_negative:
                    if excluded and regex.match(rel_name) is not None:
                        excluded = False
                elif not excluded and regex.match(rel_name) is not None:
                    excluded = True
            return None if excluded else tarinfo
    else:
        # Optimized version: If there are no exemptions from matches, not all matches have to be processed.
        exclusions = [regex for regex, __ in patterns]

        def _exclusion_func(tarinfo):
            rel_name = tarinfo.name[prefix_len:]
            if any(regex.match(rel_name) is not None for regex in exclusions):
                return None
            return tarinfo

    return _exclusion_func
def add(self, name, arcname=None, **kwargs):
    """
    Add a file or directory to the context tarball.

    :param name: File or directory path.
    :type name: unicode | str
    :param arcname: Alternative name inside the archive.
    :type arcname: unicode | str
    :param kwargs: Additional kwargs for :meth:`tarfile.TarFile.add`.
    """
    if os.path.isdir(name):
        exclusions = get_exclusions(name)
        if exclusions:
            # Honor a .dockerignore inside the directory, unless the caller set its own filter.
            target_prefix = os.path.abspath(arcname or name)
            kwargs.setdefault('filter', get_filter_func(exclusions, target_prefix))
    self.tarfile.add(name, arcname=arcname, **kwargs)
def addarchive(self, name):
    """
    Add (i.e. copy) the contents of another tarball to this one.

    :param name: File path to the tar archive.
    :type name: unicode | str
    """
    with tarfile.open(name, 'r') as source:
        for member in source.getmembers():
            self.tarfile.addfile(member, source.extractfile(member.name))
def add_dockerfile(self, dockerfile):
    """
    Add a Dockerfile to the context. If it is a :class:`DockerFile` instance, files and archive
    contents added there will automatically be copied to the tarball. The :class:`DockerFile` will
    be finalized.

    :param dockerfile: :class:`DockerFile` instance or file path to a Dockerfile.
    :type dockerfile: DockerFile | unicode | str
    """
    if not isinstance(dockerfile, DockerFile):
        # A plain path: copy the file into the archive as 'Dockerfile'.
        self.add(dockerfile, arcname='Dockerfile')
        return
    dockerfile.finalize()
    for path, arcname in dockerfile._files:
        self.add(path, arcname=arcname)
    for archive in dockerfile._archives:
        self.addarchive(archive)
    fileobj = dockerfile.fileobj
    tarinfo = tarfile.TarInfo('Dockerfile')
    # The buffer's current write position is its total size; rewind before adding.
    tarinfo.size = fileobj.tell()
    fileobj.seek(0)
    self.tarfile.addfile(tarinfo, fileobj)
def save(self, name):
    """
    Saves the entire Docker context tarball to a separate file.

    :param name: File path to save the tarball into.
    :type name: unicode | str
    """
    with open(name, 'wb+') as target:
        while True:
            chunk = self._fileobj.read()
            if not chunk:
                break
            target.write(chunk)
def update_kwargs(kwargs, *updates):
    """
    Utility function for merging multiple keyword arguments, depending on their type:

    * Non-existent keys are added.
    * Existing lists or tuples are extended, but not duplicating entries.
      The keywords ``command`` and ``entrypoint`` are however simply overwritten.
    * Nested dictionaries are updated, overriding previous key-value assignments.
    * Other items are simply overwritten (just like in a regular dictionary update) unless the
      updating value is ``None``.

    Lists/tuples and dictionaries are (shallow-)copied before adding and late-resolving values are
    looked up. This function does not recurse.

    :param kwargs: Base keyword arguments. This is modified in-place.
    :type kwargs: dict
    :param updates: Dictionaries to update ``kwargs`` with.
    :type updates: dict
    """
    for update in updates:
        if not update:
            continue
        for key, val in six.iteritems(update):
            u_item = resolve_value(val)
            if u_item is None:
                continue
            # Fixed: previously written as "key in ('command' or 'entrypoint')", which evaluates
            # to "key in 'command'", so 'entrypoint' was never treated as overwrite-only.
            if key in ('command', 'entrypoint'):
                kwargs[key] = u_item
            elif isinstance(u_item, (tuple, list)):
                kw_item = kwargs.get(key)
                u_list = map(resolve_value, u_item)
                if isinstance(kw_item, list):
                    # Extend the existing list in place, skipping duplicates.
                    merge_list(kw_item, u_list)
                elif isinstance(kw_item, tuple):
                    new_list = list(kw_item)
                    merge_list(new_list, u_list)
                    kwargs[key] = new_list
                else:
                    kwargs[key] = list(u_list)
            elif isinstance(u_item, dict):
                kw_item = kwargs.get(key)
                u_dict = {u_k: resolve_value(u_v) for u_k, u_v in six.iteritems(u_item)}
                if isinstance(kw_item, dict):
                    kw_item.update(u_dict)
                else:
                    kwargs[key] = u_dict
            else:
                kwargs[key] = u_item
def get_port_bindings(container_config, client_config):
    """
    Generates the input dictionary contents for the ``port_bindings`` argument.

    :param container_config: Container configuration.
    :type container_config: dockermap.map.config.container.ContainerConfiguration
    :param client_config: Client configuration.
    :type client_config: dockermap.map.config.client.ClientConfiguration
    :return: Dictionary of ports with mapped port, and if applicable, with bind address
    :rtype: dict[unicode | str, list[unicode | str | int | tuple]]
    """
    if_ipv4 = client_config.interfaces
    if_ipv6 = client_config.interfaces_ipv6
    # groupby requires the input to be sorted by the same key.
    grouped = itertools.groupby(sorted(container_config.exposes, key=_get_ex_port), _get_ex_port)
    port_bindings = {}
    for exposed_port, ex_port_bindings in grouped:
        bind_list = list(_get_port_bindings(ex_port_bindings, if_ipv4, if_ipv6))
        if bind_list:
            port_bindings[exposed_port] = bind_list
    return port_bindings
def get_preparation_cmd(user, permissions, path):
    """
    Generates the command lines for adjusting a volume's ownership and permission flags. Returns an empty
    iterator if there is nothing to adjust.

    :param user: User to set ownership for on the path via ``chown``.
    :type user: unicode | str | int | dockermap.functional.AbstractLazyObject
    :param permissions: Permission flags to set via ``chmod``.
    :type permissions: unicode | str | dockermap.functional.AbstractLazyObject
    :param path: Path to adjust permissions on.
    :type path: unicode | str
    :return: Iterator over resulting command strings.
    :rtype: collections.Iterable[unicode | str]
    """
    r_user = resolve_value(user)
    r_permissions = resolve_value(permissions)
    # Fix: test the *resolved* values instead of the raw inputs. A lazy object is
    # itself truthy even when it resolves to None or an empty string, which would
    # previously have emitted an invalid ``chown``/``chmod`` command.
    if r_user:
        yield chown(r_user, path)
    if r_permissions:
        yield chmod(r_permissions, path)
def get_urlhash(self, url, fmt):
    """Returns the url (passed through *fmt*) and the sha256 hash of the file behind an internal url."""
    filename = os.path.basename(url)
    with self.open(filename) as f:
        digest = filehash(f, 'sha256')
    return {'url': fmt(url), 'sha256': digest}
def package_releases(self, package, url_fmt=lambda u: u):
    """
    List all versions of a package.

    Along with the version, the caller also receives the file list with all
    the available formats.
    """
    releases = []
    for version, files in self.storage.get(package, {}).items():
        releases.append({
            'name': package,
            'version': version,
            'urls': [self.get_urlhash(f, url_fmt) for f in files],
        })
    return releases
def inspect(self):
    """Fetches information about the container from the client."""
    policy = self.policy
    cid = self.config_id
    if cid.config_type == ItemType.VOLUME:
        # Attached volume containers are named with or without the parent config name.
        if self.container_map.use_attached_parent_name:
            name = policy.aname(cid.map_name, cid.instance_name, cid.config_name)
        else:
            name = policy.aname(cid.map_name, cid.instance_name)
    else:
        name = policy.cname(cid.map_name, cid.config_name, cid.instance_name)
    self.container_name = name
    known_names = policy.container_names[self.client_name]
    self.detail = self.client.inspect_container(name) if name in known_names else NOT_FOUND
def inspect(self):
    """Inspects the network state."""
    if not self.client_config.features['networks']:
        raise ValueError("Client does not support network configuration.", self.client_name)
    cid = self.config_id
    net_name = self.policy.nname(cid.map_name, cid.config_name)
    self.network_name = net_name
    existing = self.policy.network_names[self.client_name]
    self.detail = self.client.inspect_network(net_name) if net_name in existing else NOT_FOUND
def inspect(self):
    """Inspects the volume state."""
    if not self.client_config.features['volumes']:
        raise ValueError("Client does not support volume configuration.", self.client_name)
    cid = self.config_id
    if self.container_map.use_attached_parent_name:
        parent = cid.config_name
    else:
        parent = None
    vol_name = self.policy.aname(cid.map_name, cid.instance_name, parent_name=parent)
    self.volume_name = vol_name
    existing = self.policy.volume_names[self.client_name]
    self.detail = self.client.inspect_volume(vol_name) if vol_name in existing else NOT_FOUND
def inspect(self):
    """Fetches image information from the client."""
    cid = self.config_id
    tag = format_image_tag((cid.config_name, cid.instance_name))
    image_id = self.policy.images[self.client_name].get(tag)
    # Currently there is no need for actually inspecting the image; the id suffices.
    self.detail = {'Id': image_id} if image_id else NOT_FOUND
def generate_config_states(self, config_id, config_flags=ConfigFlags.NONE):
    """
    Generates state information for a single item, which can be either a dependency or an explicitly
    selected configuration.

    :param config_id: Configuration id tuple.
    :type config_id: dockermap.map.input.MapConfigId
    :param config_flags: Optional configuration flags.
    :type config_flags: dockermap.map.policy.ConfigFlags
    :return: Generator for container state information.
    :rtype: collections.Iterable[dockermap.map.state.ContainerConfigStates]
    """
    c_map = self._policy.container_maps[config_id.map_name]
    client_names = c_map.clients or [self._policy.default_client_name]
    item_type = config_id.config_type
    for client_name in client_names:
        if item_type == ItemType.CONTAINER:
            state = self.get_container_state(client_name, config_id, config_flags)
        elif item_type == ItemType.VOLUME:
            # Clients without native volume support fall back to attached containers.
            if self._policy.clients[client_name].features['volumes']:
                state = self.get_volume_state(client_name, config_id, config_flags)
            else:
                state = self.get_container_state(client_name, config_id, config_flags)
        elif item_type == ItemType.NETWORK:
            state = self.get_network_state(client_name, config_id, config_flags)
        elif item_type == ItemType.IMAGE:
            state = self.get_image_state(client_name, config_id, config_flags)
        else:
            raise ValueError("Invalid configuration type.", item_type)
        state.inspect()
        # Extract base state, state flags, and extra info.
        info = ConfigState(client_name, config_id, config_flags, *state.get_state())
        log.debug("Configuration state information: %s", info)
        yield info
def get_states(self, config_ids):
    """
    Generates state information for the selected containers.

    :param config_ids: List of MapConfigId tuples.
    :type config_ids: list[dockermap.map.input.MapConfigId]
    :return: Iterable of configuration states.
    :rtype: collections.Iterable[dockermap.map.state.ConfigState]
    """
    return itertools.chain.from_iterable(map(self.generate_config_states, config_ids))
def get_states(self, config_ids):
    """
    Generates state information for the selected container and its dependencies / dependents.

    :param config_ids: MapConfigId tuples.
    :type config_ids: list[dockermap.map.input.MapConfigId]
    :return: Iterable of configuration states.
    :rtype: collections.Iterable[dockermap.map.state.ConfigState]
    """
    paths = [(config_id, list(self.get_dependency_path(config_id)))
             for config_id in config_ids]
    log.debug("Dependency paths from input: %s", paths)
    merged = merge_dependency_paths(paths)
    log.debug("Merged dependency paths: %s", merged)
    return itertools.chain.from_iterable(self._get_all_states(config_id, path)
                                         for config_id, path in merged)
def get_attached_preparation_host_config_kwargs(self, action, container_name, volume_container, kwargs=None):
    """
    Generates keyword arguments for the Docker client to set up the HostConfig for preparing an attached
    container (i.e. adjust user and permissions) or start the preparation.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
    :type container_name: unicode | str | NoneType
    :param volume_container: Name of the volume or name of the container that shares the volume.
    :type volume_container: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    if action.client_config.features['volumes']:
        # Native volume support: bind the volume to the temporary preparation path.
        c_kwargs = {'binds': ['{0}:{1}'.format(volume_container, PREPARATION_TMP_PATH)]}
    else:
        # No volume support: share volumes from the attached container instead.
        c_kwargs = {'volumes_from': [volume_container]}
    if container_name:
        c_kwargs['container'] = container_name
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_attached_preparation_wait_kwargs(self, action, container_name, kwargs=None):
    """
    Generates keyword arguments for waiting for a container when preparing a volume. The container name may be
    the container being prepared, or the id of the container calling preparation commands.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    # Fix: the original assigned ``c_kwargs = dict(container=container_name)`` twice;
    # the redundant duplicate has been removed.
    c_kwargs = dict(container=container_name)
    wait_timeout = action.client_config.get('wait_timeout')
    if wait_timeout is not None:
        c_kwargs['timeout'] = wait_timeout
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def _prepare_container(self, client, action, volume_container, volume_alias):
    """
    Runs a temporary container for preparing an attached volume for a container configuration.

    :param client: Docker client.
    :type client: docker.Client
    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param volume_container: Name of the container that shares the volume.
    :type volume_container: unicode | str
    :param volume_alias: Volume alias that is used for map references, for looking up paths.
    :type volume_alias: unicode | str
    """
    create_kwargs = self.get_attached_preparation_create_kwargs(action, volume_container, volume_alias)
    if not create_kwargs:
        # Nothing to prepare for this volume.
        return
    wait_kwargs = self.get_attached_preparation_wait_kwargs(action, volume_container)
    client.wait(volume_container, **wait_kwargs)
    tmp_id = client.create_container(**create_kwargs)['Id']
    try:
        if action.client_config.features['host_config']:
            client.start(tmp_id)
        else:
            # Without host-config support on create, pass volume bindings on start.
            start_kwargs = self.get_attached_preparation_host_config_kwargs(action, tmp_id, volume_container)
            client.start(**start_kwargs)
        tmp_wait_kwargs = self.get_attached_preparation_wait_kwargs(action, tmp_id)
        client.wait(tmp_id, **tmp_wait_kwargs)
    finally:
        # Always clean up the temporary container, even if start or wait failed.
        client.remove_container(tmp_id)
def prepare_attached(self, action, a_name, **kwargs):
    """
    Prepares an attached volume for a container configuration, adjusting ownership and permissions either
    locally (when the client supports ``run_cmd``) or through a temporary container.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param a_name: The full name or id of the container sharing the volume.
    :type a_name: unicode | str
    """
    client = action.client
    config_id = action.config_id
    policy = self._policy
    map_name = config_id.map_name
    if action.container_map.use_attached_parent_name:
        v_alias = '{0.config_name}.{0.instance_name}'.format(config_id)
    else:
        v_alias = config_id.instance_name
    user = policy.volume_users[map_name][v_alias]
    permissions = policy.volume_permissions[map_name][v_alias]
    run_locally = self.prepare_local and hasattr(client, 'run_cmd')
    if not run_locally:
        # Fall back to running the preparation commands in a temporary container.
        return self._prepare_container(client, action, a_name, v_alias)
    if action.client_config.features['volumes']:
        local_path = client.inspect_volume(a_name)['Mountpoint']
    else:
        detail = client.inspect_container(a_name)
        volumes = get_instance_volumes(detail, False)
        path = resolve_value(policy.default_volume_paths[map_name][v_alias])
        local_path = volumes.get(path)
        if not local_path:
            raise ValueError("Could not locate local path of volume alias '{0}' / "
                             "path '{1}' in container {2}.".format(action.config_id.instance_name, path, a_name))
    commands = get_preparation_cmd(user, permissions, local_path)
    return [client.run_cmd(cmd) for cmd in commands]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.