code
stringlengths 52
7.75k
| docs
stringlengths 1
5.85k
|
---|---|
def extract_user(user_value):
    """
    Extract the user for running a container from the following possible input formats:

    * Integer (UID)
    * User name string
    * Tuple of ``user, group``
    * String in the format ``user:group``

    :param user_value: User name, uid, user-group tuple, or user:group string.
    :type user_value: int | tuple | unicode | str
    :return: User name or id, or ``None`` if no user is set.
    :rtype: unicode | str
    """
    user = resolve_value(user_value)
    # Zero is a valid UID (root), so 0 and '0' must survive the falsy check.
    if not user and user not in (0, '0'):
        return None
    if isinstance(user, tuple):
        # (user, group): only the user part matters here.
        return user[0]
    if isinstance(user, six.integer_types):
        return six.text_type(user)
    # 'user:group' string or plain user name: drop the group suffix.
    return user.partition(':')[0]
def get_shared_volume_path(container_map, vol, instance=None):
    """
    Resolves a volume alias of a container configuration or a tuple of two paths to the
    host and container paths.

    :param container_map: Container map.
    :type container_map: dockermap.map.config.main.ContainerMap
    :param vol: SharedVolume or HostVolume tuple.
    :type vol: dockermap.map.input.HostVolume | dockermap.map.input.SharedVolume
    :param instance: Optional instance name.
    :type instance: unicode | str
    :return: Tuple of container bind path and host path.
    :rtype: tuple[unicode | str]
    """
    if isinstance(vol, HostVolume):
        container_path = resolve_value(vol.path)
        if not is_path(container_path):
            raise ValueError("Host-container-binding must be described by two paths or one alias name.",
                             vol)
        host_path = get_host_path(container_map.host.root, vol.host_path, instance)
        return container_path, host_path
    # SharedVolume: look up the alias on the map's volume and host configuration.
    alias = vol.name
    volume_config = resolve_value(container_map.volumes.get(alias))
    host_path = container_map.host.get_path(alias, instance)
    if volume_config and host_path:
        return volume_config.default_path, host_path
    raise KeyError("No host-volume information found for alias {0}.".format(alias))
def get_instance_volumes(instance_detail, check_names):
    """
    Extracts the mount points and mapped directories or names of a Docker container.

    :param instance_detail: Result from a container inspection.
    :type instance_detail: dict
    :param check_names: Whether to check for named volumes.
    :type check_names: bool
    :return: Dictionary of volumes, with the destination (inside the container) as a key,
      and the source (external to the container) as values. If ``check_names`` is ``True``,
      the value contains the mounted volume name instead (falling back to the source path
      for unnamed mounts).
    :rtype: dict[unicode | str, unicode | str]
    """
    if 'Mounts' not in instance_detail:
        # Older API versions report a flat 'Volumes' dictionary instead.
        return instance_detail.get('Volumes') or {}
    mounts = instance_detail['Mounts']
    if check_names:
        return {mount['Destination']: mount.get('Name') or mount['Source']
                for mount in mounts}
    return {mount['Destination']: mount['Source']
            for mount in mounts}
def web_upload(self, package):
    """
    Stores an uploaded file in the package index. The caller names the package
    (its basename).

    NOTE(review): the original docs claim this is idempotent and skips packages that
    already exist — that behavior would have to live in ``index.from_data``, which is
    not visible here; confirm against its implementation.

    :param package: Package (base)name; also the key of the uploaded file in the request.
    :return: Literal ``'ok'`` response body.
    """
    uploaded = request.files[package]
    self.index.from_data(package, uploaded.read())
    return 'ok'
def merge_list(merged_list, items):
    """
    Merges items into a list, appending while ignoring duplicates but retaining the
    original order. This modifies the list in place and does not return anything.

    :param merged_list: List to append de-duplicated items to.
    :type merged_list: list
    :param items: Items to merge into the list.
    :type items: collections.Iterable
    """
    if not items:
        return
    # Track everything already present so duplicates within `items` are skipped too.
    seen = set(merged_list)
    for item in items:
        if item not in seen:
            seen.add(item)
            merged_list.append(item)
def disconnect_all_containers(self, action, network_name, containers, **kwargs):
    """
    Disconnects all given containers from a network.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param network_name: Network name or id.
    :type network_name: unicode | str
    :param containers: Container names or ids.
    :type containers: collections.Iterable[unicode | str]
    :param kwargs: Additional keyword arguments.
    :type kwargs: dict
    """
    client = action.client
    for container in containers:
        client.disconnect_container_from_network(
            **self.get_network_disconnect_kwargs(action, network_name, container, kwargs=kwargs))
def connect_networks(self, action, container_name, endpoints, skip_first=False, **kwargs):
    """
    Connects a container to a set of configured networks. By default this assumes the
    container has just been created, so it will skip the first network that is already
    considered during creation.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param endpoints: Network endpoint configurations.
    :type endpoints: list[dockermap.map.input.NetworkEndpoint]
    :param skip_first: Skip the first network passed in ``endpoints``. Defaults to
      ``False``, but should be set to ``True`` when the container has just been created
      and the first network has been set up there.
    :type skip_first: bool
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    """
    # Nothing to do when there are no endpoints, or only the already-connected first one.
    if not endpoints or (skip_first and len(endpoints) <= 1):
        return
    client = action.client
    map_name = action.config_id.map_name
    nname = self._policy.nname
    if skip_first:
        endpoints = islice(endpoints, 1, None)
    for endpoint in endpoints:
        network_name = nname(map_name, endpoint.network_name)
        connect_kwargs = self.get_network_connect_kwargs(action, network_name, container_name,
                                                         endpoint, kwargs=kwargs)
        client.connect_container_to_network(**connect_kwargs)
def disconnect_networks(self, action, container_name, networks, **kwargs):
    """
    Disconnects a container from a set of networks.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param networks: List of network names or ids.
    :type networks: collections.Iterable[unicode | str]
    :param kwargs: Additional keyword arguments.
    :type kwargs: dict
    """
    client = action.client
    for network_name in networks:
        client.disconnect_container_from_network(
            **self.get_network_disconnect_kwargs(action, network_name, container_name, kwargs=kwargs))
def connect_all_networks(self, action, container_name, **kwargs):
    """
    Connects a container to all of its configured networks. Assuming that this is
    typically used after container creation, where the first endpoint is already defined,
    this skips the first configuration. Pass ``skip_first`` as ``False`` to change this.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    """
    # Default to skipping the first endpoint, which is set up during creation.
    kwargs.setdefault('skip_first', True)
    self.connect_networks(action, container_name, action.config.networks, **kwargs)
def save(self, name):
    """
    Save the string buffer to a file. Finalizes prior to saving.

    :param name: File path.
    :type name: unicode | str
    """
    self.finalize()
    with open(name, 'wb+') as f:
        if six.PY3:
            # Zero-copy view of the in-memory buffer.
            f.write(self.fileobj.getbuffer())
        else:
            # Python 2: the buffer holds text; encode before the binary write.
            f.write(self.fileobj.getvalue().encode('utf-8'))
def save(self, name):
    """
    Copy the contents of the temporary file somewhere else. Finalizes prior to saving.

    :param name: File path.
    :type name: unicode | str
    """
    self.finalize()
    with open(name, 'wb+') as f:
        # Copy until the source is exhausted (read() returns an empty string).
        while True:
            chunk = self._fileobj.read()
            if not chunk:
                break
            f.write(chunk)
def is_path(value):
    """
    Checks whether the given value represents a path, i.e. a string which starts with an
    indicator for absolute or relative paths.

    NOTE(review): falsy input (e.g. ``None`` or ``''``) is returned unchanged rather than
    as ``False``; callers only rely on truthiness.

    :param value: Value to check.
    :return: ``True``, if the value appears to be representing a path.
    :rtype: bool
    """
    if not value:
        return value
    if not isinstance(value, six.string_types):
        return False
    return value[0] == posixpath.sep or value[:2] == CURRENT_DIR
def get_list(value):
    """
    Wraps the given value in a list. ``None`` returns an empty list. Lists and tuples are
    returned as lists. Single strings and registered types are wrapped in a list; ``NotSet``
    is passed through unchanged.

    :param value: Value to return as a list.
    :return: List with the provided value(s).
    :rtype: list
    """
    if value is None:
        return []
    if value is NotSet:
        return NotSet
    if isinstance(value, (list, tuple)):
        return list(value)
    if isinstance(value, six.string_types + (lazy_type, )) or uses_type_registry(value):
        # Single scalar value: wrap it.
        return [value]
    raise ValueError("Invalid type; expected a list, tuple, or string type, found {0}.".format(
        type(value).__name__))
def get_network_mode(value):
    """
    Generates input for the ``network_mode`` of a Docker host configuration. If it points
    at a container, the configuration of the container is returned.

    :param value: Network mode input.
    :type value: unicode | str | tuple | list | NoneType
    :return: Network mode or container to re-use the network stack of.
    :rtype: unicode | str | tuple | NoneType
    :raise ValueError: If a tuple or list input does not have exactly two elements.
    """
    if not value or value == 'disabled':
        return 'none'
    if isinstance(value, (tuple, list)):
        if len(value) == 2:
            return tuple(value)
        # Bug fix: this was `return ValueError(...)`, which handed the exception object
        # back to the caller instead of raising it.
        raise ValueError("Tuples or lists need to have length 2 for container network references.")
    if value in DEFAULT_PRESET_NETWORKS:
        return value
    if value.startswith('container:'):
        return value
    if value.startswith('/'):
        # Container name given as path: convert to a container reference.
        return 'container:{0}'.format(value[1:])
    # 'config.instance' string referring to a container configuration on the map.
    ref_name, __, ref_instance = value.partition('.')
    return ref_name, ref_instance or None
def get_healthcheck(value):
    """
    Converts input into a :class:`HealthCheck` tuple. Input can be passed as string,
    tuple, list, or a dictionary.

    NOTE(review): the original docs state that ``None`` sets the health check to ``NONE``;
    as written, ``None`` falls through to the ``ValueError`` below — confirm whether
    callers filter out ``None`` beforehand.

    :param value: Health check input.
    :type value: unicode | str | tuple | list | NoneType
    :return: HealthCheck tuple.
    :rtype: HealthCheck
    """
    if isinstance(value, HealthCheck):
        return value
    if isinstance(value, six.string_types + (lazy_type,)) or uses_type_registry(value):
        return HealthCheck(value)
    if isinstance(value, (tuple, list)):
        return HealthCheck(*value)
    if isinstance(value, dict):
        return HealthCheck(**value)
    raise ValueError(
        "Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__))
def get_type_item(self, value):
    """
    Converts the input to a ``SharedVolume`` or ``HostVolume`` tuple for a host bind.
    Input can be a single string, a list or tuple, or a single-entry dictionary.

    Single values are assumed to be volume aliases for read-write access. Tuples or lists
    with two elements can be ``(alias, read-only indicator)``, or
    ``(container path, host path)``. The latter is assumed, unless the second element is
    boolean or a string of either ``ro`` or ``rw``, indicating read-only or read-write
    access for a volume alias. Three elements are always used as
    ``(container path, host path, read-only indicator)``.

    Nested values are unpacked, so that valid input formats are also
    ``{container path: (host path, read-only)}`` or ``(container_path: [host path, read-only])``.

    :param value: Input value for conversion.
    :return: A SharedVolume or HostVolume tuple.
    :rtype: SharedVolume
    """
    if isinstance(value, (HostVolume, SharedVolume)):
        return value
    if isinstance(value, six.string_types):
        # Plain alias string: read-write shared volume.
        return SharedVolume(value, False)
    if isinstance(value, (list, tuple)):
        return _shared_host_volume_from_tuple(*value)
    if isinstance(value, dict):
        if len(value) == 1:
            key, contents = list(value.items())[0]
            if key == 'name':
                return SharedVolume(contents)
            if isinstance(contents, (list, tuple)):
                # {container path: (host path, read-only)} - unpack the nested value.
                return _shared_host_volume_from_tuple(key, *contents)
            return _shared_host_volume_from_tuple(key, contents)
        if 'path' in value:
            return HostVolume(**value)
        return SharedVolume(**value)
    raise ValueError(
        "Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__))
def get_type_item(self, value):
    """
    Converts the given value to a ``UsedVolume`` or ``SharedVolume`` tuple for attached
    volumes. It accepts strings, lists, tuples, and dicts as input.

    For strings and collections of a single element, the first item is considered to be
    an alias for lookup on the map. It is converted to a ``SharedVolume`` tuple.
    For two-element collections, the first item defines a new volume alias that can be
    re-used by other instances and the second item is considered to be the mount point
    for the volume.

    All attached volumes are considered as read-write access.

    :param value: Input value for conversion.
    :return: UsedVolume or SharedVolume tuple.
    :rtype: UsedVolume | SharedVolume
    """
    if isinstance(value, (UsedVolume, SharedVolume)):
        if value.readonly:
            raise ValueError("Attached volumes should not be read-only.")
        return value
    if isinstance(value, six.string_types):
        return SharedVolume(value)
    if isinstance(value, (list, tuple)):
        item_count = len(value)
        if item_count == 2:
            # Falsy second element means "alias only", i.e. no explicit mount point.
            if value[1]:
                return UsedVolume(value[0], value[1])
            return SharedVolume(value[0])
        if item_count == 1:
            return SharedVolume(value[0])
        raise ValueError("Invalid element length; only tuples and lists of length 1-2 can be converted to a "
                         "UsedVolume or SharedVolume tuple; found length {0}.".format(item_count))
    if isinstance(value, dict):
        if len(value) == 1:
            key, contents = list(value.items())[0]
            if key == 'name':
                return SharedVolume(contents)
            return UsedVolume(key, contents)
        if 'path' in value:
            return UsedVolume(**value)
        return SharedVolume(**value)
    raise ValueError(
        "Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__))
def get_type_item(self, value):
    """
    Converts the input to an ExecCommand tuple. It can be from a single string, list, or
    tuple. Single values (also single-element lists or tuples) are considered a command
    string, whereas two-element items are read as ``(command string, user name)``.

    :param value: Input value for conversion.
    :return: ExecCommand tuple.
    :rtype: ExecCommand
    """
    if isinstance(value, ExecCommand):
        return value
    if isinstance(value, six.string_types + (lazy_type,)):
        return ExecCommand(value)
    if isinstance(value, (list, tuple)):
        item_count = len(value)
        # Up to three positional elements: command, user, policy.
        if 1 <= item_count <= 3:
            return ExecCommand(*value)
        raise ValueError("Invalid element length; only tuples and lists of length 1-3 can be converted to a "
                         "ExecCommand tuple. Found length {0}.".format(item_count))
    if isinstance(value, dict):
        return ExecCommand(**value)
    raise ValueError(
        "Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__))
def expand_instances(config_ids, ext_maps):
    """
    Iterates over a list of input configuration ids, expanding configured instances if
    ``None`` is specified. Otherwise, where instance names are specified as a tuple, they
    are expanded.

    :param config_ids: Iterable of container configuration ids or
      (map, config, instance names) tuples.
    :type config_ids: collections.Iterable[dockermap.map.input.InputConfigId] | collections.Iterable[tuple[unicode | str, unicode | str, unicode | str]]
    :param ext_maps: Dictionary of extended ContainerMap instances for looking up
      container configurations.
    :type ext_maps: dict[unicode | str, ContainerMap]
    :return: MapConfigId tuples.
    :rtype: collections.Iterable[dockermap.map.input.MapConfigId]
    """
    # groupby requires the input sorted by the same key.
    grouped = itertools.groupby(sorted(config_ids, key=get_map_config), get_map_config)
    for type_map_config, group_items in grouped:
        config_type, map_name, config_name = type_map_config
        instances = _get_nested_instances(group_items)
        c_map = ext_maps[map_name]
        try:
            c_instances = _get_config_instances(config_type, c_map, config_name)
        except KeyError:
            raise KeyError("Configuration not found.", type_map_config)
        if c_instances and None in instances:
            # ``None`` means "all configured instances": expand from the configuration.
            expanded = c_instances
        else:
            expanded = instances
        for instance in expanded:
            yield MapConfigId(config_type, map_name, config_name, instance)
def get_map_config_ids(value, maps, default_map_name=None, default_instances=None):
    """
    From a value, which can be a string, an iterable of strings, or MapConfigId tuple(s),
    generates a list of MapConfigId tuples with expanded groups, listing all input or
    configured instances, and sorted by map and configuration.

    :param value: Input value(s).
    :type value: str | unicode | dockermap.map.input.InputConfigId | collection.Iterable[str | unicode | dockermap.map.input.InputConfigId]
    :param maps: Dictionary with expanded container maps, for resolving groups, aliases
      (``'__all__'``), and configured instances in absence of instance specification in
      the input.
    :param default_map_name: Default map name that is used, in case it is not part of the input.
    :param default_instances: Default instance name list that is used, in case it is not
      specified in the input.
    :return: List of MapConfigId tuples.
    :rtype: list[dockermap.map.input.MapConfigId]
    """
    input_ids = InputConfigIdList(value, map_name=default_map_name, instances=default_instances)
    # Expand group references first, then instance placeholders.
    expanded_groups = expand_groups(input_ids, maps)
    return list(expand_instances(expanded_groups, maps))
def create_network(self, action, n_name, **kwargs):
    """
    Creates a configured network.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param n_name: Network name.
    :type n_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Client response from the network creation.
    """
    create_kwargs = self.get_network_create_kwargs(action, n_name, **kwargs)
    res = action.client.create_network(**create_kwargs)
    # Register the new network id in the policy cache for this client.
    self._policy.network_names[action.client_name][n_name] = res['Id']
    return res
def remove_network(self, action, n_name, **kwargs):
    """
    Removes a network.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param n_name: Network name or id.
    :type n_name: unicode | str
    :param kwargs: Additional keyword arguments.
    :type kwargs: dict
    :return: Client response from the network removal.
    """
    remove_kwargs = self.get_network_remove_kwargs(action, n_name, **kwargs)
    res = action.client.remove_network(**remove_kwargs)
    # Drop the network from the policy cache for this client.
    del self._policy.network_names[action.client_name][n_name]
    return res
def get_container_host_config_kwargs(self, action, container_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to set up the HostConfig or start
    a container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id. Set ``None`` when included in kwargs
      for ``create_container``.
    :type container_name: unicode | str | NoneType
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    container_map = action.container_map
    container_config = action.config
    client_config = action.client_config
    config_id = action.config_id
    map_name = config_id.map_name
    policy = self._policy
    cname = policy.cname
    # Whether the client supports named volumes (affects binds / volumes_from).
    supports_volumes = client_config.features['volumes']
    c_kwargs = dict(
        links=[(cname(map_name, linked_name), alias or policy.get_hostname(linked_name))
               for linked_name, alias in container_config.links],
        binds=get_host_binds(container_map, config_id.config_name, container_config, config_id.instance_name,
                             policy, supports_volumes),
        volumes_from=get_volumes_from(container_map, config_id.config_name, container_config,
                                      policy, not supports_volumes),
        port_bindings=get_port_bindings(container_config, client_config),
    )
    network_mode = container_config.network_mode
    if isinstance(network_mode, tuple):
        # (config, instance) reference to another container on the same map.
        c_kwargs['network_mode'] = 'container:{0}'.format(cname(map_name, *network_mode))
    elif isinstance(network_mode, string_types):
        c_kwargs['network_mode'] = network_mode
    if container_name:
        c_kwargs['container'] = container_name
    update_kwargs(c_kwargs, init_options(container_config.host_config), kwargs)
    return c_kwargs
def get_attached_container_create_kwargs(self, action, container_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to create an attached container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    client_config = action.client_config
    policy = self._policy
    config_id = action.config_id
    path = resolve_value(policy.default_volume_paths[config_id.map_name][config_id.instance_name])
    user = extract_user(action.config.user)
    c_kwargs = dict(
        name=container_name,
        image=self._policy.base_image,
        volumes=[path],
        user=user,
        # Attached containers only hold a volume; they do not need networking.
        network_disabled=True,
    )
    # Host-config arguments may be passed nested under 'host_config'.
    hc_extra_kwargs = kwargs.pop('host_config', None) if kwargs else None
    use_host_config = client_config.features['host_config']
    if use_host_config:
        hc_kwargs = self.get_attached_container_host_config_kwargs(action, None, kwargs=hc_extra_kwargs)
        if hc_kwargs:
            if use_host_config == USE_HC_MERGE:
                # Old API: host-config values are merged into the create arguments.
                c_kwargs.update(hc_kwargs)
            else:
                c_kwargs['host_config'] = HostConfig(version=client_config.version, **hc_kwargs)
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_attached_container_host_config_kwargs(self, action, container_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to set up the HostConfig or start
    an attached container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id. Set ``None`` when included in kwargs
      for ``create_container``.
    :type container_name: unicode | str | NoneType
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {'container': container_name} if container_name else {}
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_container_update_kwargs(self, action, container_name, update_values, kwargs=None):
    """
    Generates keyword arguments for the Docker client to update the HostConfig of an
    existing container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param update_values: Dictionary of values to update; i.e. keyword arguments to the
      Docker client.
    :type update_values: dict[unicode | str, unicode | str | int | float | decimal.Decimal]
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {'container': container_name}
    update_kwargs(c_kwargs, update_values, kwargs)
    return c_kwargs
def get_container_wait_kwargs(self, action, container_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to wait for a container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {'container': container_name}
    # Only pass a timeout if the client configuration defines one.
    timeout = action.client_config.get('wait_timeout')
    if timeout is not None:
        c_kwargs['timeout'] = timeout
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_container_stop_kwargs(self, action, container_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to stop a container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {'container': container_name}
    stop_timeout = action.config.stop_timeout
    if stop_timeout is NotSet:
        # Fall back to the client-level default; omit if that is unset as well.
        client_timeout = action.client_config.get('stop_timeout')
        if client_timeout is not None:
            c_kwargs['timeout'] = client_timeout
    elif stop_timeout is not None:
        # An explicit ``None`` in the configuration means "no timeout argument".
        c_kwargs['timeout'] = stop_timeout
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_container_remove_kwargs(self, action, container_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to remove a container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {'container': container_name}
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_network_create_kwargs(self, action, network_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to create a network.

    NOTE(review): ``options`` is first set to the raw ``config.driver_options`` and only
    replaced with resolved values when ``init_options`` yields a non-empty result; when it
    is empty, the unresolved original is passed through — confirm whether that is intended.

    :param action: Action configuration.
    :type action: ActionConfig
    :param network_name: Network name or id.
    :type network_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    config = action.config
    c_kwargs = dict(
        name=network_name,
        driver=config.driver,
        options=config.driver_options,
    )
    if config.internal:
        c_kwargs['internal'] = True
    driver_opts = init_options(config.driver_options)
    if driver_opts:
        # Resolve lazy values before handing the options to the client.
        c_kwargs['options'] = {opt_name: resolve_value(opt_value)
                               for opt_name, opt_value in iteritems(driver_opts)}
    update_kwargs(c_kwargs, init_options(config.create_options), kwargs)
    return c_kwargs
def get_network_remove_kwargs(self, action, network_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to remove a network.

    :param action: Action configuration.
    :type action: ActionConfig
    :param network_name: Network name or id.
    :type network_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {'net_id': network_name}
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_network_create_endpoint_kwargs(self, action, endpoint_config, kwargs=None):
    """
    Generates keyword arguments for Docker's ``create_endpoint_config`` utility /
    ``EndpointConfig`` type as well as for ``connect_container_to_network``.

    :param action: Action configuration.
    :type action: ActionConfig
    :param endpoint_config: Network endpoint configuration.
    :type endpoint_config: dockermap.map.input.NetworkEndpoint
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    map_name = action.config_id.map_name
    policy = self._policy
    c_kwargs = dict(
        ipv4_address=resolve_value(endpoint_config.ipv4_address),
        ipv6_address=resolve_value(endpoint_config.ipv6_address),
    )
    if endpoint_config.aliases:
        c_kwargs['aliases'] = [resolve_value(alias) for alias in endpoint_config.aliases]
    if endpoint_config.links:
        # Translate configuration names into full container names plus link alias.
        c_kwargs['links'] = [(policy.cname(map_name, linked_name), alias or policy.get_hostname(linked_name))
                             for linked_name, alias in endpoint_config.links]
    if endpoint_config.link_local_ips:
        c_kwargs['link_local_ips'] = [resolve_value(ip) for ip in endpoint_config.link_local_ips]
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_network_connect_kwargs(self, action, network_name, container_name, endpoint_config=None, kwargs=None):
    """
    Generates keyword arguments for the Docker client to add a container to a network.

    :param action: Action configuration.
    :type action: ActionConfig
    :param network_name: Network name or id.
    :type network_name: unicode | str
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param endpoint_config: Network endpoint configuration.
    :type endpoint_config: dockermap.map.input.NetworkEndpoint
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {
        'container': container_name,
        'net_id': network_name,
    }
    if endpoint_config:
        # Merge in endpoint-specific settings (IP addresses, aliases, links).
        c_kwargs.update(self.get_network_create_endpoint_kwargs(action, endpoint_config))
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_network_disconnect_kwargs(self, action, network_name, container_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to remove a container from a network.

    :param action: Action configuration.
    :type action: ActionConfig
    :param network_name: Network name or id.
    :type network_name: unicode | str
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {
        'container': container_name,
        'net_id': network_name,
    }
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_exec_create_kwargs(self, action, container_name, exec_cmd, exec_user, kwargs=None):
    """
    Generates keyword arguments for the Docker client to create an exec instance in a
    running container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param exec_cmd: Command to be executed.
    :type exec_cmd: unicode | str
    :param exec_user: User to run the command.
    :type exec_user: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = dict(
        container=container_name,
        cmd=resolve_value(exec_cmd),
    )
    if exec_user is not None:
        # Explicit user wins; coerce to text after resolving lazy values.
        c_kwargs['user'] = text_type(resolve_value(exec_user))
    elif action.config.user is not NotSet:
        # Otherwise fall back to the container configuration's user.
        c_kwargs['user'] = extract_user(action.config.user)
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_exec_start_kwargs(self, action, container_name, exec_id, kwargs=None):
    """
    Generates keyword arguments for the Docker client to start an exec instance.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param exec_id: Id of the exec instance.
    :type exec_id: long
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {'exec_id': exec_id}
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def get_volume_create_kwargs(self, action, volume_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to create a volume.

    :param action: Action configuration.
    :type action: ActionConfig
    :param volume_name: Volume name.
    :type volume_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    config = action.config
    c_kwargs = {'name': volume_name}
    if not config:
        # No volume configuration available; apply only the extra kwargs.
        update_kwargs(c_kwargs, kwargs)
        return c_kwargs
    c_kwargs['driver'] = config.driver
    driver_opts = init_options(config.driver_options)
    if driver_opts:
        # Resolve lazy values before handing the options to the client.
        c_kwargs['driver_opts'] = {opt_name: resolve_value(opt_value)
                                   for opt_name, opt_value in iteritems(driver_opts)}
    update_kwargs(c_kwargs, init_options(config.create_options), kwargs)
    return c_kwargs
def get_volume_remove_kwargs(self, action, volume_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to remove a volume.

    :param action: Action configuration.
    :type action: ActionConfig
    :param volume_name: Volume name.
    :type volume_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the
      configuration-based values.
    :type kwargs: dict
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = {'name': volume_name}
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
def cname(cls, map_name, container, instance=None):
    """
    Generates a container name that should be used for creating new containers and
    checking the status of existing containers.

    In this implementation, the format will be ``<map name>.<container name>.<instance>``.
    If no instance is provided, it is just ``<map name>.<container name>``.

    :param map_name: Container map name.
    :type map_name: unicode | str
    :param container: Container configuration name.
    :type container: unicode | str
    :param instance: Instance name (optional).
    :type instance: unicode | str
    :return: Container name.
    :rtype: unicode | str
    """
    if not instance:
        return '{0}.{1}'.format(map_name, container)
    return '{0}.{1}.{2}'.format(map_name, container, instance)
def aname(cls, map_name, attached_name, parent_name=None):
    """Generates a container name that should be used for creating new attached volume
    containers and checking the status of existing containers.

    In this implementation, the format will be ``<map name>.<attached>``, or
    ``<map name>.<parent name>.<attached>`` if the parent container configuration name
    is provided.

    :param map_name: Container map name.
    :type map_name: unicode | str
    :param attached_name: Attached container alias.
    :type attached_name: unicode | str
    :param parent_name: Container configuration name that contains the attached container.
    :type parent_name: unicode | str
    :return: Container name.
    :rtype: unicode | str
    """
    parent_infix = '.{0}'.format(parent_name) if parent_name else ''
    return '{0}{1}.{2}'.format(map_name, parent_infix, attached_name)
def nname(cls, map_name, network_name):
    """Generates a network name that should be used for creating new networks and
    checking the status of existing networks on the client.

    In this implementation, the format will be ``<map name>.<network name>``.

    :param map_name: Container map name.
    :type map_name: unicode | str
    :param network_name: Network configuration name.
    :type network_name: unicode | str
    :return: Network name.
    :rtype: unicode | str
    """
    # Preset networks (e.g. Docker's built-in ones) keep their literal name.
    if network_name in DEFAULT_PRESET_NETWORKS:
        return network_name
    return '{0}.{1}'.format(map_name, network_name)
def get_hostname(cls, container_name, client_name=None):
    """Determines the host name of a container.

    Applies the class' ``hostname_replace`` substitutions to the container name;
    then attaches another dash with the (equally sanitized) client name, unless
    there is just one default client.

    :param container_name: Name of the container.
    :type container_name: unicode | str
    :param client_name: Name of the client configuration, where applicable.
    :type client_name: unicode | str
    :return: Host name.
    :rtype: unicode | str
    """
    def _sanitize(value):
        for old, new in cls.hostname_replace:
            value = value.replace(old, new)
        return value

    base_name = _sanitize(container_name)
    if not client_name or client_name == cls.default_client_name:
        return base_name
    return '{0}-{1}'.format(base_name, _sanitize(client_name))
def adduser(username, uid=None, system=False, no_login=True, no_password=False, group=False, gecos=None, **kwargs):
    """Formats an ``adduser`` command.

    :param username: User name.
    :type username: unicode | str
    :param uid: Optional user id to use.
    :type uid: long | int
    :param system: Create a system user account.
    :type system: bool
    :param no_login: Disable the login for this user. Not compatible with CentOS.
        Implies setting '--no-create-home', and ``no_password``.
    :type no_login: bool
    :param no_password: Disable the password for this user. Not compatible with CentOS.
    :type no_password: bool
    :param group: Create a group along with the user. Not compatible with CentOS.
    :type group: bool
    :param gecos: Set GECOS information in order to suppress an interactive prompt.
        On CentOS, use ``__comment`` instead.
    :type gecos: unicode | str
    :param kwargs: Additional keyword arguments which are converted to the command line.
    :return: A formatted ``adduser`` command with arguments.
    :rtype: unicode | str
    """
    # NOTE(review): __gid is deliberately set to the same value as uid,
    # i.e. matching user and group ids are assumed — confirm against callers.
    # no_login expands into the tuple of extra flags (_NO_CREATE_HOME, _NO_LOGIN)
    # handled by _format_cmd, and also forces __disabled_password below.
    return _format_cmd('adduser', username, __system=bool(system), __uid=uid, __group=bool(group), __gid=uid,
                       no_login=(no_login, _NO_CREATE_HOME, _NO_LOGIN),
                       __disabled_password=no_login or bool(no_password),
                       __gecos=gecos, **kwargs)
def get_user_group(user_group):
    """Formats a user and group in the format ``user:group``, as needed for `chown`.

    If ``user_group`` is a tuple, this is used for the formatting. If a string or
    integer is given, it will be formatted as ``user:user``. Otherwise the input is
    returned - this method does not perform any more checks.

    :param user_group: User name, user id, user and group in format ``user:group``,
        ``user_id:group_id``, or tuple of ``(user, group)``.
    :type user_group: unicode | str | int | tuple
    :return: Formatted string in the format ``user:group``.
    :rtype: unicode | str
    """
    if isinstance(user_group, tuple):
        return '{0}:{1}'.format(*user_group)
    if isinstance(user_group, six.integer_types):
        return '{0}:{0}'.format(user_group)
    if ':' not in user_group:
        return '{0}:{0}'.format(user_group)
    return user_group
def addgroupuser(username, uid, groupnames=None, system=False, no_login=True, no_password=False, gecos=None, sudo=False,
                 **kwargs):
    """Generates a unix command line for creating user and group with the same name,
    assigning the user to the group.

    Has the same effect as combining :func:`~addgroup`, :func:`~adduser`, and
    :func:`~assignuser`.

    :param username: User name to create.
    :type username: unicode | str
    :param uid: User id to use.
    :type uid: int
    :param groupnames: Iterable with additional group names to assign the user to.
    :type groupnames: collections.Iterable[unicode | str]
    :param system: Create a system user and group. Default is ``False``.
    :type system: bool
    :param no_login: Disallow login of this user and group, and skip creating the home
        directory. Default is ``True``.
    :type no_login: bool
    :param no_password: Do not set a password for the new user.
    :type no_password: bool
    :param gecos: Provide GECOS info and suppress prompt.
    :type gecos: unicode | str
    :param sudo: Prepend `sudo` to the command. Default is ``False``. When using Fabric,
        use its `sudo` command instead.
    :type sudo: bool
    :param kwargs: Additional keyword arguments for command line arguments.
    :return: Unix shell command line.
    :rtype: unicode | str
    """
    prefix = 'sudo ' if sudo else ''
    commands = [
        addgroup(username, uid, system),
        adduser(username, uid, system, no_login, no_password, False, gecos, **kwargs),
    ]
    if groupnames:
        commands.append(assignuser(username, groupnames))
    return ' && '.join('{0}{1}'.format(prefix, command) for command in commands)
def mkdir(path, create_parent=True, check_if_exists=False):
    """Generates a unix command line for creating a directory.

    :param path: Directory path.
    :type path: unicode | str
    :param create_parent: Create parent directories, if necessary. Default is ``True``.
    :type create_parent: bool
    :param check_if_exists: Prepend a check if the directory exists; in that case, the
        command is not run. Default is ``False``.
    :type check_if_exists: bool
    :return: Unix shell command line.
    :rtype: unicode | str
    """
    mkdir_cmd = _format_cmd('mkdir', path, _p=create_parent)
    if not check_if_exists:
        return mkdir_cmd
    return 'if [[ ! -d {0} ]]; then {1}; fi'.format(path, mkdir_cmd)
def mkdir_chown(paths, user_group=None, permissions='ug=rwX,o=rX', create_parent=True, check_if_exists=False, recursive=False):
    """Generates a unix command line for creating a directory and assigning permissions
    to it. Shortcut to a combination of :func:`~mkdir`, :func:`~chown`, and
    :func:`~chmod`.

    Note that if `check_if_exists` has been set to ``True``, and the directory is found,
    `mkdir` is not called, but `user_group` and `permissions` are still applied.

    :param paths: Can be a single path string, or a list or tuple of path strings.
    :type paths: unicode | str | tuple[unicode | str] | list[unicode | str]
    :param user_group: Optional owner of the directory. For notation, see
        :func:`~get_user_group`.
    :type user_group: unicode | str | int | tuple
    :param permissions: Optional permission mode, in any notation accepted by the unix
        `chmod` command. Default is ``ug=rwX,o=rX``.
    :type permissions: unicode | str
    :param create_parent: Parent directories are created if not present (`-p` argument
        to `mkdir`).
    :type create_parent: bool
    :param check_if_exists: Prior to creating the directory, checks if it already exists.
    :type check_if_exists: bool
    :param recursive: Apply permissions and owner change recursively.
    :type recursive: bool
    :return: Unix shell command line.
    :rtype: unicode | str
    """
    def _one_path_cmd(p):
        parts = (
            mkdir(p, create_parent, check_if_exists),
            chown(user_group, p, recursive) if user_group else None,
            chmod(permissions, p, recursive) if permissions else None,
        )
        return ' && '.join(part for part in parts if part)

    if not isinstance(paths, (tuple, list)):
        paths = [paths]
    return '; '.join(_one_path_cmd(p) for p in paths)
def bind(self, field_name, parent):
    """Create translation serializer dynamically.

    Takes the translatable model class (``shared_model``) from the parent
    serializer and may create a serializer class on the fly if no custom
    class was specified.

    :param field_name: Name this field is bound to on the parent serializer.
    :param parent: Parent serializer instance.
    :raises TypeError: If the parent serializer (or a custom serializer's model)
        is not translation-aware.
    """
    super(TranslatedFieldsField, self).bind(field_name, parent)
    # Expect 1-on-1 for now. Allow using source as alias,
    # but it should not be a dotted path for now
    related_name = self.source or field_name
    # This could all be done in __init__(), but by moving the code here,
    # it's possible to auto-detect the parent model.
    if self.shared_model is not None and self.serializer_class is not None:
        return
    # Fill in the blanks
    if self.serializer_class is None:
        if self.shared_model is None:
            # Auto detect parent model
            from .serializers import TranslatableModelSerializer
            if not isinstance(parent, TranslatableModelSerializer):
                raise TypeError("Expected 'TranslatableModelSerializer' as serializer base class")
            if not issubclass(parent.Meta.model, TranslatableModel):
                raise TypeError("Expected 'TranslatableModel' for the parent model")
            self.shared_model = parent.Meta.model
        # Create serializer based on shared model.
        translated_model = self.shared_model._parler_meta[related_name]
        self.serializer_class = create_translated_fields_serializer(
            self.shared_model, related_name=related_name,
            meta={'fields': translated_model.get_translated_fields()}
        )
    else:
        # Custom serializer given: only validate that its model is a translation model.
        if not issubclass(self.serializer_class.Meta.model, TranslatedFieldsModel):
            raise TypeError("Expected 'TranslatedFieldsModel' for the serializer model")
def to_representation(self, value):
    """Serialize translated fields.

    Simply iterate over available translations and, for each language,
    delegate serialization logic to the translation model serializer.

    Output languages can be selected by passing a list of language codes,
    ``languages``, within the serialization context.

    :param value: The translations related manager of the instance.
    :return: ``OrderedDict`` mapping language code to the serialized
        translation fields, or ``None`` if ``value`` is ``None``.
    """
    if value is None:
        return
    # Only need one serializer to create the native objects
    serializer = self.serializer_class(
        instance=self.parent.instance,  # Typically None
        context=self.context,
        partial=self.parent.partial
    )
    # Don't need to have a 'language_code', it will be split up already,
    # so this should avoid redundant output.
    if 'language_code' in serializer.fields:
        raise ImproperlyConfigured("Serializer may not have a 'language_code' field")
    translations = value.all()  # value = translations related manager
    # Optional whitelist of language codes from the serialization context.
    languages = self.context.get('languages')
    if languages:
        translations = translations.filter(language_code__in=languages)
    # Split into a dictionary per language
    result = OrderedDict()
    for translation in translations:
        result[translation.language_code] = serializer.to_representation(translation)
    return result
def to_internal_value(self, data):
    """Deserialize data from translations fields.

    For each received language, delegate validation logic to
    the translation model serializer.

    :param data: Mapping of language code to translated field data.
    :return: Dict of validated data per language, or ``None`` for ``None`` input.
    :raises serializers.ValidationError: If any language block fails validation.
    """
    if data is None:
        return
    if not isinstance(data, dict):
        self.fail('invalid')
    if not self.allow_empty and not data:
        self.fail('empty')
    validated, errors = {}, {}
    for language, field_data in data.items():
        translation_serializer = self.serializer_class(data=field_data)
        if translation_serializer.is_valid():
            validated[language] = translation_serializer.validated_data
        else:
            errors[language] = translation_serializer.errors
    if errors:
        raise serializers.ValidationError(errors)
    return validated
def parse(self, text, layers=None):
    """Parse passed text to json via the NLU service.

    Args:
        text: Text to parse.
        layers (optional): Special fields. Only one string
            or iterable object (e.g "Data", ("Data", "Fio")).
            Only these fields will be returned.

    Returns:
        The parsed text as a json object.
    """
    params = {
        "text": text,
        "key": self.key,
    }
    if layers is not None:
        # if it's a string
        if isinstance(layers, six.string_types):
            params["layers"] = layers
        else:
            # BUG FIX: `collections.Iterable` was removed in Python 3.10;
            # use `collections.abc.Iterable` when available (Python 3),
            # falling back to the old location on Python 2.
            iterable_type = getattr(collections, 'abc', collections).Iterable
            # if it's another iterable object
            if isinstance(layers, iterable_type):
                params["layers"] = ",".join(layers)
    req = requests.get(self.NLU_URL, params=params)
    return req.json()
def generate(self, text):
    """Try to get the generated speech audio for *text*.

    The response body iterator is stored on ``self._data``; call ``save()``
    afterwards to write it to disk.

    Args:
        text: The text that you want to generate.

    Raises:
        Exception: If *text* is empty, or its length is at least
            ``self.MAX_CHARS`` (error text says 2000).
    """
    if not text:
        raise Exception("No text to speak")
    # NOTE: >= means a text of exactly MAX_CHARS characters is rejected,
    # matching "must be less than" in the error message.
    if len(text) >= self.MAX_CHARS:
        raise Exception("Number of characters must be less than 2000")
    params = self.__params.copy()
    params["text"] = text
    self._data = requests.get(self.TTS_URL, params=params,
                              stream=False).iter_content()
def save(self, path="speech"):
    """Save data in file.

    Args:
        path (optional): A path to save file. Defaults to "speech".
            File extension is optional. Absolute path is allowed.

    Returns:
        The path to the saved file.

    Raises:
        Exception: If no audio data has been generated yet.
    """
    if self._data is None:
        raise Exception("There's nothing to save")
    extension = "." + self.__params["format"]
    # Append the configured format's extension unless already present.
    if os.path.splitext(path)[1] != extension:
        path += extension
    with open(path, "wb") as out_file:
        for chunk in self._data:
            out_file.write(chunk)
    return path
def create_translated_fields_serializer(shared_model, meta=None, related_name=None, **fields):
    """Create a Rest Framework serializer class for a translated fields model.

    :param shared_model: The shared model.
    :type shared_model: :class:`parler.models.TranslatableModel`
    :param meta: Optional dict of attributes for the inner ``Meta`` class
        (mutated in place to add ``model`` and default ``fields``).
    :param related_name: Optional name of the translation relation to use;
        defaults to the root translation model.
    :param fields: Extra serializer class attributes (e.g. declared fields).
    :return: A dynamically created ``ModelSerializer`` subclass.
    """
    if related_name:
        translated_model = shared_model._parler_meta[related_name].model
    else:
        translated_model = shared_model._parler_meta.root_model

    # Define inner Meta class (mutating a caller-supplied dict on purpose,
    # matching the historical behavior).
    if not meta:
        meta = {}
    meta['model'] = translated_model
    meta.setdefault('fields', ['language_code'] + translated_model.get_translated_fields())

    # Define serializer class attributes
    class_attrs = dict(fields)
    class_attrs['Meta'] = type('Meta', (), meta)

    # Dynamically create the serializer class
    serializer_name = '{0}Serializer'.format(translated_model.__name__)
    return type(serializer_name, (serializers.ModelSerializer,), class_attrs)
def save(self, **kwargs):
    """Extract the translations and save them after main object save.

    By default all translations will be saved no matter if creating
    or updating an object. Users with more complex needs might define
    their own save and handle translation saving themselves.

    :param kwargs: Passed through to the parent serializer's ``save()``.
    :return: The saved shared model instance.
    """
    # Pop translations first so the base save() only sees shared fields.
    translated_data = self._pop_translated_data()
    instance = super(TranslatableModelSerializer, self).save(**kwargs)
    self.save_translations(instance, translated_data)
    return instance
def _pop_translated_data(self):
translated_data = {}
for meta in self.Meta.model._parler_meta:
translations = self.validated_data.pop(meta.rel_name, {})
if translations:
translated_data[meta.rel_name] = translations
return translated_data | Separate data of translated fields from other data. |
def save_translations(self, instance, translated_data):
    """Save translation data into translation objects.

    :param instance: The saved shared model instance.
    :param translated_data: Dict mapping relation name to per-language
        field data, as produced by ``_pop_translated_data()``.
    """
    for parler_meta in self.Meta.model._parler_meta:
        translations = translated_data.get(parler_meta.rel_name, {})
        for lang_code, model_fields in translations.items():
            translation = instance._get_translated_model(lang_code, auto_create=True, meta=parler_meta)
            for field_name, value in model_fields.items():
                setattr(translation, field_name, value)

    # Go through the same hooks as the regular model,
    # instead of calling translation.save() directly.
    instance.save_translations()
def load_conf(cfg_path):
    """Try to load the given conf file into the global ``config``.

    :param cfg_path: Path to a JSON configuration file.
    :return: ``True`` if the file was read and parsed; ``False`` otherwise.
    """
    global config

    try:
        # BUG FIX: use a context manager so the file handle is released
        # even if read() raises (the old open()/read()/close() sequence
        # leaked the handle on a read error).
        with open(cfg_path, 'r') as cfg:
            cfg_json = cfg.read()
    except Exception as ex:
        if verbose:
            print("Unable to open {0}".format(cfg_path))
            print(str(ex))
        return False

    # Try to parse the conf file into a Python structure
    try:
        config = json.loads(cfg_json)
    except Exception as ex:
        print("Unable to parse configuration file as JSON")
        print(str(ex))
        return False

    # This config was successfully loaded
    return True
def add_values(name, values):
    """Adds an alias with a list of values to the channel/value dictionary.

    :param name: Alias name.
    :param values: Iterable of DMX value strings/numbers (0-255).
    """
    # BUG FIX: materialize with list(); on Python 3 a bare map() iterator
    # would be exhausted after the first lookup, so an alias used twice
    # silently expanded to nothing the second time.
    int_values = list(map(int, values))
    cv_dict[values_key][name] = int_values
def are_valid_values(values):
    """Determines if a list of values are valid DMX values (0-255).

    :param values: Iterable of value strings/numbers.
    :return: ``True`` if every value converts to an int in 0-255.
    """
    try:
        return all(0 <= int(value) <= 255 for value in values)
    except Exception:
        # Non-numeric input (or anything else that blows up) is invalid.
        return False
def load_rc_file():
    """Load the contents of the resource file ~/.uDMXrc.

    Populates the channel/value alias dictionary from ``channel`` and
    ``value``/``values`` statements; other lines produce warnings.
    """
    # If an rc file is named in the config, use it.
    # Otherwise, fall back to looking in the HOME directory.
    # The fall back won't work under RPi because HOME will be root.
    if "uDMXrc" in config:
        rcfile = config["uDMXrc"]
    else:
        if os.name == "nt":
            # Windows
            rcfile = os.path.join(os.environ["USERPROFILE"], ".uDMXrc")
        else:
            # Mostly *nix type systems
            rcfile = os.path.join(os.environ["HOME"], ".uDMXrc")
    try:
        cf = open(rcfile, 'r')
        for line in cf:
            tokens = line.split()
            # Blank line
            if len(tokens) == 0:
                continue
            # A comment
            if tokens[0] == '#':
                continue
            # A channel alias
            elif tokens[0] == 'channel':
                # channel alias value
                if len(tokens) >= 3:
                    if is_valid_channel(tokens[2]):
                        add_channel(tokens[1], tokens[2])
                    else:
                        print(line)
                        print("Invalid channel value")
                else:
                    print(line)
                    print("Invalid channel statement")
            # A DMX value or values
            elif tokens[0] in ['value', 'values']:
                # value alias value
                if len(tokens) >= 3:
                    if are_valid_values(tokens[2:]):
                        add_values(tokens[1], tokens[2:])
                    else:
                        print(line)
                        print("Invalid value(s)")
                else:
                    print(line)
                    print("Invalid value statement")
            # Something we don't recognize
            else:
                print(line)
                print(tokens[0], "is not a recognized resource file statement")
        cf.close()
    except:
        # NOTE(review): a bare except also swallows errors raised while
        # parsing, not just the open() failure the message suggests.
        print("Unable to open resource file", rcfile)
def translate_message_tokens(message_tokens):
    """Translates alias references to their defined values.

    The first token is a channel alias (or a literal channel number).
    The remaining tokens are value aliases (or literal values).

    :param message_tokens: Command line tokens (channel, value, ...).
    :return: List of integers: element 0 is the channel, the rest are values.
        A value alias may expand to several consecutive values.
    """
    trans_tokens = []
    if message_tokens[0] in cv_dict[channels_key]:
        trans_tokens.append(cv_dict[channels_key][message_tokens[0]])
    else:
        trans_tokens.append(int(message_tokens[0]))
    for token in message_tokens[1:]:
        if token in cv_dict[values_key]:
            # Aliases expand in place into the value list.
            trans_tokens.extend(cv_dict[values_key][token])
        else:
            trans_tokens.append(int(token))
    return trans_tokens
def send_dmx_message(message_tokens):
    """Send the DMX message defined by the command line arguments (message tokens).

    The first argument/token is the DMX channel.
    The remaining argument(s)/token(s) are DMX values.

    :param message_tokens: Command line tokens (channel, value, ...).
    :return: ``True`` if at least one byte was sent; ``False`` if no
        uDMX interface could be opened.
    """
    # Open the uDMX USB device
    dev = pyudmx.uDMXDevice()
    if not dev.open():
        print("Unable to find and open uDMX interface")
        return False
    # Translate the tokens into integers.
    # trans_tokens[0] will be the one-based channel number (1-512) as an integer.
    # The remaining tokens will be zero-based values (0-255) as integers.
    trans_tokens = translate_message_tokens(message_tokens)
    if len(trans_tokens) == 2:
        # Single value message
        if verbose:
            print("Sending single value message channel:", trans_tokens[0], "value:", trans_tokens[1])
        n = dev.send_single_value(trans_tokens[0], trans_tokens[1])
        if verbose:
            print("Sent", n, "value")
    else:
        # Multi-value message
        if verbose:
            print("Sending multi-value message channel:", trans_tokens[0], "values:", trans_tokens[1:])
        n = dev.send_multi_value(trans_tokens[0], trans_tokens[1:])
        if verbose:
            print("Sent", n, "values")
    # This may not be absolutely necessary, but it is safe.
    # It's the closest thing to a close() method.
    dev.close()
    # Returns True if something was sent
    return n > 0
def parse_headers(cls, msg):
    """Parse HTTP headers.

    Args:
        msg (str): HTTP message.

    Returns:
        (List[Tuple[str, str]]): List of header tuples.
    """
    parsed = email.parser.Parser().parsestr(msg)
    return [(name, value) for name, value in parsed.items()]
def parse(cls, msg):
    """Parse message string to response object."""
    lines = msg.splitlines()
    # Status line: "<version> <status_code> <reason>"
    version, status_code, reason = lines[0].split()
    header_text = '\r\n'.join(lines[1:])
    return cls(version=version, status_code=status_code,
               reason=reason, headers=cls.parse_headers(header_text))
def parse(cls, msg):
    """Parse message string to request object."""
    lines = msg.splitlines()
    # Request line: "<method> <uri> <version>"
    method, uri, version = lines[0].split()
    header_text = '\r\n'.join(lines[1:])
    return cls(version=version, uri=uri, method=method,
               headers=cls.parse_headers(header_text))
def sendto(self, transport, addr):
    """Send request to a given address via given transport.

    Args:
        transport (asyncio.DatagramTransport):
            Write transport to send the message on.
        addr (Tuple[str, int]):
            IP address and port pair to send the message to.
    """
    host, port = addr
    logger.debug("%s:%s < %s", host, port, self)
    transport.sendto(bytes(self) + b'\r\n', addr)
def open(self, vendor_id: int = 0x16c0, product_id: int = 0x5dc, bus: int = None, address: int = None) -> bool:
    """Open the first device that matches the search criteria.

    The default parameters are set up for the likely most common case of a
    single uDMX interface. However, for the case of multiple uDMX interfaces,
    you can use the bus and address parameters to further specify the uDMX
    interface to be opened.

    :param vendor_id: USB vendor id (0/None disables this filter).
    :param product_id: USB product id (0/None disables this filter).
    :param bus: USB bus number 1-n.
    :param address: USB device address 1-n.
    :return: Returns true if a device was opened. Otherwise, returns false.
    """
    criteria = {
        "idVendor": vendor_id,
        "idProduct": product_id,
        "bus": bus,
        "address": address,
    }
    # Only truthy criteria take part in the device search.
    search = {key: value for key, value in criteria.items() if value}
    # Find the uDMX interface
    self._dev = usb.core.find(**search)
    return self._dev is not None
def close(self):
    """Close and release the current usb device.

    :return: None
    """
    if self._dev is None:
        return
    # Releasing resources is the closest thing pyusb offers to a close().
    usb.util.dispose_resources(self._dev)
    self._dev = None
def send_single_value(self, channel: int, value: int) -> int:
    """Send a single value to the uDMX.

    :param channel: DMX channel number, 1-512
    :param value: Value to be sent to channel, 0-255
    :return: number of bytes actually sent
    """
    SetSingleChannel = 1  # uDMX command code for a one-channel write
    return self._send_control_message(SetSingleChannel, value_or_length=value,
                                      channel=channel, data_or_length=1)
def send_multi_value(self, channel: int, values: Union[List[int], bytearray]) -> int:
    """Send multiple consecutive bytes to the uDMX.

    :param channel: The starting DMX channel number, 1-512
    :param values: any sequence of integer values that can be converted
        to a bytearray (e.g a list). Each value 0-255.
    :return: number of bytes actually sent
    """
    SetMultiChannel = 2  # uDMX command code for a multi-channel write
    payload = values if isinstance(values, bytearray) else bytearray(values)
    return self._send_control_message(SetMultiChannel, value_or_length=len(payload),
                                      channel=channel, data_or_length=payload)
def add_boolean_argument(parser, name, default=False):
    """Add a boolean argument to an ArgumentParser instance.

    Registers a mutually exclusive ``--set``/``--unset`` flag pair that
    stores ``True``/``False`` into the destination ``name``.

    :param parser: The argparse.ArgumentParser to extend.
    :param name: Destination attribute name for the parsed flag.
    :param default: Value used when neither flag is given.
    """
    group = parser.add_mutually_exclusive_group()
    for flag, action in (('--set', 'store_true'), ('--unset', 'store_false')):
        group.add_argument(flag, action=action, dest=name, default=default)
def send_rgb(dev, red, green, blue, dimmer):
    """Send a set of RGB values to the light.

    Channels 1-3 carry red/green/blue, channel 7 the dimmer; all other
    channels are zeroed.

    :param dev: An opened uDMX device.
    :return: Number of bytes actually sent.
    """
    channel_values = [0] * 512
    channel_values[0:3] = [red, green, blue]
    channel_values[6] = dimmer
    return dev.send_multi_value(1, channel_values)
def main():
    """How to control a DMX light through an Anyma USB controller.

    Demonstration script: cycles the fixture through red, green and blue,
    first by building the raw 512-channel frame by hand, then via the
    send_rgb() helper, and finally blacks out all channels.
    """
    # Channel value list for channels 1-512
    cv = [0 for v in range(0, 512)]

    # Create an instance of the DMX controller and open it
    print("Opening DMX controller...")
    dev = pyudmx.uDMXDevice()
    # This will automagically find a single Anyma-type USB DMX controller
    dev.open()
    # For informational purpose, display what we know about the DMX controller
    print(dev.Device)

    # Send messages to the light changing it to red, then green, then blue
    # This is the "hard way" to do it, but illustrates how it's done
    print("Setting to red...")
    cv[0] = 255  # red
    cv[6] = 128  # dimmer to half value
    sent = dev.send_multi_value(1, cv)
    print("Set to red")
    sleep(3.0)

    print("Setting to green...")
    cv[0] = 0  # red
    cv[1] = 255  # green
    cv[6] = 128  # dimmer to half value
    sent = dev.send_multi_value(1, cv)
    print("Set to green")
    sleep(3.0)

    print("Setting to blue...")
    cv[0] = 0  # red
    cv[1] = 0  # green
    cv[2] = 255  # blue
    cv[6] = 128  # dimmer to half value
    sent = dev.send_multi_value(1, cv)
    print("Set to blue")
    sleep(3.0)

    # Here's an easier way to do it
    print("And, again the easier way")
    send_rgb(dev, 255, 0, 0, 128)
    sleep(3.0)
    send_rgb(dev, 0, 255, 0, 128)
    sleep(3.0)
    send_rgb(dev, 0, 0, 255, 128)
    sleep(3.0)

    print("Reset all channels and close..")
    # Turns the light off
    cv = [0 for v in range(0, 512)]
    dev.send_multi_value(1, cv)

    dev.close()
def connect(self):
    """Connect to vCenter server.

    Builds a TLS 1.2 context (with verification optionally disabled via
    the ``no_ssl_verify`` config flag), stores the service instance on
    ``self.si`` and its content root on ``self.content``, and registers
    a Disconnect handler for process exit. Exits the process on failure.
    """
    try:
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        if self.config['no_ssl_verify']:
            requests.packages.urllib3.disable_warnings()
            context.verify_mode = ssl.CERT_NONE
            # NOTE(review): SmartConnectNoSSL ignores the context built
            # above; it is only used in the verified branch below.
            self.si = SmartConnectNoSSL(
                host=self.config['server'],
                user=self.config['username'],
                pwd=self.config['password'],
                port=int(self.config['port']),
                certFile=None,
                keyFile=None,
            )
        else:
            self.si = SmartConnect(
                host=self.config['server'],
                user=self.config['username'],
                pwd=self.config['password'],
                port=int(self.config['port']),
                sslContext=context,
                certFile=None,
                keyFile=None,
            )
    except Exception as e:
        print('Unable to connect to vsphere server.')
        print(e)
        sys.exit(1)

    # add a clean up routine
    atexit.register(Disconnect, self.si)

    self.content = self.si.RetrieveContent()
def list_objects(self):
    """Command Section: list

    List available VMware objects of the configured managed object type
    (``self.config['type']``) as a MOID/name table; VirtualMachine listings
    also include the power status. Exits the process on an unknown type.
    """
    vimtype = self.config['type']
    vim_obj = "vim.%s" % vimtype
    # SECURITY NOTE: eval() of the configured type string executes arbitrary
    # code if the config value is attacker-controlled; the AttributeError
    # guard below only covers unknown vim attribute names.
    try:
        container = self.content.viewManager.CreateContainerView(
            self.content.rootFolder, [eval(vim_obj)], True)
    except AttributeError:
        print("%s is not a Managed Object Type. See the vSphere API "
              "docs for possible options." % vimtype)
        sys.exit(1)
    # print header line
    print("%s list" % vimtype)
    if vimtype == "VirtualMachine":
        rows = [['MOID', 'Name', 'Status']]
    else:
        rows = [['MOID', 'Name']]
    for c in container.view:
        if vimtype == "VirtualMachine":
            rows.append([c._moId, c.name, c.runtime.powerState])
        else:
            rows.append([c._moId, c.name])
    self.print_as_table(rows)
def status(self):
    """Check power status of the configured VM.

    With the ``extra`` config flag set, also prints IP address, hostname,
    memory, CPU count, uuid, guest id and uptime. Output is either a table
    or parser-friendly lines depending on ``parserFriendly``.
    """
    vm = self.get_vm_failfast(self.config['name'])
    extra = self.config['extra']
    parserFriendly = self.config['parserFriendly']
    status_to_print = []
    if extra:
        status_to_print = \
            [["vmname", "powerstate", "ipaddress", "hostname", "memory",
              "cpunum", "uuid", "guestid", "uptime"]] + \
            [[vm.name, vm.runtime.powerState,
              vm.summary.guest.ipAddress or '',
              vm.summary.guest.hostName or '',
              str(vm.summary.config.memorySizeMB),
              str(vm.summary.config.numCpu),
              vm.summary.config.uuid, vm.summary.guest.guestId,
              # BUG FIX: str(x) is always truthy, so the old
              # "str(x) or '0'" never fell back and printed 'None'
              # when uptimeSeconds was missing.
              str(vm.summary.quickStats.uptimeSeconds or 0)]]
    else:
        status_to_print = [[vm.name, vm.runtime.powerState]]
    if parserFriendly:
        self.print_as_lines(status_to_print)
    else:
        self.print_as_table(status_to_print)
def shutdown(self):
    """Shutdown guest.

    Falls back to power off if guest tools aren't installed, or if the
    guest has not shut down within the 10-minute grace period.
    """
    vm = self.get_vm_failfast(self.config['name'])
    if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOff:
        print("%s already poweredOff" % vm.name)
    else:
        if self.guestToolsRunning(vm):
            timeout_minutes = 10
            print("waiting for %s to shutdown "
                  "(%s minutes before forced powerOff)" % (
                      vm.name,
                      str(timeout_minutes)
                  ))
            # Graceful guest-level shutdown; does not return a waitable task.
            vm.ShutdownGuest()
            if self.WaitForVirtualMachineShutdown(vm,
                                                  timeout_minutes * 60):
                print("shutdown complete")
                print("%s poweredOff" % vm.name)
            else:
                print("%s has not shutdown after %s minutes:"
                      "will powerOff" % (vm.name, str(timeout_minutes)))
                self.powerOff()
        else:
            print("GuestTools not running or not installed: will powerOff")
            self.powerOff()
def get_resource_pool(self, cluster, pool_name):
    """Find a resource pool given a pool name for desired cluster.

    :param cluster: Cluster whose resource pools are searched.
    :param pool_name: Text name of the resource pool.
    :return: The first matching pool that belongs to the cluster, or None.
    """
    # get a list of all resource pools in this cluster
    cluster_pools = cluster.resourcePool.resourcePool
    # get list of all resource pools with a given text name
    candidates = self.get_obj(
        [vim.ResourcePool],
        pool_name,
        return_all=True
    )
    if not candidates:
        return None
    # return the first pool that exists in the given cluster
    for candidate in candidates:
        if candidate in cluster_pools:
            return candidate
    return None
def get_obj(self, vimtype, name, return_all=False, path=""):
    """Get the vsphere object(s) associated with a given text name or MOID.

    :param vimtype: List of vim managed object types to search for.
    :param name: Text name or MOID to match.
    :param return_all: When True, return a list of all matches instead of
        the first one.
    :param path: Optional inventory path to restrict the search to.
    :return: The first match, a list of matches (return_all), or None.
    """
    matches = list()
    if path:
        obj_folder = self.content.searchIndex.FindByInventoryPath(path)
        container = self.content.viewManager.CreateContainerView(
            obj_folder, vimtype, True
        )
    else:
        container = self.content.viewManager.CreateContainerView(
            self.content.rootFolder, vimtype, True)

    for c in container.view:
        # Tuple instead of a per-iteration list; also removes the dead
        # `break` that followed an unconditional `return` in the original.
        if name in (c.name, c._GetMoId()):
            if not return_all:
                return c
            matches.append(c)

    if matches:
        return matches
    # for backwards-compat
    return None
def get_host_system_failfast(
        self,
        name,
        verbose=False,
        host_system_term='HS'
):
    """Get a HostSystem object.

    Fail fast (print an error and exit the process) if the object isn't
    a valid reference.

    :param name: HostSystem name to look up.
    :param verbose: Print progress messages when True.
    :param host_system_term: Term used in the error message.
    :return: The HostSystem object.
    """
    if verbose:
        print("Finding HostSystem named %s..." % name)

    hs = self.get_host_system(name)

    if hs is None:
        print("Error: %s '%s' does not exist" % (host_system_term, name))
        sys.exit(1)

    if verbose:
        # BUG FIX: the original applied the '%' operator to a str.format-
        # style template ("{0} ... {1}"), raising TypeError whenever
        # verbose was enabled.
        print("Found HostSystem: {0} Name: {1}".format(hs, hs.name))

    return hs
def get_vm(self, name, path=""):
    """Get a VirtualMachine object by text name or MOID.

    :param name: VM name or MOID.
    :param path: Optional inventory path to restrict the search to.
    :return: The VirtualMachine object, or None if not found.
    """
    extra = {'path': path} if path else {}
    return self.get_obj([vim.VirtualMachine], name, **extra)
def get_vm_failfast(self, name, verbose=False, vm_term='VM', path=""):
    """Get a VirtualMachine object.

    Fail fast (print an error and exit the process) if the object isn't
    a valid reference.

    :param name: VM name or MOID.
    :param verbose: Print progress messages when True.
    :param vm_term: Term used in the error message.
    :param path: Optional inventory path to restrict the search to.
    :return: The VirtualMachine object.
    """
    if verbose:
        print("Finding VirtualMachine named %s..." % name)

    vm = self.get_vm(name, path=path) if path else self.get_vm(name)

    if vm is None:
        print("Error: %s '%s' does not exist" % (vm_term, name))
        sys.exit(1)

    if verbose:
        print("Found VirtualMachine: %s Name: %s" % (vm, vm.name))

    return vm
def WaitForVirtualMachineShutdown(
        self,
        vm_to_poll,
        timeout_seconds,
        sleep_period=5
):
    """Guest shutdown requests do not run a task we can wait for.

    So, we must poll and wait for status to be poweredOff.

    :param vm_to_poll: VirtualMachine being shut down.
    :param timeout_seconds: Maximum total time to wait.
    :param sleep_period: Seconds between polls.
    :return: Returns True if shutdown, False if poll expired.
    """
    seconds_waited = 0  # wait counter
    while seconds_waited < timeout_seconds:
        # sleep first, since nothing shuts down instantly
        seconds_waited += sleep_period
        time.sleep(sleep_period)
        # Re-fetch the VM so the power state is current.
        vm = self.get_vm(vm_to_poll.name)
        if vm.runtime.powerState == \
                vim.VirtualMachinePowerState.poweredOff:
            return True
    return False
def request_patch(self, *args, **kwargs):
    """Maintains the existing api for Session.request.

    Used by all of the higher level methods, e.g. Session.get.

    The background_callback param allows you to do some processing on the
    response in the background, e.g. call resp.json() so that json parsing
    happens in the background thread.

    :return: A concurrent.futures.Future for the request; when a
        background_callback is given, the future resolves to the
        callback's return value instead of the raw response.
    """
    func = sup = super(FuturesSession, self).request

    background_callback = kwargs.pop('background_callback', None)
    if background_callback:
        def wrap(*args_, **kwargs_):
            # Run the real request on the worker thread, then hand the
            # response to the callback there as well.
            resp = sup(*args_, **kwargs_)
            # Patch the closure to return the callback.
            return background_callback(self, resp)
        func = wrap

    return self.executor.submit(func, *args, **kwargs)
def _start_http_session(self):
    """Start a new requests HTTP session, clearing cookies and session data.

    Configures the User-Agent header and, when credentials are present,
    HTTP basic authorization for all subsequent requests.

    :return: None
    """
    api_logger.debug("Starting new HTTP session...")
    self.session = FuturesSession(executor=self.executor, max_workers=self.max_workers)
    self.session.headers.update({"User-Agent": self.user_agent})
    if self.username and self.password:
        api_logger.debug("Requests will use authorization.")
        self.session.auth = HTTPBasicAuth(self.username, self.password)
def _service_request(self, request_type, sub_uri, params=None, callback=None,
                     raise_for_status=True, raw=False, **kwargs):
    """Base method for handling HTTP requests via the current requests session.

    :param request_type: The request type as a string (e.g. "POST", "GET", "PUT", etc.)
    :param sub_uri: The REST end point (sub-uri) to communicate with.
    :param params: (Optional) HTTP Request parameters. Default: none
    :param callback: (Optional) A callback function to be executed on the resulting
        requests response. This implementation will resolve the future to the results
        of the callback. Default: None. Without a callback, the future resolves to
        either the decoded JSON or the raw request content.
    :param raise_for_status: (Optional) When set True, we raise requests.HTTPError
        on 4xx or 5xx status. When set False, non-2xx/3xx status code is ignored.
        Default: True
    :param raw: (Optional) If no callback is set, return the raw content from the
        request if this is set True. If False, the method attempts to parse the
        request as JSON data and return the results. Default: False
    :param kwargs: Additional parameters to pass to the session request call.
    :return: The concurrent.futures object that holds the future for the API method call.
    """
    api_logger.debug("Sending request: {} ({})".format(sub_uri, request_type))
    # Lazily (re)create the session on first use.
    if not self.session:
        self._start_http_session()
    uri = urljoin(self.uri_base, sub_uri)
    if params:
        kwargs.update(params=params)
    # Both callback variants run on the worker thread via the
    # background_callback mechanism of FuturesSession.
    if callback:
        def base_callback(_, response):
            if raise_for_status:
                response.raise_for_status()
            response.encoding = 'utf-8'
            return callback(response)
    else:
        def base_callback(_, response):
            if raise_for_status:
                response.raise_for_status()
            response.encoding = 'utf-8'
            return response.content if raw else json.loads(response.text)
    response_future = self.session.request(request_type, uri, background_callback=base_callback, **kwargs)
    return response_future
def location(ip=None, key=None, field=None):
    ''' Get geolocation data for a given IP address
        If field is specified, get specific field as text
        Else get complete location data as JSON
    '''
    # Reject unknown field names early rather than hitting the API.
    if field and (field not in field_list):
        return 'Invalid field'

    if field:
        if ip:
            url = 'https://ipapi.co/{}/{}/'.format(ip, field)
        else:
            # No IP given: the API resolves the caller's own address.
            url = 'https://ipapi.co/{}/'.format(field)
    else:
        if ip:
            url = 'https://ipapi.co/{}/json/'.format(ip)
        else:
            url = 'https://ipapi.co/json/'

    # Explicit key argument takes precedence over the module-level API_KEY.
    if key or API_KEY:
        url = '{}?key={}'.format(url, (key or API_KEY))

    response = get(url, headers=headers)

    if field:
        return response.text
    else:
        return response.json()
async def main_loop(loop, password, user, ip):  # pylint: disable=invalid-name
    """Main loop."""
    async with aiohttp.ClientSession(loop=loop) as session:
        VAR['sma'] = pysma.SMA(session, ip, password=password, group=user)
        await VAR['sma'].new_session()
        if VAR['sma'].sma_sid is None:
            _LOGGER.info("No session ID")
            return
        _LOGGER.info("NEW SID: %s", VAR['sma'].sma_sid)
        VAR['running'] = True
        cnt = 5
        sensors = pysma.Sensors()
        # Poll the inverter until either the counter runs out or an external
        # signal handler clears VAR['running'].
        while VAR.get('running'):
            await VAR['sma'].read(sensors)
            print_table(sensors)
            cnt -= 1
            if cnt == 0:
                break
            await asyncio.sleep(2)
        await VAR['sma'].close_session()
def main():
    """Main example."""
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    parser = argparse.ArgumentParser(
        description='Test the SMA webconnect library.')
    parser.add_argument(
        'ip', type=str, help='IP address of the Webconnect module')
    parser.add_argument(
        'user', help='installer/user')
    parser.add_argument(
        'password', help='Installer password')
    args = parser.parse_args()

    loop = asyncio.get_event_loop()

    def _shutdown(*_):
        # Signal the polling loop in main_loop() to stop gracefully.
        VAR['running'] = False
        # asyncio.ensure_future(sma.close_session(), loop=loop)
    signal.signal(signal.SIGINT, _shutdown)
    # loop.add_signal_handler(signal.SIGINT, shutdown)
    # signal.signal(signal.SIGINT, signal.SIG_DFL)

    loop.run_until_complete(main_loop(
        loop, user=args.user, password=args.password, ip=args.ip))
def update(self):
    """Connect to GitHub API endpoint specified by `_apicall_parameters()`,
    postprocess the result using `_apiresult_postprocess()` and trigger
    a cache update if the API call was successful.

    If an error occurs, cache the empty result generated by
    `_apiresult_error()`. Additionally, set up retrying after a certain
    time.

    Return `True` if the API call was successful, `False` otherwise.

    Call this method directly if you want to invalidate the current cache.
    Otherwise, just call `data()`, which will automatically call `update()`
    if required.
    """
    result = self.api.github_api(*self._apicall_parameters())
    if result is None:
        # an error occurred, try again after BACKOFF
        self._next_update = datetime.now() + timedelta(seconds=self.BACKOFF)
        # assume an empty result until the error disappears
        self._cached_result = self._apiresult_error()
    else:
        # request successful, cache does not expire
        self._next_update = None
        # Write the new result into self._cached_result to be picked up by
        # _data on `del self._data`.
        self._cached_result = self._apiresult_postprocess(result)
    # Don't `del self._data` if it has never been cached, that would create
    # ugly database entries in the cache table.
    if not self._first_lookup:
        del self._data
    else:
        self._first_lookup = False
    # signal success or error
    return result is not None
def data(self):
    """Get a cached post-processed result of a GitHub API call. Uses Trac cache
    to avoid constant querying of the remote API. If a previous API call did
    not succeed, automatically retries after a timeout.
    """
    # _next_update is only set after a failed API call; once that retry
    # deadline has passed, refresh the cache before returning it.
    if self._next_update and datetime.now() > self._next_update:
        self.update()
    return self._data
def teams(self):
    """Return a sequence of `GitHubTeam` objects, one for each team in this
    org.
    """
    teams = self._teamlist.teams()
    # find out which teams have been added or removed since the last sync
    current_teams = set(self._teamobjects.keys())
    new_teams = set(teams.keys())  # pylint: disable=no-member
    added = new_teams - current_teams
    removed = current_teams - new_teams
    for team in removed:
        del self._teamobjects[team]
    for team in added:
        self._teamobjects[team] = GitHubTeam(
            self._api, self._env, self._org, teams[team], team)  # pylint: disable=unsubscriptable-object
    return self._teamobjects.values()
def members(self):
    """Return a list of all users in this organization. Users are identified
    by their login name. Note that this is computed from the teams in the
    organization, because GitHub does not currently offer a WebHook for
    organization membership, so converting org membership would lead to
    stale data.
    """
    allmembers = set()
    # Union of every team's member list; the set removes duplicates for
    # users who belong to more than one team.
    for team in self.teams():
        allmembers.update(team.members())
    return sorted(allmembers)
def update_team(self, slug):
    """Trigger an update and cache invalidation for the team identified by the
    given `slug`. Returns `True` on success, `False` otherwise.

    :param slug: The GitHub 'slug' that identifies the team in URLs
    """
    if slug not in self._teamobjects:
        # This case is checked and handled further up, but better be safe
        # than sorry.
        return False  # pragma: no cover
    return self._teamobjects[slug].update()
def github_api(self, url, *args):
    """Connect to the given GitHub API URL template by replacing all
    placeholders with the given parameters and return the decoded JSON
    result on success. On error, return `None`.

    :param url: The path to request from the GitHub API. Contains format
                string placeholders that will be replaced with all
                additional positional arguments.
    """
    import requests
    import urllib
    github_api_url = os.environ.get("TRAC_GITHUB_API_URL", "https://api.github.com/")
    # URL-quote every positional argument before substituting it into the
    # path template.
    formatted_url = github_api_url + url.format(*(urllib.quote(str(x)) for x in args))
    access_token = _config_secret(self.access_token)
    self.log.debug("Hitting GitHub API endpoint %s with user %s", formatted_url, self.username)  # pylint: disable=no-member
    results = []
    try:
        # Follow RFC 5988 `Link: rel="next"` pagination until exhausted,
        # accumulating all pages into one list.
        has_next = True
        while has_next:
            req = requests.get(formatted_url, auth=(self.username, access_token))
            if req.status_code != 200:
                try:
                    message = req.json()['message']
                except Exception:  # pylint: disable=broad-except
                    message = req.text
                self.log.error("Error communicating with GitHub API at {}: {}".format(  # pylint: disable=no-member
                    formatted_url, message))
                return None
            results.extend(req.json())
            has_next = 'next' in req.links
            if has_next:
                formatted_url = req.links['next']['url']
    except requests.exceptions.ConnectionError as rce:
        self.log.error("Exception while communicating with GitHub API at {}: {}".format(  # pylint: disable=no-member
            formatted_url, rce))
        return None
    return results
def update_team(self, slug):
    """Trigger update and cache invalidation for the team identified by the
    given `slug`, if any. Returns `True` if the update was successful,
    `False` otherwise.

    :param slug: GitHub 'slug' name for the team to be updated.
    """
    if self._org:
        if not self._org.has_team(slug):
            # Unknown team: refresh the whole org so a newly created team
            # is picked up.
            return self._org.update()
        return self._org.update_team(slug)
    # self._org is created during Trac startup, so there should never
    # be a case where we try to update an org before it's created; this
    # is a sanity check only.
    return False
def get_permission_groups(self, username):
    """Return a list of names of the groups that the user with the specified
    name is a member of. Implements an `IPermissionGroupProvider` API.

    This specific implementation connects to GitHub with a dedicated user,
    fetches and caches the teams and their users configured at GitHub and
    converts the data into a format usable for easy access by username.
    """
    # Without full GitHub credentials there is nothing to look up.
    if not self.organization or not self.username or not self.access_token:
        return []
    elif (self.username_prefix and
          not username.startswith(self.username_prefix)):
        return []

    data = self._fetch_groups()

    if not data:
        self.log.error("No cached groups from GitHub available")  # pylint: disable=no-member
        return []
    else:
        # Strip the configured prefix before looking up the GitHub login.
        return data.get(username[len(self.username_prefix):], [])
def match_request(self, req):
    """Return whether the handler wants to process the given request.

    Implements an `IRequestHandler` API.
    """
    match = self._request_re.match(req.path_info)
    if match:
        return True
    # The debug endpoint is only reachable when explicitly enabled via the
    # environment, so it can never leak data in production.
    if os.environ.get('TRAC_GITHUB_ENABLE_DEBUGGING', None) is not None:
        debug_match = self._debug_request_re.match(req.path_info)
        if debug_match:
            return True
def process_debug_request(self, req):
    """Debugging helper used for testing, processes the given request and dumps
    the internal state of cached user to group mappings. Note that this is
    only callable if TRAC_GITHUB_ENABLE_DEBUGGING is set in the
    environment.
    """
    req.send(json.dumps(self._fetch_groups()).encode('utf-8'), 'application/json', 200)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.