import salt.utils.decorators
import salt.utils.versions
from salt.exceptions import SaltInvocationError
try:
import botocore.waiter
except ImportError:
pass
WAITER_CONFIGS = {
"ESDomainAvailable": {
"delay": 60,
"operation": "DescribeElasticsearchDomainConfig",
"maxAttempts": 60,
"acceptors": [
{
"expected": "Active",
"matcher": "path",
"state": "success",
"argument": "DomainConfig.ElasticsearchClusterConfig.Status.State",
},
{
"expected": True,
"matcher": "pathAny",
"state": "failure",
"argument": "DomainConfig.*.Status.PendingDeletion",
},
],
},
"ESUpgradeFinished": {
"delay": 60,
"operation": "DescribeElasticsearchDomain",
"maxAttempts": 60,
"acceptors": [
{
"expected": False,
"matcher": "path",
"state": "success",
"argument": "DomainStatus.UpgradeProcessing",
}
],
},
"ESDomainDeleted": {
"delay": 30,
"operation": "DescribeElasticsearchDomain",
"maxAttempts": 60,
"acceptors": [
{
"expected": True,
"matcher": "path",
"state": "retry",
"argument": "DomainStatus.Deleted",
},
{
"expected": False,
"matcher": "path",
"state": "failure",
"argument": "DomainStatus.Processing",
},
{
"expected": "ResourceNotFoundException",
"matcher": "error",
"state": "success",
},
],
},
"ESDomainCreated": {
"delay": 30,
"operation": "DescribeElasticsearchDomain",
"maxAttempts": 60,
"acceptors": [
{
"expected": True,
"matcher": "path",
"state": "success",
"argument": "DomainStatus.Created",
}
],
},
}
def __virtual__():
"""
Only load if botocore libraries exist.
"""
return salt.utils.versions.check_boto_reqs(check_boto=False)
@salt.utils.decorators.is_deprecated(globals(), "Sulfur")
def get_waiter(client, waiter=None, waiter_config=None):
"""
Gets a botocore waiter using either one of the preconfigured models by name
``waiter``, or with a manually supplied ``waiter_config``.
:param botoclient client: The botocore client to use.
:param str waiter: The name of the waiter config to use.
Either ``waiter`` or ``waiter_config`` must be supplied.
If both ``waiter`` and ``waiter_config`` are supplied, ``waiter`` takes
        precedence, unless no configuration for ``waiter`` exists.
:param dict waiter_config: The manual waiter config to use.
Either waiter or waiter_config must be supplied.
    :returns: A botocore waiter object.
"""
if not any((waiter, waiter_config)):
raise SaltInvocationError(
"At least one of waiter or waiter_config must be specified."
)
waiter_model = botocore.waiter.WaiterModel(
{"version": 2, "waiters": {waiter: WAITER_CONFIGS.get(waiter, waiter_config)}}
)
return botocore.waiter.create_waiter_with_client(waiter, waiter_model, client)
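# A minimal usage sketch (hypothetical domain name; assumes boto3 is
# installed and AWS credentials are configured):
#
#     import boto3
#     client = boto3.client("es")
#     waiter = get_waiter(client, waiter="ESDomainCreated")
#     waiter.wait(DomainName="my-domain")  # polls until an acceptor matches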
@salt.utils.decorators.is_deprecated(globals(), "Sulfur")
def list_waiters():
"""
Lists the builtin waiter configuration names.
:returns list
"""
return WAITER_CONFIGS.keys()
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/boto3_elasticsearch.py
import copy
from collections.abc import Mapping, Sequence, Set
class ImmutableDict(Mapping):
"""
An immutable dictionary implementation
"""
def __init__(self, obj):
self.__obj = obj
def __len__(self):
return len(self.__obj)
def __iter__(self):
return iter(self.__obj)
def __getitem__(self, key):
return freeze(self.__obj[key])
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, repr(self.__obj))
def __deepcopy__(self, memo):
return copy.deepcopy(self.__obj)
def copy(self):
"""
Return an un-frozen copy of self
"""
return copy.deepcopy(self.__obj)
class ImmutableList(Sequence):
"""
An immutable list implementation
"""
def __init__(self, obj):
self.__obj = obj
def __len__(self):
return len(self.__obj)
def __iter__(self):
return iter(self.__obj)
def __add__(self, other):
return self.__obj + other
def __radd__(self, other):
return other + self.__obj
def __getitem__(self, key):
return freeze(self.__obj[key])
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, repr(self.__obj))
def __deepcopy__(self, memo):
return copy.deepcopy(self.__obj)
def copy(self):
"""
Return an un-frozen copy of self
"""
return copy.deepcopy(self.__obj)
class ImmutableSet(Set):
"""
An immutable set implementation
"""
def __init__(self, obj):
self.__obj = obj
def __len__(self):
return len(self.__obj)
def __iter__(self):
return iter(self.__obj)
def __contains__(self, key):
return key in self.__obj
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, repr(self.__obj))
def __deepcopy__(self, memo):
return copy.deepcopy(self.__obj)
def copy(self):
"""
Return an un-frozen copy of self
"""
return copy.deepcopy(self.__obj)
def freeze(obj):
"""
Freeze python types by turning them into immutable structures.
"""
if isinstance(obj, dict):
return ImmutableDict(obj)
if isinstance(obj, list):
return ImmutableList(obj)
if isinstance(obj, set):
return ImmutableSet(obj)
return obj
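# A minimal sketch of freeze() in action (illustrative only):
#
#     pillar = freeze({"roles": ["web", "db"]})
#     pillar["roles"]          # -> ImmutableList; nested values are re-frozen
#     pillar["roles"][0]       # -> "web" (plain immutable scalar)
#     mutable = pillar.copy()  # -> an un-frozen deep copy of the wrapped dict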
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/immutabletypes.py
import http.client
import logging
from urllib.parse import urlencode, urljoin
import salt.utils.http
from salt.version import __version__
log = logging.getLogger(__name__)
def query(
function,
token=None,
api_version="1",
method="POST",
header_dict=None,
data=None,
query_params=None,
opts=None,
):
"""
    Construct and execute a PushOver API request.
:param token: The PushOver api key.
:param api_version: The PushOver API version to use, defaults to version 1.
:param function: The PushOver api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method.
:return: The json response from the API call or False.
"""
ret = {"message": "", "res": True}
pushover_functions = {
"message": {"request": "messages.json", "response": "status"},
"validate_user": {"request": "users/validate.json", "response": "status"},
"validate_sound": {"request": "sounds.json", "response": "status"},
}
api_url = "https://api.pushover.net"
base_url = urljoin(api_url, api_version + "/")
path = pushover_functions.get(function).get("request")
url = urljoin(base_url, path, False)
if not query_params:
query_params = {}
decode = True
if method == "DELETE":
decode = False
result = salt.utils.http.query(
url,
method,
params=query_params,
data=data,
header_dict=header_dict,
decode=decode,
decode_type="json",
text=True,
status=True,
cookies=True,
persist_session=True,
opts=opts,
)
if result.get("status", None) == http.client.OK:
response = pushover_functions.get(function).get("response")
if response in result and result[response] == 0:
ret["res"] = False
ret["message"] = result
return ret
    else:
        response = pushover_functions.get(function).get("response")
        if response in result and result[response] == 0:
            ret["res"] = False
        ret["message"] = result
    return ret
def validate_sound(sound, token):
"""
    Validate that a sound is one of the sounds supported by PushOver.
:param sound: The sound that we want to verify
:param token: The PushOver token.
"""
ret = {"message": "Sound is invalid", "res": False}
parameters = dict()
parameters["token"] = token
response = query(function="validate_sound", method="GET", query_params=parameters)
if response["res"]:
if "message" in response:
_message = response.get("message", "")
if "status" in _message:
if _message.get("dict", {}).get("status", "") == 1:
sounds = _message.get("dict", {}).get("sounds", "")
if sound in sounds:
ret["message"] = "Valid sound {}.".format(sound)
ret["res"] = True
else:
ret["message"] = "Warning: {} not a valid sound.".format(sound)
ret["res"] = False
else:
ret["message"] = "".join(_message.get("dict", {}).get("errors"))
return ret
def validate_user(user, device, token):
"""
    Validate a PushOver user or group key.
:param user: The user or group name, either will work.
:param device: The device for the user.
:param token: The PushOver token.
"""
res = {"message": "User key is invalid", "result": False}
parameters = dict()
parameters["user"] = user
parameters["token"] = token
if device:
parameters["device"] = device
response = query(
function="validate_user",
method="POST",
header_dict={"Content-Type": "application/x-www-form-urlencoded"},
data=urlencode(parameters),
)
if response["res"]:
if "message" in response:
_message = response.get("message", "")
if "status" in _message:
if _message.get("dict", {}).get("status", None) == 1:
res["result"] = True
res["message"] = "User key is valid."
else:
res["result"] = False
res["message"] = "".join(_message.get("dict", {}).get("errors"))
return res
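# A minimal usage sketch (hypothetical user key and application token):
#
#     res = validate_user("uQiRzpo4DXghDmr9Qzzf", None, "azGDORePK8gMaC0QOYAM")
#     if res["result"]:
#         print(res["message"])  # "User key is valid."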
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/pushover.py
import collections
import salt.utils.context
import yaml # pylint: disable=blacklisted-import
from salt.utils.odict import OrderedDict
try:
from yaml import CDumper as Dumper
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import Dumper
from yaml import SafeDumper
__all__ = [
"OrderedDumper",
"SafeOrderedDumper",
"IndentedSafeOrderedDumper",
"get_dumper",
"dump",
"safe_dump",
]
class IndentMixin(Dumper):
"""
    Mixin that improves the readability of dumped YAML lists by indenting
    them two spaces instead of leaving them flush with their parent key.
"""
def increase_indent(self, flow=False, indentless=False):
return super().increase_indent(flow, False)
class OrderedDumper(Dumper):
"""
A YAML dumper that represents python OrderedDict as simple YAML map.
"""
class SafeOrderedDumper(SafeDumper):
"""
A YAML safe dumper that represents python OrderedDict as simple YAML map.
"""
class IndentedSafeOrderedDumper(IndentMixin, SafeOrderedDumper):
"""
A YAML safe dumper that represents python OrderedDict as simple YAML map,
and also indents lists by two spaces.
"""
def represent_ordereddict(dumper, data):
return dumper.represent_dict(list(data.items()))
def represent_undefined(dumper, data):
return dumper.represent_scalar("tag:yaml.org,2002:null", "NULL")
OrderedDumper.add_representer(OrderedDict, represent_ordereddict)
SafeOrderedDumper.add_representer(OrderedDict, represent_ordereddict)
SafeOrderedDumper.add_representer(None, represent_undefined)
OrderedDumper.add_representer(
collections.defaultdict, yaml.representer.SafeRepresenter.represent_dict
)
SafeOrderedDumper.add_representer(
collections.defaultdict, yaml.representer.SafeRepresenter.represent_dict
)
OrderedDumper.add_representer(
salt.utils.context.NamespacedDictWrapper,
yaml.representer.SafeRepresenter.represent_dict,
)
SafeOrderedDumper.add_representer(
salt.utils.context.NamespacedDictWrapper,
yaml.representer.SafeRepresenter.represent_dict,
)
OrderedDumper.add_representer(
"tag:yaml.org,2002:timestamp", OrderedDumper.represent_scalar
)
SafeOrderedDumper.add_representer(
"tag:yaml.org,2002:timestamp", SafeOrderedDumper.represent_scalar
)
def get_dumper(dumper_name):
return {
"OrderedDumper": OrderedDumper,
"SafeOrderedDumper": SafeOrderedDumper,
"IndentedSafeOrderedDumper": IndentedSafeOrderedDumper,
}.get(dumper_name)
def dump(data, stream=None, **kwargs):
"""
    .. versionadded:: 2018.3.0

    Helper that wraps yaml.dump and ensures that we encode unicode strings
unless explicitly told not to.
"""
if "allow_unicode" not in kwargs:
kwargs["allow_unicode"] = True
kwargs.setdefault("default_flow_style", None)
return yaml.dump(data, stream, **kwargs)
def safe_dump(data, stream=None, **kwargs):
"""
Use a custom dumper to ensure that defaultdict and OrderedDict are
represented properly. Ensure that unicode strings are encoded unless
explicitly told not to.
"""
if "allow_unicode" not in kwargs:
kwargs["allow_unicode"] = True
kwargs.setdefault("default_flow_style", None)
return yaml.dump(data, stream, Dumper=SafeOrderedDumper, **kwargs)
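# A minimal usage sketch (illustrative only):
#
#     from salt.utils.odict import OrderedDict
#     data = OrderedDict([("b", 1), ("a", 2)])
#     safe_dump(data)  # OrderedDict is emitted as a plain YAML map, order kept
#     dump(data, Dumper=get_dumper("IndentedSafeOrderedDumper"))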
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/yamldumper.py
import logging
import os
import re
import subprocess
import salt.loader.context
import salt.utils.json
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
try:
import dbus
except ImportError:
dbus = None
log = logging.getLogger(__name__)
def booted(context=None):
"""
Return True if the system was booted with systemd, False otherwise. If the
loader context dict ``__context__`` is passed, this function will set the
``salt.utils.systemd.booted`` key to represent if systemd is running and
keep the logic below from needing to be run again during the same salt run.
"""
contextkey = "salt.utils.systemd.booted"
if isinstance(context, (dict, salt.loader.context.NamedLoaderContext)):
        # Can't put this if block on the same line as the above if block,
        # because it will break the elif below.
if contextkey in context:
return context[contextkey]
elif context is not None:
raise SaltInvocationError("context must be a dictionary if passed")
try:
# This check does the same as sd_booted() from libsystemd-daemon:
# http://www.freedesktop.org/software/systemd/man/sd_booted.html
ret = bool(os.stat("/run/systemd/system"))
except OSError:
ret = False
try:
context[contextkey] = ret
except TypeError:
pass
return ret
def offline(context=None):
"""Return True if systemd is in offline mode
.. versionadded:: 3004
"""
contextkey = "salt.utils.systemd.offline"
if isinstance(context, (dict, salt.loader.context.NamedLoaderContext)):
if contextkey in context:
return context[contextkey]
elif context is not None:
raise SaltInvocationError("context must be a dictionary if passed")
# Note that there is a difference from SYSTEMD_OFFLINE=1. Here we
# assume that there is no PID 1 to talk with.
    ret = bool(not booted(context) and salt.utils.path.which("systemctl"))
try:
context[contextkey] = ret
except TypeError:
pass
return ret
def version(context=None):
"""
Attempts to run systemctl --version. Returns None if unable to determine
version.
"""
contextkey = "salt.utils.systemd.version"
if isinstance(context, (dict, salt.loader.context.NamedLoaderContext)):
# Can't put this if block on the same line as the above if block,
# because it will break the elif below.
if contextkey in context:
return context[contextkey]
elif context is not None:
raise SaltInvocationError("context must be a dictionary if passed")
stdout = subprocess.Popen(
["systemctl", "--version"],
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
).communicate()[0]
outstr = salt.utils.stringutils.to_str(stdout)
try:
ret = int(re.search(r"\w+ ([0-9]+)", outstr.splitlines()[0]).group(1))
except (AttributeError, IndexError, ValueError):
log.error(
"Unable to determine systemd version from systemctl "
"--version, output follows:\n%s",
outstr,
)
return None
else:
try:
context[contextkey] = ret
except TypeError:
pass
return ret
def has_scope(context=None):
"""
Scopes were introduced in systemd 205, this function returns a boolean
which is true when the minion is systemd-booted and running systemd>=205.
"""
if not booted(context):
return False
_sd_version = version(context)
if _sd_version is None:
return False
return _sd_version >= 205
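# A minimal usage sketch (hypothetical context dict; results are cached
# under "salt.utils.systemd.*" keys so repeated calls stay cheap):
#
#     ctx = {}
#     if booted(ctx):
#         print("systemd version:", version(ctx))
#         print("supports scopes:", has_scope(ctx))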
def pid_to_service(pid):
"""
Check if a PID belongs to a systemd service and return its name.
Return None if the PID does not belong to a service.
Uses DBUS if available.
"""
if dbus:
return _pid_to_service_dbus(pid)
else:
return _pid_to_service_systemctl(pid)
def _pid_to_service_systemctl(pid):
systemd_cmd = ["systemctl", "--output", "json", "status", str(pid)]
try:
systemd_output = subprocess.run(
systemd_cmd, check=True, text=True, capture_output=True
)
status_json = salt.utils.json.find_json(systemd_output.stdout)
except (ValueError, subprocess.CalledProcessError):
return None
name = status_json.get("_SYSTEMD_UNIT")
if name and name.endswith(".service"):
return _strip_suffix(name)
else:
return None
def _pid_to_service_dbus(pid):
"""
Use DBUS to check if a PID belongs to a running systemd service and return the service name if it does.
"""
bus = dbus.SystemBus()
systemd_object = bus.get_object(
"org.freedesktop.systemd1", "/org/freedesktop/systemd1"
)
systemd = dbus.Interface(systemd_object, "org.freedesktop.systemd1.Manager")
try:
service_path = systemd.GetUnitByPID(pid)
service_object = bus.get_object("org.freedesktop.systemd1", service_path)
service_props = dbus.Interface(
service_object, "org.freedesktop.DBus.Properties"
)
service_name = service_props.Get("org.freedesktop.systemd1.Unit", "Id")
name = str(service_name)
if name and name.endswith(".service"):
return _strip_suffix(name)
else:
return None
except dbus.DBusException:
return None
def _strip_suffix(service_name):
"""
Strip ".service" suffix from a given service name.
"""
return service_name[:-8]
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/systemd.py
import logging
from contextlib import contextmanager
from salt.exceptions import CodePageError
log = logging.getLogger(__name__)
try:
import pywintypes
import win32console
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
# Although utils are often directly imported, it is also possible to use the loader.
def __virtual__():
"""
Only load if Win32 Libraries are installed
"""
if not HAS_WIN32:
return False, "This utility requires pywin32"
return "win_chcp"
@contextmanager
def chcp(page_id, raise_error=False):
"""
    Context manager that temporarily sets the console codepage, restoring
    the previous codepage on exit.
Args:
page_id (str, int):
A number representing the codepage.
raise_error (bool):
``True`` will raise an error if the codepage fails to change.
``False`` will suppress the error
Returns:
int: A number representing the codepage
Raises:
CodePageError: On unsuccessful codepage change
"""
    if not isinstance(page_id, int):
        try:
            page_id = int(page_id)
        except ValueError:
            error = "The `page_id` needs to be an integer, not {}".format(type(page_id))
            if raise_error:
                raise CodePageError(error)
            log.error(error)
            # Yield the error sentinel instead of returning; a bare return
            # before the yield would break the context manager protocol.
            yield -1
            return
previous_page_id = get_codepage_id(raise_error=raise_error)
if page_id and previous_page_id and page_id != previous_page_id:
set_code_page = True
else:
set_code_page = False
try:
if set_code_page:
set_codepage_id(page_id, raise_error=raise_error)
# Subprocesses started from now will use the set code page id
yield
finally:
if set_code_page:
# Reset to the old code page
set_codepage_id(previous_page_id, raise_error=raise_error)
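# A minimal usage sketch (65001 is the UTF-8 codepage; illustrative only):
#
#     with chcp(65001):
#         # subprocesses spawned here inherit the UTF-8 console codepage
#         ...
#     # the previous codepage is restored on exit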
def get_codepage_id(raise_error=False):
"""
Get the currently set code page on windows
Args:
raise_error (bool):
``True`` will raise an error if the codepage fails to change.
``False`` will suppress the error
Returns:
int: A number representing the codepage
Raises:
CodePageError: On unsuccessful codepage change
"""
try:
return win32console.GetConsoleCP()
except pywintypes.error as exc:
_, _, msg = exc.args
error = "Failed to get the windows code page: {}".format(msg)
if raise_error:
raise CodePageError(error)
else:
log.error(error)
return -1
def set_codepage_id(page_id, raise_error=False):
"""
Set the code page on windows
Args:
page_id (str, int):
A number representing the codepage.
raise_error (bool):
``True`` will raise an error if the codepage fails to change.
``False`` will suppress the error
Returns:
int: A number representing the codepage
Raises:
CodePageError: On unsuccessful codepage change
"""
if not isinstance(page_id, int):
try:
page_id = int(page_id)
except ValueError:
error = "The `page_id` needs to be an integer, not {}".format(type(page_id))
if raise_error:
raise CodePageError(error)
log.error(error)
return -1
try:
win32console.SetConsoleCP(page_id)
return get_codepage_id(raise_error=raise_error)
except pywintypes.error as exc:
_, _, msg = exc.args
error = "Failed to set the windows code page: {}".format(msg)
if raise_error:
raise CodePageError(error)
else:
log.error(error)
return -1
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/win_chcp.py
import http.client
import logging
import urllib.parse
import salt.utils.http
log = logging.getLogger(__name__)
def query(
function,
api_key=None,
args=None,
method="GET",
header_dict=None,
data=None,
opts=None,
):
"""
    Construct and execute a Slack API request.
:param api_key: The Slack api key.
:param function: The Slack api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method.
:return: The json response from the API call or False.
"""
ret = {"message": "", "res": True}
slack_functions = {
"rooms": {"request": "channels.list", "response": "channels"},
"users": {"request": "users.list", "response": "members"},
"message": {"request": "chat.postMessage", "response": "channel"},
}
if not api_key:
api_key = __salt__["config.get"]("slack.api_key") or __salt__["config.get"](
"slack:api_key"
)
if not api_key:
log.error("No Slack api key found.")
ret["message"] = "No Slack api key found."
ret["res"] = False
return ret
api_url = "https://slack.com"
base_url = urllib.parse.urljoin(api_url, "/api/")
path = slack_functions.get(function).get("request")
url = urllib.parse.urljoin(base_url, path, False)
if not isinstance(args, dict):
query_params = {}
else:
query_params = args.copy()
if header_dict is None:
header_dict = {}
if method != "POST":
header_dict["Accept"] = "application/json"
    # https://api.slack.com/changelog/2020-11-no-more-tokens-in-querystrings-for-newly-created-apps
# Apps created after February 24, 2021 may no longer send tokens as query
# parameters and must instead use an HTTP authorization header or
# send the token in an HTTP POST body.
# Apps created before February 24, 2021 will continue functioning no
# matter which way you pass your token.
header_dict["Authorization"] = "Bearer {}".format(api_key)
result = salt.utils.http.query(
url,
method,
params=query_params,
data=data,
decode=True,
status=True,
header_dict=header_dict,
opts=opts,
)
if result.get("status", None) == http.client.OK:
_result = result["dict"]
response = slack_functions.get(function).get("response")
if "error" in _result:
ret["message"] = _result["error"]
ret["res"] = False
return ret
ret["message"] = _result.get(response)
return ret
elif result.get("status", None) == http.client.NO_CONTENT:
return True
else:
log.debug(url)
log.debug(query_params)
log.debug(data)
log.debug(result)
if "dict" in result:
_result = result["dict"]
if "error" in _result:
ret["message"] = result["error"]
ret["res"] = False
return ret
ret["message"] = "Unknown response"
ret["res"] = False
else:
ret["message"] = "invalid_auth"
ret["res"] = False
return ret
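# A minimal usage sketch (hypothetical bot token and channel):
#
#     import urllib.parse
#     result = query(
#         function="message",
#         api_key="xoxb-not-a-real-token",
#         method="POST",
#         data=urllib.parse.urlencode({"channel": "#general", "text": "hi"}),
#     )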
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/slack.py
import salt.utils.platform
from salt.exceptions import CommandExecutionError
try:
import pywintypes
import win32security
import win32service
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
SERVICE_TYPE = {
1: "Kernel Driver",
2: "File System Driver",
4: "Adapter Driver",
8: "Recognizer Driver",
16: "Win32 Own Process",
32: "Win32 Share Process",
256: "Interactive",
"kernel": 1,
"filesystem": 2,
"adapter": 4,
"recognizer": 8,
"own": 16,
"share": 32,
}
SERVICE_CONTROLS = {
1: "Stop",
2: "Pause/Continue",
4: "Shutdown",
8: "Change Parameters",
16: "Netbind Change",
32: "Hardware Profile Change",
64: "Power Event",
128: "Session Change",
256: "Pre-Shutdown",
512: "Time Change",
1024: "Trigger Event",
}
SERVICE_STATE = {
1: "Stopped",
2: "Start Pending",
3: "Stop Pending",
4: "Running",
5: "Continue Pending",
6: "Pause Pending",
7: "Paused",
}
SERVICE_ERRORS = {0: "No Error", 1066: "Service Specific Error"}
SERVICE_START_TYPE = {
"boot": 0,
"system": 1,
"auto": 2,
"manual": 3,
"disabled": 4,
0: "Boot",
1: "System",
2: "Auto",
3: "Manual",
4: "Disabled",
}
SERVICE_ERROR_CONTROL = {
0: "Ignore",
1: "Normal",
2: "Severe",
3: "Critical",
"ignore": 0,
"normal": 1,
"severe": 2,
"critical": 3,
}
__virtualname__ = "win_service"
def __virtual__():
"""
Only load if Win32 Libraries are installed
"""
    if not salt.utils.platform.is_windows():
        return False, "win_service: Requires Windows"
    if not HAS_WIN32:
        return False, "win_service: Requires pywin32"
return __virtualname__
def info(name):
"""
Get information about a service on the system
Args:
name (str): The name of the service. This is not the display name. Use
``get_service_name`` to find the service name.
Returns:
dict: A dictionary containing information about the service.
CLI Example:
.. code-block:: bash
salt '*' service.info spooler
"""
try:
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_CONNECT
)
except pywintypes.error as exc:
raise CommandExecutionError(
"Failed to connect to the SCM: {}".format(exc.strerror)
)
try:
handle_svc = win32service.OpenService(
handle_scm,
name,
win32service.SERVICE_ENUMERATE_DEPENDENTS
| win32service.SERVICE_INTERROGATE
| win32service.SERVICE_QUERY_CONFIG
| win32service.SERVICE_QUERY_STATUS,
)
except pywintypes.error as exc:
raise CommandExecutionError("Failed To Open {}: {}".format(name, exc.strerror))
try:
config_info = win32service.QueryServiceConfig(handle_svc)
status_info = win32service.QueryServiceStatusEx(handle_svc)
try:
description = win32service.QueryServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION
)
except pywintypes.error:
description = "Failed to get description"
delayed_start = win32service.QueryServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO
)
finally:
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
ret = dict()
try:
sid = win32security.LookupAccountName("", "NT Service\\{}".format(name))[0]
ret["sid"] = win32security.ConvertSidToStringSid(sid)
except pywintypes.error:
ret["sid"] = "Failed to get SID"
ret["BinaryPath"] = config_info[3]
ret["LoadOrderGroup"] = config_info[4]
ret["TagID"] = config_info[5]
ret["Dependencies"] = config_info[6]
ret["ServiceAccount"] = config_info[7]
ret["DisplayName"] = config_info[8]
ret["Description"] = description
ret["Status_ServiceCode"] = status_info["ServiceSpecificExitCode"]
ret["Status_CheckPoint"] = status_info["CheckPoint"]
ret["Status_WaitHint"] = status_info["WaitHint"]
ret["StartTypeDelayed"] = delayed_start
flags = list()
for bit in SERVICE_TYPE:
if isinstance(bit, int):
if config_info[0] & bit:
flags.append(SERVICE_TYPE[bit])
ret["ServiceType"] = flags if flags else config_info[0]
flags = list()
for bit in SERVICE_CONTROLS:
if status_info["ControlsAccepted"] & bit:
flags.append(SERVICE_CONTROLS[bit])
ret["ControlsAccepted"] = flags if flags else status_info["ControlsAccepted"]
try:
ret["Status_ExitCode"] = SERVICE_ERRORS[status_info["Win32ExitCode"]]
except KeyError:
ret["Status_ExitCode"] = status_info["Win32ExitCode"]
try:
ret["StartType"] = SERVICE_START_TYPE[config_info[1]]
except KeyError:
ret["StartType"] = config_info[1]
try:
ret["ErrorControl"] = SERVICE_ERROR_CONTROL[config_info[2]]
except KeyError:
ret["ErrorControl"] = config_info[2]
try:
ret["Status"] = SERVICE_STATE[status_info["CurrentState"]]
except KeyError:
ret["Status"] = status_info["CurrentState"]
return ret
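# A minimal usage sketch (requires Windows with pywin32; "Spooler" is just
# an example service name):
#
#     details = info("Spooler")
#     details["Status"]     # e.g. "Running"
#     details["StartType"]  # e.g. "Auto"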
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/win_service.py
graph_prefix = "\x1b["
graph_suffix = "m"
codes = {
"reset": "0",
"bold": "1",
"faint": "2",
"italic": "3",
"underline": "4",
"blink": "5",
"slow_blink": "5",
"fast_blink": "6",
"inverse": "7",
"conceal": "8",
"strike": "9",
"primary_font": "10",
"reset_font": "10",
"font_0": "10",
"font_1": "11",
"font_2": "12",
"font_3": "13",
"font_4": "14",
"font_5": "15",
"font_6": "16",
"font_7": "17",
"font_8": "18",
"font_9": "19",
"fraktur": "20",
"double_underline": "21",
"end_bold": "21",
"normal_intensity": "22",
"end_italic": "23",
"end_fraktur": "23",
"end_underline": "24", # single or double
"end_blink": "25",
"end_inverse": "27",
"end_conceal": "28",
"end_strike": "29",
"black": "30",
"red": "31",
"green": "32",
"yellow": "33",
"blue": "34",
"magenta": "35",
"cyan": "36",
"white": "37",
"extended": "38",
"default": "39",
"fg_black": "30",
"fg_red": "31",
"fg_green": "32",
"fg_yellow": "33",
"fg_blue": "34",
"fg_magenta": "35",
"fg_cyan": "36",
"fg_white": "37",
"fg_extended": "38",
"fg_default": "39",
"bg_black": "40",
"bg_red": "41",
"bg_green": "42",
"bg_yellow": "44",
"bg_blue": "44",
"bg_magenta": "45",
"bg_cyan": "46",
"bg_white": "47",
"bg_extended": "48",
"bg_default": "49",
"frame": "51",
"encircle": "52",
"overline": "53",
"end_frame": "54",
"end_encircle": "54",
"end_overline": "55",
"ideogram_underline": "60",
"right_line": "60",
"ideogram_double_underline": "61",
"right_double_line": "61",
"ideogram_overline": "62",
"left_line": "62",
"ideogram_double_overline": "63",
"left_double_line": "63",
"ideogram_stress": "64",
"reset_ideogram": "65",
}
class TextFormat:
"""
ANSI Select Graphic Rendition (SGR) code escape sequence.
"""
def __init__(self, *attrs, **kwargs):
"""
:param attrs: are the attribute names of any format codes in `codes`
:param kwargs: may contain
`x`, an integer in the range [0-255] that selects the corresponding
color from the extended ANSI 256 color space for foreground text
`rgb`, an iterable of 3 integers in the range [0-255] that select the
corresponding colors from the extended ANSI 256^3 color space for
foreground text
`bg_x`, an integer in the range [0-255] that selects the corresponding
color from the extended ANSI 256 color space for background text
`bg_rgb`, an iterable of 3 integers in the range [0-255] that select
the corresponding colors from the extended ANSI 256^3 color space for
background text
`reset`, prepend reset SGR code to sequence (default `True`)
Examples:
.. code-block:: python
red_underlined = TextFormat('red', 'underline')
nuanced_text = TextFormat(x=29, bg_x=71)
magenta_on_green = TextFormat('magenta', 'bg_green')
print('{}Can you read this?{}'.format(magenta_on_green, TextFormat('reset')))
"""
self.codes = [codes[attr.lower()] for attr in attrs if isinstance(attr, str)]
if kwargs.get("reset", True):
self.codes[:0] = [codes["reset"]]
def qualify_int(i):
if isinstance(i, int):
return i % 256 # set i to base element of its equivalence class
def qualify_triple_int(t):
if isinstance(t, (list, tuple)) and len(t) == 3:
return qualify_int(t[0]), qualify_int(t[1]), qualify_int(t[2])
if kwargs.get("x", None) is not None:
self.codes.extend((codes["extended"], "5", qualify_int(kwargs["x"])))
elif kwargs.get("rgb", None) is not None:
self.codes.extend((codes["extended"], "2"))
self.codes.extend(*qualify_triple_int(kwargs["rgb"]))
if kwargs.get("bg_x", None) is not None:
self.codes.extend((codes["extended"], "5", qualify_int(kwargs["bg_x"])))
elif kwargs.get("bg_rgb", None) is not None:
self.codes.extend((codes["extended"], "2"))
self.codes.extend(*qualify_triple_int(kwargs["bg_rgb"]))
self.sequence = "{}{}{}".format(
graph_prefix, ";".join(self.codes), graph_suffix
)
def __call__(self, text, reset=True):
"""
Format :param text: by prefixing `self.sequence` and suffixing the
reset sequence if :param reset: is `True`.
Examples:
.. code-block:: python
green_blink_text = TextFormat('blink', 'green')
'The answer is: {0}'.format(green_blink_text(42))
"""
end = TextFormat("reset") if reset else ""
return "{}{}{}".format(self.sequence, text, end)
def __str__(self):
return self.sequence
def __repr__(self):
return self.sequence
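# A minimal usage sketch (illustrative only):
#
#     warn = TextFormat("bold", "red")
#     print(warn("something went wrong"))     # bold red text, reset afterwards
#     highlight = TextFormat(x=208, bg_x=17)  # 256-color fg/bg selection
#     print(highlight("orange text on a deep blue background"))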
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/textformat.py
import logging
import os
import shutil
import sys
import tempfile
from datetime import date
import salt.utils.files
import salt.utils.stringutils
import salt.version
from jinja2 import Template
from salt.serializers.yaml import deserialize
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
try:
import click
HAS_CLICK = True
except ImportError:
HAS_CLICK = False
TEMPLATE_FILE_NAME = "template.yml"
def _get_template(path, option_key):
"""
Get the contents of a template file and provide it as a module type
:param path: path to the template.yml file
:type path: ``str``
:param option_key: The unique key of this template
:type option_key: ``str``
:returns: Details about the template
:rtype: ``tuple``
"""
with salt.utils.files.fopen(path, "r") as template_f:
template = deserialize(template_f)
info = (option_key, template.get("description", ""), template)
return info
def _fetch_templates(src):
"""
Fetch all of the templates in the src directory
:param src: The source path
:type src: ``str``
:rtype: ``list`` of ``tuple``
:returns: ``list`` of ('key', 'description')
"""
templates = []
log.debug("Listing contents of %s", src)
for item in os.listdir(src):
s = os.path.join(src, item)
if os.path.isdir(s):
template_path = os.path.join(s, TEMPLATE_FILE_NAME)
if os.path.isfile(template_path):
templates.append(_get_template(template_path, item))
else:
log.debug(
"Directory does not contain %s %s",
template_path,
TEMPLATE_FILE_NAME,
)
return templates
def _mergetree(src, dst):
"""
Akin to shutils.copytree but over existing directories, does a recursive merge copy.
:param src: The source path
:type src: ``str``
:param dst: The destination path
:type dst: ``str``
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
log.info("Copying folder %s to %s", s, d)
if os.path.exists(d):
_mergetree(s, d)
else:
shutil.copytree(s, d)
else:
log.info("Copying file %s to %s", s, d)
shutil.copy2(s, d)
def _mergetreejinja(src, dst, context):
"""
Merge directory A to directory B, apply Jinja2 templating to both
the file/folder names AND to the contents of the files
:param src: The source path
:type src: ``str``
:param dst: The destination path
:type dst: ``str``
:param context: The dictionary to inject into the Jinja template as context
:type context: ``dict``
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
log.info("Copying folder %s to %s", s, d)
if os.path.exists(d):
_mergetreejinja(s, d, context)
else:
os.mkdir(d)
_mergetreejinja(s, d, context)
else:
if item != TEMPLATE_FILE_NAME:
d = Template(d).render(context)
log.info("Copying file %s to %s", s, d)
with salt.utils.files.fopen(s, "r") as source_file:
src_contents = salt.utils.stringutils.to_unicode(source_file.read())
dest_contents = Template(src_contents).render(context)
with salt.utils.files.fopen(d, "w") as dest_file:
dest_file.write(salt.utils.stringutils.to_str(dest_contents))
def _prompt_user_variable(var_name, default_value):
"""
Prompt the user to enter the value of a variable
:param var_name: The question to ask the user
:type var_name: ``str``
:param default_value: The default value
:type default_value: ``str``
:rtype: ``str``
:returns: the value from the user
"""
return click.prompt(var_name, default=default_value)
def _prompt_choice(var_name, options):
"""
Prompt the user to choose between a list of options, index each one by adding an enumerator
based on https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/prompt.py#L51
:param var_name: The question to ask the user
:type var_name: ``str``
:param options: A list of options
    :type options: ``list`` of ``tuple``
    :rtype: ``tuple``
    :returns: The option tuple selected by the user
"""
choice_map = OrderedDict(
("{}".format(i), value)
for i, value in enumerate(options, 1)
if value[0] != "test"
)
choices = choice_map.keys()
default = "1"
choice_lines = [
"{} - {} - {}".format(c[0], c[1][0], c[1][1]) for c in choice_map.items()
]
prompt = "\n".join(
(
"Select {}:".format(var_name),
"\n".join(choice_lines),
"Choose from {}".format(", ".join(choices)),
)
)
user_choice = click.prompt(prompt, type=click.Choice(choices), default=default)
return choice_map[user_choice]
def apply_template(template_dir, output_dir, context):
"""
Apply the template from the template directory to the output
using the supplied context dict.
:param src: The source path
:type src: ``str``
:param dst: The destination path
:type dst: ``str``
:param context: The dictionary to inject into the Jinja template as context
:type context: ``dict``
"""
_mergetreejinja(template_dir, output_dir, context)
def run(
extension=None,
name=None,
description=None,
salt_dir=None,
merge=False,
temp_dir=None,
):
"""
A template factory for extending the salt ecosystem
:param extension: The extension type, e.g. 'module', 'state', if omitted, user will be prompted
:type extension: ``str``
:param name: Python-friendly name for the module, if omitted, user will be prompted
:type name: ``str``
:param description: A description of the extension, if omitted, user will be prompted
:type description: ``str``
:param salt_dir: The targeted Salt source directory
:type salt_dir: ``str``
:param merge: Merge with salt directory, `False` to keep separate, `True` to merge trees.
:type merge: ``bool``
:param temp_dir: The directory for generated code, if omitted, system temp will be used
:type temp_dir: ``str``
"""
if not HAS_CLICK:
print("click is not installed, please install using pip")
sys.exit(1)
if salt_dir is None:
salt_dir = "."
MODULE_OPTIONS = _fetch_templates(os.path.join(salt_dir, "templates"))
if extension is None:
print("Choose which type of extension you are developing for SaltStack")
extension_type = "Extension type"
chosen_extension = _prompt_choice(extension_type, MODULE_OPTIONS)
else:
if extension not in list(zip(*MODULE_OPTIONS))[0]:
print("Module extension option not valid")
sys.exit(1)
chosen_extension = [m for m in MODULE_OPTIONS if m[0] == extension][0]
extension_type = chosen_extension[0]
extension_context = chosen_extension[2]
if name is None:
print("Enter the short name for the module (e.g. mymodule)")
name = _prompt_user_variable("Module name", "")
if description is None:
description = _prompt_user_variable("Short description of the module", "")
template_dir = "templates/{}".format(extension_type)
module_name = name
param_dict = {
"version": salt.version.SaltStackVersion.next_release().name,
"module_name": module_name,
"short_description": description,
"release_date": date.today().strftime("%Y-%m-%d"),
"year": date.today().strftime("%Y"),
}
# get additional questions from template
additional_context = {}
for key, val in extension_context.get("questions", {}).items():
# allow templates to be used in default values.
default = Template(val.get("default", "")).render(param_dict)
prompt_var = _prompt_user_variable(val["question"], default)
additional_context[key] = prompt_var
context = param_dict.copy()
context.update(extension_context)
context.update(additional_context)
if temp_dir is None:
temp_dir = tempfile.mkdtemp()
apply_template(template_dir, temp_dir, context)
if not merge:
path = temp_dir
else:
_mergetree(temp_dir, salt_dir)
path = salt_dir
log.info("New module stored in %s", path)
return path
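# A minimal usage sketch (run from a Salt source checkout so that the
# ``templates`` directory is present; values are illustrative):
#
#     path = run(extension="module", name="mymodule",
#                description="An example execution module", merge=False)
#     # the rendered files are written to a temp dir and its path returned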
if __name__ == "__main__":
run()
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/extend.py
import logging
import subprocess
import salt.utils.args
import salt.utils.data
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
try:
import win32com.client
import pywintypes
HAS_PYWIN32 = True
except ImportError:
HAS_PYWIN32 = False
log = logging.getLogger(__name__)
REBOOT_BEHAVIOR = {
0: "Never Requires Reboot",
1: "Always Requires Reboot",
2: "Can Require Reboot",
}
__virtualname__ = "win_update"
def __virtual__():
if not salt.utils.platform.is_windows():
return False, "win_update: Only available on Windows"
if not HAS_PYWIN32:
return False, "win_update: Missing pywin32"
return __virtualname__
class Updates:
"""
Wrapper around the 'Microsoft.Update.UpdateColl' instance
    Adds the list and summary functions. For use by the WindowsUpdateAgent class.
Code Example:
.. code-block:: python
# Create an instance
updates = Updates()
# Bind to the collection object
found = updates.updates
# This exposes Collections properties and methods
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386107(v=vs.85).aspx
found.Count
found.Add
# To use custom functions, use the original instance
# Return the number of updates inside the collection
updates.count()
# Return a list of updates in the collection and details in a dictionary
updates.list()
# Return a summary of the contents of the updates collection
updates.summary()
"""
update_types = {1: "Software", 2: "Driver"}
def __init__(self):
"""
Initialize the updates collection. Can be accessed via
``Updates.updates``
"""
with salt.utils.winapi.Com():
self.updates = win32com.client.Dispatch("Microsoft.Update.UpdateColl")
def count(self):
"""
Return how many records are in the Microsoft Update Collection
Returns:
int: The number of updates in the collection
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.count()
"""
return self.updates.Count
def list(self):
"""
Create a dictionary with the details for the updates in the collection.
Returns:
dict: Details about each update
.. code-block:: cfg
Dict of Updates:
{'<GUID>': {
'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>,
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [
'<category 1>',
'<category 2>',
... ]
}}
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.list()
"""
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
if self.count() == 0:
return "Nothing to return"
log.debug("Building a detailed report of the results.")
# Build a dictionary containing details for each update
results = {}
for update in self.updates:
# Windows 10 build 2004 introduced some problems with the
# InstallationBehavior COM Object. See
# https://github.com/saltstack/salt/issues/57762 for more details.
# The following 2 try/except blocks will output sane defaults
try:
user_input = bool(update.InstallationBehavior.CanRequestUserInput)
except AttributeError:
log.debug(
"Windows Update: Error reading InstallationBehavior COM Object"
)
user_input = False
try:
requires_reboot = update.InstallationBehavior.RebootBehavior
except AttributeError:
log.debug(
"Windows Update: Error reading InstallationBehavior COM Object"
)
requires_reboot = 2
# IUpdate Properties
# https://docs.microsoft.com/en-us/windows/win32/wua_sdk/iupdate-properties
results[update.Identity.UpdateID] = {
"guid": update.Identity.UpdateID,
"Title": str(update.Title),
"Type": self.update_types[update.Type],
"Description": update.Description,
"Downloaded": bool(update.IsDownloaded),
"Installed": bool(update.IsInstalled),
"Mandatory": bool(update.IsMandatory),
"EULAAccepted": bool(update.EulaAccepted),
"NeedsReboot": bool(update.RebootRequired),
"Severity": str(update.MsrcSeverity),
"UserInput": user_input,
"RebootBehavior": REBOOT_BEHAVIOR[requires_reboot],
"KBs": ["KB" + item for item in update.KBArticleIDs],
"Categories": [item.Name for item in update.Categories],
"SupportUrl": update.SupportUrl,
}
return results
def summary(self):
"""
Create a dictionary with a summary of the updates in the collection.
Returns:
dict: Summary of the contents of the collection
.. code-block:: cfg
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': {
<category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.summary()
"""
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
if self.count() == 0:
return "Nothing to return"
# Build a dictionary containing a summary of updates available
results = {
"Total": 0,
"Available": 0,
"Downloaded": 0,
"Installed": 0,
"Categories": {},
"Severity": {},
}
for update in self.updates:
# Count the total number of updates available
results["Total"] += 1
# Updates available for download
if not salt.utils.data.is_true(
update.IsDownloaded
) and not salt.utils.data.is_true(update.IsInstalled):
results["Available"] += 1
# Updates downloaded awaiting install
if salt.utils.data.is_true(
update.IsDownloaded
) and not salt.utils.data.is_true(update.IsInstalled):
results["Downloaded"] += 1
# Updates installed
if salt.utils.data.is_true(update.IsInstalled):
results["Installed"] += 1
# Add Categories and increment total for each one
# The sum will be more than the total because each update can have
# multiple categories
for category in update.Categories:
if category.Name in results["Categories"]:
results["Categories"][category.Name] += 1
else:
results["Categories"][category.Name] = 1
# Add Severity Summary
if update.MsrcSeverity:
if update.MsrcSeverity in results["Severity"]:
results["Severity"][update.MsrcSeverity] += 1
else:
results["Severity"][update.MsrcSeverity] = 1
return results
class WindowsUpdateAgent:
"""
Class for working with the Windows update agent
"""
# Error codes found at the following site:
# https://msdn.microsoft.com/en-us/library/windows/desktop/hh968413(v=vs.85).aspx
# https://technet.microsoft.com/en-us/library/cc720442(v=ws.10).aspx
fail_codes = {
-2145107924: "WinHTTP Send/Receive failed: 0x8024402C",
-2145124300: "Download failed: 0x80240034",
-2145124302: "Invalid search criteria: 0x80240032",
-2145124305: "Cancelled by policy: 0x8024002F",
-2145124307: "Missing source: 0x8024002D",
-2145124308: "Missing source: 0x8024002C",
-2145124312: "Uninstall not allowed: 0x80240028",
-2145124315: "Prevented by policy: 0x80240025",
-2145124316: "No Updates: 0x80240024",
-2145124322: "Service being shutdown: 0x8024001E",
-2145124325: "Self Update in Progress: 0x8024001B",
-2145124327: "Exclusive Install Conflict: 0x80240019",
-2145124330: "Install not allowed: 0x80240016",
-2145124333: "Duplicate item: 0x80240013",
-2145124341: "Operation cancelled: 0x8024000B",
-2145124343: "Operation in progress: 0x80240009",
        -2145124284: "Access Denied: 0x80240044",
-2145124283: "Unsupported search scope: 0x80240045",
-2147024891: "Access is denied: 0x80070005",
-2149843018: "Setup in progress: 0x8024004A",
-4292599787: "Install still pending: 0x00242015",
-4292607992: "Already downloaded: 0x00240008",
-4292607993: "Already uninstalled: 0x00240007",
-4292607994: "Already installed: 0x00240006",
-4292607995: "Reboot required: 0x00240005",
}
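    # A minimal sketch of how fail_codes is consumed (mirrors the pattern in
    # refresh/download/install below; ``exc`` is the nested exception tuple
    # from the pywintypes com_error args):
    #
    #     try:
    #         result = searcher.Search(search_string)
    #     except pywintypes.com_error as error:
    #         hr, msg, exc, arg = error.args
    #         failure = self.fail_codes.get(exc[5], "Unknown Failure: {}".format(error))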
def __init__(self, online=True):
"""
Initialize the session and load all updates into the ``_updates``
collection. This collection is used by the other class functions instead
of querying Windows update (expensive).
Args:
online (bool):
Tells the Windows Update Agent go online to update its local
update database. ``True`` will go online. ``False`` will use the
local update database as is. Default is ``True``
.. versionadded:: 3001
Need to look at the possibility of loading this into ``__context__``
"""
# Initialize the PyCom system
with salt.utils.winapi.Com():
# Create a session with the Windows Update Agent
self._session = win32com.client.Dispatch("Microsoft.Update.Session")
# Create Collection for Updates
self._updates = win32com.client.Dispatch("Microsoft.Update.UpdateColl")
self.refresh(online=online)
def updates(self):
"""
Get the contents of ``_updates`` (all updates) and puts them in an
Updates class to expose the list and summary functions.
Returns:
Updates:
An instance of the Updates class with all updates for the
system.
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
updates = wua.updates()
# To get a list
updates.list()
# To get a summary
updates.summary()
"""
updates = Updates()
found = updates.updates
for update in self._updates:
found.Add(update)
return updates
def refresh(self, online=True):
"""
Refresh the contents of the ``_updates`` collection. This gets all
updates in the Windows Update system and loads them into the collection.
This is the part that is slow.
Args:
online (bool):
Tells the Windows Update Agent go online to update its local
update database. ``True`` will go online. ``False`` will use the
local update database as is. Default is ``True``
.. versionadded:: 3001
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
wua.refresh()
"""
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386526(v=vs.85).aspx
search_string = "Type='Software' or Type='Driver'"
# Create searcher object
searcher = self._session.CreateUpdateSearcher()
searcher.Online = online
self._session.ClientApplicationID = "Salt: Load Updates"
# Load all updates into the updates collection
try:
results = searcher.Search(search_string)
if results.Updates.Count == 0:
log.debug("No Updates found for:\n\t\t%s", search_string)
return "No Updates found: {}".format(search_string)
except pywintypes.com_error as error:
# Something happened, raise an error
hr, msg, exc, arg = error.args # pylint: disable=W0633
try:
failure_code = self.fail_codes[exc[5]]
except KeyError:
failure_code = "Unknown Failure: {}".format(error)
log.error("Search Failed: %s\n\t\t%s", failure_code, search_string)
raise CommandExecutionError(failure_code)
self._updates = results.Updates
def installed(self):
"""
Gets a list of all updates available on the system that have the
``IsInstalled`` attribute set to ``True``.
Returns:
Updates: An instance of Updates with the results.
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent(online=False)
installed_updates = wua.installed()
"""
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
updates = Updates()
for update in self._updates:
if salt.utils.data.is_true(update.IsInstalled):
updates.updates.Add(update)
return updates
def available(
self,
skip_hidden=True,
skip_installed=True,
skip_mandatory=False,
skip_reboot=False,
software=True,
drivers=True,
categories=None,
severities=None,
):
"""
Gets a list of all updates available on the system that match the passed
criteria.
Args:
skip_hidden (bool):
Skip hidden updates. Default is ``True``
skip_installed (bool):
Skip installed updates. Default is ``True``
skip_mandatory (bool):
Skip mandatory updates. Default is ``False``
skip_reboot (bool):
Skip updates that can or do require reboot. Default is ``False``
software (bool):
Include software updates. Default is ``True``
drivers (bool):
Include driver updates. Default is ``True``
categories (list):
Include updates that have these categories. Default is none
(all categories). Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Update Rollups
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Include updates that have these severities. Default is none
(all severities). Severities include the following:
* Critical
* Important
.. note::
All updates are either software or driver updates. If both
``software`` and ``drivers`` is ``False``, nothing will be returned.
Returns:
Updates: An instance of Updates with the results of the search.
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# Gets all updates and shows a summary
updates = wua.available()
updates.summary()
# Get a list of Critical updates
updates = wua.available(categories=['Critical Updates'])
updates.list()
"""
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
updates = Updates()
found = updates.updates
for update in self._updates:
if salt.utils.data.is_true(update.IsHidden) and skip_hidden:
continue
if salt.utils.data.is_true(update.IsInstalled) and skip_installed:
continue
if salt.utils.data.is_true(update.IsMandatory) and skip_mandatory:
continue
# Windows 10 build 2004 introduced some problems with the
# InstallationBehavior COM Object. See
# https://github.com/saltstack/salt/issues/57762 for more details.
# The following try/except block will default to True
try:
requires_reboot = salt.utils.data.is_true(
update.InstallationBehavior.RebootBehavior
)
except AttributeError:
log.debug(
"Windows Update: Error reading InstallationBehavior COM Object"
)
requires_reboot = True
if requires_reboot and skip_reboot:
continue
if not software and update.Type == 1:
continue
if not drivers and update.Type == 2:
continue
if categories is not None:
match = False
for category in update.Categories:
if category.Name in categories:
match = True
if not match:
continue
if severities is not None:
if update.MsrcSeverity not in severities:
continue
found.Add(update)
return updates
def search(self, search_string):
"""
Search for either a single update or a specific list of updates. GUIDs
are searched first, then KB numbers, and finally Titles.
Args:
search_string (str, list):
The search string to use to find the update. This can be the
GUID or KB of the update (preferred). It can also be the full
Title of the update or any part of the Title. A partial Title
search is less specific and can return multiple results.
Returns:
Updates: An instance of Updates with the results of the search
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# search for a single update and show its details
updates = wua.search('KB3194343')
updates.list()
# search for a list of updates and show their details
updates = wua.search(['KB3195432', '12345678-abcd-1234-abcd-1234567890ab'])
updates.list()
"""
updates = Updates()
found = updates.updates
if isinstance(search_string, str):
search_string = [search_string]
if isinstance(search_string, int):
search_string = [str(search_string)]
for update in self._updates:
for find in search_string:
# Search by GUID
if find == update.Identity.UpdateID:
found.Add(update)
continue
# Search by KB
if find in ["KB" + item for item in update.KBArticleIDs]:
found.Add(update)
continue
# Search by KB without the KB in front
if find in [item for item in update.KBArticleIDs]:
found.Add(update)
continue
# Search by Title
if find in update.Title:
found.Add(update)
continue
return updates
def download(self, updates):
"""
Download the updates passed in the updates collection. Load the updates
collection using ``search`` or ``available``
Args:
updates (Updates):
                An instance of the Updates class containing the updates to be
downloaded.
Returns:
dict: A dictionary containing the results of the download
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# Download KB3195454
updates = wua.search('KB3195454')
results = wua.download(updates)
"""
# Check for empty list
if updates.count() == 0:
ret = {"Success": False, "Updates": "Nothing to download"}
return ret
# Initialize the downloader object and list collection
downloader = self._session.CreateUpdateDownloader()
self._session.ClientApplicationID = "Salt: Download Update"
with salt.utils.winapi.Com():
download_list = win32com.client.Dispatch("Microsoft.Update.UpdateColl")
ret = {"Updates": {}}
# Check for updates that aren't already downloaded
for update in updates.updates:
# Define uid to keep the lines shorter
uid = update.Identity.UpdateID
ret["Updates"][uid] = {}
ret["Updates"][uid]["Title"] = update.Title
ret["Updates"][uid]["AlreadyDownloaded"] = bool(update.IsDownloaded)
# Accept EULA
if not salt.utils.data.is_true(update.EulaAccepted):
log.debug("Accepting EULA: %s", update.Title)
update.AcceptEula() # pylint: disable=W0104
# Update already downloaded
if not salt.utils.data.is_true(update.IsDownloaded):
log.debug("To Be Downloaded: %s", uid)
log.debug("\tTitle: %s", update.Title)
download_list.Add(update)
# Check the download list
if download_list.Count == 0:
ret = {"Success": True, "Updates": "Nothing to download"}
return ret
# Send the list to the downloader
downloader.Updates = download_list
# Download the list
try:
log.debug("Downloading Updates")
result = downloader.Download()
except pywintypes.com_error as error:
# Something happened, raise an error
hr, msg, exc, arg = error.args # pylint: disable=W0633
try:
failure_code = self.fail_codes[exc[5]]
except KeyError:
failure_code = "Unknown Failure: {}".format(error)
log.error("Download Failed: %s", failure_code)
raise CommandExecutionError(failure_code)
# Lookup dictionary
result_code = {
0: "Download Not Started",
1: "Download In Progress",
2: "Download Succeeded",
3: "Download Succeeded With Errors",
4: "Download Failed",
5: "Download Aborted",
}
log.debug("Download Complete")
log.debug(result_code[result.ResultCode])
ret["Message"] = result_code[result.ResultCode]
# Was the download successful?
if result.ResultCode in [2, 3]:
log.debug("Downloaded Successfully")
ret["Success"] = True
else:
log.debug("Download Failed")
ret["Success"] = False
# Report results for each update
for i in range(download_list.Count):
uid = download_list.Item(i).Identity.UpdateID
ret["Updates"][uid]["Result"] = result_code[
result.GetUpdateResult(i).ResultCode
]
return ret
def install(self, updates):
"""
Install the updates passed in the updates collection. Load the updates
collection using the ``search`` or ``available`` functions. If the
updates need to be downloaded, use the ``download`` function.
Args:
updates (Updates):
An instance of the Updates class containing the updates to be
installed.
Returns:
dict: A dictionary containing the results of the installation
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# install KB3195454
updates = wua.search('KB3195454')
results = wua.download(updates)
results = wua.install(updates)
"""
# Check for empty list
if updates.count() == 0:
ret = {"Success": False, "Updates": "Nothing to install"}
return ret
installer = self._session.CreateUpdateInstaller()
self._session.ClientApplicationID = "Salt: Install Update"
with salt.utils.winapi.Com():
install_list = win32com.client.Dispatch("Microsoft.Update.UpdateColl")
ret = {"Updates": {}}
# Check for updates that aren't already installed
for update in updates.updates:
# Define uid to keep the lines shorter
uid = update.Identity.UpdateID
ret["Updates"][uid] = {}
ret["Updates"][uid]["Title"] = update.Title
ret["Updates"][uid]["AlreadyInstalled"] = bool(update.IsInstalled)
# Make sure the update has actually been installed
if not salt.utils.data.is_true(update.IsInstalled):
log.debug("To Be Installed: %s", uid)
log.debug("\tTitle: %s", update.Title)
install_list.Add(update)
# Check the install list
if install_list.Count == 0:
ret = {"Success": True, "Updates": "Nothing to install"}
return ret
# Send the list to the installer
installer.Updates = install_list
# Install the list
try:
log.debug("Installing Updates")
result = installer.Install()
except pywintypes.com_error as error:
# Something happened, raise an error
hr, msg, exc, arg = error.args # pylint: disable=W0633
try:
failure_code = self.fail_codes[exc[5]]
except KeyError:
failure_code = "Unknown Failure: {}".format(error)
log.error("Install Failed: %s", failure_code)
raise CommandExecutionError(failure_code)
# Lookup dictionary
result_code = {
0: "Installation Not Started",
1: "Installation In Progress",
2: "Installation Succeeded",
3: "Installation Succeeded With Errors",
4: "Installation Failed",
5: "Installation Aborted",
}
log.debug("Install Complete")
log.debug(result_code[result.ResultCode])
ret["Message"] = result_code[result.ResultCode]
if result.ResultCode in [2, 3]:
ret["Success"] = True
ret["NeedsReboot"] = result.RebootRequired
log.debug("NeedsReboot: %s", result.RebootRequired)
else:
log.debug("Install Failed")
ret["Success"] = False
for i in range(install_list.Count):
uid = install_list.Item(i).Identity.UpdateID
ret["Updates"][uid]["Result"] = result_code[
result.GetUpdateResult(i).ResultCode
]
# Windows 10 build 2004 introduced some problems with the
# InstallationBehavior COM Object. See
# https://github.com/saltstack/salt/issues/57762 for more details.
# The following try/except block will default to 2
try:
reboot_behavior = install_list.Item(
i
).InstallationBehavior.RebootBehavior
except AttributeError:
log.debug(
"Windows Update: Error reading InstallationBehavior COM Object"
)
reboot_behavior = 2
ret["Updates"][uid]["RebootBehavior"] = REBOOT_BEHAVIOR[reboot_behavior]
return ret
def uninstall(self, updates):
"""
Uninstall the updates passed in the updates collection. Load the updates
collection using the ``search`` or ``available`` functions.
.. note::
Starting with Windows 10 the Windows Update Agent is unable to
uninstall updates. An ``Uninstall Not Allowed`` error is returned.
If this error is encountered this function will instead attempt to
use ``dism.exe`` to perform the un-installation. ``dism.exe`` may
fail to find the KB number for the package. In that case, removal
will fail.
Args:
updates (Updates):
An instance of the Updates class containing the updates to be
uninstalled.
Returns:
dict: A dictionary containing the results of the un-installation
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# uninstall KB3195454
updates = wua.search('KB3195454')
results = wua.uninstall(updates)
"""
# This doesn't work with the WUA API since Windows 10. It always returns
# "0x80240028 # Uninstall not allowed". The full message is: "The update
# could not be uninstalled because the request did not originate from a
# Windows Server Update Services (WSUS) server.
# Check for empty list
if updates.count() == 0:
ret = {"Success": False, "Updates": "Nothing to uninstall"}
return ret
installer = self._session.CreateUpdateInstaller()
self._session.ClientApplicationID = "Salt: Uninstall Update"
with salt.utils.winapi.Com():
uninstall_list = win32com.client.Dispatch("Microsoft.Update.UpdateColl")
ret = {"Updates": {}}
# Check for updates that aren't already installed
for update in updates.updates:
# Define uid to keep the lines shorter
uid = update.Identity.UpdateID
ret["Updates"][uid] = {}
ret["Updates"][uid]["Title"] = update.Title
ret["Updates"][uid]["AlreadyUninstalled"] = not bool(update.IsInstalled)
# Make sure the update has actually been Uninstalled
if salt.utils.data.is_true(update.IsInstalled):
log.debug("To Be Uninstalled: %s", uid)
log.debug("\tTitle: %s", update.Title)
uninstall_list.Add(update)
# Check the install list
if uninstall_list.Count == 0:
ret = {"Success": False, "Updates": "Nothing to uninstall"}
return ret
# Send the list to the installer
installer.Updates = uninstall_list
# Uninstall the list
try:
log.debug("Uninstalling Updates")
result = installer.Uninstall()
except pywintypes.com_error as error:
# Something happened, return error or try using DISM
hr, msg, exc, arg = error.args # pylint: disable=W0633
try:
failure_code = self.fail_codes[exc[5]]
except KeyError:
failure_code = "Unknown Failure: {}".format(error)
# If "Uninstall Not Allowed" error, try using DISM
if exc[5] == -2145124312:
log.debug("Uninstall Failed with WUA, attempting with DISM")
try:
# Go through each update...
for item in uninstall_list:
# Look for the KB numbers
for kb in item.KBArticleIDs:
# Get the list of packages
cmd = ["dism", "/Online", "/Get-Packages"]
# _run() returns raw bytes; decode before doing string matching
pkg_list = self._run(cmd)[0].decode(errors="replace").splitlines()
# Find the KB in the pkg_list
# (use a distinct loop variable so the outer ``item`` is not shadowed)
for pkg_line in pkg_list:
# Uninstall if found
if "kb" + kb in pkg_line.lower():
pkg = pkg_line.split(" : ")[1]
ret["DismPackage"] = pkg
cmd = [
"dism",
"/Online",
"/Remove-Package",
"/PackageName:{}".format(pkg),
"/Quiet",
"/NoRestart",
]
self._run(cmd)
except CommandExecutionError as exc:
log.debug("Uninstall using DISM failed")
log.debug("Command: %s", " ".join(cmd))
log.debug("Error: %s", exc)
raise CommandExecutionError(
"Uninstall using DISM failed: {}".format(exc)
)
# DISM Uninstall Completed Successfully
log.debug("Uninstall Completed using DISM")
# Populate the return dictionary
ret["Success"] = True
ret["Message"] = "Uninstalled using DISM"
ret["NeedsReboot"] = needs_reboot()
log.debug("NeedsReboot: %s", ret["NeedsReboot"])
# Refresh the Updates Table
self.refresh(online=False)
# Check the status of each update
for update in self._updates:
uid = update.Identity.UpdateID
for item in uninstall_list:
if item.Identity.UpdateID == uid:
if not update.IsInstalled:
ret["Updates"][uid][
"Result"
] = "Uninstallation Succeeded"
else:
ret["Updates"][uid][
"Result"
] = "Uninstallation Failed"
# Windows 10 build 2004 introduced some problems with the
# InstallationBehavior COM Object. See
# https://github.com/saltstack/salt/issues/57762 for more details.
# The following try/except block will default to 2
try:
requires_reboot = (
update.InstallationBehavior.RebootBehavior
)
except AttributeError:
log.debug(
"Windows Update: Error reading"
" InstallationBehavior COM Object"
)
requires_reboot = 2
ret["Updates"][uid]["RebootBehavior"] = REBOOT_BEHAVIOR[
requires_reboot
]
return ret
# Found a different exception, Raise error
log.error("Uninstall Failed: %s", failure_code)
raise CommandExecutionError(failure_code)
# Lookup dictionary
result_code = {
0: "Uninstallation Not Started",
1: "Uninstallation In Progress",
2: "Uninstallation Succeeded",
3: "Uninstallation Succeeded With Errors",
4: "Uninstallation Failed",
5: "Uninstallation Aborted",
}
log.debug("Uninstall Complete")
log.debug(result_code[result.ResultCode])
ret["Message"] = result_code[result.ResultCode]
if result.ResultCode in [2, 3]:
ret["Success"] = True
ret["NeedsReboot"] = result.RebootRequired
log.debug("NeedsReboot: %s", result.RebootRequired)
else:
log.debug("Uninstall Failed")
ret["Success"] = False
for i in range(uninstall_list.Count):
uid = uninstall_list.Item(i).Identity.UpdateID
ret["Updates"][uid]["Result"] = result_code[
result.GetUpdateResult(i).ResultCode
]
# Windows 10 build 2004 introduced some problems with the
# InstallationBehavior COM Object. See
# https://github.com/saltstack/salt/issues/57762 for more details.
# The following try/except block will default to 2
try:
reboot_behavior = uninstall_list.Item(
i
).InstallationBehavior.RebootBehavior
except AttributeError:
log.debug(
"Windows Update: Error reading InstallationBehavior COM Object"
)
reboot_behavior = 2
ret["Updates"][uid]["RebootBehavior"] = REBOOT_BEHAVIOR[reboot_behavior]
return ret
def _run(self, cmd):
"""
Internal function for running commands. Used by the uninstall function.
Args:
cmd (str, list):
The command to run
Returns:
tuple: The stdout and stderr of the command
"""
if isinstance(cmd, str):
cmd = salt.utils.args.shlex_split(cmd)
try:
log.debug(cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p.communicate()
except OSError as exc:
log.debug("Command Failed: %s", " ".join(cmd))
log.debug("Error: %s", exc)
raise CommandExecutionError(exc)
def needs_reboot():
"""
Determines if the system needs to be rebooted.
Returns:
bool: ``True`` if the system requires a reboot, ``False`` if not
Code Example:
.. code-block:: python
import salt.utils.win_update
salt.utils.win_update.needs_reboot()
"""
# Initialize the PyCom system
with salt.utils.winapi.Com():
# Create an AutoUpdate object
try:
obj_sys = win32com.client.Dispatch("Microsoft.Update.SystemInfo")
except pywintypes.com_error as exc:
_, msg, _, _ = exc.args
log.debug("Failed to create SystemInfo object: %s", msg)
return False
return salt.utils.data.is_true(obj_sys.RebootRequired)
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/win_update.py
| 0.685213 | 0.239038 |
win_update.py
|
pypi
|
import glob
import logging
import os
import re
import salt.utils.path
import salt.utils.yaml
from jinja2 import Environment, FileSystemLoader
log = logging.getLogger(__name__)
# Renders jinja from a template file
def render_jinja(_file, salt_data):
j_env = Environment(loader=FileSystemLoader(os.path.dirname(_file)))
j_env.globals.update(
{
"__opts__": salt_data["__opts__"],
"__salt__": salt_data["__salt__"],
"__grains__": salt_data["__grains__"],
"__pillar__": salt_data["__pillar__"],
"minion_id": salt_data["minion_id"],
}
)
j_render = j_env.get_template(os.path.basename(_file)).render()
return j_render
# Renders yaml from rendered jinja
def render_yaml(_file, salt_data):
return salt.utils.yaml.safe_load(render_jinja(_file, salt_data))
# Returns a dict from a class yaml definition
def get_class(_class, salt_data):
l_files = []
saltclass_path = salt_data["path"]
straight, sub_init, sub_straight = get_class_paths(_class, saltclass_path)
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(saltclass_path, "classes"), followlinks=True
):
for l_file in files:
l_files.append(os.path.join(root, l_file))
if straight in l_files:
return render_yaml(straight, salt_data)
if sub_straight in l_files:
return render_yaml(sub_straight, salt_data)
if sub_init in l_files:
return render_yaml(sub_init, salt_data)
log.warning("%s: Class definition not found", _class)
return {}
def get_class_paths(_class, saltclass_path):
"""
Converts the dotted notation of a saltclass class to its possible file counterparts.
:param str _class: Dotted notation of the class
:param str saltclass_path: Root to saltclass storage
:return: 3-tuple of possible file counterparts
:rtype: tuple(str)
"""
straight = os.path.join(saltclass_path, "classes", "{}.yml".format(_class))
sub_straight = os.path.join(
saltclass_path, "classes", "{}.yml".format(_class.replace(".", os.sep))
)
sub_init = os.path.join(
saltclass_path, "classes", _class.replace(".", os.sep), "init.yml"
)
return straight, sub_init, sub_straight
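# A quick illustration of the three candidates (paths are hypothetical):
# for _class='services.nginx' and saltclass_path='/srv/saltclass' the
# returned (straight, sub_init, sub_straight) tuple is:
#   /srv/saltclass/classes/services.nginx.yml
#   /srv/saltclass/classes/services/nginx/init.yml
#   /srv/saltclass/classes/services/nginx.yml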
def get_class_from_file(_file, saltclass_path):
"""
Converts the absolute path to a saltclass file back to the dotted notation.
.. code-block:: python
print(get_class_from_file('/srv/saltclass/classes/services/nginx/init.yml', '/srv/saltclass'))
# services.nginx
:param str _file: Absolute path to file
:param str saltclass_path: Root to saltclass storage
:return: class name in dotted notation
:rtype: str
"""
# remove classes path prefix
_file = _file[len(os.path.join(saltclass_path, "classes")) + len(os.sep) :]
# remove .yml extension
_file = _file[:-4]
# revert to dotted notation
_file = _file.replace(os.sep, ".")
# remove trailing init
if _file.endswith(".init"):
_file = _file[:-5]
return _file
# Return environment
def get_env_from_dict(exp_dict_list):
environment = ""
for s_class in exp_dict_list:
if "environment" in s_class:
environment = s_class["environment"]
return environment
# Merge dict b into a
def dict_merge(a, b, path=None):
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], list) and isinstance(b[key], list):
if b[key][0] == "^":
b[key].pop(0)
a[key] = b[key]
else:
a[key].extend(b[key])
elif isinstance(a[key], dict) and isinstance(b[key], dict):
dict_merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass
else:
a[key] = b[key]
else:
a[key] = b[key]
return a
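# A short illustration of the merge semantics above (values are made up):
# lists extend by default, a list whose first element is "^" replaces the
# existing list, and nested dicts merge recursively.
#   a = {"pkgs": ["vim"], "opts": {"x": 1}}
#   b = {"pkgs": ["^", "nano"], "opts": {"y": 2}}
#   dict_merge(a, b)  # a is now {"pkgs": ["nano"], "opts": {"x": 1, "y": 2}}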
# Recursive search and replace in a dict
def dict_search_and_replace(d, old, new, expanded):
for (k, v) in d.items():
if isinstance(v, dict):
dict_search_and_replace(d[k], old, new, expanded)
if isinstance(v, list):
x = 0
for i in v:
if isinstance(i, dict):
dict_search_and_replace(v[x], old, new, expanded)
if isinstance(i, str):
if i == old:
v[x] = new
x = x + 1
if v == old:
d[k] = new
return d
# Retrieve original value from ${xx:yy:zz} to be expanded
def find_value_to_expand(x, v):
a = x
for i in v[2:-1].split(":"):
if a is None:
return v
if i in a:
a = a.get(i)
else:
return v
return a
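# Hypothetical example of the colon-path lookup above; unresolvable
# references are returned unchanged:
#   find_value_to_expand({"roles": {"web": {"port": 80}}}, "${roles:web:port}")
#   # -> 80
#   find_value_to_expand({}, "${missing:key}")
#   # -> "${missing:key}"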
# Look for regexes and expand them
def find_and_process_re(_str, v, k, b, expanded):
vre = re.finditer(r"(^|.)\$\{.*?\}", _str)
if vre:
for re_v in vre:
re_str = str(re_v.group())
if re_str.startswith("\\"):
v_new = _str.replace(re_str, re_str.lstrip("\\"))
b = dict_search_and_replace(b, _str, v_new, expanded)
expanded.append(k)
elif not re_str.startswith("$"):
v_expanded = find_value_to_expand(b, re_str[1:])
v_new = _str.replace(re_str[1:], v_expanded)
b = dict_search_and_replace(b, _str, v_new, expanded)
_str = v_new
expanded.append(k)
else:
v_expanded = find_value_to_expand(b, re_str)
if isinstance(v, str):
v_new = v.replace(re_str, v_expanded)
else:
v_new = _str.replace(re_str, v_expanded)
b = dict_search_and_replace(b, _str, v_new, expanded)
_str = v_new
v = v_new
expanded.append(k)
return b
# Return a dict that contains expanded variables if found
def expand_variables(a, b, expanded, path=None):
if path is None:
b = a.copy()
path = []
for (k, v) in a.items():
if isinstance(v, dict):
expand_variables(v, b, expanded, path + [str(k)])
else:
if isinstance(v, list):
for i in v:
if isinstance(i, dict):
expand_variables(i, b, expanded, path + [str(k)])
if isinstance(i, str):
b = find_and_process_re(i, v, k, b, expanded)
if isinstance(v, str):
b = find_and_process_re(v, v, k, b, expanded)
return b
def match_class_glob(_class, saltclass_path):
"""
Takes a class name possibly including `*` or `?` wildcards (or any other wildcards supported by `glob.glob`) and
returns a list of expanded class names without wildcards.
.. code-block:: python
classes = match_class_glob('services.*', '/srv/saltclass')
print(classes)
# services.mariadb
# services.nginx...
:param str _class: dotted class name, globbing allowed.
:param str saltclass_path: path to the saltclass root directory.
:return: The list of expanded class matches.
:rtype: list(str)
"""
straight, sub_init, sub_straight = get_class_paths(_class, saltclass_path)
classes = []
matches = []
matches.extend(glob.glob(straight))
matches.extend(glob.glob(sub_straight))
matches.extend(glob.glob(sub_init))
if not matches:
log.warning("%s: Class globbing did not yield any results", _class)
for match in matches:
classes.append(get_class_from_file(match, saltclass_path))
return classes
def expand_classes_glob(classes, salt_data):
"""
Expand the list of `classes` to no longer include any globbing.
:param iterable(str) classes: Iterable of classes
:param dict salt_data: configuration data
:return: Expanded list of classes with resolved globbing
:rtype: list(str)
"""
all_classes = []
expanded_classes = []
saltclass_path = salt_data["path"]
for _class in classes:
all_classes.extend(match_class_glob(_class, saltclass_path))
for _class in all_classes:
if _class not in expanded_classes:
expanded_classes.append(_class)
return expanded_classes
def expand_classes_in_order(
minion_dict, salt_data, seen_classes, expanded_classes, classes_to_expand
):
# Get classes to expand from minion dictionary
if not classes_to_expand and "classes" in minion_dict:
classes_to_expand = minion_dict["classes"]
classes_to_expand = expand_classes_glob(classes_to_expand, salt_data)
# Now loop on list to recursively expand them
for klass in classes_to_expand:
if klass not in seen_classes:
seen_classes.append(klass)
expanded_classes[klass] = get_class(klass, salt_data)
# Fix corner case where class is loaded but doesn't contain anything
if expanded_classes[klass] is None:
expanded_classes[klass] = {}
# Merge newly found pillars into existing ones
new_pillars = expanded_classes[klass].get("pillars", {})
if new_pillars:
dict_merge(salt_data["__pillar__"], new_pillars)
# Now replace class element in classes_to_expand by expansion
if expanded_classes[klass].get("classes"):
l_id = classes_to_expand.index(klass)
classes_to_expand[l_id:l_id] = expanded_classes[klass]["classes"]
expand_classes_in_order(
minion_dict,
salt_data,
seen_classes,
expanded_classes,
classes_to_expand,
)
else:
expand_classes_in_order(
minion_dict,
salt_data,
seen_classes,
expanded_classes,
classes_to_expand,
)
# We may have duplicates here and we want to remove them
tmp = []
for t_element in classes_to_expand:
if t_element not in tmp:
tmp.append(t_element)
classes_to_expand = tmp
# Now that we've retrieved every class in order,
# let's return an ordered list of dicts
ord_expanded_classes = []
ord_expanded_states = []
for ord_klass in classes_to_expand:
ord_expanded_classes.append(expanded_classes[ord_klass])
# And be smart and sort out states list
# Address the corner case where states is empty in a class definition
if (
"states" in expanded_classes[ord_klass]
and expanded_classes[ord_klass]["states"] is None
):
expanded_classes[ord_klass]["states"] = {}
if "states" in expanded_classes[ord_klass]:
ord_expanded_states.extend(expanded_classes[ord_klass]["states"])
# Add our minion dict as final element but check if we have states to process
if "states" in minion_dict and minion_dict["states"] is None:
minion_dict["states"] = []
if "states" in minion_dict:
ord_expanded_states.extend(minion_dict["states"])
ord_expanded_classes.append(minion_dict)
return ord_expanded_classes, classes_to_expand, ord_expanded_states
def expanded_dict_from_minion(minion_id, salt_data):
_file = ""
saltclass_path = salt_data["path"]
# Start
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(saltclass_path, "nodes"), followlinks=True
):
for minion_file in files:
if minion_file == "{}.yml".format(minion_id):
_file = os.path.join(root, minion_file)
# Load the minion_id definition if existing, else an empty dict
node_dict = {}
if _file:
node_dict[minion_id] = render_yaml(_file, salt_data)
else:
log.warning("%s: Node definition not found", minion_id)
node_dict[minion_id] = {}
# Merge newly found pillars into existing ones
dict_merge(salt_data["__pillar__"], node_dict[minion_id].get("pillars", {}))
# Get 2 ordered lists:
# expanded_classes: A list of all the dicts
# classes_list: List of all the classes
expanded_classes, classes_list, states_list = expand_classes_in_order(
node_dict[minion_id], salt_data, [], {}, []
)
# Here merge the pillars together
pillars_dict = {}
for exp_dict in expanded_classes:
if "pillars" in exp_dict:
dict_merge(pillars_dict, exp_dict)
return expanded_classes, pillars_dict, classes_list, states_list
def get_pillars(minion_id, salt_data):
# Get 2 dicts and 2 lists
# expanded_classes: Full list of expanded dicts
# pillars_dict: dict containing merged pillars in order
# classes_list: All classes processed in order
# states_list: All states listed in order
(
expanded_classes,
pillars_dict,
classes_list,
states_list,
) = expanded_dict_from_minion(minion_id, salt_data)
# Retrieve environment
environment = get_env_from_dict(expanded_classes)
# Expand ${} variables in merged dict
# pillars key shouldn't exist if we haven't found any minion_id ref
if "pillars" in pillars_dict:
pillars_dict_expanded = expand_variables(pillars_dict["pillars"], {}, [])
else:
pillars_dict_expanded = expand_variables({}, {}, [])
# Build the final pillars dict
pillars_dict = {}
pillars_dict["__saltclass__"] = {}
pillars_dict["__saltclass__"]["states"] = states_list
pillars_dict["__saltclass__"]["classes"] = classes_list
pillars_dict["__saltclass__"]["environment"] = environment
pillars_dict["__saltclass__"]["nodename"] = minion_id
pillars_dict.update(pillars_dict_expanded)
return pillars_dict
def get_tops(minion_id, salt_data):
# Get 2 dicts and 2 lists
# expanded_classes: Full list of expanded dicts
# pillars_dict: dict containing merged pillars in order
# classes_list: All classes processed in order
# states_list: All states listed in order
(
expanded_classes,
pillars_dict,
classes_list,
states_list,
) = expanded_dict_from_minion(minion_id, salt_data)
# Retrieve environment
environment = get_env_from_dict(expanded_classes)
# Build final top dict
tops_dict = {}
tops_dict[environment] = states_list
return tops_dict
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/saltclass.py
| 0.498779 | 0.167321 |
saltclass.py
|
pypi
|
import datetime
import hashlib
import os
from calendar import month_abbr as months
import salt.utils.stringutils
LAST_JID_DATETIME = None
def _utc_now():
"""
Helper method so tests do not have to patch the built-in method.
"""
return datetime.datetime.utcnow()
def gen_jid(opts):
"""
Generate a jid
"""
global LAST_JID_DATETIME # pylint: disable=global-statement
jid_dt = _utc_now()
if not opts.get("unique_jid", False):
return "{:%Y%m%d%H%M%S%f}".format(jid_dt)
if LAST_JID_DATETIME and LAST_JID_DATETIME >= jid_dt:
jid_dt = LAST_JID_DATETIME + datetime.timedelta(microseconds=1)
LAST_JID_DATETIME = jid_dt
return "{:%Y%m%d%H%M%S%f}_{}".format(jid_dt, os.getpid())
def is_jid(jid):
"""
Returns True if the passed in value is a job id
"""
if not isinstance(jid, str):
return False
if len(jid) != 20 and (len(jid) <= 21 or jid[20] != "_"):
return False
try:
int(jid[:20])
return True
except ValueError:
return False
def jid_to_time(jid):
"""
Convert a salt job id into the time when the job was invoked
"""
jid = str(jid)
if len(jid) != 20 and (len(jid) <= 21 or jid[20] != "_"):
return ""
year = jid[:4]
month = jid[4:6]
day = jid[6:8]
hour = jid[8:10]
minute = jid[10:12]
second = jid[12:14]
micro = jid[14:20]
ret = "{}, {} {} {}:{}:{}.{}".format(
year, months[int(month)], day, hour, minute, second, micro
)
return ret
def format_job_instance(job):
"""
Format the job instance correctly
"""
ret = {
"Function": job.get("fun", "unknown-function"),
"Arguments": list(job.get("arg", [])),
# unlikely but safeguard from invalid returns
"Target": job.get("tgt", "unknown-target"),
"Target-type": job.get("tgt_type", "list"),
"User": job.get("user", "root"),
}
if "metadata" in job:
ret["Metadata"] = job.get("metadata", {})
else:
if "kwargs" in job:
if "metadata" in job["kwargs"]:
ret["Metadata"] = job["kwargs"].get("metadata", {})
return ret
def format_jid_instance(jid, job):
"""
Format the jid correctly
"""
ret = format_job_instance(job)
ret.update({"StartTime": jid_to_time(jid)})
return ret
def format_jid_instance_ext(jid, job):
"""
Format the jid correctly with jid included
"""
ret = format_job_instance(job)
ret.update({"JID": jid, "StartTime": jid_to_time(jid)})
return ret
def jid_dir(jid, job_dir=None, hash_type="sha256"):
"""
Return the jid_dir for the given job id
"""
if not isinstance(jid, str):
jid = str(jid)
jhash = getattr(hashlib, hash_type)(
salt.utils.stringutils.to_bytes(jid)
).hexdigest()
parts = []
if job_dir is not None:
parts.append(job_dir)
parts.extend([jhash[:2], jhash[2:]])
return os.path.join(*parts)
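# Illustrative layout (the hash below is hypothetical): the jid is hashed
# and split into a two-character prefix directory plus the remainder:
#   jid_dir("20230101123045123456", "/var/cache/salt/master/jobs")
#   # -> "/var/cache/salt/master/jobs/ab/cdef0123..."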
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/jid.py
| 0.514644 | 0.175573 |
jid.py
|
pypi
|
import logging
import os
import re
import shutil
import stat
import sys
import time
from subprocess import PIPE, Popen
import salt.defaults.exitcodes
import salt.utils.args
import salt.utils.hashutils
import salt.utils.path
import salt.utils.stringutils
from salt.utils.filebuffer import BufferedReader
try:
import grp
import pwd
# TODO: grp and pwd are both used in the code; make sure any code that
# depends on them never runs if importing them fails
except ImportError:
pass
# Set up logger
log = logging.getLogger(__name__)
_REQUIRES_PATH = 1
_REQUIRES_STAT = 2
_REQUIRES_CONTENTS = 4
_FILE_TYPES = {
"b": stat.S_IFBLK,
"c": stat.S_IFCHR,
"d": stat.S_IFDIR,
"f": stat.S_IFREG,
"l": stat.S_IFLNK,
"p": stat.S_IFIFO,
"s": stat.S_IFSOCK,
stat.S_IFBLK: "b",
stat.S_IFCHR: "c",
stat.S_IFDIR: "d",
stat.S_IFREG: "f",
stat.S_IFLNK: "l",
stat.S_IFIFO: "p",
stat.S_IFSOCK: "s",
}
_INTERVAL_REGEX = re.compile(
r"""
^\s*
(?P<modifier>[+-]?)
(?: (?P<week> \d+ (?:\.\d*)? ) \s* [wW] )? \s*
(?: (?P<day> \d+ (?:\.\d*)? ) \s* [dD] )? \s*
(?: (?P<hour> \d+ (?:\.\d*)? ) \s* [hH] )? \s*
(?: (?P<minute> \d+ (?:\.\d*)? ) \s* [mM] )? \s*
(?: (?P<second> \d+ (?:\.\d*)? ) \s* [sS] )? \s*
$
""",
flags=re.VERBOSE,
)
_PATH_DEPTH_IGNORED = (os.path.sep, os.path.curdir, os.path.pardir)
def _parse_interval(value):
"""
Convert an interval string like 1w3d6h into the number of seconds, the
time resolution (1 unit of the smallest specified time unit) and the
modifier ('+', '-', or '').
w = week
d = day
h = hour
m = minute
s = second
"""
match = _INTERVAL_REGEX.match(str(value))
if match is None:
raise ValueError("invalid time interval: '{}'".format(value))
result = 0
resolution = None
for name, multiplier in [
("second", 1),
("minute", 60),
("hour", 60 * 60),
("day", 60 * 60 * 24),
("week", 60 * 60 * 24 * 7),
]:
if match.group(name) is not None:
result += float(match.group(name)) * multiplier
if resolution is None:
resolution = multiplier
return result, resolution, match.group("modifier")
def _parse_size(value):
scalar = value.strip()
if scalar.startswith(("-", "+")):
style = scalar[0]
scalar = scalar[1:]
else:
style = "="
if scalar:
multiplier = {
"b": 2 ** 0,
"k": 2 ** 10,
"m": 2 ** 20,
"g": 2 ** 30,
"t": 2 ** 40,
}.get(scalar[-1].lower())
if multiplier:
scalar = scalar[:-1].strip()
else:
multiplier = 1
else:
multiplier = 1
try:
num = int(scalar) * multiplier
except ValueError:
try:
num = int(float(scalar) * multiplier)
except ValueError:
raise ValueError('invalid size: "{}"'.format(value))
if style == "-":
min_size = 0
max_size = num
elif style == "+":
min_size = num
max_size = sys.maxsize
else:
min_size = num
max_size = num + multiplier - 1
return min_size, max_size
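# Worked examples of the size syntax: a bare size matches the whole
# unit-sized range, while "+" and "-" turn it into a lower/upper bound:
#   _parse_size("1k")   # -> (1024, 2047)
#   _parse_size("+1k")  # -> (1024, sys.maxsize)
#   _parse_size("-1k")  # -> (0, 1024)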
class Option:
"""
Abstract base class for all find options.
"""
def requires(self):
return _REQUIRES_PATH
class NameOption(Option):
"""
Match files with a case-sensitive glob filename pattern.
Note: this is the 'basename' portion of a pathname.
The option name is 'name', e.g. {'name' : '*.txt'}.
"""
def __init__(self, key, value):
self.regex = re.compile(
value.replace(".", "\\.").replace("?", ".?").replace("*", ".*") + "$"
)
def match(self, dirname, filename, fstat):
return self.regex.match(filename)
class InameOption(Option):
"""
Match files with a case-insensitive glob filename pattern.
Note: this is the 'basename' portion of a pathname.
The option name is 'iname', e.g. {'iname' : '*.TXT'}.
"""
def __init__(self, key, value):
self.regex = re.compile(
value.replace(".", "\\.").replace("?", ".?").replace("*", ".*") + "$",
re.IGNORECASE,
)
def match(self, dirname, filename, fstat):
return self.regex.match(filename)
class RegexOption(Option):
"""
Match files with a case-sensitive regular expression.
Note: this is the 'basename' portion of a pathname.
The option name is 'regex', e.g. {'regex' : '.*\\.txt'}.
"""
def __init__(self, key, value):
try:
self.regex = re.compile(value)
except re.error:
raise ValueError('invalid regular expression: "{}"'.format(value))
def match(self, dirname, filename, fstat):
return self.regex.match(filename)
class IregexOption(Option):
"""
Match files with a case-insensitive regular expression.
Note: this is the 'basename' portion of a pathname.
The option name is 'iregex', e.g. {'iregex' : '.*\\.txt'}.
"""
def __init__(self, key, value):
try:
self.regex = re.compile(value, re.IGNORECASE)
except re.error:
raise ValueError('invalid regular expression: "{}"'.format(value))
def match(self, dirname, filename, fstat):
return self.regex.match(filename)
class TypeOption(Option):
"""
Match files by their file type(s).
The file type(s) are specified as an optionally comma and/or space
separated list of letters.
b = block device
c = character device
d = directory
f = regular (plain) file
l = symbolic link
p = FIFO (named pipe)
s = socket
The option name is 'type', e.g. {'type' : 'd'} or {'type' : 'bc'}.
"""
def __init__(self, key, value):
# remove whitespace and commas
value = "".join(value.strip().replace(",", "").split())
self.ftypes = set()
for ftype in value:
try:
self.ftypes.add(_FILE_TYPES[ftype])
except KeyError:
raise ValueError('invalid file type "{}"'.format(ftype))
def requires(self):
return _REQUIRES_STAT
def match(self, dirname, filename, fstat):
return stat.S_IFMT(fstat[stat.ST_MODE]) in self.ftypes
class OwnerOption(Option):
"""
Match files by their owner name(s) and/or uid(s), e.g. 'root'.
The names are a space and/or comma separated list of names and/or integers.
A match occurs when the file's uid matches any user specified.
The option name is 'owner', e.g. {'owner' : 'root'}.
"""
def __init__(self, key, value):
self.uids = set()
for name in value.replace(",", " ").split():
if name.isdigit():
self.uids.add(int(name))
else:
try:
self.uids.add(pwd.getpwnam(name).pw_uid)
except KeyError:
raise ValueError('no such user "{}"'.format(name))
def requires(self):
return _REQUIRES_STAT
def match(self, dirname, filename, fstat):
return fstat[stat.ST_UID] in self.uids
class GroupOption(Option):
"""
Match files by their group name(s) and/or uid(s), e.g. 'admin'.
The names are a space and/or comma separated list of names and/or integers.
A match occurs when the file's gid matches any group specified.
The option name is 'group', e.g. {'group' : 'admin'}.
"""
def __init__(self, key, value):
self.gids = set()
for name in value.replace(",", " ").split():
if name.isdigit():
self.gids.add(int(name))
else:
try:
self.gids.add(grp.getgrnam(name).gr_gid)
except KeyError:
raise ValueError('no such group "{}"'.format(name))
def requires(self):
return _REQUIRES_STAT
def match(self, dirname, filename, fstat):
return fstat[stat.ST_GID] in self.gids
class SizeOption(Option):
"""
Match files by their size.
Prefix the size with '-' to find files the specified size and smaller.
Prefix the size with '+' to find files the specified size and larger.
Without the +/- prefix, match the exact file size.
The size can be suffixed with (case-insensitive) suffixes:
b = bytes
k = kilobytes
m = megabytes
g = gigabytes
t = terabytes
The option name is 'size', e.g. {'size' : '+1G'}.
"""
def __init__(self, key, value):
self.min_size, self.max_size = _parse_size(value)
def requires(self):
return _REQUIRES_STAT
def match(self, dirname, filename, fstat):
return self.min_size <= fstat[stat.ST_SIZE] <= self.max_size
class MtimeOption(Option):
"""
Match files modified since the specified time.
The option name is 'mtime', e.g. {'mtime' : '3d'}.
The value format is [<num>w] [<num>[d]] [<num>h] [<num>m] [<num>s]
where num is an integer or float and the case-insensitive suffixes are:
w = week
d = day
h = hour
m = minute
s = second
Whitespace is ignored in the value.
"""
def __init__(self, key, value):
secs, resolution, modifier = _parse_interval(value)
self.mtime = time.time() - int(secs / resolution) * resolution
self.modifier = modifier
def requires(self):
return _REQUIRES_STAT
def match(self, dirname, filename, fstat):
if self.modifier == "-":
return fstat[stat.ST_MTIME] >= self.mtime
else:
return fstat[stat.ST_MTIME] <= self.mtime
class GrepOption(Option):
"""Match files when a pattern occurs within the file.
The option name is 'grep', e.g. {'grep' : '(foo)|(bar}'}.
"""
def __init__(self, key, value):
try:
self.regex = re.compile(value)
except re.error:
raise ValueError('invalid regular expression: "{}"'.format(value))
def requires(self):
return _REQUIRES_CONTENTS | _REQUIRES_STAT
def match(self, dirname, filename, fstat):
if not stat.S_ISREG(fstat[stat.ST_MODE]):
return None
dfilename = os.path.join(dirname, filename)
with BufferedReader(dfilename, mode="rb") as bread:
for chunk in bread:
if self.regex.search(chunk):
return dfilename
return None
class PrintOption(Option):
"""
Return information about a matched file.
Print options are specified as a comma and/or space separated list of
one or more of the following:
group = group name
md5 = MD5 digest of file contents
mode = file mode (as integer)
mtime = last modification time (as time_t)
name = file basename
path = file absolute path
size = file size in bytes
type = file type
user = user name
"""
def __init__(self, key, value):
self.need_stat = False
self.print_title = False
self.fmt = []
for arg in value.replace(",", " ").split():
self.fmt.append(arg)
if arg not in ["name", "path"]:
self.need_stat = True
if not self.fmt:
self.fmt.append("path")
def requires(self):
return _REQUIRES_STAT if self.need_stat else _REQUIRES_PATH
def execute(self, fullpath, fstat, test=False):
result = []
for arg in self.fmt:
if arg == "path":
result.append(fullpath)
elif arg == "name":
result.append(os.path.basename(fullpath))
elif arg == "size":
result.append(fstat[stat.ST_SIZE])
elif arg == "type":
result.append(_FILE_TYPES.get(stat.S_IFMT(fstat[stat.ST_MODE]), "?"))
elif arg == "mode":
# take the last three octal digits (the permission bits) and parse them back into an int
result.append(int(oct(fstat[stat.ST_MODE])[-3:], 8))
elif arg == "mtime":
result.append(fstat[stat.ST_MTIME])
elif arg == "user":
uid = fstat[stat.ST_UID]
try:
result.append(pwd.getpwuid(uid).pw_name)
except KeyError:
result.append(uid)
elif arg == "group":
gid = fstat[stat.ST_GID]
try:
result.append(grp.getgrgid(gid).gr_name)
except KeyError:
result.append(gid)
elif arg == "md5":
if stat.S_ISREG(fstat[stat.ST_MODE]):
md5digest = salt.utils.hashutils.get_hash(fullpath, "md5")
result.append(md5digest)
else:
result.append("")
if len(result) == 1:
return result[0]
else:
return result
class DeleteOption(TypeOption):
"""
Deletes matched file.
Delete options are one or more of the following:
a: all file types
b: block device
c: character device
d: directory
p: FIFO (named pipe)
f: plain file
l: symlink
s: socket
"""
def __init__(self, key, value):
if "a" in value:
value = "bcdpfls"
super().__init__(key, value)
def execute(self, fullpath, fstat, test=False):
if test:
return fullpath
try:
if os.path.isfile(fullpath) or os.path.islink(fullpath):
os.remove(fullpath)
elif os.path.isdir(fullpath):
shutil.rmtree(fullpath)
except OSError as exc:
log.error("Failed to delete %s: %s", fullpath, exc)
return None
return fullpath
class ExecOption(Option):
"""
Execute the given command, with {} replaced by the matched filename.
Quote the {} if the filename might include whitespace.
"""
def __init__(self, key, value):
self.command = value
def execute(self, fullpath, fstat, test=False):
try:
command = self.command.replace("{}", fullpath)
print(salt.utils.args.shlex_split(command))
p = Popen(salt.utils.args.shlex_split(command), stdout=PIPE, stderr=PIPE)
(out, err) = p.communicate()
if err:
log.error(
"Error running command: %s\n\n%s",
command,
salt.utils.stringutils.to_str(err),
)
return "{}:\n{}\n".format(command, salt.utils.stringutils.to_str(out))
except Exception as e: # pylint: disable=broad-except
log.error('Exception while executing command "%s":\n\n%s', command, e)
return "{}: Failed".format(fullpath)
class Finder:
def __init__(self, options):
self.actions = []
self.maxdepth = None
self.mindepth = 0
self.test = False
criteria = {
_REQUIRES_PATH: list(),
_REQUIRES_STAT: list(),
_REQUIRES_CONTENTS: list(),
}
if "mindepth" in options:
self.mindepth = options["mindepth"]
del options["mindepth"]
if "maxdepth" in options:
self.maxdepth = options["maxdepth"]
del options["maxdepth"]
if "test" in options:
self.test = options["test"]
del options["test"]
for key, value in options.items():
if key.startswith("_"):
# this is a passthrough object, continue
continue
if not value:
raise ValueError('missing value for "{}" option'.format(key))
try:
obj = globals()[key.title() + "Option"](key, value)
except KeyError:
raise ValueError('invalid option "{}"'.format(key))
if hasattr(obj, "match"):
requires = obj.requires()
if requires & _REQUIRES_CONTENTS:
criteria[_REQUIRES_CONTENTS].append(obj)
elif requires & _REQUIRES_STAT:
criteria[_REQUIRES_STAT].append(obj)
else:
criteria[_REQUIRES_PATH].append(obj)
if hasattr(obj, "execute"):
self.actions.append(obj)
if not self.actions:
self.actions.append(PrintOption("print", ""))
# order criteria so that least expensive checks are done first
self.criteria = (
criteria[_REQUIRES_PATH]
+ criteria[_REQUIRES_STAT]
+ criteria[_REQUIRES_CONTENTS]
)
def find(self, path):
"""
Generate filenames in path that satisfy criteria specified in
the constructor.
This method is a generator and should be repeatedly called
until there are no more results.
"""
if self.mindepth < 1:
dirpath, name = os.path.split(path)
match, fstat = self._check_criteria(dirpath, name, path)
if match:
yield from self._perform_actions(path, fstat=fstat)
for dirpath, dirs, files in salt.utils.path.os_walk(path):
relpath = os.path.relpath(dirpath, path)
depth = path_depth(relpath) + 1
if depth >= self.mindepth and (
self.maxdepth is None or self.maxdepth >= depth
):
for name in dirs + files:
fullpath = os.path.join(dirpath, name)
match, fstat = self._check_criteria(dirpath, name, fullpath)
if match:
yield from self._perform_actions(fullpath, fstat=fstat)
if self.maxdepth is not None and depth > self.maxdepth:
dirs[:] = []
def _check_criteria(self, dirpath, name, fullpath, fstat=None):
match = True
for criterion in self.criteria:
if fstat is None and criterion.requires() & _REQUIRES_STAT:
try:
fstat = os.stat(fullpath)
except OSError:
fstat = os.lstat(fullpath)
if not criterion.match(dirpath, name, fstat):
match = False
break
return match, fstat
def _perform_actions(self, fullpath, fstat=None):
for action in self.actions:
if fstat is None and action.requires() & _REQUIRES_STAT:
try:
fstat = os.stat(fullpath)
except OSError:
fstat = os.lstat(fullpath)
result = action.execute(fullpath, fstat, test=self.test)
if result is not None:
yield result
def path_depth(path):
depth = 0
head = path
while True:
head, tail = os.path.split(head)
if not tail and (not head or head in _PATH_DEPTH_IGNORED):
break
if tail and tail not in _PATH_DEPTH_IGNORED:
depth += 1
return depth
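# Illustrative depths: separators and "."/".." components do not count.
#   path_depth("a/b/c")  # -> 3
#   path_depth("./a/b")  # -> 2
#   path_depth("/")      # -> 0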
def find(path, options):
"""
Generate filenames under ``path`` that match the criteria given in the
``options`` dict. Option keys map to the ``*Option`` classes defined in
this module (``name``, ``type``, ``size``, ``grep``, ``exec``, ...).
This is a thin generator wrapper around ``Finder.find()``.
"""
finder = Finder(options)
for path in finder.find(path):
yield path
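# Hypothetical usage sketch: stream all regular *.conf files under /etc
# (option keys map to the *Option classes defined above):
#   for hit in find("/etc", {"type": "f", "name": "*.conf"}):
#       print(hit)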
def _main():
if len(sys.argv) < 2:
sys.stderr.write("usage: {} path [options]\n".format(sys.argv[0]))
sys.exit(salt.defaults.exitcodes.EX_USAGE)
path = sys.argv[1]
criteria = {}
for arg in sys.argv[2:]:
key, value = arg.split("=")
criteria[key] = value
try:
finder = Finder(criteria)
except ValueError as ex:
sys.stderr.write("error: {}\n".format(ex))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
for result in finder.find(path):
print(result)
if __name__ == "__main__":
_main()
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/find.py
| 0.407569 | 0.233936 |
find.py
|
pypi
|
import copy
import fnmatch
import functools
import logging
import re
# Try to import range from https://github.com/ytoolshed/range
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
# pylint: enable=import-error
log = logging.getLogger(__name__)
def targets(conditioned_raw, tgt, tgt_type, ipv="ipv4"):
rmatcher = RosterMatcher(conditioned_raw, tgt, tgt_type, ipv)
return rmatcher.targets()
def _tgt_set(tgt):
"""
Return the tgt as a set of literal names
"""
try:
# A comma-delimited string
return set(tgt.split(","))
except AttributeError:
# Assume tgt is already a non-string iterable.
return set(tgt)
class RosterMatcher:
"""
Matcher for the roster data structure
"""
def __init__(self, raw, tgt, tgt_type, ipv="ipv4"):
self.tgt = tgt
self.tgt_type = tgt_type
self.raw = raw
self.ipv = ipv
def targets(self):
"""
Execute the correct tgt_type routine and return
"""
try:
return getattr(self, "ret_{}_minions".format(self.tgt_type))()
except AttributeError:
return {}
def _ret_minions(self, filter_):
"""
Filter minions by a generic filter.
"""
minions = {}
for minion in filter_(self.raw):
data = self.get_data(minion)
if data:
minions[minion] = data.copy()
return minions
def ret_glob_minions(self):
"""
Return minions that match via glob
"""
fnfilter = functools.partial(fnmatch.filter, pat=self.tgt)
return self._ret_minions(fnfilter)
def ret_pcre_minions(self):
"""
Return minions that match via pcre
"""
tgt = re.compile(self.tgt)
refilter = functools.partial(filter, tgt.match)
return self._ret_minions(refilter)
def ret_list_minions(self):
"""
Return minions that match via list
"""
tgt = _tgt_set(self.tgt)
return self._ret_minions(tgt.intersection)
def ret_nodegroup_minions(self):
"""
Return minions which match the special list-only groups defined by
ssh_list_nodegroups
"""
nodegroup = __opts__.get("ssh_list_nodegroups", {}).get(self.tgt, [])
nodegroup = _tgt_set(nodegroup)
return self._ret_minions(nodegroup.intersection)
def ret_range_minions(self):
"""
Return minions that are returned by a range query
"""
if HAS_RANGE is False:
raise RuntimeError("Python lib 'seco.range' is not available")
range_hosts = _convert_range_to_list(self.tgt, __opts__["range_server"])
return self._ret_minions(range_hosts.__contains__)
def get_data(self, minion):
"""
Return the configured ip
"""
ret = copy.deepcopy(__opts__.get("roster_defaults", {}))
if isinstance(self.raw[minion], str):
ret.update({"host": self.raw[minion]})
return ret
elif isinstance(self.raw[minion], dict):
ret.update(self.raw[minion])
return ret
return False
def _convert_range_to_list(tgt, range_server):
"""
convert a seco.range range into a list target
"""
r = seco.range.Range(range_server)
try:
return r.expand(tgt)
except seco.range.RangeException as err:
log.error("Range server exception: %s", err)
return []
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/roster_matcher.py
| 0.676727 | 0.331823 |
roster_matcher.py
|
pypi
|
import re
import sys
from urllib.parse import urlparse, urlunparse
import salt.utils.data
import salt.utils.path
import salt.utils.platform
import salt.utils.versions
def parse(url):
"""
Parse a salt:// URL; return the path and a possible saltenv query.
"""
if not url.startswith("salt://"):
return url, None
# urlparse will split on valid filename chars such as '?' and '&'
resource = url.split("salt://", 1)[-1]
if "?env=" in resource:
# "env" is not supported; Use "saltenv".
path, saltenv = resource.split("?env=", 1)[0], None
elif "?saltenv=" in resource:
path, saltenv = resource.split("?saltenv=", 1)
else:
path, saltenv = resource, None
if salt.utils.platform.is_windows():
path = salt.utils.path.sanitize_win_path(path)
return path, saltenv
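# Illustrative round trips (non-Windows paths):
#   parse("salt://top.sls")                         # -> ("top.sls", None)
#   parse("salt://webserver/init.sls?saltenv=dev")  # -> ("webserver/init.sls", "dev")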
def create(path, saltenv=None):
"""
join `path` and `saltenv` into a 'salt://' URL.
"""
path = path.replace("\\", "/")
if salt.utils.platform.is_windows():
path = salt.utils.path.sanitize_win_path(path)
path = salt.utils.data.decode(path)
query = "saltenv={}".format(saltenv) if saltenv else ""
url = salt.utils.data.decode(urlunparse(("file", "", path, "", query, "")))
return "salt://{}".format(url[len("file:///") :])
def is_escaped(url):
"""
test whether `url` is escaped with `|`
"""
scheme = urlparse(url).scheme
if not scheme:
return url.startswith("|")
elif scheme == "salt":
path, saltenv = parse(url)
if salt.utils.platform.is_windows() and "|" in url:
return path.startswith("_")
else:
return path.startswith("|")
else:
return False
def escape(url):
"""
add escape character `|` to `url`
"""
if salt.utils.platform.is_windows():
return url
scheme = urlparse(url).scheme
if not scheme:
if url.startswith("|"):
return url
else:
return "|{}".format(url)
elif scheme == "salt":
path, saltenv = parse(url)
if path.startswith("|"):
return create(path, saltenv)
else:
return create("|{}".format(path), saltenv)
else:
return url
def unescape(url):
"""
remove escape character `|` from `url`
"""
scheme = urlparse(url).scheme
if not scheme:
return url.lstrip("|")
elif scheme == "salt":
path, saltenv = parse(url)
if salt.utils.platform.is_windows() and "|" in url:
return create(path.lstrip("_"), saltenv)
else:
return create(path.lstrip("|"), saltenv)
else:
return url
def add_env(url, saltenv):
"""
append `saltenv` to `url` as a query parameter to a 'salt://' url
"""
if not url.startswith("salt://"):
return url
path, senv = parse(url)
return create(path, saltenv)
def split_env(url):
"""
remove the saltenv query parameter from a 'salt://' url
"""
if not url.startswith("salt://"):
return url, None
path, senv = parse(url)
return create(path), senv
def validate(url, protos):
"""
Return true if the passed URL scheme is in the list of accepted protos
"""
if urlparse(url).scheme in protos:
return True
return False
def strip_proto(url):
"""
Return a copy of the string with the protocol designation stripped, if one
was present.
"""
return re.sub("^[^:/]+://", "", url)
def add_http_basic_auth(url, user=None, password=None, https_only=False):
"""
Return a string with http basic auth incorporated into it
"""
if user is None and password is None:
return url
else:
urltuple = urlparse(url)
if https_only and urltuple.scheme != "https":
raise ValueError("Basic Auth only supported for HTTPS")
if password is None:
netloc = "{}@{}".format(user, urltuple.netloc)
urltuple = urltuple._replace(netloc=netloc)
return urlunparse(urltuple)
else:
netloc = "{}:{}@{}".format(user, password, urltuple.netloc)
urltuple = urltuple._replace(netloc=netloc)
return urlunparse(urltuple)
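# Illustrative examples (credentials are placeholders):
#   add_http_basic_auth("https://example.com/repo", "user")
#   # -> "https://user@example.com/repo"
#   add_http_basic_auth("https://example.com/repo", "user", "secret")
#   # -> "https://user:secret@example.com/repo"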
def redact_http_basic_auth(output):
"""
Remove HTTP user and password
"""
url_re = "(https?)://.*@"
redacted = r"\1://<redacted>@"
# This module is Python 3 only, so re.sub() always supports the
# ``flags`` argument; do a case-insensitive substitution.
return re.sub(url_re, redacted, output, flags=re.IGNORECASE)
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/url.py
| 0.435181 | 0.18429 |
url.py
|
pypi
|
import logging
import math
import os
import re
from numbers import Number
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
from salt.utils.decorators import memoize as real_memoize
from salt.utils.odict import OrderedDict
from salt.utils.stringutils import to_num as str_to_num
# Size conversion data
re_zfs_size = re.compile(r"^(\d+(?:\.\d+)?)([KkMmGgTtPpEe][Bb]?)$")
zfs_size = ["K", "M", "G", "T", "P", "E"]
log = logging.getLogger(__name__)
def _check_retcode(cmd):
"""
Simple internal wrapper for cmdmod.retcode
"""
return (
salt.modules.cmdmod.retcode(cmd, output_loglevel="quiet", ignore_retcode=True)
== 0
)
def _exec(**kwargs):
"""
Simple internal wrapper for cmdmod.run
"""
if "ignore_retcode" not in kwargs:
kwargs["ignore_retcode"] = True
if "output_loglevel" not in kwargs:
kwargs["output_loglevel"] = "quiet"
return salt.modules.cmdmod.run_all(**kwargs)
def _merge_last(values, merge_after, merge_with=" "):
"""
Merge values all values after X into the last value
"""
if len(values) > merge_after:
values = values[0 : (merge_after - 1)] + [
merge_with.join(values[(merge_after - 1) :])
]
return values
def _property_normalize_name(name):
"""
Normalizes property names
"""
if "@" in name:
name = name[: name.index("@") + 1]
return name
def _property_detect_type(name, values):
"""
Detect the datatype of a property
"""
value_type = "str"
if values.startswith("on | off"):
value_type = "bool"
elif values.startswith("yes | no"):
value_type = "bool_alt"
elif values in ["<size>", "<size> | none"]:
value_type = "size"
elif values in ["<count>", "<count> | none", "<guid>"]:
value_type = "numeric"
elif name in ["sharenfs", "sharesmb", "canmount"]:
value_type = "bool"
elif name in ["version", "copies"]:
value_type = "numeric"
return value_type
def _property_create_dict(header, data):
"""
Create a property dict
"""
prop = dict(zip(header, _merge_last(data, len(header))))
prop["name"] = _property_normalize_name(prop["property"])
prop["type"] = _property_detect_type(prop["name"], prop["values"])
prop["edit"] = from_bool(prop["edit"])
if "inherit" in prop:
prop["inherit"] = from_bool(prop["inherit"])
del prop["property"]
return prop
def _property_parse_cmd(cmd, alias=None):
"""
Parse output of zpool/zfs get command
"""
if not alias:
alias = {}
properties = {}
# NOTE: append get to command
if cmd[-3:] != "get":
cmd += " get"
# NOTE: parse output (zfs/zpool print the property help table to stderr)
prop_hdr = []
for prop_data in _exec(cmd=cmd)["stderr"].split("\n"):
# NOTE: make the line data more manageable
prop_data = prop_data.lower().split()
# NOTE: skip empty lines
if not prop_data:
continue
# NOTE: parse header
elif prop_data[0] == "property":
prop_hdr = prop_data
continue
# NOTE: skip lines after data
elif not prop_hdr or prop_data[1] not in ["no", "yes"]:
continue
# NOTE: create property dict
prop = _property_create_dict(prop_hdr, prop_data)
# NOTE: add property to dict
properties[prop["name"]] = prop
if prop["name"] in alias:
properties[alias[prop["name"]]] = prop
# NOTE: cleanup some duplicate data
del prop["name"]
return properties
def _auto(direction, name, value, source="auto", convert_to_human=True):
"""
Internal magic for from_auto and to_auto
"""
# NOTE: check direction
if direction not in ["to", "from"]:
return value
# NOTE: collect property data
props = property_data_zpool()
if source == "zfs":
props = property_data_zfs()
elif source == "auto":
props.update(property_data_zfs())
# NOTE: figure out the conversion type
value_type = props[name]["type"] if name in props else "str"
# NOTE: convert
if value_type == "size" and direction == "to":
return globals()["{}_{}".format(direction, value_type)](value, convert_to_human)
return globals()["{}_{}".format(direction, value_type)](value)
@real_memoize
def _zfs_cmd():
"""
Return the path of the zfs binary if present
"""
# Get the path to the zfs binary.
return salt.utils.path.which("zfs")
@real_memoize
def _zpool_cmd():
"""
Return the path of the zpool binary if present
"""
# Get the path to the zfs binary.
return salt.utils.path.which("zpool")
def _command(
source,
command,
flags=None,
opts=None,
property_name=None,
property_value=None,
filesystem_properties=None,
pool_properties=None,
target=None,
):
"""
Build and properly escape a zfs command
.. note::
Input is not considered safe and will be passed through
to_auto(from_auto('input_here')), you do not need to do so
your self first.
"""
# NOTE: start with the zfs binary and command
cmd = [_zpool_cmd() if source == "zpool" else _zfs_cmd(), command]
# NOTE: append flags if we have any
if flags is None:
flags = []
for flag in flags:
cmd.append(flag)
# NOTE: append options
# we pass through 'sorted' to guarantee the same order
if opts is None:
opts = {}
for opt in sorted(opts):
if not isinstance(opts[opt], list):
opts[opt] = [opts[opt]]
for val in opts[opt]:
cmd.append(opt)
cmd.append(to_str(val))
# NOTE: append filesystem properties (really just options with a key/value)
# we pass through 'sorted' to guarantee the same order
if filesystem_properties is None:
filesystem_properties = {}
for fsopt in sorted(filesystem_properties):
cmd.append("-O" if source == "zpool" else "-o")
cmd.append(
"{key}={val}".format(
key=fsopt,
val=to_auto(
fsopt,
filesystem_properties[fsopt],
source="zfs",
convert_to_human=False,
),
)
)
# NOTE: append pool properties (really just options with a key/value)
# we pass through 'sorted' to guarantee the same order
if pool_properties is None:
pool_properties = {}
for fsopt in sorted(pool_properties):
cmd.append("-o")
cmd.append(
"{key}={val}".format(
key=fsopt,
val=to_auto(
fsopt,
pool_properties[fsopt],
source="zpool",
convert_to_human=False,
),
)
)
# NOTE: append property and value
# the set command takes a key=value pair, we need to support this
if property_name is not None:
if property_value is not None:
if not isinstance(property_name, list):
property_name = [property_name]
if not isinstance(property_value, list):
property_value = [property_value]
for key, val in zip(property_name, property_value):
cmd.append(
"{key}={val}".format(
key=key,
val=to_auto(key, val, source=source, convert_to_human=False),
)
)
else:
cmd.append(property_name)
# NOTE: append the target(s)
if target is not None:
if not isinstance(target, list):
target = [target]
for tgt in target:
# NOTE: skip None list items
# we do not want to skip False and 0!
if tgt is None:
continue
cmd.append(to_str(tgt))
return " ".join(cmd)
def is_supported():
"""
Check the system for ZFS support
"""
# Check for supported platforms
# NOTE: ZFS on Windows is in development
# NOTE: ZFS on NetBSD is in development
on_supported_platform = False
if salt.utils.platform.is_sunos():
on_supported_platform = True
elif salt.utils.platform.is_freebsd() and _check_retcode("kldstat -q -m zfs"):
on_supported_platform = True
elif salt.utils.platform.is_linux() and os.path.exists("/sys/module/zfs"):
on_supported_platform = True
elif salt.utils.platform.is_linux() and salt.utils.path.which("zfs-fuse"):
on_supported_platform = True
elif (
salt.utils.platform.is_darwin()
and os.path.exists("/Library/Extensions/zfs.kext")
and os.path.exists("/dev/zfs")
):
on_supported_platform = True
# Additional check for the zpool command
return (salt.utils.path.which("zpool") and on_supported_platform) is True
@real_memoize
def has_feature_flags():
"""
Check if zpool-features is available
"""
# get man location
man = salt.utils.path.which("man")
return _check_retcode("{man} zpool-features".format(man=man)) if man else False
@real_memoize
def property_data_zpool():
"""
Return a dict of zpool properties
.. note::
Each property will have an entry with the following info:
- edit : boolean - is this property editable after pool creation
- type : str - either bool, bool_alt, size, numeric, or string
- values : str - list of possible values
.. warning::
This data is probed from the output of 'zpool get' with some supplemental
data that is hardcoded. There is no better way to get this information aside
from reading the code.
"""
# NOTE: man page also mentions a few short forms
property_data = _property_parse_cmd(
_zpool_cmd(),
{
"allocated": "alloc",
"autoexpand": "expand",
"autoreplace": "replace",
"listsnapshots": "listsnaps",
"fragmentation": "frag",
},
)
# NOTE: zpool status/iostat has a few extra fields
zpool_size_extra = [
"capacity-alloc",
"capacity-free",
"operations-read",
"operations-write",
"bandwidth-read",
"bandwidth-write",
"read",
"write",
]
zpool_numeric_extra = [
"cksum",
"cap",
]
for prop in zpool_size_extra:
property_data[prop] = {
"edit": False,
"type": "size",
"values": "<size>",
}
for prop in zpool_numeric_extra:
property_data[prop] = {
"edit": False,
"type": "numeric",
"values": "<count>",
}
return property_data
@real_memoize
def property_data_zfs():
"""
Return a dict of zfs properties
.. note::
Each property will have an entry with the following info:
- edit : boolean - is this property editable after pool creation
- inherit : boolean - is this property inheritable
- type : str - either bool, bool_alt, size, numeric, or string
- values : str - list of possible values
.. warning::
This data is probed from the output of 'zfs get' with some supplemental
data that is hardcoded. There is no better way to get this information aside
from reading the code.
"""
return _property_parse_cmd(
_zfs_cmd(),
{
"available": "avail",
"logicalreferenced": "lrefer.",
"logicalused": "lused.",
"referenced": "refer",
"volblocksize": "volblock",
"compression": "compress",
"readonly": "rdonly",
"recordsize": "recsize",
"refreservation": "refreserv",
"reservation": "reserv",
},
)
def from_numeric(value):
"""
Convert zfs numeric to python int
"""
if value == "none":
value = None
elif value:
value = str_to_num(value)
return value
def to_numeric(value):
"""
Convert python int to zfs numeric
"""
value = from_numeric(value)
if value is None:
value = "none"
return value
def from_bool(value):
"""
Convert zfs bool to python bool
"""
if value in ["on", "yes"]:
value = True
elif value in ["off", "no"]:
value = False
elif value == "none":
value = None
return value
def from_bool_alt(value):
"""
Convert zfs bool_alt to python bool
"""
return from_bool(value)
def to_bool(value):
"""
Convert python bool to zfs on/off bool
"""
value = from_bool(value)
if isinstance(value, bool):
value = "on" if value else "off"
elif value is None:
value = "none"
return value
def to_bool_alt(value):
"""
Convert python to zfs yes/no value
"""
value = from_bool_alt(value)
if isinstance(value, bool):
value = "yes" if value else "no"
elif value is None:
value = "none"
return value
def from_size(value):
"""
Convert zfs size (human readable) to python int (bytes)
"""
match_size = re_zfs_size.match(str(value))
if match_size:
v_unit = match_size.group(2).upper()[0]
v_size = float(match_size.group(1))
v_multiplier = math.pow(1024, zfs_size.index(v_unit) + 1)
value = v_size * v_multiplier
if int(value) == value:
value = int(value)
elif value is not None:
value = str(value)
return from_numeric(value)
def to_size(value, convert_to_human=True):
"""
Convert python int (bytes) to zfs size
NOTE: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/pyzfs/common/util.py#114
"""
value = from_size(value)
if value is None:
value = "none"
if isinstance(value, Number) and value > 1024 and convert_to_human:
v_power = int(math.floor(math.log(value, 1024)))
v_multiplier = math.pow(1024, v_power)
# NOTE: zfs is a bit odd on how it does the rounding,
# see libzfs implementation linked above
v_size_float = float(value) / v_multiplier
if v_size_float == int(v_size_float):
value = "{:.0f}{}".format(
v_size_float,
zfs_size[v_power - 1],
)
else:
for v_precision in ["{:.2f}{}", "{:.1f}{}", "{:.0f}{}"]:
v_size = v_precision.format(
v_size_float,
zfs_size[v_power - 1],
)
if len(v_size) <= 5:
value = v_size
break
return value
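# Usage sketches (hedged; assumes the module-level re_zfs_size/zfs_size
# definitions treat 'K' as the first unit):
#
#     from_size("1.50K")  # 1536
#     to_size(1536)       # '1.50K'
#     to_size(1536, convert_to_human=False)  # 1536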
def from_str(value):
"""
Decode zfs safe string (used for name, path, ...)
"""
if value == "none":
value = None
if value:
value = str(value)
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
value = value.replace('\\"', '"')
return value
def to_str(value):
"""
Encode zfs safe string (used for name, path, ...)
"""
value = from_str(value)
if value:
value = value.replace('"', '\\"')
if " " in value:
value = '"' + value + '"'
elif value is None:
value = "none"
return value
def from_auto(name, value, source="auto"):
"""
Convert zfs value to python value
"""
return _auto("from", name, value, source)
def to_auto(name, value, source="auto", convert_to_human=True):
"""
Convert python value to zfs value
"""
return _auto("to", name, value, source, convert_to_human)
def from_auto_dict(values, source="auto"):
"""
Pass an entire dictionary to from_auto
.. note::
The key will be passed as the name
"""
for name, value in values.items():
values[name] = from_auto(name, value, source)
return values
def to_auto_dict(values, source="auto", convert_to_human=True):
"""
Pass an entire dictionary to to_auto
.. note::
The key will be passed as the name
"""
for name, value in values.items():
values[name] = to_auto(name, value, source, convert_to_human)
return values
def is_snapshot(name):
"""
Check if name is a valid snapshot name
"""
return from_str(name).count("@") == 1
def is_bookmark(name):
"""
Check if name is a valid bookmark name
"""
return from_str(name).count("#") == 1
def is_dataset(name):
"""
Check if name is a valid filesystem or volume name
"""
return not is_snapshot(name) and not is_bookmark(name)
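# Usage sketches (hedged) for the name classifiers above:
#
#     is_snapshot("tank/data@backup")  # True
#     is_bookmark("tank/data#mark")    # True
#     is_dataset("tank/data")          # True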
def zfs_command(
command,
flags=None,
opts=None,
property_name=None,
property_value=None,
filesystem_properties=None,
target=None,
):
"""
Build and properly escape a zfs command
.. note::
        Input is not considered safe and will be passed through
        to_auto(from_auto('input_here')); you do not need to do so
        yourself first.
"""
return _command(
"zfs",
command=command,
flags=flags,
opts=opts,
property_name=property_name,
property_value=property_value,
filesystem_properties=filesystem_properties,
pool_properties=None,
target=target,
)
def zpool_command(
command,
flags=None,
opts=None,
property_name=None,
property_value=None,
filesystem_properties=None,
pool_properties=None,
target=None,
):
"""
Build and properly escape a zpool command
.. note::
        Input is not considered safe and will be passed through
        to_auto(from_auto('input_here')); you do not need to do so
        yourself first.
"""
return _command(
"zpool",
command=command,
flags=flags,
opts=opts,
property_name=property_name,
property_value=property_value,
filesystem_properties=filesystem_properties,
pool_properties=pool_properties,
target=target,
)
def parse_command_result(res, label=None):
"""
Parse the result of a zpool/zfs command
.. note::
        Output on failure is rather predictable.
        - retcode > 0
        - each 'error' is a line on stderr
        - an optional 'Usage:' block beneath those, with hints
        We simply check these and return an OrderedDict where
        we set label = True|False and error = error_messages
"""
ret = OrderedDict()
if label:
ret[label] = res["retcode"] == 0
if res["retcode"] != 0:
ret["error"] = []
for error in res["stderr"].splitlines():
if error.lower().startswith("usage:"):
break
if error.lower().startswith("use '-f'"):
error = error.replace("-f", "force=True")
if error.lower().startswith("use '-r'"):
error = error.replace("-r", "recursive=True")
ret["error"].append(error)
if ret["error"]:
ret["error"] = "\n".join(ret["error"])
else:
del ret["error"]
return ret
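# Usage sketch (hedged; the result dict mirrors cmd.run_all output and the
# stderr text is hypothetical):
#
#     res = {"retcode": 1, "stderr": "cannot destroy 'tank': pool is busy\nuse '-f' to force destruction"}
#     parse_command_result(res, label="destroyed")
#     # OrderedDict([('destroyed', False),
#     #              ('error', "cannot destroy 'tank': pool is busy\nuse 'force=True' to force destruction")])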
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/zfs.py
| 0.471223 | 0.245763 |
zfs.py
|
pypi
|
def compare_and_update_config(config, update_config, changes, namespace=""):
"""
Recursively compare two configs, writing any needed changes to the
update_config and capturing changes in the changes dict.
"""
if isinstance(config, dict):
if not update_config:
if config:
                # the incoming config has content while the existing one is
                # empty--use the incoming config and record the change
changes[namespace] = {
"new": config,
"old": update_config,
}
return config
elif not isinstance(update_config, dict):
# new config is a dict, other isn't--new one wins
changes[namespace] = {
"new": config,
"old": update_config,
}
return config
else:
# compare each key in the base config with the values in the
# update_config, overwriting the values that are different but
# keeping any that are not defined in config
for key, value in config.items():
_namespace = key
if namespace:
_namespace = "{}.{}".format(namespace, _namespace)
update_config[key] = compare_and_update_config(
value,
update_config.get(key, None),
changes,
namespace=_namespace,
)
return update_config
elif isinstance(config, list):
if not update_config:
if config:
                # the incoming config has content while the existing one is
                # empty--use the incoming config and record the change
changes[namespace] = {
"new": config,
"old": update_config,
}
return config
elif not isinstance(update_config, list):
# new config is a list, other isn't--new one wins
changes[namespace] = {
"new": config,
"old": update_config,
}
return config
else:
# iterate through config list, ensuring that each index in the
# update_config list is the same
for idx, item in enumerate(config):
_namespace = "[{}]".format(idx)
if namespace:
_namespace = "{}{}".format(namespace, _namespace)
_update = None
if len(update_config) > idx:
_update = update_config[idx]
if _update:
update_config[idx] = compare_and_update_config(
config[idx],
_update,
changes,
namespace=_namespace,
)
else:
changes[_namespace] = {
"new": config[idx],
"old": _update,
}
update_config.append(config[idx])
if len(update_config) > len(config):
# trim any items in update_config that are not in config
for idx, old_item in enumerate(update_config):
if idx < len(config):
continue
_namespace = "[{}]".format(idx)
if namespace:
_namespace = "{}{}".format(namespace, _namespace)
changes[_namespace] = {
"new": None,
"old": old_item,
}
del update_config[len(config) :]
return update_config
else:
if config != update_config:
changes[namespace] = {
"new": config,
"old": update_config,
}
return config
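# Usage sketch (hedged, hypothetical values): merge a desired config into an
# existing one while capturing what changed:
#
#     changes = {}
#     existing = {"a": 1, "b": 2}
#     compare_and_update_config({"a": 1, "b": 3}, existing, changes)
#     # existing -> {"a": 1, "b": 3}; changes -> {"b": {"new": 3, "old": 2}}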
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/configcomparer.py
| 0.431824 | 0.276929 |
configcomparer.py
|
pypi
|
import os
def get_module_environment(env=None, function=None):
"""
Get module optional environment.
To setup an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
    So this will export the environment to all the modules,
    states, returners etc. Calling this function with globals()
    in that context will fetch the environment for further reuse.
    Underscore '_' exports the environment for all functions within the module.
    If you want to export an environment only for one specific function,
    specify it as in the example above for "install".
    Configuration is fetched first, with the virtual module name looked up
    first and the physical name of the module overriding the virtual settings.
    Pillar settings then override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict
"""
result = {}
if not env:
env = {}
for env_src in [env.get("__opts__", {}), env.get("__pillar__", {})]:
fname = env.get("__file__", "")
physical_name = os.path.basename(fname).split(".")[0]
section = os.path.basename(os.path.dirname(fname))
m_names = [env.get("__virtualname__")]
if physical_name not in m_names:
m_names.append(physical_name)
for m_name in m_names:
if not m_name:
continue
result.update(
env_src.get("system-environment", {})
.get(section, {})
.get(m_name, {})
.get("_", {})
.copy()
)
if function is not None:
result.update(
env_src.get("system-environment", {})
.get(section, {})
.get(m_name, {})
.get(function, {})
.copy()
)
return result
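# Usage sketch (hedged): inside a loaded module, globals() carries __opts__,
# __pillar__ and __file__, so with the configuration shown in the docstring a
# call like this would merge the '_' and per-function environments:
#
#     env = get_module_environment(globals(), function="install")
#     # e.g. {"LC_ALL": "en_GB.UTF-8", "FOO": "bar", "HELLO": "world"}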
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/environment.py
| 0.53777 | 0.376594 |
environment.py
|
pypi
|
import random
import salt.loader
from salt.exceptions import SaltInvocationError
def sdb_get(uri, opts, utils=None, strict=False):
"""
Get a value from a db, using a uri in the form of ``sdb://<profile>/<key>``. If
the uri provided is not valid, then it will be returned as-is, unless ``strict=True`` was passed.
"""
if not isinstance(uri, str) or not uri.startswith("sdb://"):
if strict:
raise SaltInvocationError('SDB uri must start with "sdb://"')
else:
return uri
if utils is None:
utils = salt.loader.utils(opts)
sdlen = len("sdb://")
indx = uri.find("/", sdlen)
if (indx == -1) or not uri[(indx + 1) :]:
if strict:
raise SaltInvocationError(
"SDB uri must have a profile name as a first part of the uri before"
" the /"
)
else:
return uri
profile = opts.get(uri[sdlen:indx], {})
if not profile:
profile = opts.get("pillar", {}).get(uri[sdlen:indx], {})
if "driver" not in profile:
if strict:
raise SaltInvocationError(
'SDB profile "{}" wasnt found in the minion configuration'.format(
uri[sdlen:indx]
)
)
else:
return uri
fun = "{}.get".format(profile["driver"])
query = uri[indx + 1 :]
loaded_db = salt.loader.sdb(opts, fun, utils=utils)
return loaded_db[fun](query, profile=profile)
def sdb_set(uri, value, opts, utils=None):
"""
Set a value in a db, using a uri in the form of ``sdb://<profile>/<key>``.
If the uri provided does not start with ``sdb://`` or the value is not
successfully set, return ``False``.
"""
if not isinstance(uri, str) or not uri.startswith("sdb://"):
return False
if utils is None:
utils = salt.loader.utils(opts)
sdlen = len("sdb://")
indx = uri.find("/", sdlen)
if (indx == -1) or not uri[(indx + 1) :]:
return False
profile = opts.get(uri[sdlen:indx], {})
if not profile:
profile = opts.get("pillar", {}).get(uri[sdlen:indx], {})
if "driver" not in profile:
return False
fun = "{}.set".format(profile["driver"])
query = uri[indx + 1 :]
loaded_db = salt.loader.sdb(opts, fun, utils=utils)
return loaded_db[fun](query, value, profile=profile)
def sdb_delete(uri, opts, utils=None):
"""
Delete a value from a db, using a uri in the form of ``sdb://<profile>/<key>``. If
the uri provided does not start with ``sdb://`` or the value is not successfully
deleted, return ``False``.
"""
if not isinstance(uri, str) or not uri.startswith("sdb://"):
return False
if utils is None:
utils = salt.loader.utils(opts)
sdlen = len("sdb://")
indx = uri.find("/", sdlen)
if (indx == -1) or not uri[(indx + 1) :]:
return False
profile = opts.get(uri[sdlen:indx], {})
if not profile:
profile = opts.get("pillar", {}).get(uri[sdlen:indx], {})
if "driver" not in profile:
return False
fun = "{}.delete".format(profile["driver"])
query = uri[indx + 1 :]
loaded_db = salt.loader.sdb(opts, fun, utils=utils)
return loaded_db[fun](query, profile=profile)
def sdb_get_or_set_hash(
uri,
opts,
length=8,
chars="abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)",
utils=None,
):
"""
    Check if a value exists in sdb. If it does, return it; otherwise generate
    a random string and store it. This can be used for storing secrets in a
    centralized place.
"""
if not isinstance(uri, str) or not uri.startswith("sdb://"):
return False
if utils is None:
utils = salt.loader.utils(opts)
ret = sdb_get(uri, opts, utils=utils)
if ret is None:
val = "".join([random.SystemRandom().choice(chars) for _ in range(length)])
sdb_set(uri, val, opts, utils)
return ret or val
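# Usage sketch (hedged; the "mysdb" profile and opts are hypothetical and must
# name a valid sdb driver in the minion configuration):
#
#     secret = sdb_get_or_set_hash("sdb://mysdb/mykey", __opts__, length=16)
#     # returns the stored value, generating and storing a random one on first use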
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/sdb.py
| 0.537527 | 0.187188 |
sdb.py
|
pypi
|
import json
import logging
import salt.utils.data
import salt.utils.stringutils
log = logging.getLogger(__name__)
# One to one mappings
JSONEncoder = json.JSONEncoder
def __split(raw):
"""
Performs a splitlines on the string. This function exists to make mocking
possible in unit tests, since the member functions of the str/unicode
builtins cannot be mocked.
"""
return raw.splitlines()
def find_json(raw):
"""
Pass in a raw string and load the json when it starts. This allows for a
string to start with garbage and end with json but be cleanly loaded
"""
ret = {}
lines = __split(raw)
for ind, _ in enumerate(lines):
try:
working = "\n".join(lines[ind:])
except UnicodeDecodeError:
working = "\n".join(salt.utils.data.decode(lines[ind:]))
try:
ret = json.loads(working)
except ValueError:
continue
if ret:
return ret
if not ret:
# Not json, raise an error
raise ValueError
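# Usage sketch (hedged): find_json() tolerates leading garbage, but the JSON
# document must run to the end of the string:
#
#     find_json('warning: noise\n{"ok": true}')
#     # {'ok': True}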
def import_json():
"""
    Import a json module, starting with the quick ones and going down the list
"""
for fast_json in ("ujson", "yajl", "json"):
try:
mod = __import__(fast_json)
log.trace("loaded %s json lib", fast_json)
return mod
except ImportError:
continue
def load(fp, **kwargs):
"""
.. versionadded:: 2018.3.0
Wraps json.load
You can pass an alternate json module (loaded via import_json() above)
    using the _json_module argument
"""
return kwargs.pop("_json_module", json).load(fp, **kwargs)
def loads(s, **kwargs):
"""
.. versionadded:: 2018.3.0
Wraps json.loads and prevents a traceback in the event that a bytestring is
passed to the function. (Python < 3.6 cannot load bytestrings)
You can pass an alternate json module (loaded via import_json() above)
    using the _json_module argument
"""
json_module = kwargs.pop("_json_module", json)
try:
return json_module.loads(s, **kwargs)
except TypeError as exc:
# json.loads cannot load bytestrings in Python < 3.6
if isinstance(s, bytes):
return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)
else:
raise
def dump(obj, fp, **kwargs):
"""
.. versionadded:: 2018.3.0
Wraps json.dump, and assumes that ensure_ascii is False (unless explicitly
passed as True) for unicode compatibility. Note that setting it to True
will mess up any unicode characters, as they will be dumped as the string
literal version of the unicode code point.
On Python 2, encodes the result to a str since json.dump does not want
unicode types.
You can pass an alternate json module (loaded via import_json() above)
    using the _json_module argument
"""
json_module = kwargs.pop("_json_module", json)
if "ensure_ascii" not in kwargs:
kwargs["ensure_ascii"] = False
return json_module.dump(obj, fp, **kwargs)
def dumps(obj, **kwargs):
"""
.. versionadded:: 2018.3.0
Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly
passed as True) for unicode compatibility. Note that setting it to True
will mess up any unicode characters, as they will be dumped as the string
literal version of the unicode code point.
On Python 2, encodes the result to a str since json.dumps does not want
unicode types.
You can pass an alternate json module (loaded via import_json() above)
    using the _json_module argument
"""
json_module = kwargs.pop("_json_module", json)
if "ensure_ascii" not in kwargs:
kwargs["ensure_ascii"] = False
return json_module.dumps(obj, **kwargs)
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/json.py
| 0.547948 | 0.219379 |
json.py
|
pypi
|
import logging
import types
import salt.utils.args
import salt.utils.versions
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def namespaced_function(function, global_dict, defaults=None, preserve_context=None):
"""
Redefine (clone) a function under a different globals() namespace scope.
    Any keys missing from the passed ``global_dict`` that are present in the
    passed function's ``__globals__`` attribute get copied over into
    ``global_dict``, thus avoiding ``NameError`` from modules imported in
    the original function's module.
:param defaults:
.. deprecated:: 3005
:param preserve_context:
.. deprecated:: 3005
        Allow keeping the context taken from the original namespace,
        extending it with globals() taken from the
        new targeted namespace.
"""
if defaults is not None:
salt.utils.versions.warn_until(
3008,
"Passing 'defaults' to 'namespaced_function' is deprecated, slated "
"for removal in {version} and no longer does anything for the "
"function being namespaced.",
)
if preserve_context is not None:
salt.utils.versions.warn_until(
3008,
"Passing 'preserve_context' to 'namespaced_function' is deprecated, "
"slated for removal in {version} and no longer does anything for the "
"function being namespaced.",
)
    # Make sure that any key in the globals of the function being copied gets
    # added to the destination globals dictionary, if not present.
for key, value in function.__globals__.items():
if key not in global_dict:
global_dict[key] = value
new_namespaced_function = types.FunctionType(
function.__code__,
global_dict,
name=function.__name__,
argdefs=function.__defaults__,
closure=function.__closure__,
)
new_namespaced_function.__dict__.update(function.__dict__)
return new_namespaced_function
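# Usage sketch (hedged, hypothetical module names): clone a function from one
# module so it executes against the calling module's globals:
#
#     # in the borrowing module
#     ping = namespaced_function(other_module.ping, globals())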
def alias_function(fun, name, doc=None):
"""
Copy a function
"""
alias_fun = types.FunctionType(
fun.__code__,
fun.__globals__,
str(name),
fun.__defaults__,
fun.__closure__,
)
alias_fun.__dict__.update(fun.__dict__)
if doc and isinstance(doc, str):
alias_fun.__doc__ = doc
else:
orig_name = fun.__name__
alias_msg = "\nThis function is an alias of ``{}``.\n".format(orig_name)
alias_fun.__doc__ = alias_msg + (fun.__doc__ or "")
return alias_fun
def parse_function(function_arguments):
"""
Helper function to parse function_arguments (module.run format)
into args and kwargs.
This function is similar to salt.utils.data.repack_dictlist, except that this
handles mixed (i.e. dict and non-dict) arguments in the input list.
:param list function_arguments: List of items and dicts with kwargs.
:rtype: dict
:return: Dictionary with ``args`` and ``kwargs`` keyword.
"""
function_args = []
function_kwargs = {}
for item in function_arguments:
if isinstance(item, dict):
function_kwargs.update(item)
else:
function_args.append(item)
return {"args": function_args, "kwargs": function_kwargs}
def call_function(salt_function, *args, **kwargs):
"""
Calls a function from the specified module.
:param function salt_function: Function reference to call
:return: The result of the function call
"""
argspec = salt.utils.args.get_function_argspec(salt_function)
# function_kwargs is initialized to a dictionary of keyword arguments the function to be run accepts
function_kwargs = dict(
zip(
argspec.args[
-len(argspec.defaults or []) :
], # pylint: disable=incompatible-py3-code
argspec.defaults or [],
)
)
# expected_args is initialized to a list of positional arguments that the function to be run accepts
expected_args = argspec.args[
: len(argspec.args or []) - len(argspec.defaults or [])
]
function_args, kw_to_arg_type = [], {}
for funcset in reversed(args or []):
if not isinstance(funcset, dict):
# We are just receiving a list of args to the function to be run, so just append
# those to the arg list that we will pass to the func.
function_args.append(funcset)
else:
for kwarg_key in funcset.keys():
                # We are going to pass in a keyword argument. The trick here is to
                # make certain that if it matches a positional parameter, we pass it
                # positionally rather than as a kwarg
if kwarg_key in expected_args:
kw_to_arg_type[kwarg_key] = funcset[kwarg_key]
else:
# Otherwise, we're good and just go ahead and pass the keyword/value pair into
# the kwargs list to be run.
function_kwargs.update(funcset)
function_args.reverse()
# Add kwargs passed as kwargs :)
function_kwargs.update(kwargs)
for arg in expected_args:
if arg in kw_to_arg_type:
function_args.append(kw_to_arg_type[arg])
_exp_prm = len(argspec.args or []) - len(argspec.defaults or [])
_passed_prm = len(function_args)
missing = []
if _exp_prm > _passed_prm:
for arg in argspec.args[_passed_prm:]:
if arg not in function_kwargs:
missing.append(arg)
else:
# Found the expected argument as a keyword
# increase the _passed_prm count
_passed_prm += 1
if missing:
raise SaltInvocationError("Missing arguments: {}".format(", ".join(missing)))
elif _exp_prm > _passed_prm:
raise SaltInvocationError(
"Function expects {} positional parameters, got only {}".format(
_exp_prm, _passed_prm
)
)
return salt_function(*function_args, **function_kwargs)
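# Usage sketch (hedged, hypothetical function): positional values and dicts of
# keyword arguments may be mixed in *args:
#
#     def _sample(a, b, c=3):
#         return a, b, c
#
#     call_function(_sample, 1, 2, {"c": 30})
#     # (1, 2, 30)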
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/functools.py
| 0.653238 | 0.262546 |
functools.py
|
pypi
|
import base64
import hashlib
import hmac
import os
import random
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from salt.utils.decorators.jinja import jinja_filter
@jinja_filter("base64_encode")
def base64_b64encode(instr):
"""
Encode a string as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder does not include
newline ('\\n') characters in the encoded output.
"""
return salt.utils.stringutils.to_unicode(
base64.b64encode(salt.utils.stringutils.to_bytes(instr)),
encoding="utf8" if salt.utils.platform.is_windows() else None,
)
@jinja_filter("base64_decode")
def base64_b64decode(instr):
"""
Decode a base64-encoded string using the "modern" Python interface.
"""
decoded = base64.b64decode(salt.utils.stringutils.to_bytes(instr))
try:
return salt.utils.stringutils.to_unicode(
decoded, encoding="utf8" if salt.utils.platform.is_windows() else None
)
except UnicodeDecodeError:
return decoded
def base64_encodestring(instr):
"""
Encode a byte-like object as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder includes
a newline ('\\n') character after every 76 characters and always
at the end of the encoded string.
"""
return salt.utils.stringutils.to_unicode(
base64.encodebytes(salt.utils.stringutils.to_bytes(instr)),
encoding="utf8" if salt.utils.platform.is_windows() else None,
)
def base64_decodestring(instr):
"""
Decode a base64-encoded byte-like object using the "modern" Python interface.
"""
bvalue = salt.utils.stringutils.to_bytes(instr)
decoded = base64.decodebytes(bvalue)
try:
return salt.utils.stringutils.to_unicode(
decoded, encoding="utf8" if salt.utils.platform.is_windows() else None
)
except UnicodeDecodeError:
return decoded
@jinja_filter("md5")
def md5_digest(instr):
"""
Generate an md5 hash of a given string.
"""
return salt.utils.stringutils.to_unicode(
hashlib.md5(salt.utils.stringutils.to_bytes(instr)).hexdigest()
)
@jinja_filter("sha1")
def sha1_digest(instr):
"""
Generate an sha1 hash of a given string.
"""
return hashlib.sha1(salt.utils.stringutils.to_bytes(instr)).hexdigest()
@jinja_filter("sha256")
def sha256_digest(instr):
"""
Generate a sha256 hash of a given string.
"""
return salt.utils.stringutils.to_unicode(
hashlib.sha256(salt.utils.stringutils.to_bytes(instr)).hexdigest()
)
@jinja_filter("sha512")
def sha512_digest(instr):
"""
Generate a sha512 hash of a given string
"""
return salt.utils.stringutils.to_unicode(
hashlib.sha512(salt.utils.stringutils.to_bytes(instr)).hexdigest()
)
@jinja_filter("hmac")
def hmac_signature(string, shared_secret, challenge_hmac):
"""
Verify a challenging hmac signature against a string / shared-secret
Returns a boolean if the verification succeeded or failed.
"""
msg = salt.utils.stringutils.to_bytes(string)
key = salt.utils.stringutils.to_bytes(shared_secret)
challenge = salt.utils.stringutils.to_bytes(challenge_hmac)
hmac_hash = hmac.new(key, msg, hashlib.sha256)
valid_hmac = base64.b64encode(hmac_hash.digest())
return valid_hmac == challenge
@jinja_filter("hmac_compute")
def hmac_compute(string, shared_secret):
"""
Create an hmac digest.
"""
msg = salt.utils.stringutils.to_bytes(string)
key = salt.utils.stringutils.to_bytes(shared_secret)
hmac_hash = hmac.new(key, msg, hashlib.sha256).hexdigest()
return hmac_hash
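# Usage sketch (hedged): hmac_signature() expects the challenge as the
# base64-encoded raw digest, whereas hmac_compute() returns a hex digest:
#
#     challenge = base64.b64encode(hmac.new(b"secret", b"msg", hashlib.sha256).digest())
#     hmac_signature("msg", "secret", challenge)
#     # True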
@jinja_filter("rand_str")
@jinja_filter("random_hash")
def random_hash(size=9999999999, hash_type=None):
"""
    Return a hash of randomized data from random.SystemRandom()
"""
if not hash_type:
hash_type = "md5"
hasher = getattr(hashlib, hash_type)
return hasher(
salt.utils.stringutils.to_bytes(str(random.SystemRandom().randint(0, size)))
).hexdigest()
@jinja_filter("file_hashsum")
def get_hash(path, form="sha256", chunk_size=65536):
"""
Get the hash sum of a file
This is better than ``get_sum`` for the following reasons:
- It does not read the entire file into memory.
- It does not return a string on error. The returned value of
``get_sum`` cannot really be trusted since it is vulnerable to
collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``
"""
hash_type = hasattr(hashlib, form) and getattr(hashlib, form) or None
if hash_type is None:
raise ValueError("Invalid hash type: {}".format(form))
with salt.utils.files.fopen(path, "rb") as ifile:
hash_obj = hash_type()
        # read the file in chunks rather than reading the entire file at once
for chunk in iter(lambda: ifile.read(chunk_size), b""):
hash_obj.update(chunk)
return hash_obj.hexdigest()
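# Usage sketch (hedged, hypothetical path):
#
#     get_hash("/etc/hosts", form="sha256")
#     # returns the file's sha256 hex digest as a string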
class DigestCollector:
"""
Class to collect digest of the file tree.
"""
def __init__(self, form="sha256", buff=0x10000):
"""
Constructor of the class.
:param form:
"""
self.__digest = hasattr(hashlib, form) and getattr(hashlib, form)() or None
if self.__digest is None:
raise ValueError("Invalid hash type: {}".format(form))
self.__buff = buff
def add(self, path):
"""
Update digest with the file content by path.
:param path:
:return:
"""
with salt.utils.files.fopen(path, "rb") as ifile:
for chunk in iter(lambda: ifile.read(self.__buff), b""):
self.__digest.update(chunk)
def digest(self):
"""
Get digest.
:return:
"""
return salt.utils.stringutils.to_str(self.__digest.hexdigest() + os.linesep)
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/hashutils.py
| 0.761982 | 0.338104 |
hashutils.py
|
pypi
|
import os
import salt.utils.data
import salt.utils.network
from salt.exceptions import SaltInvocationError
NOTSET = object()
def split(item, sep=",", maxsplit=-1):
return [x.strip() for x in item.split(sep, maxsplit)]
def get_port_def(port_num, proto="tcp"):
"""
Given a port number and protocol, returns the port definition expected by
docker-py. For TCP ports this is simply an integer, for UDP ports this is
(port_num, 'udp').
port_num can also be a string in the format 'port_num/udp'. If so, the
"proto" argument will be ignored. The reason we need to be able to pass in
the protocol separately is because this function is sometimes invoked on
data derived from a port range (e.g. '2222-2223/udp'). In these cases the
protocol has already been stripped off and the port range resolved into the
start and end of the range, and get_port_def() is invoked once for each
port number in that range. So, rather than munge udp ports back into
strings before passing them to this function, the function will see if it
has a string and use the protocol from it if present.
This function does not catch the TypeError or ValueError which would be
raised if the port number is non-numeric. This function either needs to be
run on known good input, or should be run within a try/except that catches
these two exceptions.
"""
try:
port_num, _, port_num_proto = port_num.partition("/")
except AttributeError:
pass
else:
if port_num_proto:
proto = port_num_proto
try:
if proto.lower() == "udp":
return int(port_num), "udp"
except AttributeError:
pass
return int(port_num)
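# Usage sketches (hedged):
#
#     get_port_def(80)           # 80
#     get_port_def(53, "udp")    # (53, 'udp')
#     get_port_def("53/udp")     # (53, 'udp') - protocol in the string wins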
def get_port_range(port_def):
"""
Given a port number or range, return a start and end to that range. Port
ranges are defined as a string containing two numbers separated by a dash
(e.g. '4505-4506').
A ValueError will be raised if bad input is provided.
"""
if isinstance(port_def, int):
# Single integer, start/end of range is the same
return port_def, port_def
try:
comps = [int(x) for x in split(port_def, "-")]
if len(comps) == 1:
range_start = range_end = comps[0]
else:
range_start, range_end = comps
if range_start > range_end:
raise ValueError("start > end")
except (TypeError, ValueError) as exc:
if exc.__str__() == "start > end":
msg = (
"Start of port range ({}) cannot be greater than end of "
"port range ({})".format(range_start, range_end)
)
else:
msg = "'{}' is non-numeric or an invalid port range".format(port_def)
raise ValueError(msg)
else:
return range_start, range_end
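# Usage sketches (hedged):
#
#     get_port_range("4505-4506")  # (4505, 4506)
#     get_port_range(8080)         # (8080, 8080)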
def map_vals(val, *names, **extra_opts):
"""
Many arguments come in as a list of VAL1:VAL2 pairs, but map to a list
of dicts in the format {NAME1: VAL1, NAME2: VAL2}. This function
provides common code to handle these instances.
"""
fill = extra_opts.pop("fill", NOTSET)
expected_num_elements = len(names)
val = translate_stringlist(val)
for idx, item in enumerate(val):
if not isinstance(item, dict):
elements = [x.strip() for x in item.split(":")]
num_elements = len(elements)
if num_elements < expected_num_elements:
if fill is NOTSET:
raise SaltInvocationError(
"'{}' contains {} value(s) (expected {})".format(
item, num_elements, expected_num_elements
)
)
elements.extend([fill] * (expected_num_elements - num_elements))
elif num_elements > expected_num_elements:
raise SaltInvocationError(
"'{}' contains {} value(s) (expected {})".format(
item,
num_elements,
expected_num_elements
if fill is NOTSET
else "up to {}".format(expected_num_elements),
)
)
val[idx] = dict(zip(names, elements))
return val
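# Usage sketch (hedged): translate PATH:RATE style pairs into dicts:
#
#     map_vals(["/dev/sda:1048576"], "Path", "Rate")
#     # [{'Path': '/dev/sda', 'Rate': '1048576'}]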
def validate_ip(val):
try:
if not salt.utils.network.is_ip(val):
raise SaltInvocationError("'{}' is not a valid IP address".format(val))
except RuntimeError:
pass
def validate_subnet(val):
try:
if not salt.utils.network.is_subnet(val):
raise SaltInvocationError("'{}' is not a valid subnet".format(val))
except RuntimeError:
pass
def translate_str(val):
return str(val) if not isinstance(val, str) else val
def translate_int(val):
if not isinstance(val, int):
try:
val = int(val)
except (TypeError, ValueError):
raise SaltInvocationError("'{}' is not an integer".format(val))
return val
def translate_bool(val):
return bool(val) if not isinstance(val, bool) else val
def translate_dict(val):
"""
Not really translating, just raising an exception if it's not a dict
"""
if not isinstance(val, dict):
raise SaltInvocationError("'{}' is not a dictionary".format(val))
return val
def translate_command(val):
"""
Input should either be a single string, or a list of strings. This is used
for the two args that deal with commands ("command" and "entrypoint").
"""
if isinstance(val, str):
return val
elif isinstance(val, list):
for idx, item in enumerate(val):
if not isinstance(item, str):
val[idx] = str(item)
else:
# Make sure we have a string
val = str(val)
return val
def translate_bytes(val):
"""
These values can be expressed as an integer number of bytes, or a string
expression (i.e. 100mb, 1gb, etc.).
"""
try:
val = int(val)
except (TypeError, ValueError):
if not isinstance(val, str):
val = str(val)
return val
def translate_stringlist(val):
"""
On the CLI, these are passed as multiple instances of a given CLI option.
In Salt, we accept these as a comma-delimited list but the API expects a
Python list. This function accepts input and returns it back as a Python
list of strings. If the input is a string which is a comma-separated list
of items, split that string and return it.
"""
if not isinstance(val, list):
try:
val = split(val)
except AttributeError:
val = split(str(val))
for idx, item in enumerate(val):
if not isinstance(item, str):
val[idx] = str(item)
return val
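# Usage sketches (hedged):
#
#     translate_stringlist("foo, bar")  # ['foo', 'bar']
#     translate_stringlist(["foo", 2])  # ['foo', '2']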
def translate_device_rates(val, numeric_rate=True):
"""
CLI input is a list of PATH:RATE pairs, but the API expects a list of
dictionaries in the format [{'Path': path, 'Rate': rate}]
"""
val = map_vals(val, "Path", "Rate")
for item in val:
try:
is_abs = os.path.isabs(item["Path"])
except AttributeError:
is_abs = False
if not is_abs:
raise SaltInvocationError("Path '{Path}' is not absolute".format(**item))
# Attempt to convert to an integer. Will fail if rate was specified as
# a shorthand (e.g. 1mb), this is OK as we will check to make sure the
# value is an integer below if that is what is required.
try:
item["Rate"] = int(item["Rate"])
except (TypeError, ValueError):
pass
if numeric_rate:
try:
item["Rate"] = int(item["Rate"])
except ValueError:
raise SaltInvocationError(
"Rate '{Rate}' for path '{Path}' is non-numeric".format(**item)
)
return val
def translate_key_val(val, delimiter="="):
"""
CLI input is a list of key/val pairs, but the API expects a dictionary in
the format {key: val}
"""
if isinstance(val, dict):
return val
val = translate_stringlist(val)
new_val = {}
for item in val:
try:
lvalue, rvalue = split(item, delimiter, 1)
except (AttributeError, TypeError, ValueError):
raise SaltInvocationError(
"'{}' is not a key{}value pair".format(item, delimiter)
)
new_val[lvalue] = rvalue
return new_val
def translate_labels(val):
"""
Can either be a list of label names, or a list of name=value pairs. The API
can accept either a list of label names or a dictionary mapping names to
values, so the value we translate will be different depending on the input.
"""
if not isinstance(val, dict):
if not isinstance(val, list):
val = split(val)
new_val = {}
for item in val:
if isinstance(item, dict):
if len(item) != 1:
raise SaltInvocationError("Invalid label(s)")
key = next(iter(item))
val = item[key]
else:
try:
key, val = split(item, "=", 1)
except ValueError:
key = item
val = ""
if not isinstance(key, str):
key = str(key)
if not isinstance(val, str):
val = str(val)
new_val[key] = val
val = new_val
return val
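# Usage sketches (hedged):
#
#     translate_labels("foo=bar,baz")     # {'foo': 'bar', 'baz': ''}
#     translate_labels([{"foo": "bar"}])  # {'foo': 'bar'}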
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/dockermod/translate/helpers.py
| 0.511961 | 0.438304 |
helpers.py
|
pypi
|
import re
import socket
import salt.utils.platform
if salt.utils.platform.is_windows():
from salt.ext import win_inet_pton # pylint: disable=unused-import
def mac(addr):
"""
Validates a mac address
"""
valid = re.compile(
r"""
(^([0-9A-F]{1,2}[-]){5}([0-9A-F]{1,2})$
|^([0-9A-F]{1,2}[:]){5}([0-9A-F]{1,2})$
|^([0-9A-F]{1,2}[.]){5}([0-9A-F]{1,2})$)
""",
re.VERBOSE | re.IGNORECASE,
)
return valid.match(addr) is not None
def __ip_addr(addr, address_family=socket.AF_INET):
"""
Returns True if the IP address (and optional subnet) are valid, otherwise
returns False.
"""
mask_max = "32"
if address_family == socket.AF_INET6:
mask_max = "128"
try:
if "/" not in addr:
addr = "{addr}/{mask_max}".format(addr=addr, mask_max=mask_max)
except TypeError:
return False
ip, mask = addr.rsplit("/", 1)
# Verify that IP address is valid
try:
socket.inet_pton(address_family, ip)
except OSError:
return False
# Verify that mask is valid
try:
mask = int(mask)
except ValueError:
return False
else:
if not 1 <= mask <= int(mask_max):
return False
return True
def ipv4_addr(addr):
"""
Returns True if the IPv4 address (and optional subnet) are valid, otherwise
returns False.
"""
return __ip_addr(addr, socket.AF_INET)
def ipv6_addr(addr):
"""
Returns True if the IPv6 address (and optional subnet) are valid, otherwise
returns False.
"""
return __ip_addr(addr, socket.AF_INET6)
def ip_addr(addr):
"""
Returns True if the IPv4 or IPv6 address (and optional subnet) are valid,
otherwise returns False.
"""
return ipv4_addr(addr) or ipv6_addr(addr)
def netmask(mask):
"""
Returns True if the value passed is a valid netmask, otherwise return False
"""
if not isinstance(mask, str):
return False
octets = mask.split(".")
if not len(octets) == 4:
return False
return ipv4_addr(mask) and octets == sorted(octets, reverse=True)
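# Usage sketches (hedged):
#
#     netmask("255.255.255.0")  # True
#     netmask("255.0.255.0")    # False (octets must be non-increasing)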
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/validate/net.py
| 0.52975 | 0.207114 |
net.py
|
pypi
|
import errno
import logging
import os
import re
import sys
import salt.utils.data
import salt.utils.files
import salt.utils.versions
log = logging.getLogger(__name__)
def rtag(opts):
"""
Return the rtag file location. This file is used to ensure that we don't
refresh more than once (unless explicitly configured to do so).
"""
return os.path.join(opts["cachedir"], "pkg_refresh")
def clear_rtag(opts):
"""
Remove the rtag file
"""
try:
os.remove(rtag(opts))
except OSError as exc:
if exc.errno != errno.ENOENT:
# Using __str__() here to get the fully-formatted error message
# (error number, error message, path)
log.warning("Encountered error removing rtag: %s", exc.__str__())
def write_rtag(opts):
"""
Write the rtag file
"""
rtag_file = rtag(opts)
if not os.path.exists(rtag_file):
try:
with salt.utils.files.fopen(rtag_file, "w+"):
pass
except OSError as exc:
log.warning("Encountered error writing rtag: %s", exc.__str__())
def check_refresh(opts, refresh=None):
"""
Check whether or not a refresh is necessary
Returns:
- True if refresh evaluates as True
- False if refresh is False
- A boolean if refresh is not False and the rtag file exists
"""
return bool(
salt.utils.data.is_true(refresh)
or (os.path.isfile(rtag(opts)) and refresh is not False)
)
def split_comparison(version):
match = re.match(r"^(<=>|!=|>=|<=|>>|<<|<>|>|<|=)?\s?([^<>=]+)$", version)
if match:
comparison = match.group(1) or ""
version = match.group(2)
else:
comparison = ""
return comparison, version
def match_version(desired, available, cmp_func=None, ignore_epoch=False):
"""
Returns the first version of the list of available versions which matches
the desired version comparison expression, or None if no match is found.
"""
oper, version = split_comparison(desired)
if not oper:
oper = "=="
for candidate in available:
if salt.utils.versions.compare(
ver1=candidate,
oper=oper,
ver2=version,
cmp_func=cmp_func,
ignore_epoch=ignore_epoch,
):
return candidate
return None
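# Usage sketch (hedged): the first candidate, in list order, that satisfies
# the comparison is returned:
#
#     match_version(">=1.2", ["1.1", "1.2", "1.3"])
#     # '1.2'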
def check_bundled():
"""
Gather run-time information to indicate if we are running from source or bundled.
"""
if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
return True
return False
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/pkg/__init__.py
| 0.402627 | 0.150528 |
__init__.py
|
pypi
|
import logging
import salt.utils.stringutils
from salt.exceptions import SaltException
log = logging.getLogger(__name__)
class OutputUnifier:
def __init__(self, *policies):
self.policies = []
for pls in policies:
if not hasattr(self, pls):
raise SaltException("Unknown policy: {}".format(pls))
else:
self.policies.append(getattr(self, pls))
def _run_policies(self, data):
for pls in self.policies:
try:
data = pls(data)
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
data = {
"result": False,
"name": "later",
"changes": {},
"comment": "An exception occurred in this state: {}".format(exc),
}
return data
def __call__(self, func):
def _func(*args, **kwargs):
result = func(*args, **kwargs)
sub_state_run = None
if isinstance(result, dict):
sub_state_run = result.get("sub_state_run", ())
result = self._run_policies(result)
if sub_state_run:
result["sub_state_run"] = [
self._run_policies(sub_state_data)
for sub_state_data in sub_state_run
]
return result
return _func
def content_check(self, result):
"""
Checks for specific types in the state output.
        Raises an Exception in case a particular rule is broken.
:param result:
:return:
"""
if not isinstance(result, dict):
err_msg = "Malformed state return. Data must be a dictionary type."
elif not isinstance(result.get("changes"), dict):
err_msg = "'Changes' should be a dictionary."
else:
missing = []
for val in ["name", "result", "changes", "comment"]:
if val not in result:
missing.append(val)
if missing:
err_msg = "The following keys were not present in the state return: {}.".format(
", ".join(missing)
)
else:
err_msg = None
if err_msg:
raise SaltException(err_msg)
for sub_state in result.get("sub_state_run", ()):
self.content_check(sub_state)
return result
def unify(self, result):
"""
        While comments as a list are allowed,
        comments need to be strings for backward compatibility.
See such claim here: https://github.com/saltstack/salt/pull/43070
Rules applied:
- 'comment' is joined into a multi-line string, in case the value is a list.
- 'result' should be always either True, False or None.
:param result:
:return:
"""
if isinstance(result.get("comment"), list):
result["comment"] = "\n".join(
[salt.utils.stringutils.to_unicode(elm) for elm in result["comment"]]
)
if result.get("result") is not None:
result["result"] = bool(result["result"])
return result
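# Usage sketch (hedged): OutputUnifier is applied as a decorator, e.g.
#
#     @OutputUnifier("content_check", "unify")
#     def sample_state(name):
#         return {"name": name, "result": 1, "changes": {}, "comment": ["a", "b"]}
#
#     sample_state("x")
#     # {'name': 'x', 'result': True, 'changes': {}, 'comment': 'a\nb'}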
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/utils/decorators/state.py
| 0.57821 | 0.206774 |
state.py
|
pypi
|
try:
import stringprep
except ImportError:
HAVE_STRINGPREP = False
def saslprep(data):
"""SASLprep dummy"""
if isinstance(data, str):
raise TypeError(
"The stringprep module is not available. Usernames and "
"passwords must be ASCII strings.")
return data
else:
HAVE_STRINGPREP = True
import unicodedata
# RFC4013 section 2.3 prohibited output.
_PROHIBITED = (
# A strict reading of RFC 4013 requires table c12 here, but
# characters from it are mapped to SPACE in the Map step. Can
# normalization reintroduce them somehow?
stringprep.in_table_c12,
stringprep.in_table_c21_c22,
stringprep.in_table_c3,
stringprep.in_table_c4,
stringprep.in_table_c5,
stringprep.in_table_c6,
stringprep.in_table_c7,
stringprep.in_table_c8,
stringprep.in_table_c9)
def saslprep(data, prohibit_unassigned_code_points=True):
"""An implementation of RFC4013 SASLprep.
:Parameters:
- `data`: The string to SASLprep. Unicode strings
(python 2.x unicode, 3.x str) are supported. Byte strings
(python 2.x str, 3.x bytes) are ignored.
- `prohibit_unassigned_code_points`: True / False. RFC 3454
and RFCs for various SASL mechanisms distinguish between
`queries` (unassigned code points allowed) and
`stored strings` (unassigned code points prohibited). Defaults
to ``True`` (unassigned code points are prohibited).
:Returns:
The SASLprep'ed version of `data`.
"""
if not isinstance(data, str):
return data
if prohibit_unassigned_code_points:
prohibited = _PROHIBITED + (stringprep.in_table_a1,)
else:
prohibited = _PROHIBITED
# RFC3454 section 2, step 1 - Map
# RFC4013 section 2.1 mappings
# Map Non-ASCII space characters to SPACE (U+0020). Map
# commonly mapped to nothing characters to, well, nothing.
in_table_c12 = stringprep.in_table_c12
in_table_b1 = stringprep.in_table_b1
data = "".join(
["\u0020" if in_table_c12(elt) else elt
for elt in data if not in_table_b1(elt)])
# RFC3454 section 2, step 2 - Normalize
# RFC4013 section 2.2 normalization
data = unicodedata.ucd_3_2_0.normalize('NFKC', data)
in_table_d1 = stringprep.in_table_d1
if in_table_d1(data[0]):
if not in_table_d1(data[-1]):
# RFC3454, Section 6, #3. If a string contains any
# RandALCat character, the first and last characters
# MUST be RandALCat characters.
raise ValueError("SASLprep: failed bidirectional check")
# RFC3454, Section 6, #2. If a string contains any RandALCat
# character, it MUST NOT contain any LCat character.
prohibited = prohibited + (stringprep.in_table_d2,)
else:
# RFC3454, Section 6, #3. Following the logic of #3, if
# the first character is not a RandALCat, no other character
# can be either.
prohibited = prohibited + (in_table_d1,)
# RFC3454 section 2, step 3 and 4 - Prohibit and check bidi
for char in data:
if any(in_table(char) for in_table in prohibited):
raise ValueError(
"SASLprep: failed prohibited character check")
return data
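# Usage sketches (hedged):
#
#     saslprep("I\u00ADX")  # 'IX' (the soft hyphen is mapped to nothing)
#     saslprep(b"user")     # b'user' (byte strings pass through untouched)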
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/saslprep.py
| 0.778986 | 0.278379 |
saslprep.py
|
pypi
|
import socket
import ctypes
import os
from salt._compat import ipaddress
class sockaddr(ctypes.Structure):
_fields_ = [("sa_family", ctypes.c_short),
("__pad1", ctypes.c_ushort),
("ipv4_addr", ctypes.c_byte * 4),
("ipv6_addr", ctypes.c_byte * 16),
("__pad2", ctypes.c_ulong)]
if hasattr(ctypes, 'windll'):
WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA
WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA
else:
def not_windows(*args):
raise SystemError(
"Invalid platform. ctypes.windll must be available."
)
WSAStringToAddressA = not_windows
WSAAddressToStringA = not_windows
def inet_pton(address_family, ip_string):
# Verify IP Address
# This will catch IP Addresses such as 10.1.2
if address_family == socket.AF_INET:
try:
ipaddress.ip_address(str(ip_string))
except ValueError:
raise OSError('illegal IP address string passed to inet_pton')
return socket.inet_aton(ip_string)
# Verify IP Address
# The `WSAStringToAddressA` function handles notations used by Berkeley
# software which includes 3 part IP Addresses such as `10.1.2`. That's why
# the above check is needed to enforce more strict IP Address validation as
# used by the `inet_pton` function in Unix.
# See the following:
# https://stackoverflow.com/a/29286098
# Docs for the `inet_addr` function on MSDN
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms738563.aspx
addr = sockaddr()
addr.sa_family = address_family
addr_size = ctypes.c_int(ctypes.sizeof(addr))
if WSAStringToAddressA(
ip_string.encode('ascii'),
address_family,
None,
ctypes.byref(addr),
ctypes.byref(addr_size)
) != 0:
raise OSError(ctypes.FormatError())
if address_family == socket.AF_INET:
return ctypes.string_at(addr.ipv4_addr, 4)
if address_family == socket.AF_INET6:
return ctypes.string_at(addr.ipv6_addr, 16)
raise OSError('unknown address family')
def inet_ntop(address_family, packed_ip):
addr = sockaddr()
addr.sa_family = address_family
addr_size = ctypes.c_int(ctypes.sizeof(addr))
ip_string = ctypes.create_string_buffer(128)
ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string))
if address_family == socket.AF_INET:
if len(packed_ip) != ctypes.sizeof(addr.ipv4_addr):
raise OSError('packed IP wrong length for inet_ntoa')
ctypes.memmove(addr.ipv4_addr, packed_ip, 4)
elif address_family == socket.AF_INET6:
if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr):
raise OSError('packed IP wrong length for inet_ntoa')
ctypes.memmove(addr.ipv6_addr, packed_ip, 16)
else:
raise OSError('unknown address family')
if WSAAddressToStringA(
ctypes.byref(addr),
addr_size,
None,
ip_string,
ctypes.byref(ip_string_size)
) != 0:
raise OSError(ctypes.FormatError())
return ip_string[:ip_string_size.value - 1]
# Adding our two functions to the socket library
if os.name == 'nt':
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/win_inet_pton.py
| 0.608478 | 0.199795 |
win_inet_pton.py
|
pypi
|
# THIS CODE SHOULD BE REMOVED WHEN importlib-metadata >= 3.3.0 IS AVAILABLE
# AS A SYSTEM PACKAGE ON ALL THE PLATFORMS FOR WHICH SALT BUILDS PACKAGES OR
# WHEN THE MINIMUM PYTHON VERSION IS 3.10
# pylint: skip-file
import io
import posixpath
import zipfile
import itertools
import contextlib
import sys
import pathlib
if sys.version_info < (3, 7):
from collections import OrderedDict
else:
OrderedDict = dict
def _parents(path):
"""
Given a path with elements separated by
posixpath.sep, generate all parents of that path.
>>> list(_parents('b/d'))
['b']
>>> list(_parents('/b/d/'))
['/b']
>>> list(_parents('b/d/f/'))
['b/d', 'b']
>>> list(_parents('b'))
[]
>>> list(_parents(''))
[]
"""
return itertools.islice(_ancestry(path), 1, None)
def _ancestry(path):
"""
Given a path with elements separated by
posixpath.sep, generate all elements of that path
>>> list(_ancestry('b/d'))
['b/d', 'b']
>>> list(_ancestry('/b/d/'))
['/b/d', '/b']
>>> list(_ancestry('b/d/f/'))
['b/d/f', 'b/d', 'b']
>>> list(_ancestry('b'))
['b']
>>> list(_ancestry(''))
[]
"""
path = path.rstrip(posixpath.sep)
while path and path != posixpath.sep:
yield path
path, tail = posixpath.split(path)
_dedupe = OrderedDict.fromkeys
"""Deduplicate an iterable in original order"""
def _difference(minuend, subtrahend):
"""
Return items in minuend not in subtrahend, retaining order
with O(1) lookup.
"""
return itertools.filterfalse(set(subtrahend).__contains__, minuend)
class CompleteDirs(zipfile.ZipFile):
"""
A ZipFile subclass that ensures that implied directories
are always included in the namelist.
"""
@staticmethod
def _implied_dirs(names):
parents = itertools.chain.from_iterable(map(_parents, names))
as_dirs = (p + posixpath.sep for p in parents)
return _dedupe(_difference(as_dirs, names))
def namelist(self):
names = super().namelist()
return names + list(self._implied_dirs(names))
def _name_set(self):
return set(self.namelist())
def resolve_dir(self, name):
"""
If the name represents a directory, return that name
as a directory (with the trailing slash).
"""
names = self._name_set()
dirname = name + '/'
dir_match = name not in names and dirname in names
return dirname if dir_match else name
@classmethod
def make(cls, source):
"""
Given a source (filename or zipfile), return an
appropriate CompleteDirs subclass.
"""
if isinstance(source, CompleteDirs):
return source
if not isinstance(source, zipfile.ZipFile):
return cls(_pathlib_compat(source))
# Only allow for FastLookup when supplied zipfile is read-only
if 'r' not in source.mode:
cls = CompleteDirs
source.__class__ = cls
return source
class FastLookup(CompleteDirs):
"""
ZipFile subclass to ensure implicit
dirs exist and are resolved rapidly.
"""
def namelist(self):
with contextlib.suppress(AttributeError):
return self.__names
self.__names = super().namelist()
return self.__names
def _name_set(self):
with contextlib.suppress(AttributeError):
return self.__lookup
self.__lookup = super()._name_set()
return self.__lookup
def _pathlib_compat(path):
"""
For path-like objects, convert to a filename for compatibility
on Python 3.6.1 and earlier.
"""
try:
return path.__fspath__()
except AttributeError:
return str(path)
class Path:
"""
A pathlib-compatible interface for zip files.
Consider a zip file with this structure::
.
├── a.txt
└── b
├── c.txt
└── d
└── e.txt
>>> data = io.BytesIO()
>>> zf = zipfile.ZipFile(data, 'w')
>>> zf.writestr('a.txt', 'content of a')
>>> zf.writestr('b/c.txt', 'content of c')
>>> zf.writestr('b/d/e.txt', 'content of e')
>>> zf.filename = 'mem/abcde.zip'
Path accepts the zipfile object itself or a filename
>>> root = Path(zf)
From there, several path operations are available.
Directory iteration (including the zip file itself):
>>> a, b = root.iterdir()
>>> a
Path('mem/abcde.zip', 'a.txt')
>>> b
Path('mem/abcde.zip', 'b/')
name property:
>>> b.name
'b'
join with divide operator:
>>> c = b / 'c.txt'
>>> c
Path('mem/abcde.zip', 'b/c.txt')
>>> c.name
'c.txt'
Read text:
>>> c.read_text()
'content of c'
existence:
>>> c.exists()
True
>>> (b / 'missing.txt').exists()
False
Coercion to string:
>>> import os
>>> str(c).replace(os.sep, posixpath.sep)
'mem/abcde.zip/b/c.txt'
At the root, ``name``, ``filename``, and ``parent``
resolve to the zipfile. Note these attributes are not
valid and will raise a ``ValueError`` if the zipfile
has no filename.
>>> root.name
'abcde.zip'
>>> str(root.filename).replace(os.sep, posixpath.sep)
'mem/abcde.zip'
>>> str(root.parent)
'mem'
"""
__repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
def __init__(self, root, at=""):
"""
Construct a Path from a ZipFile or filename.
Note: When the source is an existing ZipFile object,
its type (__class__) will be mutated to a
specialized type. If the caller wishes to retain the
original type, the caller should either create a
separate ZipFile object or pass a filename.
"""
self.root = FastLookup.make(root)
self.at = at
def open(self, mode='r', *args, pwd=None, **kwargs):
"""
Open this entry as text or binary following the semantics
of ``pathlib.Path.open()`` by passing arguments through
to io.TextIOWrapper().
"""
if self.is_dir():
raise IsADirectoryError(self)
zip_mode = mode[0]
if not self.exists() and zip_mode == 'r':
raise FileNotFoundError(self)
stream = self.root.open(self.at, zip_mode, pwd=pwd)
if 'b' in mode:
if args or kwargs:
raise ValueError("encoding args invalid for binary operation")
return stream
return io.TextIOWrapper(stream, *args, **kwargs)
@property
def name(self):
return pathlib.Path(self.at).name or self.filename.name
@property
def suffix(self):
return pathlib.Path(self.at).suffix or self.filename.suffix
@property
def suffixes(self):
return pathlib.Path(self.at).suffixes or self.filename.suffixes
@property
def stem(self):
return pathlib.Path(self.at).stem or self.filename.stem
@property
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
def read_text(self, *args, **kwargs):
with self.open('r', *args, **kwargs) as strm:
return strm.read()
def read_bytes(self):
with self.open('rb') as strm:
return strm.read()
def _is_child(self, path):
return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
def _next(self, at):
return self.__class__(self.root, at)
def is_dir(self):
return not self.at or self.at.endswith("/")
def is_file(self):
return self.exists() and not self.is_dir()
def exists(self):
return self.at in self.root._name_set()
def iterdir(self):
if not self.is_dir():
raise ValueError("Can't listdir a file")
subs = map(self._next, self.root.namelist())
return filter(self._is_child, subs)
def __str__(self):
return posixpath.join(self.root.filename, self.at)
def __repr__(self):
return self.__repr.format(self=self)
def joinpath(self, *other):
next = posixpath.join(self.at, *map(_pathlib_compat, other))
return self._next(self.root.resolve_dir(next))
__truediv__ = joinpath
@property
def parent(self):
if not self.at:
return self.filename.parent
parent_at = posixpath.dirname(self.at.rstrip('/'))
if parent_at:
parent_at += '/'
return self._next(parent_at)
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/zipp.py
| 0.531939 | 0.181299 |
zipp.py
|
pypi
|
from pyVmomi.VmomiSupport import (
CreateDataType,
CreateManagedType,
CreateEnumType,
AddVersion,
AddVersionParent,
F_LINK,
F_LINKABLE,
F_OPTIONAL,
)
CreateManagedType(
"vim.cluster.VsanPerformanceManager",
"VsanPerformanceManager",
"vmodl.ManagedObject",
"vim.version.version9",
[],
[
(
"setStatsObjectPolicy",
"VsanPerfSetStatsObjectPolicy",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"profile",
"vim.vm.ProfileSpec",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"deleteStatsObject",
"VsanPerfDeleteStatsObject",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"createStatsObjectTask",
"VsanPerfCreateStatsObjectTask",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"profile",
"vim.vm.ProfileSpec",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"deleteStatsObjectTask",
"VsanPerfDeleteStatsObjectTask",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"queryClusterHealth",
"VsanPerfQueryClusterHealth",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
),
(0, "vmodl.DynamicData[]", "vmodl.DynamicData[]"),
"System.Read",
None,
),
(
"queryStatsObjectInformation",
"VsanPerfQueryStatsObjectInformation",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(
0,
"vim.cluster.VsanObjectInformation",
"vim.cluster.VsanObjectInformation",
),
"System.Read",
None,
),
(
"queryNodeInformation",
"VsanPerfQueryNodeInformation",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(
0 | F_OPTIONAL,
"vim.cluster.VsanPerfNodeInformation[]",
"vim.cluster.VsanPerfNodeInformation[]",
),
"System.Read",
None,
),
(
"queryVsanPerf",
"VsanPerfQueryPerf",
"vim.version.version9",
(
(
"querySpecs",
"vim.cluster.VsanPerfQuerySpec[]",
"vim.version.version9",
0,
None,
),
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(
0,
"vim.cluster.VsanPerfEntityMetricCSV[]",
"vim.cluster.VsanPerfEntityMetricCSV[]",
),
"System.Read",
None,
),
(
"getSupportedEntityTypes",
"VsanPerfGetSupportedEntityTypes",
"vim.version.version9",
tuple(),
(
0 | F_OPTIONAL,
"vim.cluster.VsanPerfEntityType[]",
"vim.cluster.VsanPerfEntityType[]",
),
"System.Read",
None,
),
(
"createStatsObject",
"VsanPerfCreateStatsObject",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"profile",
"vim.vm.ProfileSpec",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "string", "string"),
"System.Read",
None,
),
],
)
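
# vim.cluster.VsanVcDiskManagementSystem: vCenter-side disk management for
# vSAN, covering disk-group creation (initializeDiskMappings), all-flash
# capability discovery, and per-host disk-mapping queries.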
CreateManagedType(
"vim.cluster.VsanVcDiskManagementSystem",
"VimClusterVsanVcDiskManagementSystem",
"vmodl.ManagedObject",
"vim.version.version10",
[],
[
(
"initializeDiskMappings",
"InitializeDiskMappings",
"vim.version.version10",
(
(
"spec",
"vim.vsan.host.DiskMappingCreationSpec",
"vim.version.version10",
0,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"retrieveAllFlashCapabilities",
"RetrieveAllFlashCapabilities",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
),
(
0 | F_OPTIONAL,
"vim.vsan.host.VsanHostCapability[]",
"vim.vsan.host.VsanHostCapability[]",
),
"System.Read",
None,
),
(
"queryDiskMappings",
"QueryDiskMappings",
"vim.version.version10",
(("host", "vim.HostSystem", "vim.version.version10", 0, None),),
(
0 | F_OPTIONAL,
"vim.vsan.host.DiskMapInfoEx[]",
"vim.vsan.host.DiskMapInfoEx[]",
),
"System.Read",
None,
),
],
)
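
# vim.cluster.VsanObjectSystem: per-object operations, namely applying a
# storage policy to a vSAN object and querying object identities, health,
# and space usage.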
CreateManagedType(
"vim.cluster.VsanObjectSystem",
"VsanObjectSystem",
"vmodl.ManagedObject",
"vim.version.version9",
[],
[
(
"setVsanObjectPolicy",
"VosSetVsanObjectPolicy",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
("vsanObjectUuid", "string", "vim.version.version9", 0, None),
(
"profile",
"vim.vm.ProfileSpec",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"queryObjectIdentities",
"VsanQueryObjectIdentities",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
("objUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),
(
"includeHealth",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"includeObjIdentity",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"includeSpaceSummary",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(
0 | F_OPTIONAL,
"vim.cluster.VsanObjectIdentityAndHealth",
"vim.cluster.VsanObjectIdentityAndHealth",
),
"System.Read",
None,
),
(
"queryVsanObjectInformation",
"VosQueryVsanObjectInformation",
"vim.version.version9",
(
(
"cluster",
"vim.ComputeResource",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"vsanObjectQuerySpecs",
"vim.cluster.VsanObjectQuerySpec[]",
"vim.version.version9",
0,
None,
),
),
(
0,
"vim.cluster.VsanObjectInformation[]",
"vim.cluster.VsanObjectInformation[]",
),
"System.Read",
None,
),
],
)
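
# vim.host.VsanStretchedClusterSystem: host-level stretched-cluster
# operations, including witness join/leave, preferred fault domain control,
# and unicast agent management.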
CreateManagedType(
"vim.host.VsanStretchedClusterSystem",
"VimHostVsanStretchedClusterSystem",
"vmodl.ManagedObject",
"vim.version.version10",
[],
[
(
"getStretchedClusterInfoFromCmmds",
"VSANHostGetStretchedClusterInfoFromCmmds",
"vim.version.version10",
tuple(),
(
0 | F_OPTIONAL,
"vim.host.VSANStretchedClusterHostInfo[]",
"vim.host.VSANStretchedClusterHostInfo[]",
),
"System.Read",
None,
),
(
"witnessJoinVsanCluster",
"VSANWitnessJoinVsanCluster",
"vim.version.version10",
(
("clusterUuid", "string", "vim.version.version10", 0, None),
("preferredFd", "string", "vim.version.version10", 0, None),
(
"disableVsanAllowed",
"boolean",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(0, "void", "void"),
"System.Read",
None,
),
(
"witnessSetPreferredFaultDomain",
"VSANWitnessSetPreferredFaultDomain",
"vim.version.version10",
(("preferredFd", "string", "vim.version.version10", 0, None),),
(0, "void", "void"),
"System.Read",
None,
),
(
"addUnicastAgent",
"VSANHostAddUnicastAgent",
"vim.version.version10",
(
("witnessAddress", "string", "vim.version.version10", 0, None),
("witnessPort", "int", "vim.version.version10", 0 | F_OPTIONAL, None),
("overwrite", "boolean", "vim.version.version10", 0 | F_OPTIONAL, None),
),
(0, "void", "void"),
"System.Read",
None,
),
(
"clusterGetPreferredFaultDomain",
"VSANClusterGetPreferredFaultDomain",
"vim.version.version10",
tuple(),
(
0 | F_OPTIONAL,
"vim.host.VSANCmmdsPreferredFaultDomainInfo",
"vim.host.VSANCmmdsPreferredFaultDomainInfo",
),
"System.Read",
None,
),
(
"witnessLeaveVsanCluster",
"VSANWitnessLeaveVsanCluster",
"vim.version.version10",
tuple(),
(0, "void", "void"),
"System.Read",
None,
),
(
"getStretchedClusterCapability",
"VSANHostGetStretchedClusterCapability",
"vim.version.version10",
tuple(),
(
0,
"vim.host.VSANStretchedClusterHostCapability",
"vim.host.VSANStretchedClusterHostCapability",
),
"System.Read",
None,
),
(
"removeUnicastAgent",
"VSANHostRemoveUnicastAgent",
"vim.version.version10",
(
("witnessAddress", "string", "vim.version.version10", 0, None),
(
"ignoreExistence",
"boolean",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(0, "void", "void"),
"System.Read",
None,
),
(
"listUnicastAgent",
"VSANHostListUnicastAgent",
"vim.version.version10",
tuple(),
(0, "string", "string"),
"System.Read",
None,
),
],
)
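
# vim.VsanUpgradeSystemEx: extended on-disk format upgrade workflow, with a
# preflight check, the upgrade task itself, and a query for the highest
# supported disk-format version.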
CreateManagedType(
"vim.VsanUpgradeSystemEx",
"VsanUpgradeSystemEx",
"vmodl.ManagedObject",
"vim.version.version10",
[],
[
(
"performUpgrade",
"PerformVsanUpgradeEx",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
(
"performObjectUpgrade",
"boolean",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
(
"downgradeFormat",
"boolean",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
(
"allowReducedRedundancy",
"boolean",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
(
"excludeHosts",
"vim.HostSystem[]",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
(
"spec",
"vim.cluster.VsanDiskFormatConversionSpec",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"performUpgradePreflightCheck",
"PerformVsanUpgradePreflightCheckEx",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
(
"downgradeFormat",
"boolean",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
(
"spec",
"vim.cluster.VsanDiskFormatConversionSpec",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(
0,
"vim.cluster.VsanDiskFormatConversionCheckResult",
"vim.cluster.VsanDiskFormatConversionCheckResult",
),
"System.Read",
None,
),
(
"retrieveSupportedFormatVersion",
"RetrieveSupportedVsanFormatVersion",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
),
(0, "int", "int"),
"System.Read",
None,
),
],
)
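
# vim.cluster.VsanCapabilitySystem: capability discovery for a set of
# managed-object targets.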
CreateManagedType(
"vim.cluster.VsanCapabilitySystem",
"VsanCapabilitySystem",
"vmodl.ManagedObject",
"vim.version.version10",
[],
[
(
"getCapabilities",
"VsanGetCapabilities",
"vim.version.version10",
(
(
"targets",
"vmodl.ManagedObject[]",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.cluster.VsanCapability[]", "vim.cluster.VsanCapability[]"),
"System.Read",
None,
),
],
)
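
# vim.cluster.VsanSpaceReportSystem: cluster-wide space usage reporting.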
CreateManagedType(
"vim.cluster.VsanSpaceReportSystem",
"VsanSpaceReportSystem",
"vmodl.ManagedObject",
"vim.version.version9",
[],
[
(
"querySpaceUsage",
"VsanQuerySpaceUsage",
"vim.version.version9",
(("cluster", "vim.ComputeResource", "vim.version.version9", 0, None),),
(0, "vim.cluster.VsanSpaceUsage", "vim.cluster.VsanSpaceUsage"),
"System.Read",
None,
),
],
)
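
# vim.cluster.VsanVcClusterConfigSystem: read and reconfigure the vSAN
# cluster configuration (vim.vsan.ConfigInfoEx / vim.vsan.ReconfigSpec).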
CreateManagedType(
"vim.cluster.VsanVcClusterConfigSystem",
"VsanVcClusterConfigSystem",
"vmodl.ManagedObject",
"vim.version.version10",
[],
[
(
"getConfigInfoEx",
"VsanClusterGetConfig",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
),
(0, "vim.vsan.ConfigInfoEx", "vim.vsan.ConfigInfoEx"),
"System.Read",
None,
),
(
"reconfigureEx",
"VsanClusterReconfig",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
(
"vsanReconfigSpec",
"vim.vsan.ReconfigSpec",
"vim.version.version10",
0,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
],
)
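
# vim.host.VsanHealthSystem: host-side health checks and diagnostics:
# advanced-config queries, physical disk health, network verification and
# iperf load tests, VMDK load tests, proactive rebalance, and object repair.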
CreateManagedType(
"vim.host.VsanHealthSystem",
"HostVsanHealthSystem",
"vmodl.ManagedObject",
"vim.version.version9",
[],
[
(
"queryAdvCfg",
"VsanHostQueryAdvCfg",
"vim.version.version9",
(("options", "string[]", "vim.version.version9", 0, None),),
(0, "vim.option.OptionValue[]", "vim.option.OptionValue[]"),
"System.Read",
None,
),
(
"queryPhysicalDiskHealthSummary",
"VsanHostQueryPhysicalDiskHealthSummary",
"vim.version.version9",
tuple(),
(
0,
"vim.host.VsanPhysicalDiskHealthSummary",
"vim.host.VsanPhysicalDiskHealthSummary",
),
"System.Read",
None,
),
(
"startProactiveRebalance",
"VsanStartProactiveRebalance",
"vim.version.version9",
(
("timeSpan", "int", "vim.version.version9", 0 | F_OPTIONAL, None),
(
"varianceThreshold",
"float",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
("timeThreshold", "int", "vim.version.version9", 0 | F_OPTIONAL, None),
("rateThreshold", "int", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"queryHostInfoByUuids",
"VsanHostQueryHostInfoByUuids",
"vim.version.version9",
(("uuids", "string[]", "vim.version.version9", 0, None),),
(
0,
"vim.host.VsanQueryResultHostInfo[]",
"vim.host.VsanQueryResultHostInfo[]",
),
"System.Read",
None,
),
(
"queryVersion",
"VsanHostQueryHealthSystemVersion",
"vim.version.version9",
tuple(),
(0, "string", "string"),
"System.Read",
None,
),
(
"queryVerifyNetworkSettings",
"VsanHostQueryVerifyNetworkSettings",
"vim.version.version9",
(("peers", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),),
(0, "vim.host.VsanNetworkHealthResult", "vim.host.VsanNetworkHealthResult"),
"System.Read",
None,
),
(
"queryRunIperfClient",
"VsanHostQueryRunIperfClient",
"vim.version.version9",
(
("multicast", "boolean", "vim.version.version9", 0, None),
("serverIp", "string", "vim.version.version9", 0, None),
),
(
0,
"vim.host.VsanNetworkLoadTestResult",
"vim.host.VsanNetworkLoadTestResult",
),
"System.Read",
None,
),
(
"runVmdkLoadTest",
"VsanHostRunVmdkLoadTest",
"vim.version.version9",
(
("runname", "string", "vim.version.version9", 0, None),
("durationSec", "int", "vim.version.version9", 0, None),
(
"specs",
"vim.host.VsanVmdkLoadTestSpec[]",
"vim.version.version9",
0,
None,
),
),
(
0,
"vim.host.VsanVmdkLoadTestResult[]",
"vim.host.VsanVmdkLoadTestResult[]",
),
"System.Read",
None,
),
(
"queryObjectHealthSummary",
"VsanHostQueryObjectHealthSummary",
"vim.version.version9",
(
("objUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),
(
"includeObjUuids",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"localHostOnly",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.host.VsanObjectOverallHealth", "vim.host.VsanObjectOverallHealth"),
"System.Read",
None,
),
(
"getHclInfo",
"VsanGetHclInfo",
"vim.version.version9",
tuple(),
(0, "vim.host.VsanHostHclInfo", "vim.host.VsanHostHclInfo"),
"System.Read",
None,
),
(
"cleanupVmdkLoadTest",
"VsanHostCleanupVmdkLoadTest",
"vim.version.version9",
(
("runname", "string", "vim.version.version9", 0, None),
(
"specs",
"vim.host.VsanVmdkLoadTestSpec[]",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "string", "string"),
"System.Read",
None,
),
(
"waitForVsanHealthGenerationIdChange",
"VsanWaitForVsanHealthGenerationIdChange",
"vim.version.version9",
(("timeout", "int", "vim.version.version9", 0, None),),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"stopProactiveRebalance",
"VsanStopProactiveRebalance",
"vim.version.version9",
tuple(),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"repairImmediateObjects",
"VsanHostRepairImmediateObjects",
"vim.version.version9",
(
("uuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),
("repairType", "string", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(0, "vim.host.VsanRepairObjectsResult", "vim.host.VsanRepairObjectsResult"),
"System.Read",
None,
),
(
"prepareVmdkLoadTest",
"VsanHostPrepareVmdkLoadTest",
"vim.version.version9",
(
("runname", "string", "vim.version.version9", 0, None),
(
"specs",
"vim.host.VsanVmdkLoadTestSpec[]",
"vim.version.version9",
0,
None,
),
),
(0, "string", "string"),
"System.Read",
None,
),
(
"queryRunIperfServer",
"VsanHostQueryRunIperfServer",
"vim.version.version9",
(
("multicast", "boolean", "vim.version.version9", 0, None),
("serverIp", "string", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(
0,
"vim.host.VsanNetworkLoadTestResult",
"vim.host.VsanNetworkLoadTestResult",
),
"System.Read",
None,
),
(
"queryCheckLimits",
"VsanHostQueryCheckLimits",
"vim.version.version9",
tuple(),
(0, "vim.host.VsanLimitHealthResult", "vim.host.VsanLimitHealthResult"),
"System.Read",
None,
),
(
"getProactiveRebalanceInfo",
"VsanGetProactiveRebalanceInfo",
"vim.version.version9",
tuple(),
(
0,
"vim.host.VsanProactiveRebalanceInfoEx",
"vim.host.VsanProactiveRebalanceInfoEx",
),
"System.Read",
None,
),
(
"checkClomdLiveness",
"VsanHostClomdLiveness",
"vim.version.version9",
tuple(),
(0, "boolean", "boolean"),
"System.Read",
None,
),
],
)
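
# vim.cluster.VsanVcClusterHealthSystem: the vCenter-side cluster health
# service. It aggregates per-host checks into cluster summaries
# (queryClusterHealthSummary) and drives HCL database updates, telemetry,
# support-bundle upload, and cluster-wide rebalance and repair tasks.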
CreateManagedType(
"vim.cluster.VsanVcClusterHealthSystem",
"VsanVcClusterHealthSystem",
"vmodl.ManagedObject",
"vim.version.version9",
[],
[
(
"queryClusterCreateVmHealthHistoryTest",
"VsanQueryVcClusterCreateVmHealthHistoryTest",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("count", "int", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(
0 | F_OPTIONAL,
"vim.cluster.VsanClusterCreateVmHealthTestResult[]",
"vim.cluster.VsanClusterCreateVmHealthTestResult[]",
),
"System.Read",
None,
),
(
"setLogLevel",
"VsanHealthSetLogLevel",
"vim.version.version9",
(
(
"level",
"vim.cluster.VsanHealthLogLevelEnum",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "void", "void"),
"System.Read",
None,
),
(
"testVsanClusterTelemetryProxy",
"VsanHealthTestVsanClusterTelemetryProxy",
"vim.version.version9",
(
(
"proxyConfig",
"vim.cluster.VsanClusterTelemetryProxyConfig",
"vim.version.version9",
0,
None,
),
),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"uploadHclDb",
"VsanVcUploadHclDb",
"vim.version.version9",
(("db", "string", "vim.version.version9", 0, None),),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"updateHclDbFromWeb",
"VsanVcUpdateHclDbFromWeb",
"vim.version.version9",
(("url", "string", "vim.version.version9", 0 | F_OPTIONAL, None),),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"repairClusterObjectsImmediate",
"VsanHealthRepairClusterObjectsImmediate",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("uuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"queryClusterNetworkPerfTest",
"VsanQueryVcClusterNetworkPerfTest",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("multicast", "boolean", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterNetworkLoadTestResult",
"vim.cluster.VsanClusterNetworkLoadTestResult",
),
"System.Read",
None,
),
(
"queryClusterVmdkLoadHistoryTest",
"VsanQueryVcClusterVmdkLoadHistoryTest",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("count", "int", "vim.version.version9", 0 | F_OPTIONAL, None),
("taskId", "string", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(
0 | F_OPTIONAL,
"vim.cluster.VsanClusterVmdkLoadTestResult[]",
"vim.cluster.VsanClusterVmdkLoadTestResult[]",
),
"System.Read",
None,
),
(
"queryVsanClusterHealthCheckInterval",
"VsanHealthQueryVsanClusterHealthCheckInterval",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
),
(0, "int", "int"),
"System.Read",
None,
),
(
"queryClusterCreateVmHealthTest",
"VsanQueryVcClusterCreateVmHealthTest",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("timeout", "int", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterCreateVmHealthTestResult",
"vim.cluster.VsanClusterCreateVmHealthTestResult",
),
"System.Read",
None,
),
(
"getClusterHclInfo",
"VsanVcClusterGetHclInfo",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
(
"includeHostsResult",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.cluster.VsanClusterHclInfo", "vim.cluster.VsanClusterHclInfo"),
"System.Read",
None,
),
(
"queryAttachToSrHistory",
"VsanQueryAttachToSrHistory",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("count", "int", "vim.version.version9", 0 | F_OPTIONAL, None),
("taskId", "string", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(
0 | F_OPTIONAL,
"vim.cluster.VsanAttachToSrOperation[]",
"vim.cluster.VsanAttachToSrOperation[]",
),
"System.Read",
None,
),
(
"rebalanceCluster",
"VsanRebalanceCluster",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
(
"targetHosts",
"vim.HostSystem[]",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"runVmdkLoadTest",
"VsanVcClusterRunVmdkLoadTest",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("runname", "string", "vim.version.version9", 0, None),
("durationSec", "int", "vim.version.version9", 0 | F_OPTIONAL, None),
(
"specs",
"vim.host.VsanVmdkLoadTestSpec[]",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
("action", "string", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"sendVsanTelemetry",
"VsanHealthSendVsanTelemetry",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
),
(0, "void", "void"),
"System.Read",
None,
),
(
"queryClusterNetworkPerfHistoryTest",
"VsanQueryVcClusterNetworkPerfHistoryTest",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("count", "int", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(
0 | F_OPTIONAL,
"vim.cluster.VsanClusterNetworkLoadTestResult[]",
"vim.cluster.VsanClusterNetworkLoadTestResult[]",
),
"System.Read",
None,
),
(
"queryClusterHealthSummary",
"VsanQueryVcClusterHealthSummary",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
(
"vmCreateTimeout",
"int",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
("objUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),
(
"includeObjUuids",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
("fields", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),
(
"fetchFromCache",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(
0,
"vim.cluster.VsanClusterHealthSummary",
"vim.cluster.VsanClusterHealthSummary",
),
"System.Read",
None,
),
(
"stopRebalanceCluster",
"VsanStopRebalanceCluster",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
(
"targetHosts",
"vim.HostSystem[]",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"queryVsanClusterHealthConfig",
"VsanHealthQueryVsanClusterHealthConfig",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
),
(
0,
"vim.cluster.VsanClusterHealthConfigs",
"vim.cluster.VsanClusterHealthConfigs",
),
"System.Read",
None,
),
(
"attachVsanSupportBundleToSr",
"VsanAttachVsanSupportBundleToSr",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
("srNumber", "string", "vim.version.version9", 0, None),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"queryClusterVmdkWorkloadTypes",
"VsanQueryVcClusterVmdkWorkloadTypes",
"vim.version.version9",
tuple(),
(
0,
"vim.cluster.VsanStorageWorkloadType[]",
"vim.cluster.VsanStorageWorkloadType[]",
),
"System.Read",
None,
),
(
"queryVerifyClusterHealthSystemVersions",
"VsanVcClusterQueryVerifyHealthSystemVersions",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
),
(
0,
"vim.cluster.VsanClusterHealthSystemVersionResult",
"vim.cluster.VsanClusterHealthSystemVersionResult",
),
"System.Read",
None,
),
(
"isRebalanceRunning",
"VsanHealthIsRebalanceRunning",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
(
"targetHosts",
"vim.HostSystem[]",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"setVsanClusterHealthCheckInterval",
"VsanHealthSetVsanClusterHealthCheckInterval",
"vim.version.version9",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version9",
0,
None,
),
(
"vsanClusterHealthCheckInterval",
"int",
"vim.version.version9",
0,
None,
),
),
(0, "void", "void"),
"System.Read",
None,
),
],
)
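
# vim.cluster.VsanVcStretchedClusterSystem: vCenter-side stretched-cluster
# management, e.g. converting a cluster to stretched mode, managing the
# witness host, and setting the preferred fault domain.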
CreateManagedType(
"vim.cluster.VsanVcStretchedClusterSystem",
"VimClusterVsanVcStretchedClusterSystem",
"vmodl.ManagedObject",
"vim.version.version10",
[],
[
(
"isWitnessHost",
"VSANVcIsWitnessHost",
"vim.version.version10",
(("host", "vim.HostSystem", "vim.version.version10", 0, None),),
(0, "boolean", "boolean"),
"System.Read",
None,
),
(
"setPreferredFaultDomain",
"VSANVcSetPreferredFaultDomain",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
("preferredFd", "string", "vim.version.version10", 0, None),
(
"witnessHost",
"vim.HostSystem",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"getPreferredFaultDomain",
"VSANVcGetPreferredFaultDomain",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
),
(
0 | F_OPTIONAL,
"vim.cluster.VSANPreferredFaultDomainInfo",
"vim.cluster.VSANPreferredFaultDomainInfo",
),
"System.Read",
None,
),
(
"getWitnessHosts",
"VSANVcGetWitnessHosts",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
),
(
0 | F_OPTIONAL,
"vim.cluster.VSANWitnessHostInfo[]",
"vim.cluster.VSANWitnessHostInfo[]",
),
"System.Read",
None,
),
(
"retrieveStretchedClusterVcCapability",
"VSANVcRetrieveStretchedClusterVcCapability",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
(
"verifyAllConnected",
"boolean",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(
0 | F_OPTIONAL,
"vim.cluster.VSANStretchedClusterCapability[]",
"vim.cluster.VSANStretchedClusterCapability[]",
),
"System.Read",
None,
),
(
"convertToStretchedCluster",
"VSANVcConvertToStretchedCluster",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
(
"faultDomainConfig",
"vim.cluster.VSANStretchedClusterFaultDomainConfig",
"vim.version.version10",
0,
None,
),
("witnessHost", "vim.HostSystem", "vim.version.version10", 0, None),
("preferredFd", "string", "vim.version.version10", 0, None),
(
"diskMapping",
"vim.vsan.host.DiskMapping",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
(
"removeWitnessHost",
"VSANVcRemoveWitnessHost",
"vim.version.version10",
(
(
"cluster",
"vim.ClusterComputeResource",
"vim.version.version10",
0,
None,
),
(
"witnessHost",
"vim.HostSystem",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
(
"witnessAddress",
"string",
"vim.version.version10",
0 | F_OPTIONAL,
None,
),
),
(0, "vim.Task", "vim.Task"),
"System.Read",
None,
),
],
)
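
# vim.cluster.VsanClusterHealthSystem: a variant of the cluster health
# service that connects to ESXi hosts directly; note that every method takes
# a host list and the ESX root password rather than a cluster reference.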
CreateManagedType(
"vim.cluster.VsanClusterHealthSystem",
"VsanClusterHealthSystem",
"vmodl.ManagedObject",
"vim.version.version9",
[],
[
(
"queryPhysicalDiskHealthSummary",
"VsanQueryClusterPhysicalDiskHealthSummary",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
),
(
0,
"vim.host.VsanPhysicalDiskHealthSummary[]",
"vim.host.VsanPhysicalDiskHealthSummary[]",
),
"System.Read",
None,
),
(
"queryClusterNetworkPerfTest",
"VsanQueryClusterNetworkPerfTest",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
("multicast", "boolean", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterNetworkLoadTestResult",
"vim.cluster.VsanClusterNetworkLoadTestResult",
),
"System.Read",
None,
),
(
"queryAdvCfgSync",
"VsanQueryClusterAdvCfgSync",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterAdvCfgSyncResult[]",
"vim.cluster.VsanClusterAdvCfgSyncResult[]",
),
"System.Read",
None,
),
(
"repairClusterImmediateObjects",
"VsanRepairClusterImmediateObjects",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
("uuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(
0,
"vim.cluster.VsanClusterHealthSystemObjectsRepairResult",
"vim.cluster.VsanClusterHealthSystemObjectsRepairResult",
),
"System.Read",
None,
),
(
"queryVerifyClusterNetworkSettings",
"VsanQueryVerifyClusterNetworkSettings",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterNetworkHealthResult",
"vim.cluster.VsanClusterNetworkHealthResult",
),
"System.Read",
None,
),
(
"queryClusterCreateVmHealthTest",
"VsanQueryClusterCreateVmHealthTest",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
("timeout", "int", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterCreateVmHealthTestResult",
"vim.cluster.VsanClusterCreateVmHealthTestResult",
),
"System.Read",
None,
),
(
"queryClusterHealthSystemVersions",
"VsanQueryClusterHealthSystemVersions",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterHealthSystemVersionResult",
"vim.cluster.VsanClusterHealthSystemVersionResult",
),
"System.Read",
None,
),
(
"getClusterHclInfo",
"VsanClusterGetHclInfo",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
),
(0, "vim.cluster.VsanClusterHclInfo", "vim.cluster.VsanClusterHclInfo"),
"System.Read",
None,
),
(
"queryCheckLimits",
"VsanQueryClusterCheckLimits",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterLimitHealthResult",
"vim.cluster.VsanClusterLimitHealthResult",
),
"System.Read",
None,
),
(
"queryCaptureVsanPcap",
"VsanQueryClusterCaptureVsanPcap",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
("duration", "int", "vim.version.version9", 0, None),
(
"vmknic",
"vim.cluster.VsanClusterHostVmknicMapping[]",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"includeRawPcap",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"includeIgmp",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
(
"cmmdsMsgTypeFilter",
"string[]",
"vim.version.version9",
0 | F_OPTIONAL,
None,
),
("cmmdsPorts", "int[]", "vim.version.version9", 0 | F_OPTIONAL, None),
("clusterUuid", "string", "vim.version.version9", 0 | F_OPTIONAL, None),
),
(
0,
"vim.cluster.VsanVsanClusterPcapResult",
"vim.cluster.VsanVsanClusterPcapResult",
),
"System.Read",
None,
),
(
"checkClusterClomdLiveness",
"VsanCheckClusterClomdLiveness",
"vim.version.version9",
(
("hosts", "string[]", "vim.version.version9", 0, None),
("esxRootPassword", "string", "vim.version.version9", 0, None),
),
(
0,
"vim.cluster.VsanClusterClomdLivenessResult",
"vim.cluster.VsanClusterClomdLivenessResult",
),
"System.Read",
None,
),
],
)
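
# The remaining calls register the data (value) types referenced by the
# methods above. CreateDataType(vmodlName, wsdlName, parent, version,
# properties) defines a vmodl.DynamicData subclass; each property tuple is
# (name, type, version, flags), with F_OPTIONAL marking optional fields.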
CreateDataType(
"vim.host.VSANCmmdsNodeInfo",
"VimHostVSANCmmdsNodeInfo",
"vmodl.DynamicData",
"vim.version.version10",
[
("nodeUuid", "string", "vim.version.version10", 0),
("isWitness", "boolean", "vim.version.version10", 0),
],
)
CreateDataType(
"vim.host.VsanPhysicalDiskHealth",
"VsanPhysicalDiskHealth",
"vmodl.DynamicData",
"vim.version.version9",
[
("name", "string", "vim.version.version9", 0),
("uuid", "string", "vim.version.version9", 0),
("inCmmds", "boolean", "vim.version.version9", 0),
("inVsi", "boolean", "vim.version.version9", 0),
("dedupScope", "long", "vim.version.version9", 0 | F_OPTIONAL),
("formatVersion", "int", "vim.version.version9", 0 | F_OPTIONAL),
("isAllFlash", "int", "vim.version.version9", 0 | F_OPTIONAL),
("congestionValue", "int", "vim.version.version9", 0 | F_OPTIONAL),
("congestionArea", "string", "vim.version.version9", 0 | F_OPTIONAL),
("congestionHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
("metadataHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"operationalHealthDescription",
"string",
"vim.version.version9",
0 | F_OPTIONAL,
),
("operationalHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
("dedupUsageHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
("capacityHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
("summaryHealth", "string", "vim.version.version9", 0),
("capacity", "long", "vim.version.version9", 0 | F_OPTIONAL),
("usedCapacity", "long", "vim.version.version9", 0 | F_OPTIONAL),
("reservedCapacity", "long", "vim.version.version9", 0 | F_OPTIONAL),
("totalBytes", "long", "vim.version.version9", 0 | F_OPTIONAL),
("freeBytes", "long", "vim.version.version9", 0 | F_OPTIONAL),
("hashedBytes", "long", "vim.version.version9", 0 | F_OPTIONAL),
("dedupedBytes", "long", "vim.version.version9", 0 | F_OPTIONAL),
("scsiDisk", "vim.host.ScsiDisk", "vim.version.version9", 0 | F_OPTIONAL),
("usedComponents", "long", "vim.version.version9", 0 | F_OPTIONAL),
("maxComponents", "long", "vim.version.version9", 0 | F_OPTIONAL),
("compLimitHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.vsan.DataEfficiencyConfig",
"VsanDataEfficiencyConfig",
"vmodl.DynamicData",
"vim.version.version10",
[
("dedupEnabled", "boolean", "vim.version.version10", 0),
("compressionEnabled", "boolean", "vim.version.version10", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.StorageComplianceResult",
"VsanStorageComplianceResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("checkTime", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
("profile", "string", "vim.version.version9", 0 | F_OPTIONAL),
("objectUUID", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"complianceStatus",
"vim.cluster.StorageComplianceStatus",
"vim.version.version9",
0,
),
("mismatch", "boolean", "vim.version.version9", 0),
(
"violatedPolicies",
"vim.cluster.StoragePolicyStatus[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"operationalStatus",
"vim.cluster.StorageOperationalStatus",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthGroup",
"VsanClusterHealthGroup",
"vmodl.DynamicData",
"vim.version.version9",
[
("groupId", "string", "vim.version.version9", 0),
("groupName", "string", "vim.version.version9", 0),
("groupHealth", "string", "vim.version.version9", 0),
(
"groupTests",
"vim.cluster.VsanClusterHealthTest[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"groupDetails",
"vim.cluster.VsanClusterHealthResultBase[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanSpaceUsageDetailResult",
"VsanSpaceUsageDetailResult",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"spaceUsageByObjectType",
"vim.cluster.VsanObjectSpaceSummary[]",
"vim.version.version9",
0 | F_OPTIONAL,
)
],
)
CreateDataType(
"vim.cluster.VsanAttachToSrOperation",
"VsanAttachToSrOperation",
"vmodl.DynamicData",
"vim.version.version9",
[
("task", "vim.Task", "vim.version.version9", 0 | F_OPTIONAL),
("success", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("timestamp", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
("srNumber", "string", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanObjectSpaceSummary",
"VsanObjectSpaceSummary",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"objType",
"vim.cluster.VsanObjectTypeEnum",
"vim.version.version9",
0 | F_OPTIONAL,
),
("overheadB", "long", "vim.version.version9", 0 | F_OPTIONAL),
("temporaryOverheadB", "long", "vim.version.version9", 0 | F_OPTIONAL),
("primaryCapacityB", "long", "vim.version.version9", 0 | F_OPTIONAL),
("provisionCapacityB", "long", "vim.version.version9", 0 | F_OPTIONAL),
("reservedCapacityB", "long", "vim.version.version9", 0 | F_OPTIONAL),
("overReservedB", "long", "vim.version.version9", 0 | F_OPTIONAL),
("physicalUsedB", "long", "vim.version.version9", 0 | F_OPTIONAL),
("usedB", "long", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterHclInfo",
"VsanClusterHclInfo",
"vmodl.DynamicData",
"vim.version.version9",
[
("hclDbLastUpdate", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
("hclDbAgeHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"hostResults",
"vim.host.VsanHostHclInfo[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanPerfGraph",
"VsanPerfGraph",
"vmodl.DynamicData",
"vim.version.version9",
[
("id", "string", "vim.version.version9", 0),
("metrics", "vim.cluster.VsanPerfMetricId[]", "vim.version.version9", 0),
("unit", "vim.cluster.VsanPerfStatsUnitType", "vim.version.version9", 0),
(
"threshold",
"vim.cluster.VsanPerfThreshold",
"vim.version.version9",
0 | F_OPTIONAL,
),
("name", "string", "vim.version.version9", 0 | F_OPTIONAL),
("description", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthResultBase",
"VsanClusterHealthResultBase",
"vmodl.DynamicData",
"vim.version.version9",
[("label", "string", "vim.version.version9", 0 | F_OPTIONAL)],
)
CreateDataType(
"vim.cluster.VsanPerfTopEntity",
"VsanPerfTopEntity",
"vmodl.DynamicData",
"vim.version.version9",
[
("entityRefId", "string", "vim.version.version9", 0),
("value", "string", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanClusterBalancePerDiskInfo",
"VsanClusterBalancePerDiskInfo",
"vmodl.DynamicData",
"vim.version.version9",
[
("uuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
("fullness", "long", "vim.version.version9", 0),
("variance", "long", "vim.version.version9", 0),
("fullnessAboveThreshold", "long", "vim.version.version9", 0),
("dataToMoveB", "long", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthTest",
"VsanClusterHealthTest",
"vmodl.DynamicData",
"vim.version.version9",
[
("testId", "string", "vim.version.version9", 0 | F_OPTIONAL),
("testName", "string", "vim.version.version9", 0 | F_OPTIONAL),
("testDescription", "string", "vim.version.version9", 0 | F_OPTIONAL),
("testShortDescription", "string", "vim.version.version9", 0 | F_OPTIONAL),
("testHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"testDetails",
"vim.cluster.VsanClusterHealthResultBase[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"testActions",
"vim.cluster.VsanClusterHealthAction[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.StoragePolicyStatus",
"VsanStoragePolicyStatus",
"vmodl.DynamicData",
"vim.version.version9",
[
("id", "string", "vim.version.version9", 0 | F_OPTIONAL),
("expectedValue", "string", "vim.version.version9", 0 | F_OPTIONAL),
("currentValue", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanPerfMemberInfo",
"VsanPerfMemberInfo",
"vmodl.DynamicData",
"vim.version.version9",
[("thumbprint", "string", "vim.version.version9", 0)],
)
CreateDataType(
"vim.cluster.VsanPerfMetricId",
"VsanPerfMetricId",
"vmodl.DynamicData",
"vim.version.version9",
[
("label", "string", "vim.version.version9", 0),
("group", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"rollupType",
"vim.cluster.VsanPerfSummaryType",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"statsType",
"vim.cluster.VsanPerfStatsType",
"vim.version.version9",
0 | F_OPTIONAL,
),
("name", "string", "vim.version.version9", 0 | F_OPTIONAL),
("description", "string", "vim.version.version9", 0 | F_OPTIONAL),
("metricsCollectInterval", "int", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VSANWitnessHostInfo",
"VimClusterVSANWitnessHostInfo",
"vmodl.DynamicData",
"vim.version.version10",
[
("nodeUuid", "string", "vim.version.version10", 0),
("faultDomainName", "string", "vim.version.version10", 0 | F_OPTIONAL),
("preferredFdName", "string", "vim.version.version10", 0 | F_OPTIONAL),
("preferredFdUuid", "string", "vim.version.version10", 0 | F_OPTIONAL),
("unicastAgentAddr", "string", "vim.version.version10", 0 | F_OPTIONAL),
("host", "vim.HostSystem", "vim.version.version10", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanHealthExtMgmtPreCheckResult",
"VsanHealthExtMgmtPreCheckResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("overallResult", "boolean", "vim.version.version9", 0),
("esxVersionCheckPassed", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("drsCheckPassed", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("eamConnectionCheckPassed", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("installStateCheckPassed", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("results", "vim.cluster.VsanClusterHealthTest[]", "vim.version.version9", 0),
("vumRegistered", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.vsan.upgradesystem.HostWithHybridDiskgroupIssue",
"VsanHostWithHybridDiskgroupIssue",
"vim.VsanUpgradeSystem.PreflightCheckIssue",
"vim.version.version10",
[("hosts", "vim.HostSystem[]", "vim.version.version10", 0)],
)
CreateDataType(
"vim.cluster.VsanPerfMetricSeriesCSV",
"VsanPerfMetricSeriesCSV",
"vmodl.DynamicData",
"vim.version.version9",
[
("metricId", "vim.cluster.VsanPerfMetricId", "vim.version.version9", 0),
("values", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanPerfQuerySpec",
"VsanPerfQuerySpec",
"vmodl.DynamicData",
"vim.version.version9",
[
("entityRefId", "string", "vim.version.version9", 0),
("startTime", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
("endTime", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
("group", "string", "vim.version.version9", 0 | F_OPTIONAL),
("labels", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("interval", "int", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanRepairObjectsResult",
"VsanRepairObjectsResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("inQueueObjects", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
(
"failedRepairObjects",
"vim.host.VsanFailedRepairObjectResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("notInQueueObjects", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterNetworkPartitionInfo",
"VsanClusterNetworkPartitionInfo",
"vmodl.DynamicData",
"vim.version.version9",
[("hosts", "string[]", "vim.version.version9", 0 | F_OPTIONAL)],
)
CreateDataType(
"vim.vsan.upgradesystem.MixedEsxVersionIssue",
"VsanMixedEsxVersionIssue",
"vim.VsanUpgradeSystem.PreflightCheckIssue",
"vim.version.version10",
[],
)
CreateDataType(
"vim.cluster.VsanClusterClomdLivenessResult",
"VsanClusterClomdLivenessResult",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"clomdLivenessResult",
"vim.cluster.VsanHostClomdLivenessResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("issueFound", "boolean", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanVsanClusterPcapResult",
"VsanVsanClusterPcapResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("pkts", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
(
"groups",
"vim.cluster.VsanVsanClusterPcapGroup[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("issues", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
(
"hostResults",
"vim.host.VsanVsanPcapResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanPerfMasterInformation",
"VsanPerfMasterInformation",
"vmodl.DynamicData",
"vim.version.version9",
[
("secSinceLastStatsWrite", "long", "vim.version.version9", 0 | F_OPTIONAL),
("secSinceLastStatsCollect", "long", "vim.version.version9", 0 | F_OPTIONAL),
("statsIntervalSec", "long", "vim.version.version9", 0),
(
"collectionFailureHostUuids",
"string[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("renamedStatsDirectories", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("statsDirectoryPercentFree", "long", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanHostCreateVmHealthTestResult",
"VsanHostCreateVmHealthTestResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0),
("state", "string", "vim.version.version9", 0),
("fault", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanDiskFormatConversionCheckResult",
"VsanDiskFormatConversionCheckResult",
"vim.VsanUpgradeSystem.PreflightCheckResult",
"vim.version.version10",
[
("isSupported", "boolean", "vim.version.version10", 0),
("targetVersion", "int", "vim.version.version10", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthSystemObjectsRepairResult",
"VsanClusterHealthSystemObjectsRepairResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("inRepairingQueueObjects", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
(
"failedRepairObjects",
"vim.host.VsanFailedRepairObjectResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("issueFound", "boolean", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.host.VsanHostHclInfo",
"VsanHostHclInfo",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0),
("hclChecked", "boolean", "vim.version.version9", 0),
("releaseName", "string", "vim.version.version9", 0 | F_OPTIONAL),
("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL),
(
"controllers",
"vim.host.VsanHclControllerInfo[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VSANStretchedClusterCapability",
"VimClusterVSANStretchedClusterCapability",
"vmodl.DynamicData",
"vim.version.version10",
[
("hostMoId", "string", "vim.version.version10", 0),
("connStatus", "string", "vim.version.version10", 0 | F_OPTIONAL),
("isSupported", "boolean", "vim.version.version10", 0 | F_OPTIONAL),
(
"hostCapability",
"vim.host.VSANStretchedClusterHostCapability",
"vim.version.version10",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanDiskMappingsConfigSpec",
"VimClusterVsanDiskMappingsConfigSpec",
"vmodl.DynamicData",
"vim.version.version10",
[
(
"hostDiskMappings",
"vim.cluster.VsanHostDiskMapping[]",
"vim.version.version10",
0,
)
],
)
CreateDataType(
"vim.host.VsanHostVmdkLoadTestResult",
"VsanHostVmdkLoadTestResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0),
("issueFound", "boolean", "vim.version.version9", 0),
("faultMessage", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"vmdkResults",
"vim.host.VsanVmdkLoadTestResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.vsan.ReconfigSpec",
"VimVsanReconfigSpec",
"vmodl.DynamicData",
"vim.version.version10",
[
(
"vsanClusterConfig",
"vim.vsan.cluster.ConfigInfo",
"vim.version.version10",
0 | F_OPTIONAL,
),
(
"dataEfficiencyConfig",
"vim.vsan.DataEfficiencyConfig",
"vim.version.version10",
0 | F_OPTIONAL,
),
(
"diskMappingSpec",
"vim.cluster.VsanDiskMappingsConfigSpec",
"vim.version.version10",
0 | F_OPTIONAL,
),
(
"faultDomainsSpec",
"vim.cluster.VsanFaultDomainsConfigSpec",
"vim.version.version10",
0 | F_OPTIONAL,
),
("modify", "boolean", "vim.version.version10", 0),
("allowReducedRedundancy", "boolean", "vim.version.version10", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanNetworkPeerHealthResult",
"VsanNetworkPeerHealthResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("peer", "string", "vim.version.version9", 0 | F_OPTIONAL),
("peerHostname", "string", "vim.version.version9", 0 | F_OPTIONAL),
("peerVmknicName", "string", "vim.version.version9", 0 | F_OPTIONAL),
("smallPingTestSuccessPct", "int", "vim.version.version9", 0 | F_OPTIONAL),
("largePingTestSuccessPct", "int", "vim.version.version9", 0 | F_OPTIONAL),
("maxLatencyUs", "long", "vim.version.version9", 0 | F_OPTIONAL),
("onSameIpSubnet", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("sourceVmknicName", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanWitnessSpec",
"VimClusterVsanWitnessSpec",
"vmodl.DynamicData",
"vim.version.version10",
[
("host", "vim.HostSystem", "vim.version.version10", 0),
("preferredFaultDomainName", "string", "vim.version.version10", 0),
(
"diskMapping",
"vim.vsan.host.DiskMapping",
"vim.version.version10",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.vsan.host.DiskMappingCreationSpec",
"VimVsanHostDiskMappingCreationSpec",
"vmodl.DynamicData",
"vim.version.version10",
[
("host", "vim.HostSystem", "vim.version.version10", 0),
("cacheDisks", "vim.host.ScsiDisk[]", "vim.version.version10", 0 | F_OPTIONAL),
("capacityDisks", "vim.host.ScsiDisk[]", "vim.version.version10", 0),
(
"creationType",
"vim.vsan.host.DiskMappingCreationType",
"vim.version.version10",
0,
),
],
)
CreateDataType(
"vim.host.VsanLimitHealthResult",
"VsanLimitHealthResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL),
("issueFound", "boolean", "vim.version.version9", 0),
("maxComponents", "int", "vim.version.version9", 0),
("freeComponents", "int", "vim.version.version9", 0),
("componentLimitHealth", "string", "vim.version.version9", 0),
("lowestFreeDiskSpacePct", "int", "vim.version.version9", 0),
("usedDiskSpaceB", "long", "vim.version.version9", 0),
("totalDiskSpaceB", "long", "vim.version.version9", 0),
("diskFreeSpaceHealth", "string", "vim.version.version9", 0),
("reservedRcSizeB", "long", "vim.version.version9", 0),
("totalRcSizeB", "long", "vim.version.version9", 0),
("rcFreeReservationHealth", "string", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VSANPreferredFaultDomainInfo",
"VimClusterVSANPreferredFaultDomainInfo",
"vmodl.DynamicData",
"vim.version.version10",
[
("preferredFaultDomainName", "string", "vim.version.version10", 0),
("preferredFaultDomainId", "string", "vim.version.version10", 0),
],
)
CreateDataType(
"vim.host.VsanObjectOverallHealth",
"VsanObjectOverallHealth",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"objectHealthDetail",
"vim.host.VsanObjectHealth[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("objectVersionCompliance", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanVsanClusterPcapGroup",
"VsanVsanClusterPcapGroup",
"vmodl.DynamicData",
"vim.version.version9",
[
("master", "string", "vim.version.version9", 0),
("members", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthResultColumnInfo",
"VsanClusterHealthResultColumnInfo",
"vmodl.DynamicData",
"vim.version.version9",
[
("label", "string", "vim.version.version9", 0),
("type", "string", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanClusterNetworkHealthResult",
"VsanClusterNetworkHealthResult",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"hostResults",
"vim.host.VsanNetworkHealthResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("issueFound", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("vsanVmknicPresent", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("matchingMulticastConfig", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("matchingIpSubnets", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("pingTestSuccess", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("largePingTestSuccess", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("potentialMulticastIssue", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("otherHostsInVsanCluster", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
(
"partitions",
"vim.cluster.VsanClusterNetworkPartitionInfo[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("hostsWithVsanDisabled", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("hostsDisconnected", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("hostsCommFailure", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
(
"hostsInEsxMaintenanceMode",
"string[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"hostsInVsanMaintenanceMode",
"string[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"infoAboutUnexpectedHosts",
"vim.host.VsanQueryResultHostInfo[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanPerfNodeInformation",
"VsanPerfNodeInformation",
"vmodl.DynamicData",
"vim.version.version9",
[
("version", "string", "vim.version.version9", 0),
("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL),
("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL),
("isCmmdsMaster", "boolean", "vim.version.version9", 0),
("isStatsMaster", "boolean", "vim.version.version9", 0),
("vsanMasterUuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
("vsanNodeUuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"masterInfo",
"vim.cluster.VsanPerfMasterInformation",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanPerfEntityMetricCSV",
"VsanPerfEntityMetricCSV",
"vmodl.DynamicData",
"vim.version.version9",
[
("entityRefId", "string", "vim.version.version9", 0),
("sampleInfo", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"value",
"vim.cluster.VsanPerfMetricSeriesCSV[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.vsan.upgradesystem.DiskUnhealthIssue",
"VsanDiskUnhealthIssue",
"vim.VsanUpgradeSystem.PreflightCheckIssue",
"vim.version.version10",
[("uuids", "string[]", "vim.version.version10", 0)],
)
CreateDataType(
"vim.cluster.VsanFaultDomainSpec",
"VimClusterVsanFaultDomainSpec",
"vmodl.DynamicData",
"vim.version.version10",
[
("hosts", "vim.HostSystem[]", "vim.version.version10", 0),
("name", "string", "vim.version.version10", 0),
],
)
CreateDataType(
"vim.vsan.upgradesystem.ObjectInaccessibleIssue",
"VsanObjectInaccessibleIssue",
"vim.VsanUpgradeSystem.PreflightCheckIssue",
"vim.version.version10",
[("uuids", "string[]", "vim.version.version10", 0)],
)
CreateDataType(
"vim.cluster.VsanDiskFormatConversionSpec",
"VsanDiskFormatConversionSpec",
"vmodl.DynamicData",
"vim.version.version10",
[
(
"dataEfficiencyConfig",
"vim.vsan.DataEfficiencyConfig",
"vim.version.version10",
0 | F_OPTIONAL,
)
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthAction",
"VsanClusterHealthAction",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"actionId",
"vim.cluster.VsanClusterHealthActionIdEnum",
"vim.version.version9",
0,
),
("actionLabel", "vmodl.LocalizableMessage", "vim.version.version9", 0),
("actionDescription", "vmodl.LocalizableMessage", "vim.version.version9", 0),
("enabled", "boolean", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthSystemVersionResult",
"VsanClusterHealthSystemVersionResult",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"hostResults",
"vim.cluster.VsanHostHealthSystemVersionResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("vcVersion", "string", "vim.version.version9", 0 | F_OPTIONAL),
("issueFound", "boolean", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthResultRow",
"VsanClusterHealthResultRow",
"vmodl.DynamicData",
"vim.version.version9",
[
("values", "string[]", "vim.version.version9", 0),
(
"nestedRows",
"vim.cluster.VsanClusterHealthResultRow[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthSystemStatusResult",
"VsanClusterHealthSystemStatusResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("status", "string", "vim.version.version9", 0),
("goalState", "string", "vim.version.version9", 0),
("untrackedHosts", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
(
"trackedHostsStatus",
"vim.host.VsanHostHealthSystemStatusResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanHostDiskMapping",
"VimClusterVsanHostDiskMapping",
"vmodl.DynamicData",
"vim.version.version10",
[
("host", "vim.HostSystem", "vim.version.version10", 0),
("cacheDisks", "vim.host.ScsiDisk[]", "vim.version.version10", 0 | F_OPTIONAL),
("capacityDisks", "vim.host.ScsiDisk[]", "vim.version.version10", 0),
("type", "vim.cluster.VsanDiskGroupCreationType", "vim.version.version10", 0),
],
)
CreateDataType(
"vim.cluster.VSANStretchedClusterFaultDomainConfig",
"VimClusterVSANStretchedClusterFaultDomainConfig",
"vmodl.DynamicData",
"vim.version.version10",
[
("firstFdName", "string", "vim.version.version10", 0),
("firstFdHosts", "vim.HostSystem[]", "vim.version.version10", 0),
("secondFdName", "string", "vim.version.version10", 0),
("secondFdHosts", "vim.HostSystem[]", "vim.version.version10", 0),
],
)
CreateDataType(
"vim.host.VSANStretchedClusterHostInfo",
"VimHostVSANStretchedClusterHostInfo",
"vmodl.DynamicData",
"vim.version.version10",
[
("nodeInfo", "vim.host.VSANCmmdsNodeInfo", "vim.version.version10", 0),
(
"faultDomainInfo",
"vim.host.VSANCmmdsFaultDomainInfo",
"vim.version.version10",
0 | F_OPTIONAL,
),
(
"preferredFaultDomainInfo",
"vim.host.VSANCmmdsPreferredFaultDomainInfo",
"vim.version.version10",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.vsan.upgradesystem.HigherObjectsPresentDuringDowngradeIssue",
"VsanHigherObjectsPresentDuringDowngradeIssue",
"vim.VsanUpgradeSystem.PreflightCheckIssue",
"vim.version.version10",
[("uuids", "string[]", "vim.version.version10", 0)],
)
CreateDataType(
"vim.host.VSANCmmdsFaultDomainInfo",
"VimHostVSANCmmdsFaultDomainInfo",
"vmodl.DynamicData",
"vim.version.version10",
[
("faultDomainId", "string", "vim.version.version10", 0),
("faultDomainName", "string", "vim.version.version10", 0),
],
)
CreateDataType(
"vim.fault.VsanNodeNotMaster",
"VsanNodeNotMaster",
"vim.fault.VimFault",
"vim.version.version9",
[
("vsanMasterUuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"cmmdsMasterButNotStatsMaster",
"boolean",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanHostHealthSystemVersionResult",
"VsanHostHealthSystemVersionResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0),
("version", "string", "vim.version.version9", 0 | F_OPTIONAL),
("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthConfigs",
"VsanClusterHealthConfigs",
"vmodl.DynamicData",
"vim.version.version9",
[
("enableVsanTelemetry", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("vsanTelemetryInterval", "int", "vim.version.version9", 0 | F_OPTIONAL),
(
"vsanTelemetryProxy",
"vim.cluster.VsanClusterTelemetryProxyConfig",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"configs",
"vim.cluster.VsanClusterHealthResultKeyValuePair[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterWhatifHostFailuresResult",
"VsanClusterWhatifHostFailuresResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("numFailures", "long", "vim.version.version9", 0),
("totalUsedCapacityB", "long", "vim.version.version9", 0),
("totalCapacityB", "long", "vim.version.version9", 0),
("totalRcReservationB", "long", "vim.version.version9", 0),
("totalRcSizeB", "long", "vim.version.version9", 0),
("usedComponents", "long", "vim.version.version9", 0),
("totalComponents", "long", "vim.version.version9", 0),
("componentLimitHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
("diskFreeSpaceHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
("rcFreeReservationHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanObjectIdentityAndHealth",
"VsanObjectIdentityAndHealth",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"identities",
"vim.cluster.VsanObjectIdentity[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"health",
"vim.host.VsanObjectOverallHealth",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"spaceSummary",
"vim.cluster.VsanObjectSpaceSummary[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("rawData", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanHclControllerInfo",
"VsanHclControllerInfo",
"vmodl.DynamicData",
"vim.version.version9",
[
("deviceName", "string", "vim.version.version9", 0),
("deviceDisplayName", "string", "vim.version.version9", 0 | F_OPTIONAL),
("driverName", "string", "vim.version.version9", 0 | F_OPTIONAL),
("driverVersion", "string", "vim.version.version9", 0 | F_OPTIONAL),
("vendorId", "long", "vim.version.version9", 0 | F_OPTIONAL),
("deviceId", "long", "vim.version.version9", 0 | F_OPTIONAL),
("subVendorId", "long", "vim.version.version9", 0 | F_OPTIONAL),
("subDeviceId", "long", "vim.version.version9", 0 | F_OPTIONAL),
("extraInfo", "vim.KeyValue[]", "vim.version.version9", 0 | F_OPTIONAL),
("deviceOnHcl", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("releaseSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("releasesOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("driverVersionsOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("driverVersionSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("fwVersionSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("fwVersionOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("cacheConfigSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("cacheConfigOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("raidConfigSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("raidConfigOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("fwVersion", "string", "vim.version.version9", 0 | F_OPTIONAL),
("raidConfig", "string", "vim.version.version9", 0 | F_OPTIONAL),
("cacheConfig", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"cimProviderInfo",
"vim.host.VsanHostCimProviderInfo",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthResultKeyValuePair",
"VsanClusterHealthResultKeyValuePair",
"vmodl.DynamicData",
"vim.version.version9",
[
("key", "string", "vim.version.version9", 0 | F_OPTIONAL),
("value", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.StorageOperationalStatus",
"VsanStorageOperationalStatus",
"vmodl.DynamicData",
"vim.version.version9",
[
("healthy", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("operationETA", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
("operationProgress", "long", "vim.version.version9", 0 | F_OPTIONAL),
("transitional", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanSpaceUsage",
"VsanSpaceUsage",
"vmodl.DynamicData",
"vim.version.version9",
[
("totalCapacityB", "long", "vim.version.version9", 0),
("freeCapacityB", "long", "vim.version.version9", 0 | F_OPTIONAL),
(
"spaceOverview",
"vim.cluster.VsanObjectSpaceSummary",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"spaceDetail",
"vim.cluster.VsanSpaceUsageDetailResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthResultTable",
"VsanClusterHealthResultTable",
"vim.cluster.VsanClusterHealthResultBase",
"vim.version.version9",
[
(
"columns",
"vim.cluster.VsanClusterHealthResultColumnInfo[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"rows",
"vim.cluster.VsanClusterHealthResultRow[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterConfig",
"VsanClusterConfig",
"vmodl.DynamicData",
"vim.version.version9",
[
("config", "vim.vsan.cluster.ConfigInfo", "vim.version.version9", 0),
("name", "string", "vim.version.version9", 0),
("hosts", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.vsan.host.VsanHostCapability",
"VimVsanHostVsanHostCapability",
"vmodl.DynamicData",
"vim.version.version10",
[
("host", "vim.HostSystem", "vim.version.version10", 0),
("isSupported", "boolean", "vim.version.version10", 0),
("isLicensed", "boolean", "vim.version.version10", 0),
],
)
CreateDataType(
"vim.cluster.VsanPerfThreshold",
"VsanPerfThreshold",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"direction",
"vim.cluster.VsanPerfThresholdDirectionType",
"vim.version.version9",
0,
),
("yellow", "string", "vim.version.version9", 0 | F_OPTIONAL),
("red", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanNetworkHealthResult",
"VsanNetworkHealthResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("host", "vim.HostSystem", "vim.version.version9", 0 | F_OPTIONAL),
("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL),
("vsanVmknicPresent", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("ipSubnets", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("issueFound", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
(
"peerHealth",
"vim.host.VsanNetworkPeerHealthResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("multicastConfig", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.vsan.ConfigInfoEx",
"VsanConfigInfoEx",
"vim.vsan.cluster.ConfigInfo",
"vim.version.version10",
[
(
"dataEfficiencyConfig",
"vim.vsan.DataEfficiencyConfig",
"vim.version.version10",
0 | F_OPTIONAL,
)
],
)
CreateDataType(
"vim.host.VsanVmdkLoadTestResult",
"VsanVmdkLoadTestResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("success", "boolean", "vim.version.version9", 0),
("faultMessage", "string", "vim.version.version9", 0 | F_OPTIONAL),
("spec", "vim.host.VsanVmdkLoadTestSpec", "vim.version.version9", 0),
("actualDurationSec", "int", "vim.version.version9", 0 | F_OPTIONAL),
("totalBytes", "long", "vim.version.version9", 0 | F_OPTIONAL),
("iops", "long", "vim.version.version9", 0 | F_OPTIONAL),
("tputBps", "long", "vim.version.version9", 0 | F_OPTIONAL),
("avgLatencyUs", "long", "vim.version.version9", 0 | F_OPTIONAL),
("maxLatencyUs", "long", "vim.version.version9", 0 | F_OPTIONAL),
("numIoAboveLatencyThreshold", "long", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterVMsHealthOverallResult",
"VsanClusterVMsHealthOverAllResult",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"healthStateList",
"vim.cluster.VsanClusterVMsHealthSummaryResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("overallHealthState", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanHostHealthSystemStatusResult",
"VsanHostHealthSystemStatusResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0),
("status", "string", "vim.version.version9", 0),
("issues", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterAdvCfgSyncResult",
"VsanClusterAdvCfgSyncResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("inSync", "boolean", "vim.version.version9", 0),
("name", "string", "vim.version.version9", 0),
(
"hostValues",
"vim.cluster.VsanClusterAdvCfgSyncHostResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.host.VsanQueryResultHostInfo",
"VsanQueryResultHostInfo",
"vmodl.DynamicData",
"vim.version.version9",
[
("uuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
("hostnameInCmmds", "string", "vim.version.version9", 0 | F_OPTIONAL),
("vsanIpv4Addresses", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.vsan.host.DiskMapInfoEx",
"VimVsanHostDiskMapInfoEx",
"vmodl.DynamicData",
"vim.version.version10",
[
("mapping", "vim.vsan.host.DiskMapping", "vim.version.version10", 0),
("isMounted", "boolean", "vim.version.version10", 0),
("isAllFlash", "boolean", "vim.version.version10", 0),
("isDataEfficiency", "boolean", "vim.version.version10", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanVmdkLoadTestSpec",
"VsanVmdkLoadTestSpec",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"vmdkCreateSpec",
"vim.VirtualDiskManager.FileBackedVirtualDiskSpec",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"vmdkIOSpec",
"vim.host.VsanVmdkIOLoadSpec",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"vmdkIOSpecSequence",
"vim.host.VsanVmdkIOLoadSpec[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("stepDurationSec", "long", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterHealthSummary",
"VsanClusterHealthSummary",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"clusterStatus",
"vim.cluster.VsanClusterHealthSystemStatusResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
("timestamp", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
(
"clusterVersions",
"vim.cluster.VsanClusterHealthSystemVersionResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"objectHealth",
"vim.host.VsanObjectOverallHealth",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"vmHealth",
"vim.cluster.VsanClusterVMsHealthOverallResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"networkHealth",
"vim.cluster.VsanClusterNetworkHealthResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"limitHealth",
"vim.cluster.VsanClusterLimitHealthResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"advCfgSync",
"vim.cluster.VsanClusterAdvCfgSyncResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"createVmHealth",
"vim.cluster.VsanHostCreateVmHealthTestResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"physicalDisksHealth",
"vim.host.VsanPhysicalDiskHealthSummary[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"hclInfo",
"vim.cluster.VsanClusterHclInfo",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"groups",
"vim.cluster.VsanClusterHealthGroup[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("overallHealth", "string", "vim.version.version9", 0),
("overallHealthDescription", "string", "vim.version.version9", 0),
(
"clomdLiveness",
"vim.cluster.VsanClusterClomdLivenessResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"diskBalance",
"vim.cluster.VsanClusterBalanceSummary",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanPerfEntityType",
"VsanPerfEntityType",
"vmodl.DynamicData",
"vim.version.version9",
[
("name", "string", "vim.version.version9", 0),
("id", "string", "vim.version.version9", 0),
("graphs", "vim.cluster.VsanPerfGraph[]", "vim.version.version9", 0),
("description", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanNetworkLoadTestResult",
"VsanNetworkLoadTestResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0),
("status", "string", "vim.version.version9", 0 | F_OPTIONAL),
("client", "boolean", "vim.version.version9", 0),
("bandwidthBps", "long", "vim.version.version9", 0),
("totalBytes", "long", "vim.version.version9", 0),
("lostDatagrams", "long", "vim.version.version9", 0 | F_OPTIONAL),
("lossPct", "long", "vim.version.version9", 0 | F_OPTIONAL),
("sentDatagrams", "long", "vim.version.version9", 0 | F_OPTIONAL),
("jitterMs", "float", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanPhysicalDiskHealthSummary",
"VsanPhysicalDiskHealthSummary",
"vmodl.DynamicData",
"vim.version.version9",
[
("overallHealth", "string", "vim.version.version9", 0),
(
"heapsWithIssues",
"vim.host.VsanResourceHealth[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"slabsWithIssues",
"vim.host.VsanResourceHealth[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"disks",
"vim.host.VsanPhysicalDiskHealth[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"componentsWithIssues",
"vim.host.VsanResourceHealth[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL),
("hostDedupScope", "int", "vim.version.version9", 0 | F_OPTIONAL),
("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.vsan.host.VsanDiskManagementSystemCapability",
"VimVsanHostVsanDiskManagementSystemCapability",
"vmodl.DynamicData",
"vim.version.version10",
[("version", "string", "vim.version.version10", 0)],
)
CreateDataType(
"vim.host.VsanHostCimProviderInfo",
"VsanHostCimProviderInfo",
"vmodl.DynamicData",
"vim.version.version9",
[
("cimProviderSupported", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("installedCIMProvider", "string", "vim.version.version9", 0 | F_OPTIONAL),
("cimProviderOnHcl", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanObjectInformation",
"VsanObjectInformation",
"vmodl.DynamicData",
"vim.version.version9",
[
("directoryName", "string", "vim.version.version9", 0 | F_OPTIONAL),
("vsanObjectUuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
("vsanHealth", "string", "vim.version.version9", 0 | F_OPTIONAL),
("policyAttributes", "vim.KeyValue[]", "vim.version.version9", 0 | F_OPTIONAL),
("spbmProfileUuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
("spbmProfileGenerationId", "string", "vim.version.version9", 0 | F_OPTIONAL),
(
"spbmComplianceResult",
"vim.cluster.StorageComplianceResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanObjectIdentity",
"VsanObjectIdentity",
"vmodl.DynamicData",
"vim.version.version9",
[
("uuid", "string", "vim.version.version9", 0),
("type", "string", "vim.version.version9", 0),
("vmInstanceUuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
("vmNsObjectUuid", "string", "vim.version.version9", 0 | F_OPTIONAL),
("vm", "vim.VirtualMachine", "vim.version.version9", 0 | F_OPTIONAL),
("description", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanResourceHealth",
"VsanResourceHealth",
"vmodl.DynamicData",
"vim.version.version9",
[
("resource", "string", "vim.version.version9", 0),
("health", "string", "vim.version.version9", 0),
("description", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanCapability",
"VsanCapability",
"vmodl.DynamicData",
"vim.version.version10",
[
("target", "vmodl.ManagedObject", "vim.version.version10", 0 | F_OPTIONAL),
("capabilities", "string[]", "vim.version.version10", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanHostClomdLivenessResult",
"VsanHostClomdLivenessResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0),
("clomdStat", "string", "vim.version.version9", 0),
("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanObjectQuerySpec",
"VsanObjectQuerySpec",
"vmodl.DynamicData",
"vim.version.version9",
[
("uuid", "string", "vim.version.version9", 0),
("spbmProfileGenerationId", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterLimitHealthResult",
"VsanClusterLimitHealthResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("issueFound", "boolean", "vim.version.version9", 0),
("componentLimitHealth", "string", "vim.version.version9", 0),
("diskFreeSpaceHealth", "string", "vim.version.version9", 0),
("rcFreeReservationHealth", "string", "vim.version.version9", 0),
(
"hostResults",
"vim.host.VsanLimitHealthResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"whatifHostFailures",
"vim.cluster.VsanClusterWhatifHostFailuresResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
("hostsCommFailure", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanStorageWorkloadType",
"VsanStorageWorkloadType",
"vmodl.DynamicData",
"vim.version.version9",
[
("specs", "vim.host.VsanVmdkLoadTestSpec[]", "vim.version.version9", 0),
("typeId", "string", "vim.version.version9", 0),
("name", "string", "vim.version.version9", 0),
("description", "string", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanClusterAdvCfgSyncHostResult",
"VsanClusterAdvCfgSyncHostResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("hostname", "string", "vim.version.version9", 0),
("value", "string", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.vsan.upgradesystem.ObjectPolicyIssue",
"VsanObjectPolicyIssue",
"vim.VsanUpgradeSystem.PreflightCheckIssue",
"vim.version.version10",
[("uuids", "string[]", "vim.version.version10", 0)],
)
CreateDataType(
"vim.cluster.VsanPerfTopEntities",
"VsanPerfTopEntities",
"vmodl.DynamicData",
"vim.version.version9",
[
("metricId", "vim.cluster.VsanPerfMetricId", "vim.version.version9", 0),
("entities", "vim.cluster.VsanPerfTopEntity[]", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.host.VsanProactiveRebalanceInfoEx",
"VsanProactiveRebalanceInfoEx",
"vmodl.DynamicData",
"vim.version.version9",
[
("running", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
("startTs", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
("stopTs", "vmodl.DateTime", "vim.version.version9", 0 | F_OPTIONAL),
("varianceThreshold", "float", "vim.version.version9", 0 | F_OPTIONAL),
("timeThreshold", "int", "vim.version.version9", 0 | F_OPTIONAL),
("rateThreshold", "int", "vim.version.version9", 0 | F_OPTIONAL),
("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL),
("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterProactiveTestResult",
"VsanClusterProactiveTestResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("overallStatus", "string", "vim.version.version9", 0),
("overallStatusDescription", "string", "vim.version.version9", 0),
("timestamp", "vmodl.DateTime", "vim.version.version9", 0),
(
"healthTest",
"vim.cluster.VsanClusterHealthTest",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.host.VSANCmmdsPreferredFaultDomainInfo",
"VimHostVSANCmmdsPreferredFaultDomainInfo",
"vmodl.DynamicData",
"vim.version.version10",
[
("preferredFaultDomainId", "string", "vim.version.version10", 0),
("preferredFaultDomainName", "string", "vim.version.version10", 0),
],
)
CreateDataType(
"vim.cluster.VsanFaultDomainsConfigSpec",
"VimClusterVsanFaultDomainsConfigSpec",
"vmodl.DynamicData",
"vim.version.version10",
[
(
"faultDomains",
"vim.cluster.VsanFaultDomainSpec[]",
"vim.version.version10",
0,
),
(
"witness",
"vim.cluster.VsanWitnessSpec",
"vim.version.version10",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterHostVmknicMapping",
"VsanClusterHostVmknicMapping",
"vmodl.DynamicData",
"vim.version.version9",
[
("host", "string", "vim.version.version9", 0),
("vmknic", "string", "vim.version.version9", 0),
],
)
CreateDataType(
"vim.cluster.VsanClusterVmdkLoadTestResult",
"VsanClusterVmdkLoadTestResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("task", "vim.Task", "vim.version.version9", 0 | F_OPTIONAL),
(
"clusterResult",
"vim.cluster.VsanClusterProactiveTestResult",
"vim.version.version9",
0 | F_OPTIONAL,
),
(
"hostResults",
"vim.host.VsanHostVmdkLoadTestResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterVMsHealthSummaryResult",
"VsanClusterVMsHealthSummaryResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("numVMs", "int", "vim.version.version9", 0),
("state", "string", "vim.version.version9", 0 | F_OPTIONAL),
("health", "string", "vim.version.version9", 0),
("vmInstanceUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VSANStretchedClusterHostCapability",
"VimHostVSANStretchedClusterHostCapability",
"vmodl.DynamicData",
"vim.version.version10",
[("featureVersion", "string", "vim.version.version10", 0)],
)
CreateDataType(
"vim.host.VsanFailedRepairObjectResult",
"VsanFailedRepairObjectResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("uuid", "string", "vim.version.version9", 0),
("errMessage", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterCreateVmHealthTestResult",
"VsanClusterCreateVmHealthTestResult",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"clusterResult",
"vim.cluster.VsanClusterProactiveTestResult",
"vim.version.version9",
0,
),
(
"hostResults",
"vim.cluster.VsanHostCreateVmHealthTestResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.host.VsanObjectHealth",
"VsanObjectHealth",
"vmodl.DynamicData",
"vim.version.version9",
[
("numObjects", "int", "vim.version.version9", 0),
("health", "vim.host.VsanObjectHealthState", "vim.version.version9", 0),
("objUuids", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterBalanceSummary",
"VsanClusterBalanceSummary",
"vmodl.DynamicData",
"vim.version.version9",
[
("varianceThreshold", "long", "vim.version.version9", 0),
(
"disks",
"vim.cluster.VsanClusterBalancePerDiskInfo[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.cluster.VsanClusterTelemetryProxyConfig",
"VsanClusterTelemetryProxyConfig",
"vmodl.DynamicData",
"vim.version.version9",
[
("host", "string", "vim.version.version9", 0 | F_OPTIONAL),
("port", "int", "vim.version.version9", 0 | F_OPTIONAL),
("user", "string", "vim.version.version9", 0 | F_OPTIONAL),
("password", "string", "vim.version.version9", 0 | F_OPTIONAL),
("autoDiscovered", "boolean", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanVmdkIOLoadSpec",
"VsanVmdkIOLoadSpec",
"vmodl.DynamicData",
"vim.version.version9",
[
("readPct", "int", "vim.version.version9", 0),
("oio", "int", "vim.version.version9", 0),
("iosizeB", "int", "vim.version.version9", 0),
("dataSizeMb", "long", "vim.version.version9", 0),
("random", "boolean", "vim.version.version9", 0),
("startOffsetB", "long", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.host.VsanVsanPcapResult",
"VsanVsanPcapResult",
"vmodl.DynamicData",
"vim.version.version9",
[
("calltime", "float", "vim.version.version9", 0),
("vmknic", "string", "vim.version.version9", 0),
("tcpdumpFilter", "string", "vim.version.version9", 0),
("snaplen", "int", "vim.version.version9", 0),
("pkts", "string[]", "vim.version.version9", 0 | F_OPTIONAL),
("pcap", "string", "vim.version.version9", 0 | F_OPTIONAL),
("error", "vmodl.MethodFault", "vim.version.version9", 0 | F_OPTIONAL),
("hostname", "string", "vim.version.version9", 0 | F_OPTIONAL),
],
)
CreateDataType(
"vim.cluster.VsanClusterNetworkLoadTestResult",
"VsanClusterNetworkLoadTestResult",
"vmodl.DynamicData",
"vim.version.version9",
[
(
"clusterResult",
"vim.cluster.VsanClusterProactiveTestResult",
"vim.version.version9",
0,
),
(
"hostResults",
"vim.host.VsanNetworkLoadTestResult[]",
"vim.version.version9",
0 | F_OPTIONAL,
),
],
)
CreateDataType(
"vim.vsan.upgradesystem.HostPropertyRetrieveIssue",
"VsanHostPropertyRetrieveIssue",
"vim.VsanUpgradeSystem.PreflightCheckIssue",
"vim.version.version10",
[("hosts", "vim.HostSystem[]", "vim.version.version10", 0)],
)
CreateEnumType(
"vim.host.VsanObjectHealthState",
"VsanObjectHealthState",
"vim.version.version9",
[
"inaccessible",
"reducedavailabilitywithnorebuild",
"reducedavailabilitywithnorebuilddelaytimer",
"reducedavailabilitywithactiverebuild",
"datamove",
"nonavailabilityrelatedreconfig",
"nonavailabilityrelatedincompliance",
"healthy",
],
)
CreateEnumType(
"vim.cluster.VsanObjectTypeEnum",
"VsanObjectTypeEnum",
"vim.version.version9",
[
"vmswap",
"vdisk",
"namespace",
"vmem",
"statsdb",
"iscsi",
"other",
"fileSystemOverhead",
"dedupOverhead",
"checksumOverhead",
],
)
CreateEnumType(
"vim.cluster.VsanCapabilityType",
"VsanCapabilityType",
"vim.version.version10",
[
"capability",
"allflash",
"stretchedcluster",
"dataefficiency",
"clusterconfig",
"upgrade",
"objectidentities",
],
)
CreateEnumType(
"vim.cluster.VsanHealthLogLevelEnum",
"VsanHealthLogLevelEnum",
"vim.version.version9",
[
"INFO",
"WARNING",
"ERROR",
"DEBUG",
"CRITICAL",
],
)
CreateEnumType(
"vim.cluster.VsanPerfSummaryType",
"VsanPerfSummaryType",
"vim.version.version9",
[
"average",
"maximum",
"minimum",
"latest",
"summation",
"none",
],
)
CreateEnumType(
"vim.cluster.StorageComplianceStatus",
"VsanStorageComplianceStatus",
"vim.version.version9",
[
"compliant",
"nonCompliant",
"unknown",
"notApplicable",
],
)
CreateEnumType(
"vim.cluster.VsanPerfStatsUnitType",
"VsanPerfStatsUnitType",
"vim.version.version9",
[
"number",
"time_ms",
"percentage",
"size_bytes",
"rate_bytes",
],
)
CreateEnumType(
"vim.cluster.VsanPerfThresholdDirectionType",
"VsanPerfThresholdDirectionType",
"vim.version.version9",
[
"upper",
"lower",
],
)
CreateEnumType(
"vim.cluster.VsanPerfStatsType",
"VsanPerfStatsType",
"vim.version.version9",
[
"absolute",
"delta",
"rate",
],
)
CreateEnumType(
"vim.vsan.host.DiskMappingCreationType",
"VimVsanHostDiskMappingCreationType",
"vim.version.version10",
[
"hybrid",
"allFlash",
],
)
CreateEnumType(
"vim.cluster.VsanClusterHealthActionIdEnum",
"VsanClusterHealthActionIdEnum",
"vim.version.version9",
[
"RepairClusterObjectsAction",
"UploadHclDb",
"UpdateHclDbFromInternet",
"EnableHealthService",
"DiskBalance",
"StopDiskBalance",
"RemediateDedup",
"UpgradeVsanDiskFormat",
],
)
CreateEnumType(
"vim.cluster.VsanDiskGroupCreationType",
"VimClusterVsanDiskGroupCreationType",
"vim.version.version10",
[
"allflash",
"hybrid",
],
)
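# Note: each CreateDataType call above presumably registers a VMODL data type
# with pyVmomi's type system as (vmodl name, WSDL name, parent type,
# introduced-in API version, properties), where every property tuple is
# (name, type, version, flags) and F_OPTIONAL marks a field that may be
# omitted on the wire. CreateEnumType likewise takes the enum's vmodl/WSDL
# names, its version, and the list of allowed string values.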
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/vsan/vsanmgmtObjects.py
| 0.415729 | 0.201872 |
vsanmgmtObjects.py
|
pypi
|
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import socket
from salt.ext.tornado.escape import native_str
from salt.ext.tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
from salt.ext.tornado import gen
from salt.ext.tornado import httputil
from salt.ext.tornado import iostream
from salt.ext.tornado import netutil
from salt.ext.tornado.tcpserver import TCPServer
from salt.ext.tornado.util import Configurable
class HTTPServer(TCPServer, Configurable,
httputil.HTTPServerConnectionDelegate):
r"""A non-blocking, single-threaded HTTP server.
A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
or, for backwards compatibility, a callback that takes an
`.HTTPServerRequest` as an argument. The delegate is usually a
`tornado.web.Application`.
`HTTPServer` supports keep-alive connections by default
(automatically for HTTP/1.1, or for HTTP/1.0 when the client
requests ``Connection: keep-alive``).
If ``xheaders`` is ``True``, we support the
``X-Real-Ip``/``X-Forwarded-For`` and
``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
remote IP and URI scheme/protocol for all requests. These headers
are useful when running Tornado behind a reverse proxy or load
balancer. The ``protocol`` argument can also be set to ``https``
if Tornado is run behind an SSL-decoding proxy that does not set one of
the supported ``xheaders``.
By default, when parsing the ``X-Forwarded-For`` header, Tornado will
select the last (i.e., the closest) address on the list of hosts as the
remote host IP address. To select the next server in the chain, a list of
trusted downstream hosts may be passed as the ``trusted_downstream``
argument. These hosts will be skipped when parsing the ``X-Forwarded-For``
header.
To make this server serve SSL traffic, send the ``ssl_options`` keyword
argument with an `ssl.SSLContext` object. For compatibility with older
versions of Python ``ssl_options`` may also be a dictionary of keyword
arguments for the `ssl.wrap_socket` method.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
os.path.join(data_dir, "mydomain.key"))
        HTTPServer(application, ssl_options=ssl_ctx)
`HTTPServer` initialization follows one of three patterns (the
initialization methods are defined on `tornado.tcpserver.TCPServer`):
1. `~tornado.tcpserver.TCPServer.listen`: simple single-process::
server = HTTPServer(app)
server.listen(8888)
IOLoop.current().start()
In many cases, `tornado.web.Application.listen` can be used to avoid
the need to explicitly create the `HTTPServer`.
2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
simple multi-process::
server = HTTPServer(app)
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.current().start()
When using this interface, an `.IOLoop` must *not* be passed
to the `HTTPServer` constructor. `~.TCPServer.start` will always start
the server on the default singleton `.IOLoop`.
3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process::
sockets = tornado.netutil.bind_sockets(8888)
tornado.process.fork_processes(0)
server = HTTPServer(app)
server.add_sockets(sockets)
IOLoop.current().start()
The `~.TCPServer.add_sockets` interface is more complicated,
but it can be used with `tornado.process.fork_processes` to
give you more flexibility in when the fork happens.
`~.TCPServer.add_sockets` can also be used in single-process
servers if you want to create your listening sockets in some
way other than `tornado.netutil.bind_sockets`.
.. versionchanged:: 4.0
Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
arguments. Added support for `.HTTPServerConnectionDelegate`
instances as ``request_callback``.
.. versionchanged:: 4.1
`.HTTPServerConnectionDelegate.start_request` is now called with
two arguments ``(server_conn, request_conn)`` (in accordance with the
documentation) instead of one ``(request_conn)``.
.. versionchanged:: 4.2
`HTTPServer` is now a subclass of `tornado.util.Configurable`.
.. versionchanged:: 4.5
Added the ``trusted_downstream`` argument.
"""
def __init__(self, *args, **kwargs):
# Ignore args to __init__; real initialization belongs in
# initialize since we're Configurable. (there's something
# weird in initialization order between this class,
# Configurable, and TCPServer so we can't leave __init__ out
# completely)
pass
def initialize(self, request_callback, no_keep_alive=False, io_loop=None,
xheaders=False, ssl_options=None, protocol=None,
decompress_request=False,
chunk_size=None, max_header_size=None,
idle_connection_timeout=None, body_timeout=None,
max_body_size=None, max_buffer_size=None,
trusted_downstream=None):
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self.protocol = protocol
self.conn_params = HTTP1ConnectionParameters(
decompress=decompress_request,
chunk_size=chunk_size,
max_header_size=max_header_size,
header_timeout=idle_connection_timeout or 3600,
max_body_size=max_body_size,
body_timeout=body_timeout,
no_keep_alive=no_keep_alive)
TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
max_buffer_size=max_buffer_size,
read_chunk_size=chunk_size)
self._connections = set()
self.trusted_downstream = trusted_downstream
@classmethod
def configurable_base(cls):
return HTTPServer
@classmethod
def configurable_default(cls):
return HTTPServer
@gen.coroutine
def close_all_connections(self):
while self._connections:
# Peek at an arbitrary element of the set
conn = next(iter(self._connections))
yield conn.close()
def handle_stream(self, stream, address):
context = _HTTPRequestContext(stream, address,
self.protocol,
self.trusted_downstream)
conn = HTTP1ServerConnection(
stream, self.conn_params, context)
self._connections.add(conn)
conn.start_serving(self)
def start_request(self, server_conn, request_conn):
if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate):
delegate = self.request_callback.start_request(server_conn, request_conn)
else:
delegate = _CallableAdapter(self.request_callback, request_conn)
if self.xheaders:
delegate = _ProxyAdapter(delegate, request_conn)
return delegate
def on_close(self, server_conn):
self._connections.remove(server_conn)
class _CallableAdapter(httputil.HTTPMessageDelegate):
def __init__(self, request_callback, request_conn):
self.connection = request_conn
self.request_callback = request_callback
self.request = None
self.delegate = None
self._chunks = []
def headers_received(self, start_line, headers):
self.request = httputil.HTTPServerRequest(
connection=self.connection, start_line=start_line,
headers=headers)
def data_received(self, chunk):
self._chunks.append(chunk)
def finish(self):
self.request.body = b''.join(self._chunks)
self.request._parse_body()
self.request_callback(self.request)
def on_connection_close(self):
self._chunks = None
class _HTTPRequestContext(object):
def __init__(self, stream, address, protocol, trusted_downstream=None):
self.address = address
# Save the socket's address family now so we know how to
# interpret self.address even after the stream is closed
# and its socket attribute replaced with None.
if stream.socket is not None:
self.address_family = stream.socket.family
else:
self.address_family = None
# In HTTPServerRequest we want an IP, not a full socket address.
if (self.address_family in (socket.AF_INET, socket.AF_INET6) and
address is not None):
self.remote_ip = address[0]
else:
# Unix (or other) socket; fake the remote address.
self.remote_ip = '0.0.0.0'
if protocol:
self.protocol = protocol
elif isinstance(stream, iostream.SSLIOStream):
self.protocol = "https"
else:
self.protocol = "http"
self._orig_remote_ip = self.remote_ip
self._orig_protocol = self.protocol
self.trusted_downstream = set(trusted_downstream or [])
def __str__(self):
if self.address_family in (socket.AF_INET, socket.AF_INET6):
return self.remote_ip
elif isinstance(self.address, bytes):
# Python 3 with the -bb option warns about str(bytes),
# so convert it explicitly.
# Unix socket addresses are str on mac but bytes on linux.
return native_str(self.address)
else:
return str(self.address)
def _apply_xheaders(self, headers):
"""Rewrite the ``remote_ip`` and ``protocol`` fields."""
# Squid uses X-Forwarded-For, others use X-Real-Ip
ip = headers.get("X-Forwarded-For", self.remote_ip)
# Skip trusted downstream hosts in X-Forwarded-For list
for ip in (cand.strip() for cand in reversed(ip.split(','))):
if ip not in self.trusted_downstream:
break
ip = headers.get("X-Real-Ip", ip)
if netutil.is_valid_ip(ip):
self.remote_ip = ip
# AWS uses X-Forwarded-Proto
proto_header = headers.get(
"X-Scheme", headers.get("X-Forwarded-Proto",
self.protocol))
if proto_header in ("http", "https"):
self.protocol = proto_header
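    # A worked example of the loop above (addresses hypothetical): with
    # trusted_downstream={'10.0.0.2'} and the header
    # "X-Forwarded-For: 203.0.113.5, 10.0.0.2", the list is walked from the
    # right, the trusted proxy 10.0.0.2 is skipped, and remote_ip becomes
    # 203.0.113.5 -- unless a valid X-Real-Ip header overrides it.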
def _unapply_xheaders(self):
"""Undo changes from `_apply_xheaders`.
Xheaders are per-request so they should not leak to the next
request on the same connection.
"""
self.remote_ip = self._orig_remote_ip
self.protocol = self._orig_protocol
class _ProxyAdapter(httputil.HTTPMessageDelegate):
def __init__(self, delegate, request_conn):
self.connection = request_conn
self.delegate = delegate
def headers_received(self, start_line, headers):
self.connection.context._apply_xheaders(headers)
return self.delegate.headers_received(start_line, headers)
def data_received(self, chunk):
return self.delegate.data_received(chunk)
def finish(self):
self.delegate.finish()
self._cleanup()
def on_connection_close(self):
self.delegate.on_connection_close()
self._cleanup()
def _cleanup(self):
self.connection.context._unapply_xheaders()
HTTPRequest = httputil.HTTPServerRequest
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/httpserver.py
| 0.774242 | 0.188772 |
httpserver.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import array
import atexit
import os
import re
import sys
import zlib
PY3 = sys.version_info >= (3,)
if PY3:
xrange = range
# inspect.getargspec() raises DeprecationWarnings in Python 3.5.
# The two functions have compatible interfaces for the parts we need.
if PY3:
from inspect import getfullargspec as getargspec
else:
from inspect import getargspec
# Aliases for types that are spelled differently in different Python
# versions. bytes_type is deprecated and no longer used in Tornado
# itself but is left in case anyone outside Tornado is using it.
bytes_type = bytes
if PY3:
unicode_type = str
basestring_type = str
else:
# The names unicode and basestring don't exist in py3 so silence flake8.
unicode_type = unicode # noqa
basestring_type = basestring # noqa
try:
import typing # noqa
from typing import cast
_ObjectDictBase = typing.Dict[str, typing.Any]
except ImportError:
_ObjectDictBase = dict
def cast(typ, x):
return x
else:
# More imports that are only needed in type comments.
import datetime # noqa
import types # noqa
from typing import Any, AnyStr, Union, Optional, Dict, Mapping # noqa
    from typing import List, Tuple, Match, Callable  # noqa
if PY3:
_BaseString = str
else:
_BaseString = Union[bytes, unicode_type]
try:
from sys import is_finalizing
except ImportError:
# Emulate it
def _get_emulated_is_finalizing():
L = []
atexit.register(lambda: L.append(None))
def is_finalizing():
# Not referencing any globals here
return L != []
return is_finalizing
is_finalizing = _get_emulated_is_finalizing()
class ObjectDict(_ObjectDictBase):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, name):
# type: (str) -> Any
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
# type: (str, Any) -> None
self[name] = value
class GzipDecompressor(object):
"""Streaming gzip decompressor.
The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
"""
def __init__(self):
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
# This works on cpython and pypy, but not jython.
self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def decompress(self, value, max_length=None):
# type: (bytes, Optional[int]) -> bytes
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
return self.decompressobj.decompress(value, max_length)
@property
def unconsumed_tail(self):
# type: () -> bytes
"""Returns the unconsumed portion left over
"""
return self.decompressobj.unconsumed_tail
def flush(self):
# type: () -> bytes
"""Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
"""
return self.decompressobj.flush()
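# A minimal usage sketch for GzipDecompressor; the Python 3 stdlib ``gzip``
# module is assumed only to produce test input, and an explicit max_length
# is passed since the underlying zlib call does not accept None:
#
#     import gzip
#     d = GzipDecompressor()
#     data = d.decompress(gzip.compress(b"payload"), 64) + d.flush()
#     assert data == b"payload"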
def import_object(name):
# type: (_BaseString) -> Any
"""Imports an object by name.
import_object('x') is equivalent to 'import x'.
import_object('x.y.z') is equivalent to 'from x.y import z'.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
"""
if not isinstance(name, str):
# on python 2 a byte string is required.
name = name.encode('utf-8')
if name.count('.') == 0:
return __import__(name, None, None)
parts = name.split('.')
obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
# Stubs to make mypy happy (and later for actual type-checking).
def raise_exc_info(exc_info):
# type: (Tuple[type, BaseException, types.TracebackType]) -> None
pass
def exec_in(code, glob, loc=None):
# type: (Any, Dict[str, Any], Optional[Mapping[str, Any]]) -> Any
if isinstance(code, basestring_type):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
if PY3:
exec("""
def raise_exc_info(exc_info):
try:
raise exc_info[1].with_traceback(exc_info[2])
finally:
exc_info = None
""")
else:
exec("""
def raise_exc_info(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
""")
def errno_from_exception(e):
# type: (BaseException) -> Optional[int]
"""Provides the errno from an Exception object.
    In some cases the ``errno`` attribute is not set, so we pull the
    errno out of ``args``; but if an Exception is instantiated without
    any args, indexing into them would fail. This function abstracts
    all that behavior to give you a safe way to get the errno.
"""
if hasattr(e, 'errno'):
return e.errno # type: ignore
elif e.args:
return e.args[0]
else:
return None
_alphanum = frozenset(
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
def _re_unescape_replacement(match):
# type: (Match[str]) -> str
group = match.group(1)
if group[0] in _alphanum:
raise ValueError("cannot unescape '\\\\%s'" % group[0])
return group
_re_unescape_pattern = re.compile(r'\\(.)', re.DOTALL)
def re_unescape(s):
# type: (str) -> str
"""Unescape a string escaped by `re.escape`.
May raise ``ValueError`` for regular expressions which could not
have been produced by `re.escape` (for example, strings containing
``\d`` cannot be unescaped).
.. versionadded:: 4.4
"""
return _re_unescape_pattern.sub(_re_unescape_replacement, s)
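# Example: re_unescape undoes re.escape, so re_unescape(re.escape('1+2*3'))
# returns '1+2*3'; strings re.escape could never have produced (for example
# ones containing '\d') raise ValueError instead.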
class Configurable(object):
"""Base class for configurable interfaces.
A configurable interface is an (abstract) class whose constructor
acts as a factory function for one of its implementation subclasses.
The implementation subclass as well as optional keyword arguments to
its initializer can be set globally at runtime with `configure`.
By using the constructor as the factory method, the interface
looks like a normal class, `isinstance` works as usual, etc. This
pattern is most useful when the choice of implementation is likely
to be a global decision (e.g. when `~select.epoll` is available,
always use it instead of `~select.select`), or when a
previously-monolithic class has been split into specialized
subclasses.
Configurable subclasses must define the class methods
`configurable_base` and `configurable_default`, and use the instance
method `initialize` instead of ``__init__``.
"""
__impl_class = None # type: type
__impl_kwargs = None # type: Dict[str, Any]
def __new__(cls, *args, **kwargs):
base = cls.configurable_base()
init_kwargs = {}
if cls is base:
impl = cls.configured_class()
if base.__impl_kwargs:
init_kwargs.update(base.__impl_kwargs)
else:
impl = cls
init_kwargs.update(kwargs)
instance = super(Configurable, cls).__new__(impl)
# initialize vs __init__ chosen for compatibility with AsyncHTTPClient
# singleton magic. If we get rid of that we can switch to __init__
# here too.
instance.initialize(*args, **init_kwargs)
return instance
@classmethod
def configurable_base(cls):
# type: () -> Any
# TODO: This class needs https://github.com/python/typing/issues/107
# to be fully typeable.
"""Returns the base class of a configurable hierarchy.
        This will normally return the class in which it is defined
        (which is *not* necessarily the same as the ``cls`` classmethod
        parameter).
"""
raise NotImplementedError()
@classmethod
def configurable_default(cls):
# type: () -> type
"""Returns the implementation class to be used if none is configured."""
raise NotImplementedError()
def initialize(self):
# type: () -> None
"""Initialize a `Configurable` subclass instance.
Configurable classes should use `initialize` instead of ``__init__``.
.. versionchanged:: 4.2
Now accepts positional arguments in addition to keyword arguments.
"""
@classmethod
def configure(cls, impl, **kwargs):
# type: (Any, **Any) -> None
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs
@classmethod
def configured_class(cls):
# type: () -> type
"""Returns the currently configured class."""
base = cls.configurable_base()
if cls.__impl_class is None:
base.__impl_class = cls.configurable_default()
return base.__impl_class
@classmethod
def _save_configuration(cls):
# type: () -> Tuple[type, Dict[str, Any]]
base = cls.configurable_base()
return (base.__impl_class, base.__impl_kwargs)
@classmethod
def _restore_configuration(cls, saved):
# type: (Tuple[type, Dict[str, Any]]) -> None
base = cls.configurable_base()
base.__impl_class = saved[0]
base.__impl_kwargs = saved[1]
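# A hedged usage sketch for Configurable (class names hypothetical):
#
#     class Transport(Configurable):
#         @classmethod
#         def configurable_base(cls):
#             return Transport
#         @classmethod
#         def configurable_default(cls):
#             return PlainTransport
#
#     class PlainTransport(Transport):
#         def initialize(self, verify=False):
#             self.verify = verify
#
#     Transport.configure(PlainTransport, verify=True)
#     t = Transport()  # builds a PlainTransport; initialize(verify=True)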
class ArgReplacer(object):
"""Replaces one value in an ``args, kwargs`` pair.
Inspects the function signature to find an argument by name
whether it is passed by position or keyword. For use in decorators
and similar wrappers.
"""
def __init__(self, func, name):
# type: (Callable, str) -> None
self.name = name
try:
self.arg_pos = self._getargnames(func).index(name)
except ValueError:
# Not a positional parameter
self.arg_pos = None
def _getargnames(self, func):
# type: (Callable) -> List[str]
try:
return getargspec(func).args
except TypeError:
if hasattr(func, 'func_code'):
# Cython-generated code has all the attributes needed
# by inspect.getargspec, but the inspect module only
# works with ordinary functions. Inline the portion of
# getargspec that we need here. Note that for static
# functions the @cython.binding(True) decorator must
# be used (for methods it works out of the box).
code = func.func_code # type: ignore
return code.co_varnames[:code.co_argcount]
raise
def get_old_value(self, args, kwargs, default=None):
# type: (List[Any], Dict[str, Any], Any) -> Any
"""Returns the old value of the named argument without replacing it.
Returns ``default`` if the argument is not present.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
return args[self.arg_pos]
else:
return kwargs.get(self.name, default)
def replace(self, new_value, args, kwargs):
# type: (Any, List[Any], Dict[str, Any]) -> Tuple[Any, List[Any], Dict[str, Any]]
"""Replace the named argument in ``args, kwargs`` with ``new_value``.
Returns ``(old_value, args, kwargs)``. The returned ``args`` and
``kwargs`` objects may not be the same as the input objects, or
the input objects may be mutated.
If the named argument was not found, ``new_value`` will be added
to ``kwargs`` and None will be returned as ``old_value``.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
# The arg to replace is passed positionally
old_value = args[self.arg_pos]
args = list(args) # *args is normally a tuple
args[self.arg_pos] = new_value
else:
# The arg to replace is either omitted or passed by keyword.
old_value = kwargs.get(self.name)
kwargs[self.name] = new_value
return old_value, args, kwargs
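# A small worked example (function ``f`` hypothetical): for
# ``def f(a, b, c=3)``, ArgReplacer(f, 'b').replace(99, [1, 2], {}) returns
# (2, [1, 99], {}) since ``b`` was passed positionally, while
# ArgReplacer(f, 'b').replace(99, [1], {}) returns (None, [1], {'b': 99}).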
def timedelta_to_seconds(td):
# type: (datetime.timedelta) -> float
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
def _websocket_mask_python(mask, data):
# type: (bytes, bytes) -> bytes
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask_arr = array.array("B", mask)
unmasked_arr = array.array("B", data)
for i in xrange(len(data)):
unmasked_arr[i] = unmasked_arr[i] ^ mask_arr[i % 4]
if PY3:
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked_arr.tobytes()
else:
return unmasked_arr.tostring()
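# XOR masking is an involution, so applying the same 4-byte mask twice
# round-trips the payload (a quick sketch of the pure-Python path):
#
#     masked = _websocket_mask_python(b'abcd', b'hello')
#     assert _websocket_mask_python(b'abcd', masked) == b'hello'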
if (os.environ.get('TORNADO_NO_EXTENSION') or
os.environ.get('TORNADO_EXTENSION') == '0'):
# These environment variables exist to make it easier to do performance
# comparisons; they are not guaranteed to remain supported in the future.
_websocket_mask = _websocket_mask_python
else:
try:
from salt.ext.tornado.speedups import websocket_mask as _websocket_mask
except ImportError:
if os.environ.get('TORNADO_EXTENSION') == '1':
raise
_websocket_mask = _websocket_mask_python
def doctests():
import doctest
return doctest.DocTestSuite()
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/util.py
| 0.58059 | 0.225651 |
util.py
|
pypi
|
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import functools
import socket
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado.ioloop import IOLoop
from salt.ext.tornado.iostream import IOStream
from salt.ext.tornado import gen
from salt.ext.tornado.netutil import Resolver
from salt.ext.tornado.platform.auto import set_close_exec
_INITIAL_CONNECT_TIMEOUT = 0.3
class _Connector(object):
"""A stateless implementation of the "Happy Eyeballs" algorithm.
"Happy Eyeballs" is documented in RFC6555 as the recommended practice
for when both IPv4 and IPv6 addresses are available.
In this implementation, we partition the addresses by family, and
make the first connection attempt to whichever address was
returned first by ``getaddrinfo``. If that connection fails or
times out, we begin a connection in parallel to the first address
of the other family. If there are additional failures we retry
with other addresses, keeping one connection attempt per family
in flight at a time.
http://tools.ietf.org/html/rfc6555
"""
def __init__(self, addrinfo, io_loop, connect):
self.io_loop = io_loop
self.connect = connect
self.future = Future()
self.timeout = None
self.last_error = None
self.remaining = len(addrinfo)
self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
@staticmethod
def split(addrinfo):
"""Partition the ``addrinfo`` list by address family.
Returns two lists. The first list contains the first entry from
``addrinfo`` and all others with the same family, and the
second list contains all other addresses (normally one list will
be AF_INET and the other AF_INET6, although non-standard resolvers
may return additional families).
"""
primary = []
secondary = []
primary_af = addrinfo[0][0]
for af, addr in addrinfo:
if af == primary_af:
primary.append((af, addr))
else:
secondary.append((af, addr))
return primary, secondary
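    # Example (addresses hypothetical): split([(AF_INET, a1), (AF_INET6, a2),
    # (AF_INET, a3)]) returns ([(AF_INET, a1), (AF_INET, a3)],
    # [(AF_INET6, a2)]) -- one attempt per family can then run in parallel.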
def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
self.try_connect(iter(self.primary_addrs))
        self.set_timeout(timeout)
return self.future
def try_connect(self, addrs):
try:
af, addr = next(addrs)
except StopIteration:
# We've reached the end of our queue, but the other queue
# might still be working. Send a final error on the future
# only when both queues are finished.
if self.remaining == 0 and not self.future.done():
self.future.set_exception(self.last_error or
IOError("connection failed"))
return
future = self.connect(af, addr)
future.add_done_callback(functools.partial(self.on_connect_done,
addrs, af, addr))
def on_connect_done(self, addrs, af, addr, future):
self.remaining -= 1
try:
stream = future.result()
except Exception as e:
if self.future.done():
return
# Error: try again (but remember what happened so we have an
# error to raise in the end)
self.last_error = e
self.try_connect(addrs)
if self.timeout is not None:
# If the first attempt failed, don't wait for the
# timeout to try an address from the secondary queue.
self.io_loop.remove_timeout(self.timeout)
self.on_timeout()
return
self.clear_timeout()
if self.future.done():
# This is a late arrival; just drop it.
stream.close()
else:
self.future.set_result((af, addr, stream))
    def set_timeout(self, timeout):
self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
self.on_timeout)
def on_timeout(self):
self.timeout = None
self.try_connect(iter(self.secondary_addrs))
def clear_timeout(self):
if self.timeout is not None:
self.io_loop.remove_timeout(self.timeout)
class TCPClient(object):
"""A non-blocking TCP connection factory.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, resolver=None, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
if resolver is not None:
self.resolver = resolver
self._own_resolver = False
else:
self.resolver = Resolver(io_loop=io_loop)
self._own_resolver = True
def close(self):
if self._own_resolver:
self.resolver.close()
@gen.coroutine
def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
max_buffer_size=None, source_ip=None, source_port=None):
"""Connect to the given host and port.
Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
``ssl_options`` is not None).
Using the ``source_ip`` kwarg, one can specify the source
IP address to use when establishing the connection.
In case the user needs to resolve and
use a specific interface, it has to be handled outside
of Tornado as this depends very much on the platform.
Similarly, when the user requires a certain source port, it can
be specified using the ``source_port`` arg.
.. versionchanged:: 4.5
Added the ``source_ip`` and ``source_port`` arguments.
"""
addrinfo = yield self.resolver.resolve(host, port, af)
connector = _Connector(
addrinfo, self.io_loop,
functools.partial(self._create_stream, max_buffer_size,
source_ip=source_ip, source_port=source_port)
)
af, addr, stream = yield connector.start()
# TODO: For better performance we could cache the (af, addr)
# information here and re-use it on subsequent connections to
# the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
if ssl_options is not None:
stream = yield stream.start_tls(False, ssl_options=ssl_options,
server_hostname=host)
raise gen.Return(stream)
def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
source_port=None):
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
source_port_bind = source_port if isinstance(source_port, int) else 0
source_ip_bind = source_ip
if source_port_bind and not source_ip:
# User required a specific port, but did not specify
# a certain source IP, will bind to the default loopback.
source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
# Trying to use the same address family as the requested af socket:
# - 127.0.0.1 for IPv4
# - ::1 for IPv6
socket_obj = socket.socket(af)
set_close_exec(socket_obj.fileno())
if source_port_bind or source_ip_bind:
# If the user requires binding also to a specific IP/port.
try:
socket_obj.bind((source_ip_bind, source_port_bind))
except socket.error:
socket_obj.close()
# Fail loudly if unable to use the IP/port.
raise
try:
stream = IOStream(socket_obj,
io_loop=self.io_loop,
max_buffer_size=max_buffer_size)
except socket.error as e:
fu = Future()
fu.set_exception(e)
return fu
else:
return stream.connect(addr)
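# A hedged usage sketch (host, port and the probe itself are placeholders;
# must run inside a coroutine on the IOLoop):
#
#     from salt.ext.tornado import gen
#
#     @gen.coroutine
#     def probe():
#         stream = yield TCPClient().connect('example.com', 80)
#         yield stream.write(b'HEAD / HTTP/1.0\r\n\r\n')
#         first_line = yield stream.read_until(b'\r\n')
#         raise gen.Return(first_line)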
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/tcpclient.py
| 0.63477 | 0.179297 |
tcpclient.py
|
pypi
|
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import collections
import heapq
from salt.ext.tornado import gen, ioloop
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado.locks import Event
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
class QueueEmpty(Exception):
"""Raised by `.Queue.get_nowait` when the queue has no items."""
pass
class QueueFull(Exception):
"""Raised by `.Queue.put_nowait` when a queue is at its maximum size."""
pass
def _set_timeout(future, timeout):
if timeout:
def on_timeout():
future.set_exception(gen.TimeoutError())
io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
future.add_done_callback(
lambda _: io_loop.remove_timeout(timeout_handle))
class _QueueIterator(object):
def __init__(self, q):
self.q = q
def __anext__(self):
return self.q.get()
class Queue(object):
"""Coordinate producer and consumer coroutines.
If maxsize is 0 (the default) the queue size is unbounded.
.. testcode::
from salt.ext.tornado import gen
from salt.ext.tornado.ioloop import IOLoop
from salt.ext.tornado.queues import Queue
q = Queue(maxsize=2)
@gen.coroutine
def consumer():
while True:
item = yield q.get()
try:
print('Doing work on %s' % item)
yield gen.sleep(0.01)
finally:
q.task_done()
@gen.coroutine
def producer():
for item in range(5):
yield q.put(item)
print('Put %s' % item)
@gen.coroutine
def main():
# Start consumer without waiting (since it never finishes).
IOLoop.current().spawn_callback(consumer)
yield producer() # Wait for producer to put all tasks.
yield q.join() # Wait for consumer to finish all tasks.
print('Done')
IOLoop.current().run_sync(main)
.. testoutput::
Put 0
Put 1
Doing work on 0
Put 2
Doing work on 1
Put 3
Doing work on 2
Put 4
Doing work on 3
Doing work on 4
Done
In Python 3.5, `Queue` implements the async iterator protocol, so
``consumer()`` could be rewritten as::
async def consumer():
async for item in q:
try:
print('Doing work on %s' % item)
yield gen.sleep(0.01)
finally:
q.task_done()
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
"""
def __init__(self, maxsize=0):
if maxsize is None:
raise TypeError("maxsize can't be None")
if maxsize < 0:
raise ValueError("maxsize can't be negative")
self._maxsize = maxsize
self._init()
self._getters = collections.deque([]) # Futures.
self._putters = collections.deque([]) # Pairs of (item, Future).
self._unfinished_tasks = 0
self._finished = Event()
self._finished.set()
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
def empty(self):
return not self._queue
def full(self):
if self.maxsize == 0:
return False
else:
return self.qsize() >= self.maxsize
def put(self, item, timeout=None):
"""Put an item into the queue, perhaps waiting until there is room.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
timeout.
"""
try:
self.put_nowait(item)
except QueueFull:
future = Future()
self._putters.append((item, future))
_set_timeout(future, timeout)
return future
else:
return gen._null_future
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise `QueueFull`.
"""
self._consume_expired()
if self._getters:
assert self.empty(), "queue non-empty, why are getters waiting?"
getter = self._getters.popleft()
self.__put_internal(item)
getter.set_result(self._get())
elif self.full():
raise QueueFull
else:
self.__put_internal(item)
def get(self, timeout=None):
"""Remove and return an item from the queue.
Returns a Future which resolves once an item is available, or raises
`tornado.gen.TimeoutError` after a timeout.
"""
future = Future()
try:
future.set_result(self.get_nowait())
except QueueEmpty:
self._getters.append(future)
_set_timeout(future, timeout)
return future
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Return an item if one is immediately available, else raise
`QueueEmpty`.
"""
self._consume_expired()
if self._putters:
assert self.full(), "queue not full, why are putters waiting?"
item, putter = self._putters.popleft()
self.__put_internal(item)
putter.set_result(None)
return self._get()
elif self.qsize():
return self._get()
else:
raise QueueEmpty
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each `.get` used to fetch a task, a
subsequent call to `.task_done` tells the queue that the processing
on the task is complete.
If a `.join` is blocking, it resumes when all items have been
processed; that is, when every `.put` is matched by a `.task_done`.
Raises `ValueError` if called more times than `.put`.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
def join(self, timeout=None):
"""Block until all items in the queue are processed.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
timeout.
"""
return self._finished.wait(timeout)
def __aiter__(self):
return _QueueIterator(self)
# These three are overridable in subclasses.
def _init(self):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
# End of the overridable methods.
def __put_internal(self, item):
self._unfinished_tasks += 1
self._finished.clear()
self._put(item)
def _consume_expired(self):
# Remove timed-out waiters.
while self._putters and self._putters[0][1].done():
self._putters.popleft()
while self._getters and self._getters[0].done():
self._getters.popleft()
def __repr__(self):
return '<%s at %s %s>' % (
type(self).__name__, hex(id(self)), self._format())
def __str__(self):
return '<%s %s>' % (type(self).__name__, self._format())
def _format(self):
result = 'maxsize=%r' % (self.maxsize, )
if getattr(self, '_queue', None):
result += ' queue=%r' % self._queue
if self._getters:
result += ' getters[%s]' % len(self._getters)
if self._putters:
result += ' putters[%s]' % len(self._putters)
if self._unfinished_tasks:
result += ' tasks=%s' % self._unfinished_tasks
return result
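# A minimal sketch (the one-second deadline is illustrative) of the
# ``timeout`` argument documented on ``get``: the deadline may be a
# ``datetime.timedelta`` or an absolute IOLoop time, and expiry raises
# ``tornado.gen.TimeoutError``.
def _example_get_with_timeout(q):
    import datetime

    @gen.coroutine
    def consume_one():
        try:
            item = yield q.get(timeout=datetime.timedelta(seconds=1))
        except gen.TimeoutError:
            raise gen.Return(None)
        q.task_done()
        raise gen.Return(item)
    return consume_one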
class PriorityQueue(Queue):
"""A `.Queue` that retrieves entries in priority order, lowest first.
Entries are typically tuples like ``(priority number, data)``.
.. testcode::
from salt.ext.tornado.queues import PriorityQueue
q = PriorityQueue()
q.put((1, 'medium-priority item'))
q.put((0, 'high-priority item'))
q.put((10, 'low-priority item'))
print(q.get_nowait())
print(q.get_nowait())
print(q.get_nowait())
.. testoutput::
(0, 'high-priority item')
(1, 'medium-priority item')
(10, 'low-priority item')
"""
def _init(self):
self._queue = []
def _put(self, item):
heapq.heappush(self._queue, item)
def _get(self):
return heapq.heappop(self._queue)
class LifoQueue(Queue):
"""A `.Queue` that retrieves the most recently put items first.
.. testcode::
from salt.ext.tornado.queues import LifoQueue
q = LifoQueue()
q.put(3)
q.put(2)
q.put(1)
print(q.get_nowait())
print(q.get_nowait())
print(q.get_nowait())
.. testoutput::
1
2
3
"""
def _init(self):
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/queues.py
| 0.808861 | 0.152568 |
queues.py
|
pypi
|
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import sys
from io import BytesIO
import salt.ext.tornado as tornado
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado import escape
from salt.ext.tornado import httputil
from salt.ext.tornado.log import access_log
from salt.ext.tornado import web
from salt.ext.tornado.escape import native_str
from salt.ext.tornado.util import unicode_type, PY3
if PY3:
import urllib.parse as urllib_parse # py3
else:
import urllib as urllib_parse
# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
# that are smuggled inside objects of type unicode (via the latin1 encoding).
# These functions are like those in the tornado.escape module, but defined
# here to minimize the temptation to use them in non-wsgi contexts.
if str is unicode_type:
def to_wsgi_str(s):
assert isinstance(s, bytes)
return s.decode('latin1')
def from_wsgi_str(s):
assert isinstance(s, str)
return s.encode('latin1')
else:
def to_wsgi_str(s):
assert isinstance(s, bytes)
return s
def from_wsgi_str(s):
assert isinstance(s, str)
return s
class WSGIApplication(web.Application):
"""A WSGI equivalent of `tornado.web.Application`.
.. deprecated:: 4.0
Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
"""
def __call__(self, environ, start_response):
return WSGIAdapter(self)(environ, start_response)
# WSGI has no facilities for flow control, so just return an already-done
# Future when the interface requires it.
_dummy_future = Future()
_dummy_future.set_result(None)
class _WSGIConnection(httputil.HTTPConnection):
def __init__(self, method, start_response, context):
self.method = method
self.start_response = start_response
self.context = context
self._write_buffer = []
self._finished = False
self._expected_content_remaining = None
self._error = None
def set_close_callback(self, callback):
# WSGI has no facility for detecting a closed connection mid-request,
# so we can simply ignore the callback.
pass
def write_headers(self, start_line, headers, chunk=None, callback=None):
if self.method == 'HEAD':
self._expected_content_remaining = 0
elif 'Content-Length' in headers:
self._expected_content_remaining = int(headers['Content-Length'])
else:
self._expected_content_remaining = None
self.start_response(
'%s %s' % (start_line.code, start_line.reason),
[(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
if chunk is not None:
self.write(chunk, callback)
elif callback is not None:
callback()
return _dummy_future
def write(self, chunk, callback=None):
if self._expected_content_remaining is not None:
self._expected_content_remaining -= len(chunk)
if self._expected_content_remaining < 0:
self._error = httputil.HTTPOutputError(
"Tried to write more data than Content-Length")
raise self._error
self._write_buffer.append(chunk)
if callback is not None:
callback()
return _dummy_future
def finish(self):
if (self._expected_content_remaining is not None and
self._expected_content_remaining != 0):
self._error = httputil.HTTPOutputError(
"Tried to write %d bytes less than Content-Length" %
self._expected_content_remaining)
raise self._error
self._finished = True
class _WSGIRequestContext(object):
def __init__(self, remote_ip, protocol):
self.remote_ip = remote_ip
self.protocol = protocol
def __str__(self):
return self.remote_ip
class WSGIAdapter(object):
"""Converts a `tornado.web.Application` instance into a WSGI application.
Example usage::
import tornado.web
import tornado.wsgi
import wsgiref.simple_server
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
wsgi_app = tornado.wsgi.WSGIAdapter(application)
server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
server.serve_forever()
See the `appengine demo
<https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
for an example of using this module to run a Tornado app on Google
App Engine.
In WSGI mode asynchronous methods are not supported. This means
that it is not possible to use `.AsyncHTTPClient`, or the
`tornado.auth` or `tornado.websocket` modules.
.. versionadded:: 4.0
"""
def __init__(self, application):
if isinstance(application, WSGIApplication):
self.application = lambda request: web.Application.__call__(
application, request)
else:
self.application = application
def __call__(self, environ, start_response):
method = environ["REQUEST_METHOD"]
uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
if environ.get("QUERY_STRING"):
uri += "?" + environ["QUERY_STRING"]
headers = httputil.HTTPHeaders()
if environ.get("CONTENT_TYPE"):
headers["Content-Type"] = environ["CONTENT_TYPE"]
if environ.get("CONTENT_LENGTH"):
headers["Content-Length"] = environ["CONTENT_LENGTH"]
for key in environ:
if key.startswith("HTTP_"):
headers[key[5:].replace("_", "-")] = environ[key]
if headers.get("Content-Length"):
body = environ["wsgi.input"].read(
int(headers["Content-Length"]))
else:
body = b""
protocol = environ["wsgi.url_scheme"]
remote_ip = environ.get("REMOTE_ADDR", "")
if environ.get("HTTP_HOST"):
host = environ["HTTP_HOST"]
else:
host = environ["SERVER_NAME"]
connection = _WSGIConnection(method, start_response,
_WSGIRequestContext(remote_ip, protocol))
request = httputil.HTTPServerRequest(
method, uri, "HTTP/1.1", headers=headers, body=body,
host=host, connection=connection)
request._parse_body()
self.application(request)
if connection._error:
raise connection._error
if not connection._finished:
raise Exception("request did not finish synchronously")
return connection._write_buffer
class WSGIContainer(object):
r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
.. warning::
WSGI is a *synchronous* interface, while Tornado's concurrency model
is based on single-threaded asynchronous execution. This means that
running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
than running the same app in a multi-threaded WSGI server like
``gunicorn`` or ``uwsgi``. Use `WSGIContainer` only when there are
benefits to combining Tornado and WSGI in the same process that
outweigh the reduced scalability.
Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
run it. For example::
def simple_app(environ, start_response):
status = "200 OK"
response_headers = [("Content-type", "text/plain")]
start_response(status, response_headers)
return ["Hello world!\n"]
container = tornado.wsgi.WSGIContainer(simple_app)
http_server = tornado.httpserver.HTTPServer(container)
http_server.listen(8888)
tornado.ioloop.IOLoop.current().start()
This class is intended to let other frameworks (Django, web.py, etc)
run on the Tornado HTTP server and I/O loop.
The `tornado.web.FallbackHandler` class is often useful for mixing
Tornado and WSGI apps in the same server. See
https://github.com/bdarnell/django-tornado-demo for a complete example.
"""
def __init__(self, wsgi_application):
self.wsgi_application = wsgi_application
def __call__(self, request):
data = {}
response = []
def start_response(status, response_headers, exc_info=None):
data["status"] = status
data["headers"] = response_headers
return response.append
app_response = self.wsgi_application(
WSGIContainer.environ(request), start_response)
try:
response.extend(app_response)
body = b"".join(response)
finally:
if hasattr(app_response, "close"):
app_response.close()
if not data:
raise Exception("WSGI app did not call start_response")
status_code, reason = data["status"].split(' ', 1)
status_code = int(status_code)
headers = data["headers"]
header_set = set(k.lower() for (k, v) in headers)
body = escape.utf8(body)
if status_code != 304:
if "content-length" not in header_set:
headers.append(("Content-Length", str(len(body))))
if "content-type" not in header_set:
headers.append(("Content-Type", "text/html; charset=UTF-8"))
if "server" not in header_set:
headers.append(("Server", "TornadoServer/%s" % salt.ext.tornado.version))
start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
header_obj = httputil.HTTPHeaders()
for key, value in headers:
header_obj.add(key, value)
request.connection.write_headers(start_line, header_obj, chunk=body)
request.connection.finish()
self._log(status_code, request)
@staticmethod
def environ(request):
"""Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
"""
hostport = request.host.split(":")
if len(hostport) == 2:
host = hostport[0]
port = int(hostport[1])
else:
host = request.host
port = 443 if request.protocol == "https" else 80
environ = {
"REQUEST_METHOD": request.method,
"SCRIPT_NAME": "",
"PATH_INFO": to_wsgi_str(escape.url_unescape(
request.path, encoding=None, plus=False)),
"QUERY_STRING": request.query,
"REMOTE_ADDR": request.remote_ip,
"SERVER_NAME": host,
"SERVER_PORT": str(port),
"SERVER_PROTOCOL": request.version,
"wsgi.version": (1, 0),
"wsgi.url_scheme": request.protocol,
"wsgi.input": BytesIO(escape.utf8(request.body)),
"wsgi.errors": sys.stderr,
"wsgi.multithread": False,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
if "Content-Type" in request.headers:
environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
if "Content-Length" in request.headers:
environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
for key, value in request.headers.items():
environ["HTTP_" + key.replace("-", "_").upper()] = value
return environ
def _log(self, status_code, request):
if status_code < 400:
log_method = access_log.info
elif status_code < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * request.request_time()
summary = request.method + " " + request.uri + " (" + \
request.remote_ip + ")"
log_method("%d %s %.2fms", status_code, summary, request_time)
HTTPRequest = httputil.HTTPServerRequest
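# A minimal sketch (routes and handler body are illustrative) of the
# ``FallbackHandler`` pattern mentioned in the WSGIContainer docstring
# for mixing Tornado and WSGI apps in one server: native routes match
# first, with the wrapped WSGI app as the catch-all.
def _example_mixed_app(wsgi_app):
    class HelloHandler(web.RequestHandler):
        def get(self):
            self.write("handled by tornado")

    return web.Application([
        (r"/tornado", HelloHandler),
        (r".*", web.FallbackHandler, dict(fallback=WSGIContainer(wsgi_app))),
    ])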
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/wsgi.py
| 0.623492 | 0.224555 |
wsgi.py
|
pypi
|
"""Data used by the tornado.locale module."""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
LOCALE_NAMES = {
"af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
"am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"},
"ar_AR": {"name_en": u"Arabic", "name": u"العربية"},
"bg_BG": {"name_en": u"Bulgarian", "name": u"Български"},
"bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"},
"bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
"ca_ES": {"name_en": u"Catalan", "name": u"Català"},
"cs_CZ": {"name_en": u"Czech", "name": u"Čeština"},
"cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
"da_DK": {"name_en": u"Danish", "name": u"Dansk"},
"de_DE": {"name_en": u"German", "name": u"Deutsch"},
"el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"},
"en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
"en_US": {"name_en": u"English (US)", "name": u"English (US)"},
"es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"},
"es_LA": {"name_en": u"Spanish", "name": u"Español"},
"et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
"eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
"fa_IR": {"name_en": u"Persian", "name": u"فارسی"},
"fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
"fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"},
"fr_FR": {"name_en": u"French", "name": u"Français"},
"ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
"gl_ES": {"name_en": u"Galician", "name": u"Galego"},
"he_IL": {"name_en": u"Hebrew", "name": u"עברית"},
"hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"},
"hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
"hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
"id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
"is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"},
"it_IT": {"name_en": u"Italian", "name": u"Italiano"},
"ja_JP": {"name_en": u"Japanese", "name": u"日本語"},
"ko_KR": {"name_en": u"Korean", "name": u"한국어"},
"lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"},
"lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"},
"mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"},
"ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"},
"ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
"nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"},
"nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
"nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
"pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"},
"pl_PL": {"name_en": u"Polish", "name": u"Polski"},
"pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"},
"pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"},
"ro_RO": {"name_en": u"Romanian", "name": u"Română"},
"ru_RU": {"name_en": u"Russian", "name": u"Русский"},
"sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"},
"sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"},
"sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
"sr_RS": {"name_en": u"Serbian", "name": u"Српски"},
"sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
"sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
"ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"},
"te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"},
"th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"},
"tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
"tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"},
"uk_UA": {"name_en": u"Ukraini ", "name": u"Українська"},
"vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
"zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
"zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
}
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/_locale_data.py
| 0.65202 | 0.31513 |
_locale_data.py
|
pypi
|
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import errno
import os
import signal
import subprocess
import sys
import time
from binascii import hexlify
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado import ioloop
from salt.ext.tornado.iostream import PipeIOStream
from salt.ext.tornado.log import gen_log
from salt.ext.tornado.platform.auto import set_close_exec
from salt.ext.tornado import stack_context
from salt.ext.tornado.util import errno_from_exception, PY3
try:
import multiprocessing
except ImportError:
# Multiprocessing is not available on Google App Engine.
multiprocessing = None
if PY3:
long = int
# Re-export this exception for convenience.
try:
CalledProcessError = subprocess.CalledProcessError
except AttributeError:
# The subprocess module exists in Google App Engine, but is empty.
# This module isn't very useful in that case, but it should
# at least be importable.
if 'APPENGINE_RUNTIME' not in os.environ:
raise
def cpu_count():
"""Returns the number of processors on this machine."""
if multiprocessing is None:
return 1
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
try:
return os.sysconf("SC_NPROCESSORS_CONF")
except (AttributeError, ValueError):
pass
gen_log.error("Could not detect number of processors; assuming 1")
return 1
def _reseed_random():
if 'random' not in sys.modules:
return
import random
# If os.urandom is available, this method does the same thing as
# random.seed (at least as of python 2.6). If os.urandom is not
# available, we mix in the pid in addition to a timestamp.
try:
seed = long(hexlify(os.urandom(16)), 16)
except NotImplementedError:
seed = int(time.time() * 1000) ^ os.getpid()
random.seed(seed)
def _pipe_cloexec():
r, w = os.pipe()
set_close_exec(r)
set_close_exec(w)
return r, w
_task_id = None
def fork_processes(num_processes, max_restarts=100):
"""Starts multiple worker processes.
If ``num_processes`` is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If ``num_processes`` is given and > 0, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``fork_processes``.
In each child process, ``fork_processes`` returns its *task id*, a
number between 0 and ``num_processes``. Processes that exit
abnormally (due to a signal or non-zero exit status) are restarted
with the same id (up to ``max_restarts`` times). In the parent
process, ``fork_processes`` returns None if all child processes
have exited normally, but will otherwise only exit by throwing an
exception.
"""
global _task_id
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = cpu_count()
if ioloop.IOLoop.initialized():
raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
"has already been initialized. You cannot call "
"IOLoop.instance() before calling start_processes()")
gen_log.info("Starting %d processes", num_processes)
children = {}
def start_child(i):
pid = os.fork()
if pid == 0:
# child process
_reseed_random()
global _task_id
_task_id = i
return i
else:
children[pid] = i
return None
for i in range(num_processes):
id = start_child(i)
if id is not None:
return id
num_restarts = 0
while children:
try:
pid, status = os.wait()
except OSError as e:
if errno_from_exception(e) == errno.EINTR:
continue
raise
if pid not in children:
continue
id = children.pop(pid)
if os.WIFSIGNALED(status):
gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
id, pid, os.WTERMSIG(status))
elif os.WEXITSTATUS(status) != 0:
gen_log.warning("child %d (pid %d) exited with status %d, restarting",
id, pid, os.WEXITSTATUS(status))
else:
gen_log.info("child %d (pid %d) exited normally", id, pid)
continue
num_restarts += 1
if num_restarts > max_restarts:
raise RuntimeError("Too many child restarts, giving up")
new_id = start_child(id)
if new_id is not None:
return new_id
# All child processes exited cleanly, so exit the master process
# instead of just returning to right after the call to
# fork_processes (which will probably just start up another IOLoop
# unless the caller checks the return value).
sys.exit(0)
def task_id():
"""Returns the current task id, if any.
Returns None if this process was not created by `fork_processes`.
"""
global _task_id
return _task_id
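# A minimal sketch (the ``make_server`` callable is an assumption, not
# part of this API) of the pattern documented on ``fork_processes``:
# fork before creating any IOLoop, then build per-child resources
# keyed by the task id.
def _example_fork_workers(make_server):
    fork_processes(0)  # None or 0 forks one child per detected core
    server = make_server(task_id())  # runs only in the child processes
    ioloop.IOLoop.current().start()
    return server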
class Subprocess(object):
"""Wraps ``subprocess.Popen`` with IOStream support.
The constructor is the same as ``subprocess.Popen`` with the following
additions:
* ``stdin``, ``stdout``, and ``stderr`` may have the value
``tornado.process.Subprocess.STREAM``, which will make the corresponding
attribute of the resulting Subprocess a `.PipeIOStream`.
* A new keyword argument ``io_loop`` may be used to pass in an IOLoop.
The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
``wait_for_exit`` methods do not work on Windows. There is
therefore no reason to use this class instead of
``subprocess.Popen`` on that platform.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
STREAM = object()
_initialized = False
_waiting = {} # type: ignore
def __init__(self, *args, **kwargs):
self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
# All FDs we create should be closed on error; those in to_close
# should be closed in the parent process on success.
pipe_fds = []
to_close = []
if kwargs.get('stdin') is Subprocess.STREAM:
in_r, in_w = _pipe_cloexec()
kwargs['stdin'] = in_r
pipe_fds.extend((in_r, in_w))
to_close.append(in_r)
self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
if kwargs.get('stdout') is Subprocess.STREAM:
out_r, out_w = _pipe_cloexec()
kwargs['stdout'] = out_w
pipe_fds.extend((out_r, out_w))
to_close.append(out_w)
self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
if kwargs.get('stderr') is Subprocess.STREAM:
err_r, err_w = _pipe_cloexec()
kwargs['stderr'] = err_w
pipe_fds.extend((err_r, err_w))
to_close.append(err_w)
self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
try:
self.proc = subprocess.Popen(*args, **kwargs)
except:
for fd in pipe_fds:
os.close(fd)
raise
for fd in to_close:
os.close(fd)
for attr in ['stdin', 'stdout', 'stderr', 'pid']:
if not hasattr(self, attr): # don't clobber streams set above
setattr(self, attr, getattr(self.proc, attr))
self._exit_callback = None
self.returncode = None
def set_exit_callback(self, callback):
"""Runs ``callback`` when this process exits.
The callback takes one argument, the return code of the process.
This method uses a ``SIGCHLD`` handler, which is a global setting
and may conflict if you have other libraries trying to handle the
same signal. If you are using more than one ``IOLoop`` it may
be necessary to call `Subprocess.initialize` first to designate
one ``IOLoop`` to run the signal handlers.
In many cases a close callback on the stdout or stderr streams
can be used as an alternative to an exit callback if the
signal handler is causing a problem.
"""
self._exit_callback = stack_context.wrap(callback)
Subprocess.initialize(self.io_loop)
Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid)
def wait_for_exit(self, raise_error=True):
"""Returns a `.Future` which resolves when the process exits.
Usage::
ret = yield proc.wait_for_exit()
This is a coroutine-friendly alternative to `set_exit_callback`
(and a replacement for the blocking `subprocess.Popen.wait`).
By default, raises `subprocess.CalledProcessError` if the process
has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
to suppress this behavior and return the exit status without raising.
.. versionadded:: 4.2
"""
future = Future()
def callback(ret):
if ret != 0 and raise_error:
# Unfortunately we don't have the original args any more.
future.set_exception(CalledProcessError(ret, None))
else:
future.set_result(ret)
self.set_exit_callback(callback)
return future
@classmethod
def initialize(cls, io_loop=None):
"""Initializes the ``SIGCHLD`` handler.
The signal handler is run on an `.IOLoop` to avoid locking issues.
Note that the `.IOLoop` used for signal handling need not be the
same one used by individual Subprocess objects (as long as the
``IOLoops`` are each running in separate threads).
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
if cls._initialized:
return
if io_loop is None:
io_loop = ioloop.IOLoop.current()
cls._old_sigchld = signal.signal(
signal.SIGCHLD,
lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
cls._initialized = True
@classmethod
def uninitialize(cls):
"""Removes the ``SIGCHLD`` handler."""
if not cls._initialized:
return
signal.signal(signal.SIGCHLD, cls._old_sigchld)
cls._initialized = False
@classmethod
def _cleanup(cls):
for pid in list(cls._waiting.keys()): # make a copy
cls._try_cleanup_process(pid)
@classmethod
def _try_cleanup_process(cls, pid):
try:
ret_pid, status = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if errno_from_exception(e) == errno.ECHILD:
return
if ret_pid == 0:
return
assert ret_pid == pid
subproc = cls._waiting.pop(pid)
subproc.io_loop.add_callback_from_signal(
subproc._set_returncode, status)
def _set_returncode(self, status):
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
assert os.WIFEXITED(status)
self.returncode = os.WEXITSTATUS(status)
# We've taken over wait() duty from the subprocess.Popen
# object. If we don't inform it of the process's return code,
# it will log a warning at destruction in python 3.6+.
self.proc.returncode = self.returncode
if self._exit_callback:
callback = self._exit_callback
self._exit_callback = None
callback(self.returncode)
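# A minimal sketch (the command is illustrative) of ``Subprocess.STREAM``
# and ``wait_for_exit`` as documented above: capture the child's stdout
# and collect its exit status without raising on non-zero codes.
def _example_capture_stdout(cmd):
    from salt.ext.tornado import gen

    @gen.coroutine
    def run():
        proc = Subprocess(cmd, stdout=Subprocess.STREAM)
        out = yield proc.stdout.read_until_close()
        ret = yield proc.wait_for_exit(raise_error=False)
        raise gen.Return((ret, out))
    return run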
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/process.py
| 0.459319 | 0.154887 |
process.py
|
pypi
|
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import datetime
import numbers
import re
import sys
import os
import textwrap
from salt.ext.tornado.escape import _unicode, native_str
from salt.ext.tornado.log import define_logging_options
from salt.ext.tornado import stack_context
from salt.ext.tornado.util import basestring_type, exec_in
class Error(Exception):
"""Exception raised by errors in the options module."""
pass
class OptionParser(object):
"""A collection of options, a dictionary with object-like access.
Normally accessed via static functions in the `tornado.options` module,
which reference a global instance.
"""
def __init__(self):
# we have to use self.__dict__ because we override setattr.
self.__dict__['_options'] = {}
self.__dict__['_parse_callbacks'] = []
self.define("help", type=bool, help="show this help information",
callback=self._help_callback)
def _normalize_name(self, name):
return name.replace('_', '-')
def __getattr__(self, name):
name = self._normalize_name(name)
if isinstance(self._options.get(name), _Option):
return self._options[name].value()
raise AttributeError("Unrecognized option %r" % name)
def __setattr__(self, name, value):
name = self._normalize_name(name)
if isinstance(self._options.get(name), _Option):
return self._options[name].set(value)
raise AttributeError("Unrecognized option %r" % name)
def __iter__(self):
return (opt.name for opt in self._options.values())
def __contains__(self, name):
name = self._normalize_name(name)
return name in self._options
def __getitem__(self, name):
return self.__getattr__(name)
def __setitem__(self, name, value):
return self.__setattr__(name, value)
def items(self):
"""A sequence of (name, value) pairs.
.. versionadded:: 3.1
"""
return [(opt.name, opt.value()) for name, opt in self._options.items()]
def groups(self):
"""The set of option-groups created by ``define``.
.. versionadded:: 3.1
"""
return set(opt.group_name for opt in self._options.values())
def group_dict(self, group):
"""The names and values of options in a group.
Useful for copying options into Application settings::
from salt.ext.tornado.options import define, parse_command_line, options
define('template_path', group='application')
define('static_path', group='application')
parse_command_line()
application = Application(
handlers, **options.group_dict('application'))
.. versionadded:: 3.1
"""
return dict(
(opt.name, opt.value()) for name, opt in self._options.items()
if not group or group == opt.group_name)
def as_dict(self):
"""The names and values of all options.
.. versionadded:: 3.1
"""
return dict(
(opt.name, opt.value()) for name, opt in self._options.items())
def define(self, name, default=None, type=None, help=None, metavar=None,
multiple=False, group=None, callback=None):
"""Defines a new command line option.
If ``type`` is given (one of str, float, int, datetime, or timedelta)
or can be inferred from the ``default``, we parse the command line
arguments based on the given type. If ``multiple`` is True, we accept
comma-separated values, and the option value is always a list.
        For multi-value integers, we also accept the syntax ``x:y``, which
        expands to the inclusive integer range from ``x`` through ``y`` -
        very useful for long integer ranges.
``help`` and ``metavar`` are used to construct the
automatically generated command line help string. The help
message is formatted like::
--name=METAVAR help string
``group`` is used to group the defined options in logical
groups. By default, command line options are grouped by the
file in which they are defined.
Command line option names must be unique globally. They can be parsed
from the command line with `parse_command_line` or parsed from a
config file with `parse_config_file`.
If a ``callback`` is given, it will be run with the new value whenever
the option is changed. This can be used to combine command-line
and file-based options::
define("config", type=str, help="path to config file",
callback=lambda path: parse_config_file(path, final=False))
With this definition, options in the file specified by ``--config`` will
override options set earlier on the command line, but can be overridden
by later flags.
"""
normalized = self._normalize_name(name)
if normalized in self._options:
raise Error("Option %r already defined in %s" %
(normalized, self._options[normalized].file_name))
frame = sys._getframe(0)
options_file = frame.f_code.co_filename
# Can be called directly, or through top level define() fn, in which
# case, step up above that frame to look for real caller.
if (frame.f_back.f_code.co_filename == options_file and
frame.f_back.f_code.co_name == 'define'):
frame = frame.f_back
file_name = frame.f_back.f_code.co_filename
if file_name == options_file:
file_name = ""
if type is None:
if not multiple and default is not None:
type = default.__class__
else:
type = str
if group:
group_name = group
else:
group_name = file_name
option = _Option(name, file_name=file_name,
default=default, type=type, help=help,
metavar=metavar, multiple=multiple,
group_name=group_name,
callback=callback)
self._options[normalized] = option
def parse_command_line(self, args=None, final=True):
"""Parses all options given on the command line (defaults to
`sys.argv`).
Note that ``args[0]`` is ignored since it is the program name
in `sys.argv`.
We return a list of all arguments that are not parsed as options.
If ``final`` is ``False``, parse callbacks will not be run.
This is useful for applications that wish to combine configurations
from multiple sources.
"""
if args is None:
args = sys.argv
remaining = []
for i in range(1, len(args)):
# All things after the last option are command line arguments
if not args[i].startswith("-"):
remaining = args[i:]
break
if args[i] == "--":
remaining = args[i + 1:]
break
arg = args[i].lstrip("-")
name, equals, value = arg.partition("=")
name = self._normalize_name(name)
if name not in self._options:
self.print_help()
raise Error('Unrecognized command line option: %r' % name)
option = self._options[name]
if not equals:
if option.type == bool:
value = "true"
else:
raise Error('Option %r requires a value' % name)
option.parse(value)
if final:
self.run_parse_callbacks()
return remaining
def parse_config_file(self, path, final=True):
"""Parses and loads the Python config file at the given path.
If ``final`` is ``False``, parse callbacks will not be run.
This is useful for applications that wish to combine configurations
from multiple sources.
.. versionchanged:: 4.1
Config files are now always interpreted as utf-8 instead of
the system default encoding.
.. versionchanged:: 4.4
The special variable ``__file__`` is available inside config
files, specifying the absolute path to the config file itself.
"""
config = {'__file__': os.path.abspath(path)}
with open(path, 'rb') as f:
exec_in(native_str(f.read()), config, config)
for name in config:
normalized = self._normalize_name(name)
if normalized in self._options:
self._options[normalized].set(config[name])
if final:
self.run_parse_callbacks()
def print_help(self, file=None):
"""Prints all the command line options to stderr (or another file)."""
if file is None:
file = sys.stderr
print("Usage: %s [OPTIONS]" % sys.argv[0], file=file)
print("\nOptions:\n", file=file)
by_group = {}
for option in self._options.values():
by_group.setdefault(option.group_name, []).append(option)
for filename, o in sorted(by_group.items()):
if filename:
print("\n%s options:\n" % os.path.normpath(filename), file=file)
o.sort(key=lambda option: option.name)
for option in o:
# Always print names with dashes in a CLI context.
prefix = self._normalize_name(option.name)
if option.metavar:
prefix += "=" + option.metavar
description = option.help or ""
if option.default is not None and option.default != '':
description += " (default %s)" % option.default
lines = textwrap.wrap(description, 79 - 35)
if len(prefix) > 30 or len(lines) == 0:
lines.insert(0, '')
print(" --%-30s %s" % (prefix, lines[0]), file=file)
for line in lines[1:]:
print("%-34s %s" % (' ', line), file=file)
print(file=file)
def _help_callback(self, value):
if value:
self.print_help()
sys.exit(0)
def add_parse_callback(self, callback):
"""Adds a parse callback, to be invoked when option parsing is done."""
self._parse_callbacks.append(stack_context.wrap(callback))
def run_parse_callbacks(self):
for callback in self._parse_callbacks:
callback()
def mockable(self):
"""Returns a wrapper around self that is compatible with
`mock.patch <unittest.mock.patch>`.
The `mock.patch <unittest.mock.patch>` function (included in
the standard library `unittest.mock` package since Python 3.3,
or in the third-party ``mock`` package for older versions of
Python) is incompatible with objects like ``options`` that
override ``__getattr__`` and ``__setattr__``. This function
returns an object that can be used with `mock.patch.object
<unittest.mock.patch.object>` to modify option values::
with mock.patch.object(options.mockable(), 'name', value):
assert options.name == value
"""
return _Mockable(self)
class _Mockable(object):
"""`mock.patch` compatible wrapper for `OptionParser`.
As of ``mock`` version 1.0.1, when an object uses ``__getattr__``
hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete
the attribute it set instead of setting a new one (assuming that
    the object does not capture ``__setattr__``, so the patch
created a new attribute in ``__dict__``).
_Mockable's getattr and setattr pass through to the underlying
OptionParser, and delattr undoes the effect of a previous setattr.
"""
def __init__(self, options):
# Modify __dict__ directly to bypass __setattr__
self.__dict__['_options'] = options
self.__dict__['_originals'] = {}
def __getattr__(self, name):
return getattr(self._options, name)
def __setattr__(self, name, value):
assert name not in self._originals, "don't reuse mockable objects"
self._originals[name] = getattr(self._options, name)
setattr(self._options, name, value)
def __delattr__(self, name):
setattr(self._options, name, self._originals.pop(name))
class _Option(object):
UNSET = object()
def __init__(self, name, default=None, type=basestring_type, help=None,
metavar=None, multiple=False, file_name=None, group_name=None,
callback=None):
if default is None and multiple:
default = []
self.name = name
self.type = type
self.help = help
self.metavar = metavar
self.multiple = multiple
self.file_name = file_name
self.group_name = group_name
self.callback = callback
self.default = default
self._value = _Option.UNSET
def value(self):
return self.default if self._value is _Option.UNSET else self._value
def parse(self, value):
_parse = {
datetime.datetime: self._parse_datetime,
datetime.timedelta: self._parse_timedelta,
bool: self._parse_bool,
basestring_type: self._parse_string,
}.get(self.type, self.type)
if self.multiple:
self._value = []
for part in value.split(","):
if issubclass(self.type, numbers.Integral):
# allow ranges of the form X:Y (inclusive at both ends)
lo, _, hi = part.partition(":")
lo = _parse(lo)
hi = _parse(hi) if hi else lo
self._value.extend(range(lo, hi + 1))
else:
self._value.append(_parse(part))
else:
self._value = _parse(value)
if self.callback is not None:
self.callback(self._value)
return self.value()
def set(self, value):
if self.multiple:
if not isinstance(value, list):
raise Error("Option %r is required to be a list of %s" %
(self.name, self.type.__name__))
for item in value:
if item is not None and not isinstance(item, self.type):
raise Error("Option %r is required to be a list of %s" %
(self.name, self.type.__name__))
else:
if value is not None and not isinstance(value, self.type):
raise Error("Option %r is required to be a %s (%s given)" %
(self.name, self.type.__name__, type(value)))
self._value = value
if self.callback is not None:
self.callback(self._value)
# Supported date/time formats in our options
_DATETIME_FORMATS = [
"%a %b %d %H:%M:%S %Y",
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M",
"%Y-%m-%dT%H:%M",
"%Y%m%d %H:%M:%S",
"%Y%m%d %H:%M",
"%Y-%m-%d",
"%Y%m%d",
"%H:%M:%S",
"%H:%M",
]
def _parse_datetime(self, value):
for format in self._DATETIME_FORMATS:
try:
return datetime.datetime.strptime(value, format)
except ValueError:
pass
raise Error('Unrecognized date/time format: %r' % value)
_TIMEDELTA_ABBREV_DICT = {
'h': 'hours',
'm': 'minutes',
'min': 'minutes',
's': 'seconds',
'sec': 'seconds',
'ms': 'milliseconds',
'us': 'microseconds',
'd': 'days',
'w': 'weeks',
}
_FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
_TIMEDELTA_PATTERN = re.compile(
r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE)
def _parse_timedelta(self, value):
try:
sum = datetime.timedelta()
start = 0
while start < len(value):
m = self._TIMEDELTA_PATTERN.match(value, start)
if not m:
raise Exception()
num = float(m.group(1))
units = m.group(2) or 'seconds'
units = self._TIMEDELTA_ABBREV_DICT.get(units, units)
sum += datetime.timedelta(**{units: num})
start = m.end()
return sum
except Exception:
raise
def _parse_bool(self, value):
return value.lower() not in ("false", "0", "f")
def _parse_string(self, value):
return _unicode(value)
options = OptionParser()
"""Global options object.
All defined options are available as attributes on this object.
"""
def define(name, default=None, type=None, help=None, metavar=None,
multiple=False, group=None, callback=None):
"""Defines an option in the global namespace.
See `OptionParser.define`.
"""
return options.define(name, default=default, type=type, help=help,
metavar=metavar, multiple=multiple, group=group,
callback=callback)
def parse_command_line(args=None, final=True):
"""Parses global options from the command line.
See `OptionParser.parse_command_line`.
"""
return options.parse_command_line(args, final=final)
def parse_config_file(path, final=True):
"""Parses global options from a config file.
See `OptionParser.parse_config_file`.
"""
return options.parse_config_file(path, final=final)
def print_help(file=None):
"""Prints all the command line options to stderr (or another file).
See `OptionParser.print_help`.
"""
return options.print_help(file)
def add_parse_callback(callback):
"""Adds a parse callback, to be invoked when option parsing is done.
See `OptionParser.add_parse_callback`
"""
options.add_parse_callback(callback)
# Default options
define_logging_options(options)
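# A minimal sketch (option names and values are illustrative) of the
# features documented on ``define``: typed options, grouping, and the
# inclusive ``x:y`` range syntax for ``multiple`` integer options.
def _example_define_and_parse(argv):
    define("port", default=8888, type=int, help="listen port", group="app")
    define("ids", type=int, multiple=True,
           help="accepts e.g. 1,3,5:8 -> [1, 3, 5, 6, 7, 8]")
    remaining = parse_command_line(argv)  # e.g. ["prog", "--ids=1,5:8"]
    return options.port, options.ids, remaining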
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/options.py
| 0.670177 | 0.170923 |
options.py
|
pypi
|
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import sys
import threading
from salt.ext.tornado.util import raise_exc_info
class StackContextInconsistentError(Exception):
pass
class _State(threading.local):
def __init__(self):
self.contexts = (tuple(), None)
_state = _State()
class StackContext(object):
"""Establishes the given context as a StackContext that will be transferred.
Note that the parameter is a callable that returns a context
manager, not the context itself. That is, where for a
non-transferable context manager you would say::
with my_context():
StackContext takes the function itself rather than its result::
with StackContext(my_context):
The result of ``with StackContext() as cb:`` is a deactivation
callback. Run this callback when the StackContext is no longer
needed to ensure that it is not propagated any further (note that
deactivating a context does not affect any instances of that
context that are currently pending). This is an advanced feature
and not necessary in most applications.
"""
def __init__(self, context_factory):
self.context_factory = context_factory
self.contexts = []
self.active = True
def _deactivate(self):
self.active = False
# StackContext protocol
def enter(self):
context = self.context_factory()
self.contexts.append(context)
context.__enter__()
def exit(self, type, value, traceback):
context = self.contexts.pop()
context.__exit__(type, value, traceback)
# Note that some of this code is duplicated in ExceptionStackContext
# below. ExceptionStackContext is more common and doesn't need
# the full generality of this class.
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0] + (self,), self)
_state.contexts = self.new_contexts
try:
self.enter()
except:
_state.contexts = self.old_contexts
raise
return self._deactivate
def __exit__(self, type, value, traceback):
try:
self.exit(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
# Generator coroutines and with-statements with non-local
# effects interact badly. Check here for signs of
# the stack getting out of sync.
            # Note that this check comes after restoring _state.contexts
# so that if it fails things are left in a (relatively)
# consistent state.
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
# Break up a reference to itself to allow for faster GC on CPython.
self.new_contexts = None
class ExceptionStackContext(object):
"""Specialization of StackContext for exception handling.
The supplied ``exception_handler`` function will be called in the
event of an uncaught exception in this context. The semantics are
similar to a try/finally clause, and intended use cases are to log
an error, close a socket, or similar cleanup actions. The
``exc_info`` triple ``(type, value, traceback)`` will be passed to the
exception_handler function.
If the exception handler returns true, the exception will be
consumed and will not be propagated to other exception handlers.
"""
def __init__(self, exception_handler):
self.exception_handler = exception_handler
self.active = True
def _deactivate(self):
self.active = False
def exit(self, type, value, traceback):
if type is not None:
return self.exception_handler(type, value, traceback)
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0], self)
_state.contexts = self.new_contexts
return self._deactivate
def __exit__(self, type, value, traceback):
try:
if type is not None:
return self.exception_handler(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
# Break up a reference to itself to allow for faster GC on CPython.
self.new_contexts = None
class NullContext(object):
"""Resets the `StackContext`.
Useful when creating a shared resource on demand (e.g. an
    `.AsyncHTTPClient`) where the stack that triggered the creation is
not relevant to future operations.
"""
def __enter__(self):
self.old_contexts = _state.contexts
_state.contexts = (tuple(), None)
def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
def _remove_deactivated(contexts):
"""Remove deactivated handlers from the chain"""
# Clean ctx handlers
stack_contexts = tuple([h for h in contexts[0] if h.active])
# Find new head
head = contexts[1]
while head is not None and not head.active:
head = head.old_contexts[1]
# Process chain
ctx = head
while ctx is not None:
parent = ctx.old_contexts[1]
while parent is not None:
if parent.active:
break
ctx.old_contexts = parent.old_contexts
parent = parent.old_contexts[1]
ctx = parent
return (stack_contexts, head)
def wrap(fn):
"""Returns a callable object that will restore the current `StackContext`
when executed.
Use this whenever saving a callback to be executed later in a
different execution context (either in a different thread or
asynchronously in the same thread).
"""
# Check if function is already wrapped
if fn is None or hasattr(fn, '_wrapped'):
return fn
# Capture current stack head
# TODO: Any other better way to store contexts and update them in wrapped function?
cap_contexts = [_state.contexts]
if not cap_contexts[0][0] and not cap_contexts[0][1]:
# Fast path when there are no active contexts.
def null_wrapper(*args, **kwargs):
try:
current_state = _state.contexts
_state.contexts = cap_contexts[0]
return fn(*args, **kwargs)
finally:
_state.contexts = current_state
null_wrapper._wrapped = True
return null_wrapper
def wrapped(*args, **kwargs):
ret = None
try:
# Capture old state
current_state = _state.contexts
# Remove deactivated items
cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
# Force new state
_state.contexts = contexts
# Current exception
exc = (None, None, None)
top = None
# Apply stack contexts
last_ctx = 0
stack = contexts[0]
# Apply state
for n in stack:
try:
n.enter()
last_ctx += 1
except:
# Exception happened. Record exception info and store top-most handler
exc = sys.exc_info()
top = n.old_contexts[1]
# Execute callback if no exception happened while restoring state
if top is None:
try:
ret = fn(*args, **kwargs)
except:
exc = sys.exc_info()
top = contexts[1]
# If there was exception, try to handle it by going through the exception chain
if top is not None:
exc = _handle_exception(top, exc)
else:
# Otherwise take shorter path and run stack contexts in reverse order
while last_ctx > 0:
last_ctx -= 1
c = stack[last_ctx]
try:
c.exit(*exc)
except:
exc = sys.exc_info()
top = c.old_contexts[1]
break
else:
top = None
            # If an exception happened while unrolling, take the longer exception handler path
if top is not None:
exc = _handle_exception(top, exc)
# If exception was not handled, raise it
if exc != (None, None, None):
raise_exc_info(exc)
finally:
_state.contexts = current_state
return ret
wrapped._wrapped = True
return wrapped
def _handle_exception(tail, exc):
while tail is not None:
try:
if tail.exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
tail = tail.old_contexts[1]
return exc
def run_with_stack_context(context, func):
"""Run a coroutine ``func`` in the given `StackContext`.
It is not safe to have a ``yield`` statement within a ``with StackContext``
block, so it is difficult to use stack context with `.gen.coroutine`.
This helper function runs the function in the correct context while
keeping the ``yield`` and ``with`` statements syntactically separate.
Example::
@gen.coroutine
def incorrect():
with StackContext(ctx):
# ERROR: this will raise StackContextInconsistentError
yield other_coroutine()
@gen.coroutine
def correct():
yield run_with_stack_context(StackContext(ctx), other_coroutine)
.. versionadded:: 3.1
"""
with context:
return func()
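# A minimal sketch (the failing callback is illustrative) of the
# ExceptionStackContext pattern described above: an error raised later
# inside a wrapped callback is delivered to the handler that was active
# when the callback was scheduled.
def _example_async_error_handler(io_loop):
    def on_error(typ, value, tb):
        print("caught asynchronously: %r" % (value,))
        return True  # consume the exception

    def fails_later():
        raise ValueError("boom")

    with ExceptionStackContext(on_error):
        io_loop.add_callback(fails_later)  # add_callback wraps for us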
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/stack_context.py
| 0.601242 | 0.204223 |
stack_context.py
|
pypi
|
"""KQueue-based IOLoop implementation for BSD/Mac systems."""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import select
from salt.ext.tornado.ioloop import IOLoop, PollIOLoop
assert hasattr(select, 'kqueue'), 'kqueue not supported'
class _KQueue(object):
"""A kqueue-based event loop for BSD/Mac systems."""
def __init__(self):
self._kqueue = select.kqueue()
self._active = {}
def fileno(self):
return self._kqueue.fileno()
def close(self):
self._kqueue.close()
def register(self, fd, events):
if fd in self._active:
raise IOError("fd %s already registered" % fd)
self._control(fd, events, select.KQ_EV_ADD)
self._active[fd] = events
def modify(self, fd, events):
self.unregister(fd)
self.register(fd, events)
def unregister(self, fd):
events = self._active.pop(fd)
self._control(fd, events, select.KQ_EV_DELETE)
def _control(self, fd, events, flags):
kevents = []
if events & IOLoop.WRITE:
kevents.append(select.kevent(
fd, filter=select.KQ_FILTER_WRITE, flags=flags))
if events & IOLoop.READ:
kevents.append(select.kevent(
fd, filter=select.KQ_FILTER_READ, flags=flags))
# Even though control() takes a list, it seems to return EINVAL
# on Mac OS X (10.6) when there is more than one event in the list.
for kevent in kevents:
self._kqueue.control([kevent], 0)
def poll(self, timeout):
kevents = self._kqueue.control(None, 1000, timeout)
events = {}
for kevent in kevents:
fd = kevent.ident
if kevent.filter == select.KQ_FILTER_READ:
events[fd] = events.get(fd, 0) | IOLoop.READ
if kevent.filter == select.KQ_FILTER_WRITE:
if kevent.flags & select.KQ_EV_EOF:
# If an asynchronous connection is refused, kqueue
# returns a write event with the EOF flag set.
# Turn this into an error for consistency with the
# other IOLoop implementations.
# Note that for read events, EOF may be returned before
# all data has been consumed from the socket buffer,
# so we only check for EOF on write events.
events[fd] = IOLoop.ERROR
else:
events[fd] = events.get(fd, 0) | IOLoop.WRITE
if kevent.flags & select.KQ_EV_ERROR:
events[fd] = events.get(fd, 0) | IOLoop.ERROR
return events.items()
class KQueueIOLoop(PollIOLoop):
def initialize(self, **kwargs):
super(KQueueIOLoop, self).initialize(impl=_KQueue(), **kwargs)
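# A minimal sketch (illustrative only): explicitly selecting this loop
# implementation, which PollIOLoop otherwise picks automatically on
# platforms where ``select.kqueue`` is available.
def _example_use_kqueue_loop():
    IOLoop.configure(KQueueIOLoop)
    return IOLoop.current()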
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/ext/tornado/platform/kqueue.py
| 0.813609 | 0.155367 |
kqueue.py
|
pypi
|
import hashlib
import logging
import os
import salt.payload
import salt.utils.files
import salt.utils.path
import salt.utils.verify
log = logging.getLogger(__name__)
__virtualname__ = "localfs"
def mk_token(opts, tdata):
"""
    Mint a new token using the configured ``hash_type`` and store ``tdata``
    with its 'token' attribute set to the token.
    This module uses the hash of 512 random bytes as the token.
:param opts: Salt master config options
:param tdata: Token data to be stored with 'token' attribute of this dict set to the token.
:returns: tdata with token if successful. Empty dict if failed.
"""
hash_type = getattr(hashlib, opts.get("hash_type", "md5"))
tok = str(hash_type(os.urandom(512)).hexdigest())
t_path = os.path.join(opts["token_dir"], tok)
temp_t_path = "{}.tmp".format(t_path)
while os.path.isfile(t_path):
tok = str(hash_type(os.urandom(512)).hexdigest())
t_path = os.path.join(opts["token_dir"], tok)
tdata["token"] = tok
try:
with salt.utils.files.set_umask(0o177):
with salt.utils.files.fopen(temp_t_path, "w+b") as fp_:
fp_.write(salt.payload.dumps(tdata))
os.rename(temp_t_path, t_path)
except OSError:
log.warning('Authentication failure: can not write token file "%s".', t_path)
return {}
return tdata
def get_token(opts, tok):
"""
Fetch the token data from the store.
:param opts: Salt master config options
:param tok: Token value to get
:returns: Token data if successful. Empty dict if failed.
"""
t_path = os.path.join(opts["token_dir"], tok)
if not salt.utils.verify.clean_path(opts["token_dir"], t_path):
return {}
if not os.path.isfile(t_path):
return {}
try:
with salt.utils.files.fopen(t_path, "rb") as fp_:
tdata = salt.payload.loads(fp_.read())
return tdata
except OSError:
log.warning('Authentication failure: can not read token file "%s".', t_path)
return {}
def rm_token(opts, tok):
"""
Remove token from the store.
:param opts: Salt master config options
:param tok: Token to remove
:returns: Empty dict if successful. None if failed.
"""
t_path = os.path.join(opts["token_dir"], tok)
try:
os.remove(t_path)
return {}
except OSError:
log.warning("Could not remove token %s", tok)
def list_tokens(opts):
"""
List all tokens in the store.
:param opts: Salt master config options
:returns: List of dicts (tokens)
"""
ret = []
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(opts["token_dir"]):
for token in filenames:
ret.append(token)
return ret
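# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a hedged,
# self-contained walk through the token life-cycle implemented above, using a
# throw-away token_dir. The minimal `opts` dict below is an assumption; a
# real master config carries many more keys.
if __name__ == "__main__":
    import tempfile
    opts = {"token_dir": tempfile.mkdtemp(), "hash_type": "sha256"}
    tdata = mk_token(opts, {"name": "alice", "eauth": "pam"})
    assert tdata and "token" in tdata  # token minted and written to disk
    assert get_token(opts, tdata["token"])["name"] == "alice"  # round-trips
    assert tdata["token"] in list_tokens(opts)  # shows up in the listing
    rm_token(opts, tdata["token"])
    assert tdata["token"] not in list_tokens(opts)  # gone after removal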
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/tokens/localfs.py
| 0.544801 | 0.175786 |
localfs.py
|
pypi
|
r"""
Renderer that will decrypt NACL ciphers
Any key in the SLS file can be an NACL cipher, and this renderer will decrypt it
before passing it off to Salt. This allows you to safely store secrets in
source control, in such a way that only your Salt master can decrypt them and
distribute them only to the minions that need them.
The typical use-case would be to use ciphers in your pillar data, and keep a
secret key on your master. You can put the public key in source control so that
developers can add new secrets quickly and easily.
This renderer requires the libsodium library binary and libnacl >= 1.5.1
python package (support for sealed boxes came in 1.5.1 version).
Setup
-----
To set things up, first generate a keypair. On the master, run the following:
.. code-block:: bash
# salt-call --local nacl.keygen sk_file=/root/.nacl
Using encrypted pillar
----------------------
To encrypt secrets, copy the public key to your local machine and run:
.. code-block:: bash
$ salt-call --local nacl.enc datatoenc pk_file=/root/.nacl.pub
To apply the renderer on a file-by-file basis add the following line to the
top of any pillar with nacl encrypted data in it:
.. code-block:: yaml
#!yaml|nacl
Now with your renderer configured, you can include your ciphers in your pillar
data like so:
.. code-block:: yaml
#!yaml|nacl
a-secret: "NACL[MRN3cc+fmdxyQbz6WMF+jq1hKdU5X5BBI7OjK+atvHo1ll+w1gZ7XyWtZVfq9gK9rQaMfkDxmidJKwE0Mw==]"
"""
import logging
import re
import salt.syspaths
import salt.utils.stringio
log = logging.getLogger(__name__)
NACL_REGEX = r"^NACL\[(.*)\]$"
def _decrypt_object(obj, **kwargs):
"""
    Recursively try to decrypt any object. If the object is a str and it
    contains a valid ``NACL[...]`` prefix, decrypt it; otherwise keep recursing
    until a string is found.
"""
if salt.utils.stringio.is_readable(obj):
return _decrypt_object(obj.getvalue(), **kwargs)
if isinstance(obj, str):
if re.search(NACL_REGEX, obj) is not None:
return __salt__["nacl.dec"](re.search(NACL_REGEX, obj).group(1), **kwargs)
else:
return obj
elif isinstance(obj, dict):
for key, value in obj.items():
obj[key] = _decrypt_object(value, **kwargs)
return obj
elif isinstance(obj, list):
for key, value in enumerate(obj):
obj[key] = _decrypt_object(value, **kwargs)
return obj
else:
return obj
def render(nacl_data, saltenv="base", sls="", argline="", **kwargs):
"""
Decrypt the data to be rendered using the given nacl key or the one given
in config
"""
return _decrypt_object(nacl_data, **kwargs)
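# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the recursive
# decrypt pattern above, demonstrated standalone with a stub in place of
# __salt__["nacl.dec"]. `fake_dec` is hypothetical and merely reverses the
# payload, so no libsodium/libnacl is needed to see how nested data is walked.
if __name__ == "__main__":
    def fake_dec(data):
        return data[::-1]
    def walk(obj):
        if isinstance(obj, str):
            match = re.search(NACL_REGEX, obj)
            return fake_dec(match.group(1)) if match else obj
        if isinstance(obj, dict):
            return {key: walk(value) for key, value in obj.items()}
        if isinstance(obj, list):
            return [walk(value) for value in obj]
        return obj
    pillar = {"plain": "value", "secret": "NACL[terces]", "nested": ["NACL[cba]"]}
    print(walk(pillar))  # {'plain': 'value', 'secret': 'secret', 'nested': ['abc']}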
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/renderers/nacl.py
| 0.566738 | 0.384594 |
nacl.py
|
pypi
|
import logging
import warnings
import salt.utils.url
import salt.utils.yamlloader as yamlloader_new
import salt.utils.yamlloader_old as yamlloader_old
from salt.exceptions import SaltRenderError
from salt.utils.odict import OrderedDict
from yaml.constructor import ConstructorError
from yaml.parser import ParserError
from yaml.scanner import ScannerError
log = logging.getLogger(__name__)
_ERROR_MAP = {
"found character '\\t' that cannot start any token": "Illegal tab character"
}
def get_yaml_loader(argline):
"""
Return the ordered dict yaml loader
"""
def yaml_loader(*args):
if __opts__.get("use_yamlloader_old"):
yamlloader = yamlloader_old
else:
yamlloader = yamlloader_new
return yamlloader.SaltYamlSafeLoader(*args, dictclass=OrderedDict)
return yaml_loader
def render(yaml_data, saltenv="base", sls="", argline="", **kws):
"""
Accepts YAML as a string or as a file object and runs it through the YAML
parser.
:rtype: A Python data structure
"""
if __opts__.get("use_yamlloader_old"):
log.warning(
"Using the old YAML Loader for rendering, "
"consider disabling this and using the tojson"
" filter."
)
yamlloader = yamlloader_old
else:
yamlloader = yamlloader_new
if not isinstance(yaml_data, str):
yaml_data = yaml_data.read()
with warnings.catch_warnings(record=True) as warn_list:
try:
data = yamlloader.load(yaml_data, Loader=get_yaml_loader(argline))
except ScannerError as exc:
err_type = _ERROR_MAP.get(exc.problem, exc.problem)
line_num = exc.problem_mark.line + 1
raise SaltRenderError(err_type, line_num, exc.problem_mark.buffer)
except (ParserError, ConstructorError) as exc:
raise SaltRenderError(exc)
if len(warn_list) > 0:
for item in warn_list:
log.warning(
"%s found in %s saltenv=%s",
item.message,
salt.utils.url.create(sls),
saltenv,
)
if not data:
data = {}
def _validate_data(data):
"""
        PyYAML will for some reason allow improper YAML to be formed into
        a dict with an unhashable key (that is, a dict used as a key). This
        function recursively goes through the data and checks the keys to
        make sure none of them are dicts.
"""
if isinstance(data, dict):
for key, value in data.items():
if isinstance(key, dict):
raise SaltRenderError(
"Invalid YAML, possible double curly-brace"
)
_validate_data(value)
elif isinstance(data, list):
for item in data:
_validate_data(item)
_validate_data(data)
return data
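# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the error-mapping
# path above in isolation. Plain PyYAML raises a ScannerError whose `problem`
# string matches the _ERROR_MAP key that render() translates for illegal tabs.
if __name__ == "__main__":
    import yaml
    try:
        yaml.safe_load("\tkey: value")
    except ScannerError as exc:
        err_type = _ERROR_MAP.get(exc.problem, exc.problem)
        print(err_type, "on line", exc.problem_mark.line + 1)
        # -> Illegal tab character on line 1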
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/renderers/yaml.py
| 0.527317 | 0.187542 |
yaml.py
|
pypi
|
r"""
Renderer that will decrypt GPG ciphers
Any key in the SLS file can be a GPG cipher, and this renderer will decrypt it
before passing it off to Salt. This allows you to safely store secrets in
source control, in such a way that only your Salt master can decrypt them and
distribute them only to the minions that need them.
The typical use-case would be to use ciphers in your pillar data, and keep a
secret key on your master. You can put the public key in source control so that
developers can add new secrets quickly and easily.
This renderer requires the gpg_ binary. No python libraries are required as of
the 2015.8.0 release.
.. _gpg-homedir:
GPG Homedir
-----------
When running gpg commands, it is important to run commands as the user that owns
the keys directory. If salt-master runs as user salt, then ``su salt`` before
running any gpg commands.
To avoid compatibility and upgrade problems and to provide a standardized location
for keys, salt uses ``/etc/salt/gpgkeys``. In order to make the gpg command use
this directory, use ``gpg --homedir /etc/salt/gpgkeys`` with gpg commands, or set
the ``GNUPGHOME`` environment variable for that user (``export GNUPGHOME=/etc/salt/gpgkeys``).
.. _gpg: https://gnupg.org
Setup
-----
To set things up, first generate a keypair. On the master, run the following:
.. code-block:: bash
# mkdir -p /etc/salt/gpgkeys
# chmod 0700 /etc/salt/gpgkeys
# gpg --gen-key --homedir /etc/salt/gpgkeys
Do not supply a password for the keypair, and use a name that makes sense for
your application. Be sure to back up the ``gpgkeys`` directory someplace safe!
.. note::
Unfortunately, there are some scenarios - for example, on virtual machines
which don’t have real hardware - where insufficient entropy causes key
generation to be extremely slow. In these cases, there are usually means of
increasing the system entropy. On virtualised Linux systems, this can often
be achieved by installing the ``rng-tools`` package.
Import keys to a master
***********************
If the keys already exist and need to be imported to the salt master, run the
following to import them.
.. code-block:: bash
gpg --homedir /etc/salt/gpgkeys --import /path/to/private.key
gpg --homedir /etc/salt/gpgkeys --import /path/to/pubkey.gpg
Note: The default `GPG Homedir <gpg-homedir>` is ``~/.gnupg`` and needs to be
set using ``--homedir``.
Adjust trust level of imported keys
***********************************
In some cases, importing existing keys may not be enough and the trust level of
the key needs to be adjusted. This can be done by editing the key. The ``key_id``
and the actual trust level of the key can be seen by listing the already imported
keys.
.. code-block:: bash
gpg --homedir /etc/salt/gpgkeys --list-keys
gpg --homedir /etc/salt/gpgkeys --list-secret-keys
If the trust-level is not ``ultimate`` it needs to be changed by running
.. code-block:: bash
gpg --homedir /etc/salt/gpgkeys --edit-key <key_id>
This will open an interactive shell for the management of the GPG encryption key.
Type ``trust`` to be able to set the trust level for the key and then select ``5
(I trust ultimately)``. Then quit the shell by typing ``save``.
Different GPG Location
**********************
In some cases, it's preferable to have gpg keys stored on removable media or
other non-standard locations. This can be done using the ``gpg_keydir`` option
on the salt master. This will also require using a different path to ``--homedir``,
as mentioned in the `GPG Homedir <gpg-homedir>` section.
.. code-block:: bash
gpg_keydir: <path/to/homedir>
Export the Public Key
---------------------
.. code-block:: bash
# gpg --homedir /etc/salt/gpgkeys --armor --export <KEY-NAME> > exported_pubkey.gpg
Import the Public Key
---------------------
To encrypt secrets, copy the public key to your local machine and run:
.. code-block:: bash
$ gpg --import exported_pubkey.gpg
To generate a cipher from a secret:
.. code-block:: bash
$ echo -n "supersecret" | gpg --armor --batch --trust-model always --encrypt -r <KEY-name>
To apply the renderer on a file-by-file basis add the following line to the
top of any pillar with gpg data in it:
.. code-block:: yaml
#!yaml|gpg
Now with your renderer configured, you can include your ciphers in your pillar
data like so:
.. code-block:: yaml
#!yaml|gpg
a-secret: |
-----BEGIN PGP MESSAGE-----
Version: GnuPG v1
hQEMAweRHKaPCfNeAQf9GLTN16hCfXAbPwU6BbBK0unOc7i9/etGuVc5CyU9Q6um
QuetdvQVLFO/HkrC4lgeNQdM6D9E8PKonMlgJPyUvC8ggxhj0/IPFEKmrsnv2k6+
cnEfmVexS7o/U1VOVjoyUeliMCJlAz/30RXaME49Cpi6No2+vKD8a4q4nZN1UZcG
RhkhC0S22zNxOXQ38TBkmtJcqxnqT6YWKTUsjVubW3bVC+u2HGqJHu79wmwuN8tz
m4wBkfCAd8Eyo2jEnWQcM4TcXiF01XPL4z4g1/9AAxh+Q4d8RIRP4fbw7ct4nCJv
Gr9v2DTF7HNigIMl4ivMIn9fp+EZurJNiQskLgNbktJGAeEKYkqX5iCuB1b693hJ
FKlwHiJt5yA8X2dDtfk8/Ph1Jx2TwGS+lGjlZaNqp3R1xuAZzXzZMLyZDe5+i3RJ
skqmFTbOiA===Eqsm
-----END PGP MESSAGE-----
.. _encrypted-cli-pillar-data:
Encrypted CLI Pillar Data
-------------------------
.. versionadded:: 2016.3.0
Functions like :py:func:`state.highstate <salt.modules.state.highstate>` and
:py:func:`state.sls <salt.modules.state.sls>` allow for pillar data to be
passed on the CLI.
.. code-block:: bash
salt myminion state.highstate pillar="{'mypillar': 'foo'}"
Starting with the 2016.3.0 release of Salt, it is now possible for this pillar
data to be GPG-encrypted, and to use the GPG renderer to decrypt it.
Replacing Newlines
******************
To pass encrypted pillar data on the CLI, the ciphertext must have its newlines
replaced with a literal backslash-n (``\n``), as newlines are not supported
within Salt CLI arguments. There are a number of ways to do this:
With awk or Perl:
.. code-block:: bash
# awk
ciphertext=`echo -n "supersecret" | gpg --armor --batch --trust-model always --encrypt -r [email protected] | awk '{printf "%s\\n",$0} END {print ""}'`
# Perl
ciphertext=`echo -n "supersecret" | gpg --armor --batch --trust-model always --encrypt -r [email protected] | perl -pe 's/\n/\\n/g'`
With Python:
.. code-block:: python
import subprocess
    secret, stderr = subprocess.Popen(
        ['gpg', '--armor', '--batch', '--trust-model', 'always', '--encrypt',
         '-r', '[email protected]'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE).communicate(input=b'supersecret')
    if secret:
        print(secret.decode().replace('\n', r'\n'))
    else:
        raise ValueError('No ciphertext found: {0}'.format(stderr))
.. code-block:: bash
ciphertext=`python /path/to/script.py`
The ciphertext can be included in the CLI pillar data like so:
.. code-block:: bash
salt myminion state.sls secretstuff pillar_enc=gpg pillar="{secret_pillar: '$ciphertext'}"
The ``pillar_enc=gpg`` argument tells Salt that there is GPG-encrypted pillar
data, so that the CLI pillar data is passed through the GPG renderer, which
will iterate recursively though the CLI pillar dictionary to decrypt any
encrypted values.
Encrypting the Entire CLI Pillar Dictionary
*******************************************
If several values need to be encrypted, it may be more convenient to encrypt
the entire CLI pillar dictionary. Again, this can be done in several ways:
With awk or Perl:
.. code-block:: bash
# awk
ciphertext=`echo -n "{'secret_a': 'CorrectHorseBatteryStaple', 'secret_b': 'GPG is fun!'}" | gpg --armor --batch --trust-model always --encrypt -r [email protected] | awk '{printf "%s\\n",$0} END {print ""}'`
# Perl
ciphertext=`echo -n "{'secret_a': 'CorrectHorseBatteryStaple', 'secret_b': 'GPG is fun!'}" | gpg --armor --batch --trust-model always --encrypt -r [email protected] | perl -pe 's/\n/\\n/g'`
With Python:
.. code-block:: python
import subprocess
pillar_data = {'secret_a': 'CorrectHorseBatteryStaple',
'secret_b': 'GPG is fun!'}
    secret, stderr = subprocess.Popen(
        ['gpg', '--armor', '--batch', '--trust-model', 'always', '--encrypt',
         '-r', '[email protected]'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE).communicate(input=repr(pillar_data).encode())
    if secret:
        print(secret.decode().replace('\n', r'\n'))
    else:
        raise ValueError('No ciphertext found: {0}'.format(stderr))
.. code-block:: bash
ciphertext=`python /path/to/script.py`
With the entire pillar dictionary now encrypted, it can be included in the CLI
pillar data like so:
.. code-block:: bash
salt myminion state.sls secretstuff pillar_enc=gpg pillar="$ciphertext"
Configuration
*************
The default behaviour of this renderer is to log a warning if a block could not
be decrypted; in other words, it just returns the ciphertext rather than the
encrypted secret.
This behaviour can be changed via the `gpg_decrypt_must_succeed` configuration
option. If set to `True`, any gpg block that cannot be decrypted raises a
`SaltRenderError` exception, which registers an error in ``_errors`` during
rendering.
In the Chlorine release, the default behavior will be reversed and an error
message will be added to ``_errors`` by default.
"""
import logging
import os
import re
from subprocess import PIPE, Popen
import salt.syspaths
import salt.utils.cache
import salt.utils.path
import salt.utils.stringio
import salt.utils.stringutils
import salt.utils.versions
from salt.exceptions import SaltRenderError
log = logging.getLogger(__name__)
GPG_CIPHERTEXT = re.compile(
salt.utils.stringutils.to_bytes(
r"-----BEGIN PGP MESSAGE-----.*?-----END PGP MESSAGE-----"
),
re.DOTALL,
)
GPG_CACHE = None
def _get_gpg_exec():
"""
return the GPG executable or raise an error
"""
gpg_exec = salt.utils.path.which("gpg")
if gpg_exec:
return gpg_exec
else:
raise SaltRenderError("GPG unavailable")
def _get_key_dir():
"""
return the location of the GPG key directory
"""
gpg_keydir = None
if "config.get" in __salt__:
gpg_keydir = __salt__["config.get"]("gpg_keydir")
if not gpg_keydir:
gpg_keydir = __opts__.get(
"gpg_keydir",
os.path.join(
__opts__.get("config_dir", os.path.dirname(__opts__["conf_file"])),
"gpgkeys",
),
)
return gpg_keydir
def _get_cache():
global GPG_CACHE
if not GPG_CACHE:
cachedir = __opts__.get("cachedir")
GPG_CACHE = salt.utils.cache.CacheFactory.factory(
__opts__.get("gpg_cache_backend"),
__opts__.get("gpg_cache_ttl"),
minion_cache_path=os.path.join(cachedir, "gpg_cache"),
)
return GPG_CACHE
def _decrypt_ciphertext(cipher):
"""
Given a block of ciphertext as a string, and a gpg object, try to decrypt
the cipher and return the decrypted string. If the cipher cannot be
decrypted, log the error, and return the ciphertext back out.
"""
try:
cipher = salt.utils.stringutils.to_unicode(cipher).replace(r"\n", "\n")
except UnicodeDecodeError:
# ciphertext is binary
pass
cipher = salt.utils.stringutils.to_bytes(cipher)
if __opts__.get("gpg_cache"):
cache = _get_cache()
if cipher in cache:
return cache[cipher]
cmd = [
_get_gpg_exec(),
"--homedir",
_get_key_dir(),
"--status-fd",
"2",
"--no-tty",
"-d",
]
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=False)
decrypted_data, decrypt_error = proc.communicate(input=cipher)
if not decrypted_data:
log.warning("Could not decrypt cipher %r, received: %r", cipher, decrypt_error)
if __opts__["gpg_decrypt_must_succeed"]:
raise SaltRenderError(
"Could not decrypt cipher {!r}, received: {!r}".format(
cipher,
decrypt_error,
)
)
else:
salt.utils.versions.warn_until(
"Chlorine",
"After the Chlorine release of Salt, gpg_decrypt_must_succeed will default to True.",
)
return cipher
else:
if __opts__.get("gpg_cache"):
cache[cipher] = decrypted_data
return decrypted_data
def _decrypt_ciphertexts(cipher, translate_newlines=False, encoding=None):
to_bytes = salt.utils.stringutils.to_bytes
cipher = to_bytes(cipher)
if translate_newlines:
cipher = cipher.replace(to_bytes(r"\n"), to_bytes("\n"))
def replace(match):
result = to_bytes(_decrypt_ciphertext(match.group()))
return result
ret, num = GPG_CIPHERTEXT.subn(replace, to_bytes(cipher))
if num > 0:
        # Remove trailing newlines. Otherwise, if the encrypted value was
        # initially specified as a YAML multiline string, it will contain an
        # unexpected trailing newline.
ret = ret.rstrip(b"\n")
else:
ret = cipher
try:
ret = salt.utils.stringutils.to_unicode(ret, encoding=encoding)
except UnicodeDecodeError:
# decrypted data contains some sort of binary data - not our problem
pass
return ret
def _decrypt_object(obj, translate_newlines=False, encoding=None):
"""
Recursively try to decrypt any object. If the object is a string
or bytes and it contains a valid GPG header, decrypt it,
otherwise keep going until a string is found.
"""
if salt.utils.stringio.is_readable(obj):
return _decrypt_object(obj.getvalue(), translate_newlines)
if isinstance(obj, (str, bytes)):
return _decrypt_ciphertexts(
obj, translate_newlines=translate_newlines, encoding=encoding
)
elif isinstance(obj, dict):
for key, value in obj.items():
obj[key] = _decrypt_object(value, translate_newlines=translate_newlines)
return obj
elif isinstance(obj, list):
for key, value in enumerate(obj):
obj[key] = _decrypt_object(value, translate_newlines=translate_newlines)
return obj
else:
return obj
def render(gpg_data, saltenv="base", sls="", argline="", **kwargs):
"""
    Verify that the gpg binary and key directory are available, then try to
    decrypt the data to be rendered.
"""
if not _get_gpg_exec():
raise SaltRenderError("GPG unavailable")
log.debug("Reading GPG keys from: %s", _get_key_dir())
translate_newlines = kwargs.get("translate_newlines", False)
return _decrypt_object(
gpg_data,
translate_newlines=translate_newlines,
encoding=kwargs.get("encoding", None),
)
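# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how GPG_CIPHERTEXT
# drives block replacement without invoking gpg. `fake_decrypt` is a stand-in
# for _decrypt_ciphertext so the subn() mechanics can be seen in isolation.
if __name__ == "__main__":
    sample = b"password: -----BEGIN PGP MESSAGE-----\nabc\n-----END PGP MESSAGE-----\n"
    def fake_decrypt(match):
        return b"hunter2"  # pretend the armored block decrypted to this
    replaced, count = GPG_CIPHERTEXT.subn(fake_decrypt, sample)
    print(count, replaced)  # 1 b'password: hunter2\n'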
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/renderers/gpg.py
| 0.719482 | 0.372363 |
gpg.py
|
pypi
|
r"""
.. _`AWS KMS Envelope Encryption`: https://docs.aws.amazon.com/kms/latest/developerguide/workflow.html
Renderer that will decrypt ciphers encrypted using `AWS KMS Envelope Encryption`_.
Any key in the data to be rendered can be a urlsafe_b64encoded string, and this renderer will attempt
to decrypt it before passing it off to Salt. This allows you to safely store secrets in
source control, in such a way that only your Salt master can decrypt them and
distribute them only to the minions that need them.
The typical use-case would be to use ciphers in your pillar data, and keep the encrypted
data key on your master. This way developers with appropriate AWS IAM privileges can add new secrets
quickly and easily.
This renderer requires the boto3_ Python library.
.. _boto3: https://boto3.readthedocs.io/
Setup
-----
First, set up your AWS client. For complete instructions on configuring the AWS client,
please read the `boto3 configuration documentation`_. By default, this renderer will use
the default AWS profile. You can override the profile name in salt configuration.
For example, if you have a profile in your aws client configuration named "salt",
you can add the following salt configuration:
.. code-block:: yaml
aws_kms:
profile_name: salt
.. _boto3 configuration documentation: https://boto3.readthedocs.io/en/latest/guide/configuration.html
The rest of these instructions assume that you will use the default profile for key generation
and setup. If not, export AWS_PROFILE and set it to the desired value.
Once the aws client is configured, generate a KMS customer master key and use that to generate
a local data key.
.. code-block:: bash
    # data_key=$(aws kms generate-data-key --key-id your-key-id --key-spec AES_256 \
                 --query 'CiphertextBlob' --output text)
    # echo 'aws_kms:' >> config/master
    # printf '  data_key: !!binary "%s"\n' "$data_key" >> config/master
To apply the renderer on a file-by-file basis add the following line to the
top of any pillar with gpg data in it:
.. code-block:: yaml
#!yaml|aws_kms
Now with your renderer configured, you can include your ciphers in your pillar
data like so:
.. code-block:: yaml
#!yaml|aws_kms
a-secret: gAAAAABaj5uzShPI3PEz6nL5Vhk2eEHxGXSZj8g71B84CZsVjAAtDFY1mfjNRl-1Su9YVvkUzNjI4lHCJJfXqdcTvwczBYtKy0Pa7Ri02s10Wn1tF0tbRwk=
"""
import base64
import logging
import salt.utils.stringio
import salt.utils.versions
from salt.exceptions import SaltConfigurationError
try:
import botocore.exceptions
import boto3
logging.getLogger("boto3").setLevel(logging.CRITICAL)
except ImportError:
pass
try:
import cryptography.fernet as fernet
HAS_FERNET = True
except ImportError:
HAS_FERNET = False
def __virtual__():
"""
Only load if boto libraries exist and if boto libraries are greater than
a given version.
"""
return HAS_FERNET and salt.utils.versions.check_boto_reqs()
log = logging.getLogger(__name__)
def _cfg(key, default=None):
"""
Return the requested value from the aws_kms key in salt configuration.
If it's not set, return the default.
"""
root_cfg = __salt__.get("config.get", __opts__.get)
kms_cfg = root_cfg("aws_kms", {})
return kms_cfg.get(key, default)
def _cfg_data_key():
"""
Return the encrypted KMS data key from configuration.
Raises SaltConfigurationError if not set.
"""
data_key = _cfg("data_key", "")
if data_key:
return data_key
raise SaltConfigurationError("aws_kms:data_key is not set")
def _session():
"""
Return the boto3 session to use for the KMS client.
If aws_kms:profile_name is set in the salt configuration, use that profile.
Otherwise, fall back on the default aws profile.
We use the boto3 profile system to avoid having to duplicate
individual boto3 configuration settings in salt configuration.
"""
profile_name = _cfg("profile_name")
if profile_name:
log.info('Using the "%s" aws profile.', profile_name)
else:
log.info(
"aws_kms:profile_name is not set in salt. Falling back on default profile."
)
try:
return boto3.Session(profile_name=profile_name)
except botocore.exceptions.ProfileNotFound as orig_exc:
raise SaltConfigurationError(
'Boto3 could not find the "{}" profile configured in Salt.'.format(
profile_name or "default"
)
) from orig_exc
except botocore.exceptions.NoRegionError as orig_exc:
raise SaltConfigurationError(
"Boto3 was unable to determine the AWS "
"endpoint region using the {} profile.".format(profile_name or "default")
) from orig_exc
def _kms():
"""
Return the boto3 client for the KMS API.
"""
session = _session()
return session.client("kms")
def _api_decrypt():
"""
Return the response dictionary from the KMS decrypt API call.
"""
kms = _kms()
data_key = _cfg_data_key()
try:
return kms.decrypt(CiphertextBlob=data_key)
except botocore.exceptions.ClientError as orig_exc:
error_code = orig_exc.response.get("Error", {}).get("Code", "")
if error_code != "InvalidCiphertextException":
raise
raise SaltConfigurationError(
"aws_kms:data_key is not a valid KMS data key"
) from orig_exc
def _plaintext_data_key():
"""
    Return the configured KMS data key decrypted, as raw bytes.
Cache the result to minimize API calls to AWS.
"""
response = getattr(_plaintext_data_key, "response", None)
cache_hit = response is not None
if not cache_hit:
response = _api_decrypt()
setattr(_plaintext_data_key, "response", response)
key_id = response["KeyId"]
plaintext = response["Plaintext"]
if hasattr(plaintext, "encode"):
plaintext = plaintext.encode(__salt_system_encoding__)
log.debug("Using key %s from %s", key_id, "cache" if cache_hit else "api call")
return plaintext
def _base64_plaintext_data_key():
"""
Return the configured KMS data key decrypted and encoded in urlsafe base64.
"""
plaintext_data_key = _plaintext_data_key()
return base64.urlsafe_b64encode(plaintext_data_key)
def _decrypt_ciphertext(cipher, translate_newlines=False):
"""
Given a blob of ciphertext as a bytestring, try to decrypt
the cipher and return the decrypted string. If the cipher cannot be
decrypted, log the error, and return the ciphertext back out.
"""
if translate_newlines:
cipher = cipher.replace(r"\n", "\n")
if hasattr(cipher, "encode"):
cipher = cipher.encode(__salt_system_encoding__)
# Decryption
data_key = _base64_plaintext_data_key()
plain_text = fernet.Fernet(data_key).decrypt(cipher)
if hasattr(plain_text, "decode"):
plain_text = plain_text.decode(__salt_system_encoding__)
return str(plain_text)
def _decrypt_object(obj, translate_newlines=False):
"""
Recursively try to decrypt any object.
Recur on objects that are not strings.
Decrypt strings that are valid Fernet tokens.
Return the rest unchanged.
"""
if salt.utils.stringio.is_readable(obj):
return _decrypt_object(obj.getvalue(), translate_newlines)
if isinstance(obj, (str, bytes)):
try:
return _decrypt_ciphertext(obj, translate_newlines=translate_newlines)
except (fernet.InvalidToken, TypeError):
return obj
elif isinstance(obj, dict):
for key, value in obj.items():
obj[key] = _decrypt_object(value, translate_newlines=translate_newlines)
return obj
elif isinstance(obj, list):
for key, value in enumerate(obj):
obj[key] = _decrypt_object(value, translate_newlines=translate_newlines)
return obj
else:
return obj
def render(data, saltenv="base", sls="", argline="", **kwargs):
"""
Decrypt the data to be rendered that was encrypted using AWS KMS envelope encryption.
"""
translate_newlines = kwargs.get("translate_newlines", False)
return _decrypt_object(data, translate_newlines=translate_newlines)
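# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the Fernet layer of
# the envelope scheme in isolation. The data key here is generated locally
# instead of being decrypted through KMS -- an assumption purely for
# demonstration; real ciphers must be produced with the KMS-managed data key.
if __name__ == "__main__" and HAS_FERNET:
    data_key = fernet.Fernet.generate_key()  # stands in for _base64_plaintext_data_key()
    token = fernet.Fernet(data_key).encrypt(b"supersecret")
    print(token)  # a cipher of the same shape as the pillar example above
    print(fernet.Fernet(data_key).decrypt(token))  # b'supersecret'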
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/renderers/aws_kms.py
| 0.85036 | 0.428353 |
aws_kms.py
|
pypi
|
<p align="center"><img height="100" alt="Salt Tower (Logo)" src="./salt-tower.svg" /></p><br />
# Salt Tower — A Flexible External Pillar Module
[](https://github.com/jgraichen/salt-tower/actions/workflows/test.yml)
Salt Tower is an advanced and flexible `ext_pillar` that gives access to pillar values while processing and merging them, can render all usual salt file formats and include private and binary files for a minion.
Salt Tower is inspired by [pillarstack](https://github.com/bbinet/pillarstack) for merging pillar files and giving access to them. It also has a [top file](#top-file) like salt itself and utilizes salt renderers to support all formats such as YAML, Jinja, Python and any combination. Supercharged [renderers for plain text and YAML](#yamlet-renderer) are included too.
Each tower data file is passed the current processed pillars. They can therefore access previously defined values. Data files can include other files that are all merged together.
Salt Tower is designed to completely replace the usual pillar repository or can be utilized beside salts original pillar that e.g. can bootstrap a salt master with Salt Tower.
## Questions or Need Help?
See [examples](examples/). They each have their own README further explaining the given example.
There is a [group](https://groups.google.com/d/forum/salt-tower) and [mailing list](mailto:[email protected]). You can join the group [here](https://groups.google.com/d/forum/salt-tower/join) or by sending a [subscribe-email](mailto:[email protected]).
Feel free to ask for help, discuss solutions or ideas there. Otherwise, you can open an [issue](https://github.com/jgraichen/salt-tower/issues/new).
## Installation
### GitFS
You can include this repository as a gitfs root and synchronize the extensions on the master:
```yaml
gitfs_remotes:
- https://github.com/jgraichen/salt-tower.git:
- base: v1.12.0
```
Sync all modules:
```session
$ salt-run saltutil.sync_all
pillar:
- pillar.tower
renderers:
- renderers.filter
- renderers.text
- renderers.yamlet
```
Please note that *everything* in this repository would be merged with your other roots.
### pip
```session
pip install salt-tower
```
### Manual installation
Install the extension files from the `salt_tower/{pillar,renderers}` directories into the `extension_modules` directory configured in salt.
## Configuration
Salt Tower is configured as an `ext_pillar`:
```yaml
ext_pillar:
- tower: /path/to/tower.sls
```
### Top File
The `tower.sls` file is similar to the usual `top.sls` with some important differences.
#### Ordered matchers
Pillar top items are ordered and processed in order of appearance. You can therefore define identical matchers multiple times.
```yaml
# tower.sls
base:
- '*':
- first
- '*':
- second
```
#### Common includes
You do not need to define a matcher at all; the files will then be included for all minions. Furthermore, you can also use globs to match multiple files, e.g. to include all files from `common/`.
```yaml
base:
- common/*
```
#### Grains
The top file itself is rendered using the default renderer (`yaml|jinja`). Therefore, you can use e.g. `grains` to include specific files.
```yaml
base:
- common/*
- dist/{{ grains['oscodename'] }}
```
#### Embedded data
You can directly include pillar data into the top file simply by defining a `dict` item.
```yaml
base:
- '*.a.example.org':
- site:
id: a
name: A Site
```
#### Iterative pillar processing
All matchers are compound matchers by default. As items are processed in order of appearance, later items can match on previously defined pillar values. The following example includes `applications.sls` for any minion matching `*.a.example.org`, simply because it defines a `site` pillar value.
```yaml
base:
- '*.a.example.org':
- site: {id: a, name: A Site}
- 'I@site:*':
- applications
```
#### Late-bound variable replacement
File includes are preprocessed by a string formatter to late-bind pillar values.
```yaml
base:
- '*.a.example.org':
- site: {id: a, env: production}
- '*.a-staging.example.org':
- site: {id: a, env: staging}
- 'I@site:*':
- site/default
- site/{site.id}
- site/{site.id}/{site.env}/*
```
In the above example a minion `node0.a-staging.example.org` will include the following files:
```text
site/default
site/a
site/a/staging/*
```
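Under the hood this is `string.Formatter`-style substitution against the pillar. A minimal standalone sketch of how such dotted late binding can be resolved (an illustration only, not Salt Tower's actual implementation):

```python
from string import Formatter

class PillarFormatter(Formatter):
    """Resolve `{site.id}`-style fields against a plain pillar dict."""

    def get_field(self, field_name, args, kwargs):
        obj = kwargs
        for part in field_name.split("."):
            obj = obj[part]  # walk one dotted segment deeper
        return obj, field_name

pillar = {"site": {"id": "a", "env": "staging"}}
print(PillarFormatter().vformat("site/{site.id}/{site.env}/*", (), pillar))
# -> site/a/staging/*
```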
#### File lookup
File names will be matched to files and directories; e.g. when including `path/to/file`, the first existing match will be used:
```text
path/to/file
path/to/file.sls
path/to/file/init.sls
```
### Tower Data File
A data file is processed like a usual pillar file. Rendering uses salt's template engines, so all the usual features should be available.
The injected `pillar` object can be used to access previously defined values. The additional `.get` method allows traversing the pillar tree.
```yaml
application:
title: Site of {{ pillar.get('tenant:name') }}
```
**Note:** Using `salt['pillar.get']()` will *not* work.
Tower data files can be [any supported template format](https://docs.saltstack.com/en/latest/ref/renderers/) including python files:
```py
#!py
def run():
ret = {'databases': []}
for app in __pillar__['application']:
ret['databases'].append({
'name': '{0}-{1}'.format(app['name'], app['env'])
})
return ret
```
#### Includes
Pillar data files can include other pillar files similar to how states can be included:
```yaml
include:
- another/pillar
data: more
```
Included files cannot be used within the including template itself; they are merged into the pillar before the file's own data. Includes can be made relative to the current file by prefixing them with a dot:
```yaml
include:
- file/from/pillar/root.sls
- ./adjacent_file.sls
- ../parent_file.sls
```
### Yamlet renderer
The Yamlet renderer is an improved YAML renderer that supports loading other files and rendering templates:
```yaml
ssh_private_key: !read id_rsa
ssh_public_key: !read id_rsa.pub
```
This reads a file from the pillar directory in plain text or binary and embeds it into the pillar to e.g. ease shipping private file blobs to minions.
Using the `!include` tag, files can be pushed through salt's rendering pipeline on the server:
```yaml
nginx:
sites:
my-app: !include ../files/site.conf
```
```jinja
#!jinja | text strip
server {
listen {{ pillar.get('my-app:ip') }}:80;
root /var/www/my-app;
}
```
The pillar will return the following:
```yaml
nginx:
sites:
my-app: |
server {
listen 127.0.0.1:80;
root /var/www/my-app;
}
```
This can greatly simplify states as they only need to drop pillar values into config files and restart services:
```sls
nginx:
pkg.installed: []
service.running: []
{% for name in pillar.get('nginx:sites', {}) %}
/etc/nginx/sites-enabled/{{ name }}:
file.managed:
- contents_pillar: nginx:sites:{{ name }}
- makedirs: True
- watch_in:
- service: nginx
{% endfor %}
```
The yamlet renderer `!include` macro does accept context variables too:
```yaml
nginx:
sites:
my-app: !include
source: ../files/site.conf
context:
listen_ip: 127.0.0.1
```
```jinja
#!jinja | text strip
server {
listen {{ listen_ip }}:80;
root /var/www/my-app;
}
```
### Text renderer
The text renderer (used above) renders a file as plain text. It strips the shebang and can optionally strip whitespace from the beginning and end.
```text
#!text strip
Hello World
```
This will return:
```text
Hello World
```
The text renderer is mostly used for embedding rendered configuration files into a Yamlet file.
### Filter renderer
The filter renderer returns only a subset of data that matches a given grain or pillar key value:
```yaml
#!yamlet | filter grain=os_family default='Unknown OS'
Debian:
package_source: apt
RedHat:
package_source: rpm
Unknown OS:
package_source: unknown
```
When this file is rendered, only the data from the matching top level key is returned. The renderer supports glob matches and uses the minion ID by default:
```yaml
#!yamlet | filter
minion-1:
monitoring:
type: ping
address: 10.0.0.1
webserver-*:
monitoring:
type: http
address: http://example.org
```
### Advanced usage (very dangerous)
The pillar object passed to the python template engine is the actual mutable dict reference used to process and merge the data. It is possible to modify this dict e.g. in a python template without returning anything:
```python
#!py
import copy
def run():
databases = __pillar__['databases']
default = databases.pop('default') # Deletes from actual pillar
for name, config in databases.items():
databases[name] = dict(default, **config)
return {}
```
*Note 1:* Do not return `None`. Otherwise, [Salt will render the template twice](https://github.com/saltstack/salt/blame/v2019.2.0/salt/template.py#L108) and all side effects will be applied twice.
*Note 2:* The `__pillar__` object in Python templates is different to other template engines. It is a dict and does not allow traversing using `get`.
```py
#!py
def run():
return {
        'wrong': __pillar__.get('tenant:name'),
'python': __pillar__['tenant']['name'],
'alternative': tower.get('tenant:name')
}
```
The above example demonstrates different usages. The first example will only work if the pillar contains an actual `tenant:name` top-level key. The second example is idiomatic Python but will raise an error if the keys do not exist. The third example uses the additional `tower` helper module to traverse the pillar data.
The `tower` pillar object is available in all rendering engines and can be used for low-level interaction with the ext_pillar engine. Some available functions are:
#### tower.get(key, default=None, require=False)
Get a pillar value by given traverse path:
```python
tower.get('my:pillar:key')
```
If `require=True` is set, `default` will be ignored and a KeyError will be raised if the pillar key is not found.
#### tower.update(dict)
Merges given dictionary into the pillar data.
```python
tower.update({'my': {'pillar': 'data'}})
assert tower.get('my:pillar') == 'data'
```
#### tower.merge(tgt, *objects)
Merges given dictionaries or lists into the first one.
Note: The first given dictionary or list is *mutated* and returned.
```python
tgt = {}
ret = tower.merge(tgt, {'a': 1})
assert ret is tgt
assert tgt['a'] == 1
```
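For intuition, the behaviour can be approximated by a small recursive merge (a hedged sketch; Salt Tower's exact conflict handling, e.g. for lists, may differ):

```python
def deep_merge(tgt, src):
    """Recursively merge `src` into `tgt`; non-dict values from `src` win."""
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(tgt.get(key), dict):
            deep_merge(tgt[key], value)
        else:
            tgt[key] = value
    return tgt

print(deep_merge({'a': {'x': 1}}, {'a': {'y': 2}}))  # {'a': {'x': 1, 'y': 2}}
```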
#### tower.format(obj, *args, **kwargs)
Performs recursive late-bind string formatting using the tower pillar and the given arguments and keywords for resolving. Uses `string.Formatter` internally.
```python
tower.update({
'database': {
'password': 'secret'
}
})
ret = tower.format('postgres://user@{database.password}/db')
assert ret == 'postgres://user@secret/db'
```
`format` accepts dictionaries and lists as well, and can therefore be used to format full or partial pillar data. This can be used, e.g., to format defaults with extra variables:
```python
#!py
def run():
returns = {}
defaults = __pillar__['default_app_config']
# e.g. {
# 'database': 'sqlite:///opt/{name}.sqlite'
# 'listen': '0.0.0.0:{app.port}'
# }
for name, conf in __pillar__['applications'].items():
# Merge defaults with conf into new dictionary
conf = tower.merge({}, defaults, conf)
# Format late-bind defaults with application config
conf = tower.format(conf, name=name, app=conf)
returns[name] = conf
return {'applications': returns}
```
|
/salt-tower-1.12.0.tar.gz/salt-tower-1.12.0/README.md
| 0.624523 | 0.961858 |
README.md
|
pypi
|
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[email protected].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
|
/salt-3006.2.tar.gz/salt-3006.2/CODE_OF_CONDUCT.md
| 0.577495 | 0.685755 |
CODE_OF_CONDUCT.md
|
pypi
|
.. _glossary:
========
Glossary
========
.. glossary::
Auto-Order
The evaluation of states in the order that they are defined in a SLS
file. *See also*: :ref:`ordering <ordering_auto_order>`.
Bootstrap
A stand-alone Salt project which can download and install a Salt master
and/or a Salt minion onto a host. *See also*: `salt-bootstrap
<https://github.com/saltstack/salt-bootstrap>`_.
Compound Matcher
A combination of many target definitions that can be combined with
boolean operators. *See also*: :ref:`targeting <targeting-compound>`.
EAuth
Shorthand for 'external authentication'. A system for calling to a
system outside of Salt in order to authenticate users and determine if
they are allowed to issue particular commands to Salt. *See also*:
:ref:`external auth<acl-eauth>`.
Environment
A directory tree containing state files which can be applied to
minions. *See also*: :ref:`top file<states-top-environments>`.
Execution Function
A Python function inside an Execution Module that may take arguments
and performs specific system-management tasks. *See also*: :ref:`the
list of execution modules <all-salt.modules>`.
External Job Cache
An external data-store that can archive information about jobs that
have been run. A default returner. *See also*:
:conf_master:`ext_job_cache`, :ref:`the list of returners
<all-salt.returners>`.
Execution Module
A Python module that contains execution functions which directly
perform various system-management tasks on a server. Salt ships with a
number of execution modules but users can also write their own
execution modules to perform specialized tasks. *See also*: :ref:`the
list of execution modules <all-salt.modules>`.
External Pillar
A module that accepts arbitrary arguments and returns a dictionary.
The dictionary is automatically added to a pillar for a minion.
Event
A notice emitted onto an event bus. Events are often driven by requests
for actions to occur on a minion or master and the results of those
actions. *See also*: :ref:`Salt Reactor <reactor>`.
File Server
A local or remote location for storing both Salt-specific files such as
top files or SLS files as well as files that can be distributed to
minions, such as system configuration files. *See also*: :ref:`Salt's
file server <file-server>`.
Grain
A key-value pair which contains a fact about a system, such as its
        hostname or network addresses. *See also*: :ref:`targeting with grains
<targeting-grains>`.
Highdata
        The data structure in an SLS file that represents a set of state
declarations. *See also*: :ref:`state layers
<state-layers-high-data>`.
Highstate
The collection of states to be applied to a system. *See also*:
:ref:`state layers <state-layers-highstate>`.
Idempotent
An action that ensures the system is in a well-known state regardless
of the system's state before the action is applied. A corollary to
this is that applying the action multiple times results in no changes
to the system. State module functions should be idempotent. Some
state module functions, such as :mod:`cmd.run <salt.states.cmd.run>`
are not idempotent by default but can be made idempotent with the
proper use of requisites such as :ref:`unless <unless-requisite>`
and :ref:`onlyif <onlyif-requisite>`. For more information, *see*
`wikipedia <https://en.wikipedia.org/wiki/Idempotent>`_.
Jinja
A templating language which allows variables and simple logic to be
dynamically inserted into static text files when they are rendered.
*See also*: :py:mod:`Salt's Jinja documentation
<salt.renderers.jinja>`.
Job
The complete set of tasks to be performed by the execution of a Salt
command are a single job. *See also*: :py:mod:`jobs runner
<salt.runners.jobs>`.
Job Cache
A storage location for job results, which may then be queried by a
salt runner or an external system. May be local to a salt master
or stored externally.
Job ID
A unique identifier to represent a given :term:`job <Job>`. This is often
shortened to JID.
Low State
The collection of processed states after requisites and order are
evaluated. *See also*: :ref:`state layers <state-layers-low-state>`.
Master
A central Salt daemon from which commands can be issued to listening
minions.
Masterless
A minion which does not require a Salt master to operate. All
configuration is local. *See also*: :conf_minion:`file_client`.
Master Tops
A system for the master that allows hooks into external systems to
generate top file data.
Mine
A facility to collect arbitrary data from minions and store that data
on the master. This data is then available to all other minions.
(Sometimes referred to as Salt Mine.) *See also*: :ref:`Salt Mine
<salt-mine>`.
Minion
A server running a Salt minion daemon which can listen to commands from
a master and perform the requested tasks. Generally, minions are
servers which are to be controlled using Salt.
Minion ID
A globally unique identifier for a minion. *See also*:
:conf_minion:`id`.
Multi-Master
The ability for a minion to be actively connected to multiple Salt
masters at the same time in high-availability environments.
Node Group
A pre-defined group of minions declared in the master configuration
file. *See also*: :ref:`targeting <targeting-nodegroups>`.
Outputter
A formatter for defining the characteristics of output data from a Salt
command. *See also*: :ref:`list of outputters <all-salt.output>`.
Peer Communication
The ability for minions to communicate directly with other minions
instead of brokering commands through the Salt master. *See also*:
:ref:`peer communication <peer>`.
Pillar
A simple key-value store for user-defined data to be made available to
a minion. Often used to store and distribute sensitive data to minions.
*See also*: :ref:`Pillar <salt-pillars>`, :ref:`list of Pillar
modules <all-salt.pillars>`.
Proxy Minion
A minion which can control devices that are unable to run a Salt minion
locally, such as routers and switches.
PyDSL
A Pythonic domain-specific-language used as a Salt renderer. PyDSL can
be used in cases where adding pure Python into SLS files is beneficial.
*See also*: :py:mod:`PyDSL <salt.renderers.pydsl>`.
Reactor
An interface for listening to events and defining actions that Salt
        should take upon receipt of given events. *See also*: :ref:`Reactor
<reactor>`.
Render Pipe
Allows SLS files to be rendered by multiple renderers, with each
renderer receiving the output of the previous. *See also*:
:ref:`composing renderers <renderers-composing>`.
Renderer
Responsible for translating a given data serialization format such as
YAML or JSON into a Python data structure that can be consumed by Salt.
*See also*: :ref:`list of renderers <all-salt.renderers>`.
Returner
Allows for the results of a Salt command to be sent to a given
data-store such as a database or log file for archival. *See also*:
:ref:`list of returners <all-salt.returners>`.
Roster
A flat-file list of target hosts. (Currently only used by salt-ssh.)
Runner Module
A module containing a set of runner functions. *See also*: :ref:`list
of runner modules <all-salt.runners>`.
Runner Function
A function which is called by the :command:`salt-run` command and
executes on the master instead of on a minion. *See also*:
:term:`Runner Module`.
Salt Cloud
A suite of tools used to create and deploy systems on many hosted cloud
providers. *See also*: :ref:`salt-cloud <salt-cloud>`.
Salt SSH
A configuration management and remote orchestration system that does
not require that any software besides SSH be installed on systems to be
controlled.
Salt Thin
A subset of the normal Salt distribution that does not include any
transport routines. A Salt Thin bundle can be dropped onto a host and
used directly without any requirement that the host be connected to a
network. Used by Salt SSH. *See also*: :py:mod:`thin runner
<salt.runners.thin>`.
Salt Virt
Used to manage the creation and deployment of virtual machines onto a
set of host machines. Often used to create and deploy private clouds.
*See also*: :py:mod:`virt runner <salt.runners.virt>`.
SLS Module
Contains a set of :term:`state declarations <State Declaration>`.
State Compiler
Translates :term:`highdata <Highdata>` into lowdata.
State Declaration
A data structure which contains a unique ID and describes one or more
states of a system such as ensuring that a package is installed or a
user is defined. *See also*: :ref:`highstate structure
<state-declaration>`.
State Function
A function contained inside a :term:`state module <State Module>` which
        manages the application of a particular state to a system. State
functions frequently call out to one or more :term:`execution modules
<Execution Module>` to perform a given task.
State Module
A module which contains a set of state functions. *See also*:
:ref:`list of state modules <all-salt.states>`.
State Run
The application of a set of states on a set of systems.
Syndic
A forwarder which can relay messages between tiered masters. **See
also**: :ref:`Syndic <syndic>`.
Target
Minion(s) to which a given salt command will apply. *See also*:
:ref:`targeting <targeting>`.
Top File
Determines which SLS files should be applied to various systems and
organizes those groups of systems into environments. *See also*:
:ref:`top file <states-top>`, :ref:`list of master top modules
<all-salt.tops>`.
__virtual__
A function in a module that is called on module load to determine
whether or not the module should be available to a minion. This
function commonly contains logic to determine if all requirements
for a module are available, such as external libraries.
Worker
A master process which can send notices and receive replies from
minions. *See also*:
:conf_master:`worker_threads`.
|
/salt-3006.2.tar.gz/salt-3006.2/doc/glossary.rst
| 0.930521 | 0.730879 |
glossary.rst
|
pypi
|
===============================
SLS Template Variable Reference
===============================
.. warning::
In the 3005 release ``sls_path``, ``tplfile``, and ``tpldir`` have had some significant
improvements which have the potential to break states that rely on old and
broken functionality.
The template engines available to sls files and file templates come loaded
with a number of context variables. These variables contain information and
functions to assist in the generation of templates. See each variable below
for its availability -- not all variables are available in all templating
contexts.
Salt
====
The `salt` variable is available to abstract the salt library functions. This
variable is a python dictionary containing all of the functions available to
the running salt minion. It is available in all salt templates.
.. code-block:: jinja
{% for file in salt['cmd.run']('ls -1 /opt/to_remove').splitlines() %}
/opt/to_remove/{{ file }}:
file.absent
{% endfor %}
Opts
====
The `opts` variable abstracts the contents of the minion's configuration file
directly to the template. The `opts` variable is a dictionary. It is available
in all templates.
.. code-block:: jinja
{{ opts['cachedir'] }}
The ``config.get`` function also searches for values in the `opts` dictionary.
Pillar
======
The `pillar` dictionary can be referenced directly, and is available in all
templates:
.. code-block:: jinja
{{ pillar['key'] }}
Using the ``pillar.get`` function via the `salt` variable is generally
recommended since a default can be safely set in the event that the value
is not available in pillar and dictionaries can be traversed directly:
.. code-block:: jinja
{{ salt['pillar.get']('key', 'failover_value') }}
{{ salt['pillar.get']('stuff:more:deeper') }}
Grains
======
The `grains` dictionary makes the minion's grains directly available, and is
available in all templates:
.. code-block:: jinja
{{ grains['os'] }}
The ``grains.get`` function can be used to traverse deeper grains and set
defaults:
.. code-block:: jinja
{{ salt['grains.get']('os') }}
saltenv
=======
The `saltenv` variable is only available in sls files when gathering the sls
from an environment.
.. code-block:: jinja
{{ saltenv }}
SLS Only Variables
==================
The following are only available when processing sls files. If you need these
in other templates, you can usually pass them in as template context.
sls
---
The `sls` variable contains the sls reference value, and is only available in
the actual SLS file (not in any files referenced in that SLS). The sls
reference value is the value used to include the sls in top files or via the
include option.
.. code-block:: jinja
{{ sls }}
slspath
-------
The `slspath` variable contains the path to the directory of the current sls
file. The value of `slspath` in files referenced in the current sls depends on
the reference method. For jinja includes `slspath` is the path to the current
directory of the file. For salt includes `slspath` is the path to the directory
of the included file. If the current sls file is in the root of the file
roots, this will return an empty string (``""``).
.. code-block:: jinja
{{ slspath }}
sls_path
--------
A version of `slspath` with underscores as path separators instead of slashes.
So, if `slspath` is `path/to/state` then `sls_path` is `path_to_state`
.. code-block:: jinja
{{ sls_path }}
slsdotpath
----------
A version of `slspath` with dots as path separators instead of slashes. So, if
`slspath` is `path/to/state` then `slsdotpath` is `path.to.state`. This is the
same as `sls` if `sls` points to a directory instead of a file.
.. code-block:: jinja
{{ slsdotpath }}
slscolonpath
------------
A version of `slspath` with colons (`:`) as path separators instead of slashes.
So, if `slspath` is `path/to/state` then `slscolonpath` is `path:to:state`.
.. code-block:: jinja
{{ slscolonpath }}
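Taken together, and reusing the example value `path/to/state` for `slspath`
from above, the path-derived variables line up as follows (illustrative only):

.. code-block:: jinja

    {{ slspath }}      {# path/to/state #}
    {{ sls_path }}     {# path_to_state #}
    {{ slsdotpath }}   {# path.to.state #}
    {{ slscolonpath }} {# path:to:state #}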
tplpath
-------
Full path to the sls template file being processed, on local disk. This
usually points to a copy of the sls file in a cache directory. The path is in
an OS-specific format (Windows vs POSIX). (It is probably best not to use this.)
.. code-block:: jinja
{{ tplpath }}
tplfile
-------
Path to the exact sls template file being processed, relative to the file
roots.
.. code-block:: jinja
{{ tplfile }}
tpldir
------
Directory, relative to the file roots, of the current sls file. This is usually
identical to `slspath`, except for a root-level sls file, where `tpldir` returns
`.` while `slspath` returns an empty string.
A common use case for this variable is to generate relative salt:// URLs, like:
.. code-block:: jinja
my-file:
file.managed:
source: salt://{{ tpldir }}/files/my-template
tpldot
------
A version of `tpldir` with dots as path separators instead of slashes. So, if
`tpldir` is `path/to/state` then `tpldot` is `path.to.state`. Note: if `tpldir`
is `.`, this will be set to an empty string (`""`).
.. code-block:: jinja
{{ tpldot }}
|
/salt-3006.2.tar.gz/salt-3006.2/doc/ref/states/vars.rst
| 0.814422 | 0.690331 |
vars.rst
|
pypi
|
.. _compiler-ordering:
=====================================
Understanding State Compiler Ordering
=====================================
.. note::
This tutorial is an intermediate level tutorial. Some basic understanding
of the state system and writing Salt Formulas is assumed.
Salt's state system is built to deliver all of the power of configuration
management systems without sacrificing simplicity. This tutorial is made to
help users understand in detail just how the order is defined for state
executions in Salt.
This tutorial is written to represent the behavior of Salt as of version
0.17.0.
Compiler Basics
===============
To understand ordering in depth some very basic knowledge about the state
compiler is very helpful. No need to worry though, this is very high level!
High Data and Low Data
----------------------
When defining Salt Formulas in YAML the data that is being represented is
referred to by the compiler as High Data. When the data is initially
loaded into the compiler it is a single large Python dictionary. This
dictionary can be viewed raw by running:
.. code-block:: bash
salt '*' state.show_highstate
This "High Data" structure is then compiled down to "Low Data". The Low
Data is what is matched up to create individual executions in Salt's
configuration management system. The
low data is an ordered list of single state calls to execute. Once the
low data is compiled the evaluation order can be seen.
The low data can be viewed by running:
.. code-block:: bash
salt '*' state.show_lowstate
.. note::
The state execution module contains MANY functions for evaluating the
state system and is well worth a read! These routines can be very useful
when debugging states or to help deepen one's understanding of Salt's
state system.
As an example, a state written thusly:
.. code-block:: yaml
apache:
pkg.installed:
- name: httpd
service.running:
- name: httpd
- watch:
- file: apache_conf
- pkg: apache
apache_conf:
file.managed:
- name: /etc/httpd/conf.d/httpd.conf
- source: salt://apache/httpd.conf
Will have High Data which looks like this represented in json:
.. code-block:: json
{
"apache": {
"pkg": [
{
"name": "httpd"
},
"installed",
{
"order": 10000
}
],
"service": [
{
"name": "httpd"
},
{
"watch": [
{
"file": "apache_conf"
},
{
"pkg": "apache"
}
]
},
"running",
{
"order": 10001
}
],
"__sls__": "blah",
"__env__": "base"
},
"apache_conf": {
"file": [
{
"name": "/etc/httpd/conf.d/httpd.conf"
},
{
"source": "salt://apache/httpd.conf"
},
"managed",
{
"order": 10002
}
],
"__sls__": "blah",
"__env__": "base"
}
}
The subsequent Low Data will look like this:
.. code-block:: json
[
{
"name": "httpd",
"state": "pkg",
"__id__": "apache",
"fun": "installed",
"__env__": "base",
"__sls__": "blah",
"order": 10000
},
{
"name": "httpd",
"watch": [
{
"file": "apache_conf"
},
{
"pkg": "apache"
}
],
"state": "service",
"__id__": "apache",
"fun": "running",
"__env__": "base",
"__sls__": "blah",
"order": 10001
},
{
"name": "/etc/httpd/conf.d/httpd.conf",
"source": "salt://apache/httpd.conf",
"state": "file",
"__id__": "apache_conf",
"fun": "managed",
"__env__": "base",
"__sls__": "blah",
"order": 10002
}
]
This tutorial discusses the Low Data evaluation and the state runtime.
Ordering Layers
===============
Salt defines two ordering interfaces which are evaluated in the state runtime,
and these orders are defined in a number of passes.
Definition Order
----------------
.. note::
The Definition Order system can be disabled by turning the option
``state_auto_order`` to ``False`` in the master configuration file.
The top level of ordering is the `Definition Order`. The `Definition Order`
is the order in which states are defined in salt formulas. This is very
straightforward on basic states which do not contain ``include`` statements
or a ``top`` file, as the states are just ordered from the top of the file,
but the include system starts to bring in some simple rules for how the
`Definition Order` is defined.
Looking back at the "Low Data" and "High Data" shown above, the order key has
been transparently added to the data to enable the `Definition Order`.
The Include Statement
~~~~~~~~~~~~~~~~~~~~~
Basically, if there is an include statement in a formula, then the formulas
which are included will be run BEFORE the contents of the formula which
is including them. Also, the include statement is a list, so the included
formulas are loaded in the order in which they are listed.
In the following case:
``foo.sls``
.. code-block:: yaml
include:
- bar
- baz
``bar.sls``
.. code-block:: yaml
include:
- quo
``baz.sls``
.. code-block:: yaml
include:
- qux
In the above case if ``state.apply foo`` were called then the formulas will be
loaded in the following order:
1. quo
2. bar
3. qux
4. baz
5. foo
The `order` Flag
----------------
The `Definition Order` happens transparently in the background, but the
ordering can be explicitly overridden using the ``order`` flag in states:
.. code-block:: yaml
apache:
pkg.installed:
- name: httpd
- order: 1
This order flag will override the definition order. This makes it very
simple to create states that are always executed first, last, or in specific
stages. A great example is defining a number of package repositories that
need to be set up before anything else, or final checks that need to be
run at the end of a state run by using ``order: last`` or ``order: -1``.
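For example, a final check pinned to the very end of a run might look like the
following sketch (the script path is purely illustrative):

.. code-block:: yaml

    final_validation:
      cmd.run:
        - name: /usr/local/bin/validate_deploy.sh
        - order: last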
When the order flag is explicitly set the `Definition Order` system will omit
setting an order for that state and directly use the order flag defined.
Lexicographical Fall-back
-------------------------
Salt states were written to ALWAYS execute in the same order. Before the
introduction of `Definition Order` in version 0.17.0 everything was ordered
lexicographically according to the name of the state, then the function, then
the ID. This is the way Salt has always ensured that states run in the same
order regardless of where they are deployed; the addition of the
`Definition Order` method merely makes this finite ordering easier to follow.
The lexicographical ordering is still applied but it only has any effect when
two order statements collide. This means that if multiple states are assigned
the same order number, they will fall back to lexicographical ordering
to ensure that every execution still happens in a finite order.
.. note::
If running with ``state_auto_order: False`` the ``order`` key is not
set automatically, since the Lexicographical order can be derived
from other keys.
Requisite Ordering
------------------
Salt states are fully declarative, in that they are written to declare the
state in which a system should be. This means that components can require that
other components have been set up successfully. Unlike the other ordering
systems, the `Requisite` system in Salt is evaluated at runtime.
The requisite system is also built to ensure that the ordering of execution
never changes, but is always the same for a given set of states. This is
accomplished by using a runtime that processes states in a completely
predictable order instead of using an event loop based system like other
declarative configuration management systems.
Runtime Requisite Evaluation
----------------------------
The requisite system is evaluated as the components are found, and the
requisites are always evaluated in the same order. This explanation will
be followed by an example, since the raw explanation may be a little dizzying
at first; the result is a linear dependency evaluation sequence.
The "Low Data" is an ordered list or dictionaries, the state runtime evaluates
each dictionary in the order in which they are arranged in the list. When
evaluating a single dictionary it is checked for requisites, requisites are
evaluated in order, ``require`` then ``watch`` then ``prereq``.
.. note::
    If using requisite_in statements like ``require_in`` and ``watch_in``, these
    will be compiled down to ``require`` and ``watch`` statements before runtime
    evaluation.
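For example, the ``apache_conf`` state from the example above could declare the
same relationship from its own side with ``watch_in``; this sketch compiles down
to the same ``watch`` requisite on the service:

.. code-block:: yaml

    apache_conf:
      file.managed:
        - name: /etc/httpd/conf.d/httpd.conf
        - source: salt://apache/httpd.conf
        - watch_in:
          - service: apache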
Each requisite statement contains an ordered list of requisites; these are
looked up in the list of dictionaries and then executed. Once all requisites
have been evaluated and executed then the requiring state can safely be run
(or not run if requisites have not been met).
This means that the requisites are always evaluated in the same order, again
ensuring that one of the core design principles of Salt's state system, that
execution is always finite, remains intact.
Simple Runtime Evaluation Example
---------------------------------
Given the above "Low Data" the states will be evaluated in the following order:
1. The pkg.installed state is executed, ensuring that the apache package is
   installed; it contains no requisites and is therefore the first defined
   state to execute.
2. The service.running state is evaluated but NOT executed. A watch requisite
   is found, so its entries are read in order; the runtime first checks for
   the file, sees that it has not been executed, and calls for the file state
   to be evaluated.
3. The file state is evaluated AND executed, since it, like the pkg state,
   does not contain any requisites.
4. The evaluation of the service state continues; it next checks the pkg
   requisite and sees that it is met. With all requisites met, the service
   state is now executed.
Best Practice
-------------
The best practice in Salt is to choose a method and stick with it, official
states are written using requisites for all associations since requisites
create clean, traceable dependency trails and make for the most portable
formulas. To accomplish something similar to how classical imperative
systems function, all requisites can be omitted and the ``failhard`` option
set to ``True`` in the master configuration; this will stop all state runs at
the first instance of a failure.
In the end, using requisites creates very tight and fine-grained states;
not using requisites makes for full-sequence runs which, while slightly easier
to write, give much less control over the executions.
|
/salt-3006.2.tar.gz/salt-3006.2/doc/ref/states/compiler_ordering.rst
| 0.904661 | 0.669924 |
compiler_ordering.rst
|
pypi
|
.. _state-system-reference:
======================
State System Reference
======================
Salt offers an interface to manage the configuration or "state" of the
Salt minions. This interface is a fully capable mechanism used to enforce the
state of systems from a central manager.
.. toctree::
:glob:
*
State Management
================
State management, also frequently called Software Configuration Management
(SCM), is a program that puts and keeps a system into a predetermined state. It
installs software packages, starts or restarts services or puts configuration
files in place and watches them for changes.
Having a state management system in place allows one to easily and reliably
configure and manage a few servers or a few thousand servers. It allows
configurations to be kept under version control.
Salt States is an extension of the Salt Modules that we discussed in the
previous :ref:`remote execution <tutorial-remote-execution-modules>` tutorial. Instead
of calling one-off executions, the state of a system can be easily defined and
then enforced.
Understanding the Salt State System Components
==============================================
The Salt state system is comprised of a number of components. As a user, an
understanding of the SLS and renderer systems is needed. But as a developer,
an understanding of Salt states and how to write the states is needed as well.
.. note::
States are compiled and executed only on minions that have been targeted.
To execute functions directly on masters, see :ref:`runners <runners>`.
Salt SLS System
---------------
The primary system used by the Salt state system is the SLS system. SLS stands
for **S**\ a\ **L**\ t **S**\ tate.
The Salt States are files which contain the information about how to configure
Salt minions. The states are laid out in a directory tree and can be written in
many different formats.
The contents of the files and the way they are laid out is intended to be as
simple as possible while allowing for maximum flexibility. The files are laid
out in states and contain information about how the minion needs to be
configured.
SLS File Layout
```````````````
SLS files are laid out in the Salt file server.
A simple layout can look like this:
.. code-block:: text
top.sls
ssh.sls
sshd_config
users/init.sls
users/admin.sls
salt/master.sls
web/init.sls
The ``top.sls`` file is a key component. The ``top.sls`` files
is used to determine which SLS files should be applied to which minions.
The rest of the files with the ``.sls`` extension in the above example are
state files.
Files without a ``.sls`` extension are seen by the Salt master as
files that can be downloaded to a Salt minion.
States are translated into dot notation. For example, the ``ssh.sls`` file is
seen as the ssh state and the ``users/admin.sls`` file is seen as the
users.admin state.
Files named ``init.sls`` are translated to be the state name of the parent
directory, so the ``web/init.sls`` file translates to the ``web`` state.
In Salt, everything is a file; there is no "magic translation" of files and file
types. This means that a state file can be distributed to minions just like a
plain text or binary file.
SLS Files
`````````
The Salt state files are simple sets of data. Since SLS files are just data
they can be represented in a number of different ways.
The default format is YAML generated from a Jinja template. This allows the
state files to combine the templating power of Jinja with the simplicity of YAML.
State files can then be complicated Jinja templates that translate down to YAML, or just
plain and simple YAML files.
The state files are simply common data structures such as dictionaries and
lists, represented in a data serialization format such as YAML.
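As a minimal sketch, a Jinja-templated SLS can render a loop down to plain YAML
(the package names here are purely illustrative):

.. code-block:: jinja

    {% for pkg in ['vim', 'curl'] %}
    install_{{ pkg }}:
      pkg.installed:
        - name: {{ pkg }}
    {% endfor %}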
Here is an example of a Salt State:
.. code-block:: yaml
vim:
pkg.installed: []
salt:
pkg.latest:
- name: salt
service.running:
- names:
- salt-master
- salt-minion
- require:
- pkg: salt
- watch:
- file: /etc/salt/minion
/etc/salt/minion:
file.managed:
- source: salt://salt/minion
- user: root
- group: root
- mode: 644
- require:
- pkg: salt
This short stanza will ensure that vim is installed, Salt is installed and up
to date, the salt-master and salt-minion daemons are running and the Salt
minion configuration file is in place. It will also ensure everything is
deployed in the right order and that the Salt services are restarted when the
watched file is updated.
The Top File
````````````
The top file controls the mapping between minions and the states which should
be applied to them.
The top file specifies which minions should have which SLS files applied and
which environments they should draw those SLS files from.
The top file works by specifying environments on the top-level.
Each environment contains :ref:`target expressions <targeting>` to match
minions. Finally, each target expression contains a list of Salt states to
apply to matching minions:
.. code-block:: yaml
base:
'*':
- salt
- users
- users.admin
'saltmaster.*':
- match: pcre
- salt.master
This above example uses the base environment which is built into the default
Salt setup.
The base environment has target expressions. The first one matches all minions,
and the SLS files below it apply to all minions.
The second expression is a regular expression that will match all minions
with an ID matching ``saltmaster.*`` and specifies that for those minions, the
salt.master state should be applied.
.. important::
Since version 2014.7.0, the default matcher (when one is not explicitly
defined as in the second expression in the above example) is the
:ref:`compound <targeting-compound>` matcher. Since this matcher parses
individual words in the expression, minion IDs containing spaces will not
match properly using this matcher. Therefore, if your target expression is
designed to match a minion ID containing spaces, it will be necessary to
specify a different match type (such as ``glob``). For example:
.. code-block:: yaml
base:
'test minion':
- match: glob
- foo
- bar
- baz
A full table of match types available in the top file can be found :ref:`here
<top-file-match-types>`.
.. _reloading-modules:
Reloading Modules
-----------------
Some Salt states require that specific packages be installed in order for the
module to load. As an example the :mod:`pip <salt.states.pip_state>` state
module requires the `pip`_ package for proper name and version parsing.
In most of the common cases, Salt is clever enough to transparently reload the
modules. For example, if you install a package, Salt reloads modules because
some other module or state might require just that package which was installed.
On some edge-cases salt might need to be told to reload the modules. Consider
the following state file which we'll call ``pep8.sls``:
.. code-block:: yaml
python-pip:
cmd.run:
- name: |
easy_install --script-dir=/usr/bin -U pip
- cwd: /
pep8:
pip.installed:
- require:
- cmd: python-pip
The above example installs `pip`_ using ``easy_install`` from `setuptools`_ and
installs `pep8`_ using :mod:`pip <salt.states.pip_state>`, which, as told
earlier, requires `pip`_ to be installed system-wide. Let's execute this state:
.. code-block:: bash
salt-call state.apply pep8
The execution output would be something like:
.. code-block:: text
----------
State: - pip
Name: pep8
Function: installed
Result: False
Comment: State pip.installed found in sls pep8 is unavailable
Changes:
Summary
------------
Succeeded: 1
Failed: 1
------------
Total: 2
If we executed the state again the output would be:
.. code-block:: text
----------
State: - pip
Name: pep8
Function: installed
Result: True
Comment: Package was successfully installed
Changes: pep8==1.4.6: Installed
Summary
------------
Succeeded: 2
Failed: 0
------------
Total: 2
Since we installed `pip`_ using :mod:`cmd <salt.states.cmd>`, Salt has no way
to know that a system-wide package was installed.
On the second execution, since the required `pip`_ package was installed, the
state executed correctly.
.. note::
Salt does not reload modules on every state run because doing so would greatly
slow down state execution.
So how do we solve this *edge-case*? ``reload_modules``!
``reload_modules`` is a boolean option recognized by Salt on **all** available
states, which forces Salt to reload its modules once a given state finishes.
The modified state file would now be:
.. code-block:: yaml
python-pip:
cmd.run:
- name: |
easy_install --script-dir=/usr/bin -U pip
- cwd: /
- reload_modules: true
pep8:
pip.installed:
- require:
- cmd: python-pip
Let's run it, once:
.. code-block:: bash
salt-call state.apply pep8
The output is:
.. code-block:: text
----------
State: - pip
Name: pep8
Function: installed
Result: True
Comment: Package was successfully installed
Changes: pep8==1.4.6: Installed
Summary
------------
Succeeded: 2
Failed: 0
------------
Total: 2
.. _`pip`: https://pypi.org/project/pip/
.. _`pep8`: https://pypi.org/project/pep8/
.. _`setuptools`: https://pypi.org/project/setuptools/
.. _`runners`: /ref/runners
|
/salt-3006.2.tar.gz/salt-3006.2/doc/ref/states/index.rst
| 0.778144 | 0.878419 |
index.rst
|
pypi
|
.. _ordering:
===============
Ordering States
===============
The way in which configuration management systems are executed is a hotly
debated topic in the configuration management world. Two major philosophies
exist on the subject, to either execute in an imperative fashion where things
are executed in the order in which they are defined, or in a declarative
fashion where dependencies need to be mapped between objects.
Imperative ordering is finite and generally considered easier to write, while
declarative ordering is much more powerful and flexible, but generally
considered more difficult to create.
Salt has been created to get the best of both worlds. States are evaluated in
a finite order, which guarantees that states are always executed in the same
order, and the states runtime is declarative, making Salt fully aware of
dependencies via the `requisite` system.
.. _ordering_auto_order:
State Auto Ordering
===================
.. versionadded:: 0.17.0
Salt always executes states in a finite manner, meaning that they will always
execute in the same order regardless of the system that is executing them. This
evaluation order makes it easy to know what order the states will be executed in,
but it is important to note that the requisite system will override the ordering
defined in the files, and the ``order`` option, described below, will also
override the order in which states are executed.
This ordering system can be disabled in preference of lexicographic (classic)
ordering by setting the ``state_auto_order`` option to ``False`` in the master
configuration file. Otherwise, ``state_auto_order`` defaults to ``True``.
How compiler ordering is managed is described further in :ref:`compiler-ordering`.
.. _ordering_requisites:
Requisite Statements
====================
.. note::
The behavior of requisites changed in version 0.9.7 of Salt. This
documentation applies to requisites in version 0.9.7 and later.
Often when setting up states any single action will require or depend on
another action. Salt allows for the building of relationships between states
with requisite statements. A requisite statement ensures that the named state
is evaluated before the state requiring it. There are three types of requisite
statements in Salt, **require**, **watch**, and **prereq**.
These requisite statements are applied to a specific state declaration:
.. code-block:: yaml
httpd:
pkg.installed: []
file.managed:
- name: /etc/httpd/conf/httpd.conf
- source: salt://httpd/httpd.conf
- require:
- pkg: httpd
In this example, the **require** requisite is used to declare that the file
/etc/httpd/conf/httpd.conf should only be set up if the pkg state executes
successfully.
The requisite system works by finding the states that are required and
executing them before the state that requires them. Then the required states
can be evaluated to see if they have executed correctly.
Require statements can refer to any state defined in Salt. The basic examples
are `pkg`, `service`, and `file`, but any used state can be referenced.
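The ``watch`` and ``prereq`` requisites use the same list syntax. As a minimal
sketch (the state IDs and paths are illustrative), ``prereq`` allows a state to
run only if a later state is expected to make changes:

.. code-block:: yaml

    graceful-down:
      cmd.run:
        - name: service apache graceful-stop
        - prereq:
          - file: site-code

    site-code:
      file.recurse:
        - name: /opt/site_code
        - source: salt://site/code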
In addition to state declarations such as pkg, file, etc., **sls** type requisites
are also recognized, and essentially allow 'chaining' of states. This provides a
mechanism to ensure the proper sequence for complex state formulas, especially when
the discrete states are split or grouped into separate sls files:
.. code-block:: yaml
include:
- network
httpd:
pkg.installed: []
service.running:
- require:
- pkg: httpd
- sls: network
In this example, the httpd service running state will not be applied
(i.e., the httpd service will not be started) unless both the httpd package is
installed AND the network state is satisfied.
.. note:: Requisite matching
Requisites match on both the ID Declaration and the ``name`` parameter.
Therefore, if using the ``pkgs`` or ``sources`` argument to install
a list of packages in a pkg state, it's important to note that it is
impossible to match an individual package in the list, since all packages
are installed as a single state.
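As a sketch of this point (the IDs are illustrative), the pkg state must be
required by its ID declaration rather than by any individual package in the
list:

.. code-block:: yaml

    my_pkgs:
      pkg.installed:
        - pkgs:
          - vim
          - curl

    /etc/vimrc:
      file.managed:
        - source: salt://vimrc
        - require:
          - pkg: my_pkgs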
Multiple Requisites
-------------------
The requisite statement is passed as a list, allowing for the easy addition of
more requisites. Both requisite types can also be separately declared:
.. code-block:: yaml
httpd:
pkg.installed: []
service.running:
- enable: True
- watch:
- file: /etc/httpd/conf/httpd.conf
- require:
- pkg: httpd
- user: httpd
- group: httpd
file.managed:
- name: /etc/httpd/conf/httpd.conf
- source: salt://httpd/httpd.conf
- require:
- pkg: httpd
user.present: []
group.present: []
In this example, the httpd service is only going to be started if the package,
user, group, and file are executed successfully.
Requisite Documentation
-----------------------
For detailed information on each of the individual requisites, :ref:`please
look here. <requisites>`
The Order Option
================
Before using the `order` option, remember that the majority of state ordering
should be done with a :ref:`requisite-declaration`, and that a requisite
declaration will override an `order` option, so a state with an `order` option
should not require, or be required by, other states.
The order option is used by adding an order number to a state declaration
with the option `order`:
.. code-block:: yaml
vim:
pkg.installed:
- order: 1
Setting the order option to `1` ensures that the vim package will be
installed in tandem with any other state declaration set to order `1`.
Any state declared without an order option will be executed after all states
with order options are executed.
But this construct can only handle ordering states from the beginning.
Certain circumstances will present a situation where it is desirable to send
a state to the end of the line. To do this, set the order to ``last``:
.. code-block:: yaml
vim:
pkg.installed:
- order: last
|
/salt-3006.2.tar.gz/salt-3006.2/doc/ref/states/ordering.rst
| 0.924308 | 0.763726 |
ordering.rst
|
pypi
|
.. _file-server:
================
Salt File Server
================
Salt comes with a simple file server suitable for distributing files to the
Salt minions. The file server is a stateless ZeroMQ server that is built into
the Salt master.
The main intent of the Salt file server is to present files for use in the
Salt state system. With this said, the Salt file server can be used for any
general file transfer from the master to the minions.
.. toctree::
:glob:
*
The cp Module
-------------
The cp module is the home of minion side file server operations. The cp module
is used by the Salt state system, salt-cp, and can be used to distribute files
presented by the Salt file server.
Escaping Special Characters
```````````````````````````
The ``salt://`` url format can potentially contain a query string, for example
``salt://dir/file.txt?saltenv=base``. You can prevent the fileclient/fileserver from
interpreting ``?`` as the initial token of a query string by referencing the file
with ``salt://|`` rather than ``salt://``.
.. code-block:: yaml
/etc/marathon/conf/?checkpoint:
file.managed:
- source: salt://|hw/config/?checkpoint
- makedirs: True
Environments
````````````
Since the file server is made to work with the Salt state system, it supports
environments. The environments are defined in the master config file and
when referencing an environment the file specified will be based on the root
directory of the environment.
get_file
````````
The cp.get_file function can be used on the minion to download a file from
the master, the syntax looks like this:
.. code-block:: bash
salt '*' cp.get_file salt://vimrc /etc/vimrc
This will instruct all Salt minions to download the vimrc file and copy it
to /etc/vimrc
Template rendering can be enabled on both the source and destination file names
like so:
.. code-block:: bash
salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja
This example would instruct all Salt minions to download the vimrc from a
directory with the same name as their OS grain and copy it to /etc/vimrc
For larger files, the cp.get_file module also supports gzip compression.
Because gzip is CPU-intensive, this should only be used in
scenarios where the compression ratio is very high (e.g. pretty-printed JSON
or YAML files).
To use compression, use the ``gzip`` named argument. Valid values are integers
from 1 to 9, where 1 is the lightest compression and 9 the heaviest. In other
words, 1 uses the least CPU on the master (and minion), while 9 uses the most.
.. code-block:: bash
salt '*' cp.get_file salt://vimrc /etc/vimrc gzip=5
Finally, note that by default cp.get_file does *not* create new destination
directories if they do not exist. To change this, use the ``makedirs``
argument:
.. code-block:: bash
salt '*' cp.get_file salt://vimrc /etc/vim/vimrc makedirs=True
In this example, /etc/vim/ would be created if it didn't already exist.
get_dir
```````
The cp.get_dir function can be used on the minion to download an entire
directory from the master. The syntax is very similar to get_file:
.. code-block:: bash
salt '*' cp.get_dir salt://etc/apache2 /etc
cp.get_dir supports template rendering and gzip compression arguments just like
get_file:
.. code-block:: bash
salt '*' cp.get_dir salt://etc/{{pillar.webserver}} /etc gzip=5 template=jinja
File Server Client Instance
---------------------------
A client instance is available which allows for modules and applications to be
written which make use of the Salt file server.
The file server uses the same authentication and encryption used by the rest
of the Salt system for network communication.
fileclient Module
`````````````````
The ``salt/fileclient.py`` module is used to set up the communication from the
minion to the master. When creating a client instance using the fileclient module,
the minion configuration needs to be passed in. When using the fileclient module
from within a minion module the built in ``__opts__`` data can be passed:
.. code-block:: python
    import salt.fileclient
def get_file(path, dest, saltenv="base"):
"""
Used to get a single file from the Salt master
CLI Example:
salt '*' cp.get_file salt://vimrc /etc/vimrc
"""
# Get the fileclient object
client = salt.fileclient.get_file_client(__opts__)
# Call get_file
return client.get_file(path, dest, False, saltenv)
When creating a fileclient instance outside of a minion module, where the
``__opts__`` data is not available, the configuration data needs to be generated:
.. code-block:: python
import salt.fileclient
import salt.config
def get_file(path, dest, saltenv="base"):
"""
Used to get a single file from the Salt master
"""
# Get the configuration data
opts = salt.config.minion_config("/etc/salt/minion")
# Get the fileclient object
client = salt.fileclient.get_file_client(opts)
# Call get_file
return client.get_file(path, dest, False, saltenv)
|
/salt-3006.2.tar.gz/salt-3006.2/doc/ref/file_server/index.rst
| 0.840652 | 0.785391 |
index.rst
|
pypi
|
.. _delta-proxy-information:
.. _delta-proxy-intro:
===================
Delta proxy minions
===================
Welcome to the delta proxy minion installation guide. This installation
guide explains the process for installing and using delta proxy minion
which is available beginning in version 3004.
This guide is intended for system and network administrators with the general
knowledge and experience required in the field. This guide is also intended for
users that have ideally already tested and used standard Salt proxy minions in
their environment before deciding to move to a delta proxy minion environment.
See `Salt proxy minions <https://docs.saltproject.io/en/latest/topics/proxyminion/index.html>`_ for more information.
.. Note::
If you have not used standard Salt proxy minions before, consider testing
and deploying standard Salt proxy minions in your environment first.
Proxy minions vs. delta proxy minions
=====================================
Salt can target network devices through `Salt proxy minions
<https://docs.saltproject.io/en/latest/topics/proxyminion/index.html>`_.
Proxy minions allow you to control network devices that, for whatever reason,
cannot run the standard Salt minion. Examples include:
* Network gear that has an API but runs a proprietary operating system
* Devices with limited CPU or memory
* Devices that could run a minion but will not for security reasons
A proxy minion acts as an intermediary between the Salt master and the
device it represents. The proxy minion runs on the Salt master and then
translates commands from the Salt master to the device as needed.
By acting as an intermediary for the actual minion, proxy minions eliminate
the need to establish a constant connection from a Salt master to a minion. Proxy
minions generally only open a connection to the actual minion when necessary.
Proxy minions also reduce the amount of CPU or memory the minion must spend
checking for commands from the Salt master. Proxy minions use the Salt master's CPU
or memory to check for commands. The actual minion only needs to use CPU or
memory to run commands when needed.
.. Note::
For more information about Salt proxy minions, see:
* `Salt proxy minions
<https://docs.saltproject.io/en/latest/topics/proxyminion/index.html>`_
* `Salt proxy modules
<https://docs.saltproject.io/en/latest/ref/proxy/all/index.html#all-salt-proxy>`_
When delta proxy minions are needed
-----------------------------------
Normally, you would create a separate instance of proxy minion for each device
that needs to be managed. However, this doesn't always scale well if you have
thousands of devices. Running several thousand proxy minions can require a lot
of memory and CPU.
A delta proxy minion can solve this problem: it makes it possible to run one
minion that acts as the intermediary between the Salt master and the many network
devices it can represent. In this scenario, one device (the delta proxy minion
on the Salt master) runs several proxies. This configuration boosts performance and
improves the overall scalability of the network.
Key terms
=========
The following lists some important terminology that is used throughout this
guide:
.. list-table::
:widths: 25 75
:header-rows: 1
* - Term
- Definition
* - Salt master
- The Salt master is a central node running the Salt master server.
The Salt master issues commands to minions.
* - minion
- Minions are nodes running the Salt minion service. Minions listen
to commands from a Salt master and perform the requested tasks, then return
data back to the Salt master as needed.
* - proxy minion
- A Salt master that is running the proxy-minion service. The proxy minion
acts as an intermediary between the Salt master and the device it represents.
The proxy minion runs on the Salt master and then translates commands from
the Salt master to the device. A separate instance of proxy minion is
needed for each device that is managed.
* - delta proxy minion
- A Salt master that is running the delta proxy-minion service. The
delta proxy minion acts as the intermediary between the Salt master and the
many network devices it can represent. Only one instance of the delta
proxy service is needed to run several proxies.
* - control proxy
- The control proxy runs on the Salt master. It manages a list of devices and
issues commands to the network devices it represents. The Salt master needs
at least one control proxy, but it is possible to have more than one
control proxy, each managing a different set of devices.
* - managed device
     - A device (such as one managed via Netmiko) that is managed by proxy
       minions or by a control proxy minion. The proxy minion or control proxy
       only creates a connection to the managed device when it needs to issue
       a command.
* - pillar file
- Pillars are structures of data (files) defined on the Salt master and passed
through to one or more minions when the minion needs access to the
pillar file. Pillars allow confidential, targeted data to be securely sent
only to the relevant minion. Because all configurations for
delta proxy minions are done on the Salt master (not on the minions), you
use pillar files to configure the delta proxy-minion service.
* - top file
- The top file is a pillar file that maps which states should be applied to
different minions in certain environments.
.. _delta-proxy-preinstall:
Pre-installation
================
Before you start
----------------
Before installing the delta proxy minion, ensure that:
* Your network device and firmware are supported.
* The Salt master that is acting as the control proxy minion has network
access to the devices it is managing.
* You have installed, configured, and tested standard Salt proxy minions in
  your environment before introducing delta proxy minions.
Install or upgrade Salt
-----------------------
Ensure your Salt masters are running at least Salt version 3004. For instructions
on installing or upgrading Salt, see `repo.saltproject.io
<http://repo.saltproject.io/>`_. For RedHat systems, see `Install or Upgrade Salt
<https://enterprise.saltproject.io/en/latest/docs/install-salt.html>`_.
.. _delta-proxy-install:
Installation
============
Before you begin the delta proxy minion installation process, ensure you
have read and completed the :ref:`delta-proxy-preinstall` steps.
Overview of the installation process
------------------------------------
Similar to proxy minions, all the delta proxy minion configurations are done
on the Salt master rather than on the minions that will be managed. The
installation process has the following phases:
#. `Configure the master to use delta proxy`_ - Create a
configuration file on the Salt master that defines its proxy settings.
#. `Create a pillar file for each managed device`_ - Create a
pillar file for each device that will be managed by the delta proxy minion
and reference these minions in the top file.
#. `Create a control proxy configuration file`_ - Create a control proxy file
that lists the devices that it will manage. Then, reference this file in the
top file.
#. `Start the delta proxy minion`_ - Start the delta proxy-minion service and
validate that it has been set up correctly.
Configure the master to use delta proxy
---------------------------------------
In this step, you'll create a configuration file on the Salt master that defines
its proxy settings. This is a general configuration file that tells the Salt master
how to handle all proxy minions.
To create this configuration:
#. On the Salt master, navigate to the ``/etc/salt`` directory. In this directory,
create a file named ``proxy`` if one doesn't already exist.
#. Open the file in your preferred editor and add the following configuration
information:
.. code-block:: yaml
# Use delta proxy metaproxy
metaproxy: deltaproxy
# Disable the FQDNS grain
enable_fqdns_grains: False
    # Enable multiprocessing
multiprocessing: True
.. Note::
See the following section about `delta proxy configuration options`_ for
a more detailed description of these configuration options.
#. Save the file.
Your Salt master is now configured to use delta proxy. Next, you need to
`Create a pillar file for each managed device`_.
Delta proxy configuration options
---------------------------------
The following table describes the configuration options used in the delta
proxy configuration file:
.. list-table::
:widths: 25 75
:header-rows: 1
* - Field
- Description
* - metaproxy
- Set this configuration option to ``deltaproxy``. If this option is set to
``proxy`` or if this line is not included in the file, the Salt master will
use the standard proxy service instead of the delta proxy service.
* - enable_fqdns_grains
- If your router does not have the ability to use Reverse DNS lookup to
obtain the Fully Qualified Domain Name (fqdn) for an IP address, you'll
need to change the ``enable_fqdns_grains`` setting in the pillar
configuration file to ``False`` instead.
* - multiprocessing
- Multi-processing is the ability to run more than one task or process at
the same time. A delta proxy minion has the ability to run with
multi-processing turned off.
       If you plan to run with multi-processing enabled, you should also set
       the ``skip_connect_on_init`` option to ``True``.
* - skip_connect_on_init
- This setting tells the control proxy whether or not it should make a
connection to the managed device when it starts. When set to ``True``, the
delta proxy minion will only connect when it needs to issue commands to
the managed devices.
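Putting these options together, a proxy configuration that enables
multiprocessing might look like the following sketch:

.. code-block:: yaml

    metaproxy: deltaproxy
    enable_fqdns_grains: False
    multiprocessing: True
    skip_connect_on_init: True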
Create a pillar file for each managed device
--------------------------------------------
Each device that needs to be managed by delta proxy needs a separate pillar
file on the Salt master. To create this file:
#. Navigate to the ``/srv/pillar`` directory.
#. In this directory create a new pillar file for a minion. For example,
``my_managed_device_pillar_file_01.sls``.
#. Open the new file in your preferred editor and add the necessary
configuration information for that minion and your environment. The
following is an example pillar file for a Netmiko device:
.. code-block:: yaml
proxy:
proxytype: netmiko
device_type: arista_eos
host: 192.0.2.1
username: myusername
password: mypassword
always_alive: True
.. Note::
The available configuration options vary depending on the proxy type (in
other words, the type of device it is). To read a detailed explanation of
the configuration options, refer to the proxy module documentation for
the type of device you need to manage. See:
* `Salt proxy modules
<https://docs.saltproject.io/en/latest/ref/proxy/all/index.html#all-salt-proxy>`_
* `Netmiko Salt proxy module
<https://docs.saltproject.io/en/latest/ref/proxy/all/salt.proxy.netmiko_px.html#module-salt.proxy.netmiko_px>`_
#. Save the file.
#. In an editor, open the top file: ``/srv/pillar/top.sls``.
#. Add a section to the top file that indicates the minion ID of the device
that will be managed. Then, list the name of the pillar file you created in
the previous steps. For example:
.. code-block:: yaml
my_managed_device_minion_ID:
- my_managed_device_pillar_file_01
#. Repeat the previous steps for each minion that needs to be managed.
You've now created the pillar file for the minions that will be managed by the
delta proxy minion and you have referenced these files in the top file.
Proceed to the next section.
Create a control proxy configuration file
-----------------------------------------
On the Salt master, you'll need to create or edit a control proxy file for each
control proxy. The control proxy manages several devices and issues commands to
the network devices it represents. The Salt master needs at least one control
proxy, but it is possible to have more than one control proxy, each managing a
different set of devices.
To configure a control proxy, you'll create a file that lists the minion IDs
of the minions that it will manage. Then you will reference this control proxy
configuration file in the top file.
To create a control proxy configuration file:
#. On the Salt master, navigate to the ``/srv/pillar`` directory. In this
directory, create a new proxy configuration file. Give this file a
descriptive name, such as ``control_proxy_01_configuration.sls``.
#. Open the file in your preferred editor and add a list of the minion IDs for
each device that needs to be managed. For example:
.. code-block:: yaml
proxy:
proxytype: deltaproxy
ids:
- my_managed_device_01
- my_managed_device_02
- my_managed_device_03
#. Save the file.
#. In an editor, open the top file: ``/srv/pillar/top.sls``.
#. Add a section to the top file that references the delta proxy
control proxy. For example:
.. code-block:: yaml
base:
my_managed_device_minion_01:
- my_managed_device_pillar_file_01
my_managed_device_minion_02:
- my_managed_device_pillar_file_02
my_managed_device_minion_03:
- my_managed_device_pillar_file_03
delta_proxy_control:
- control_proxy_01_configuration
#. Repeat the previous steps for each control proxy if needed.
#. In an editor, open the proxy config file: ``/etc/salt/proxy``.
   Add a section for ``metaproxy`` and set its value to ``deltaproxy``.
.. code-block:: yaml
metaproxy: deltaproxy
Now that you have created the necessary configurations, proceed to the next
section.
Start the delta proxy minion
----------------------------
After you've successfully configured the delta proxy minion, you need to
start the proxy minion service for each managed device and validate that it is
working correctly.
.. Note::
This step explains the process for starting a single instance of a
delta proxy minion. Because starting each minion individually can
potentially be very time-consuming, most organizations use a script to start
their delta proxy minions since there are typically many devices being
managed. Consider implementing a similar script for your environment to save
time in deployment.
To start a single instance of a delta proxy minion and test that it is
configured correctly:
#. In the terminal for the Salt master, run the following command, replacing the
placeholder text with the actual minion ID:
.. code-block:: bash
sudo salt-proxy --proxyid=<control_proxy_id>
#. To test the delta proxy minion, run the following ``test.version`` command
on the Salt master and target a specific minion. For example:
.. code-block:: bash
salt my_managed_device_minion_ID test.version
This command returns an output similar to the following:
.. code-block:: bash
local:
3004
After you've successfully started the delta proxy minions and verified that
they are working correctly, you can now use these minions the same as standard
proxy minions.
.. _delta-proxy-additional-resources:
Additional resources
====================
This reference section includes additional resources for delta proxy minions.
For reference, see:
* `Salt proxy minions
<https://docs.saltproject.io/en/latest/topics/proxyminion/index.html>`_
* `Salt proxy modules
<https://docs.saltproject.io/en/latest/ref/proxy/all/index.html#all-salt-proxy>`_
* `Netmiko Salt proxy module
<https://docs.saltproject.io/en/latest/ref/proxy/all/salt.proxy.netmiko_px.html#module-salt.proxy.netmiko_px>`_
|
/salt-3006.2.tar.gz/salt-3006.2/doc/ref/configuration/delta_proxy.rst
| 0.943034 | 0.726717 |
delta_proxy.rst
|
pypi
|
import itertools
import os
import re
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.domains import ObjType
from sphinx.domains import python as python_domain
from sphinx.domains.python import PyObject
from sphinx.locale import _
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode, nested_parse_with_titles, set_source_info
import salt
class Event(PyObject):
"""
Document Salt events
"""
domain = "salt"
class LiterateCoding(Directive):
"""
Auto-doc SLS files using literate-style comment/code separation
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def parse_file(self, fpath):
"""
Read a file on the file system (relative to salt's base project dir)
:returns: A file-like object.
:raises IOError: If the file cannot be found or read.
"""
sdir = os.path.abspath(os.path.join(os.path.dirname(salt.__file__), os.pardir))
with open(os.path.join(sdir, fpath), "rb") as f:
return f.readlines()
def parse_lit(self, lines):
"""
Parse a string line-by-line delineating comments and code
        :returns: A tuple of boolean/list-of-string pairs. True designates a
comment; False designates code.
"""
comment_char = "#" # TODO: move this into a directive option
comment = re.compile(r"^\s*{}[ \n]".format(comment_char))
section_test = lambda val: bool(comment.match(val))
sections = []
for is_doc, group in itertools.groupby(lines, section_test):
if is_doc:
text = [comment.sub("", i).rstrip("\r\n") for i in group]
else:
text = [i.rstrip("\r\n") for i in group]
sections.append((is_doc, text))
return sections
def run(self):
try:
lines = self.parse_lit(self.parse_file(self.arguments[0]))
except OSError as exc:
document = self.state.document
return [document.reporter.warning(str(exc), line=self.lineno)]
node = nodes.container()
node["classes"] = ["lit-container"]
node.document = self.state.document
enum = nodes.enumerated_list()
enum["classes"] = ["lit-docs"]
node.append(enum)
# make first list item
list_item = nodes.list_item()
list_item["classes"] = ["lit-item"]
for is_doc, line in lines:
if is_doc and line == [""]:
continue
section = nodes.section()
if is_doc:
section["classes"] = ["lit-annotation"]
nested_parse_with_titles(self.state, ViewList(line), section)
else:
section["classes"] = ["lit-content"]
code = "\n".join(line)
literal = nodes.literal_block(code, code)
literal["language"] = "yaml"
set_source_info(self, literal)
section.append(literal)
list_item.append(section)
# If we have a pair of annotation/content items, append the list
# item and create a new list item
if len(list_item.children) == 2:
enum.append(list_item)
list_item = nodes.list_item()
list_item["classes"] = ["lit-item"]
# Non-semantic div for styling
bg = nodes.container()
bg["classes"] = ["lit-background"]
node.append(bg)
return [node]
class LiterateFormula(LiterateCoding):
"""
Customizations to handle finding and parsing SLS files
"""
def parse_file(self, sls_path):
"""
Given a typical Salt SLS path (e.g.: apache.vhosts.standard), find the
file on the file system and parse it
"""
config = self.state.document.settings.env.config
formulas_dirs = config.formulas_dirs
fpath = sls_path.replace(".", "/")
name_options = ("{}.sls".format(fpath), os.path.join(fpath, "init.sls"))
paths = [
os.path.join(fdir, fname)
for fname in name_options
for fdir in formulas_dirs
]
for i in paths:
try:
with open(i, "rb") as f:
return f.readlines()
except OSError:
pass
raise OSError("Could not find sls file '{}'".format(sls_path))
class CurrentFormula(Directive):
domain = "salt"
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
env = self.state.document.settings.env
modname = self.arguments[0].strip()
if modname == "None":
env.temp_data["salt:formula"] = None
else:
env.temp_data["salt:formula"] = modname
return []
class Formula(Directive):
domain = "salt"
has_content = True
required_arguments = 1
def run(self):
env = self.state.document.settings.env
formname = self.arguments[0].strip()
env.temp_data["salt:formula"] = formname
if "noindex" in self.options:
return []
env.domaindata["salt"]["formulas"][formname] = (
env.docname,
self.options.get("synopsis", ""),
self.options.get("platform", ""),
"deprecated" in self.options,
)
targetnode = nodes.target("", "", ids=["module-" + formname], ismod=True)
self.state.document.note_explicit_target(targetnode)
indextext = "{}-formula)".format(formname)
inode = addnodes.index(
entries=[("single", indextext, "module-" + formname, "")]
)
return [targetnode, inode]
class State(Directive):
domain = "salt"
has_content = True
required_arguments = 1
def run(self):
env = self.state.document.settings.env
statename = self.arguments[0].strip()
if "noindex" in self.options:
return []
targetnode = nodes.target("", "", ids=["module-" + statename], ismod=True)
self.state.document.note_explicit_target(targetnode)
formula = env.temp_data.get("salt:formula")
indextext = "{1} ({0}-formula)".format(formula, statename)
inode = addnodes.index(
entries=[("single", indextext, "module-{}".format(statename), "")]
)
return [targetnode, inode]
class SLSXRefRole(XRefRole):
pass
class SaltModuleIndex(python_domain.PythonModuleIndex):
name = "modindex"
localname = _("Salt Module Index")
shortname = _("all salt modules")
class SaltDomain(python_domain.PythonDomain):
name = "salt"
label = "Salt"
data_version = 2
object_types = python_domain.PythonDomain.object_types
object_types.update({"state": ObjType(_("state"), "state")})
directives = python_domain.PythonDomain.directives
directives.update(
{
"event": Event,
"state": State,
"formula": LiterateFormula,
"currentformula": CurrentFormula,
"saltconfig": LiterateCoding,
}
)
roles = python_domain.PythonDomain.roles
roles.update({"formula": SLSXRefRole()})
initial_data = python_domain.PythonDomain.initial_data
initial_data.update({"formulas": {}})
indices = [
SaltModuleIndex,
]
    def resolve_xref(self, env, fromdocname, builder, type, target, node, contnode):
        if type == "formula" and target in self.data["formulas"]:
            # The membership check above guarantees the key exists.
            doc, _, _, _ = self.data["formulas"][target]
            if doc:
                return make_refnode(builder, fromdocname, doc, target, contnode, target)
        else:
            return super().resolve_xref(
                env, fromdocname, builder, type, target, node, contnode
            )
# Monkey-patch the Python domain to replace the Python module index with the Salt module index
python_domain.PythonDomain.indices = [SaltModuleIndex]
def setup(app):
app.add_domain(SaltDomain)
formulas_path = "templates/formulas"
formulas_dir = os.path.join(
os.path.abspath(os.path.dirname(salt.__file__)), formulas_path
)
app.add_config_value("formulas_dirs", [formulas_dir], "env")
app.add_crossref_type(
directivename="conf_master",
rolename="conf_master",
indextemplate="pair: %s; conf/master",
)
app.add_crossref_type(
directivename="conf_minion",
rolename="conf_minion",
indextemplate="pair: %s; conf/minion",
)
app.add_crossref_type(
directivename="conf_proxy",
rolename="conf_proxy",
indextemplate="pair: %s; conf/proxy",
)
app.add_crossref_type(
directivename="conf_log",
rolename="conf_log",
indextemplate="pair: %s; conf/logging",
)
app.add_crossref_type(
directivename="jinja_ref",
rolename="jinja_ref",
indextemplate="pair: %s; jinja filters",
)
return dict(parallel_read_safe=True, parallel_write_safe=True)
|
/salt-3006.2.tar.gz/salt-3006.2/doc/_ext/saltdomain.py
| 0.417628 | 0.197754 |
saltdomain.py
|
pypi
|
.. _salt-system-architecture:
========================
Salt system architecture
========================
Overview
========
This page provides a high-level overview of the Salt system architecture and its
different components.
What is Salt?
=============
Salt is a Python-based open-source remote execution framework used for:
* Configuration management
* Automation
* Provisioning
* Orchestration
The Salt system architecture
============================
The following diagram shows the primary components of the basic Salt
architecture:
.. image:: /_static/salt-architecture.png
:width: 80%
The following sections describe some of the core components of the Salt
architecture.
Salt Masters and Salt Minions
-----------------------------
Salt uses the master-client model in which a master issues commands to a client
and the client executes the command. In the Salt ecosystem, the Salt Master is a
server that is running the ``salt-master`` service. It issues commands to one or
more Salt Minions, which are servers that are running the ``salt-minion``
service and that are registered with that particular Salt Master.
Another way to describe Salt is as a publisher-subscriber model. The master
publishes jobs that need to be executed and Salt Minions subscribe to those
jobs. When a specific job applies to that minion, it will execute the job.
When a minion finishes executing a job, it sends job return data back to the
master. Salt has two ports used by default for the minions to communicate with
their master(s): 4505 (the publish port) and 4506 (the request/return port).
These ports work in concert to receive and deliver data to the message bus.
Salt's message bus is ZeroMQ, which creates an asynchronous network
topology to provide the fastest communication possible.
Targets and grains
------------------
The master indicates which minions should execute the job by defining a
*target*. A target is the group of minions, across one or many masters, that a
job's Salt command applies to.
.. Note::
A master can also be managed like a minion and can be a target if it is
running the ``salt-minion`` service.
The following is an example of one of the many kinds of commands that a master
might issue to a minion. This command indicates that all minions should install
the Vim application:
.. code-block:: bash
salt -v '*' pkg.install vim
In this case the glob ``'*'`` is the target, which indicates that all minions
should execute this command. Many other targeting options are available,
including targeting a specific minion by its ID or targeting minions by their
shared traits or characteristics (called *grains* in Salt).
Salt comes with an interface to derive information about the underlying system.
This is called the *grains interface*, because it presents Salt with grains of
information. Grains are collected for the operating system, domain name,
IP address, kernel, OS type, memory, and many other system properties. You can
also create your own custom grain data.
Grain data is relatively static. However, grain data is refreshed when system
information changes (such as network settings) or when a new value is assigned
to a custom grain.
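For example, grains can be used both to inspect a minion and to target it.
The following commands use modules shipped with Salt; the grain value is
illustrative:

.. code-block:: bash

    # List every grain and its value on all minions
    salt '*' grains.items

    # Target only minions whose os grain is Ubuntu
    salt -G 'os:Ubuntu' pkg.install vim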
Open event system (event bus)
-----------------------------
The event system is used for inter-process communication between the Salt Master
and Salt Minions. In the event system:
* Events are seen by both the master and minions.
* Events can be monitored and evaluated by both.
The event bus lays the groundwork for orchestration and real-time monitoring.
All minions see jobs and results by subscribing to events published on the event
system. Salt uses a pluggable event system with two layers:
* **ZeroMQ (0MQ)** - The current default socket-level library providing a
flexible transport layer.
* **Tornado** - Full TCP-based transport layer event system.
One of Salt's greatest strengths is its speed of execution. The event
system's communication bus is more efficient than running a higher-level web
service (HTTP). The remote execution system is the foundation that all other
components are built upon, allowing for decentralized remote execution to
spread load across resources.
Salt states
-----------
In addition to remote execution, Salt provides another method for configuring
minions by declaring which *state* a minion should be in, otherwise referred to
as *Salt states*. Salt states make configuration management possible. You can
use Salt states to deploy and manage infrastructure with simple YAML files.
Using states, you can automate recursive and predictable tasks by queueing jobs
for Salt to implement without needing user input. You can also add more complex
conditional logic to state files with Jinja.
To illustrate the subtle differences between remote execution and configuration
management, take the command referenced in the previous section about
`Targets and grains`_ in which Salt installed the application Vim on all
minions:
.. list-table::
:widths: 25 25 50
:header-rows: 1
* - Methodology
- Implementation
- Result
* - Remote execution
- * Run ``salt -v '*' pkg.install vim`` from the terminal
- * Remotely installs Vim on the targeted minions
* - Configuration management
- * Write a YAML state file that checks whether Vim is installed
* This state file is then applied to the targeted minions
- * Ensures that Vim is always installed on the targeted minions
* Salt analyzes the state file and determines what actions need to be
taken to ensure the minion complies with the state declarations
* If Vim is not installed, it automates the processes to install Vim on
the targeted minions
The state file that verifies Vim is installed might look like the following
example:
.. code-block:: yaml
    # File: /srv/salt/vim_install.sls
install_vim_now:
pkg.installed:
- pkgs:
- vim
To apply this state to a minion, you would use the ``state.apply`` module, such
as in the following example:
.. code-block:: bash
salt '*' state.apply vim_install
This command applies the ``vim_install`` state to all minions.
*Formulas* are collections of states that work in harmony to configure a minion
or application. For example, one state might trigger another state.
The Top file
------------
It is not practical to manually run each state individually targeting specific
minions each time. Some environments have hundreds of state files targeting
thousands of minions.
Salt offers two features to help with this scaling problem:
* **The top.sls file** - Maps Salt states to their applicable minions.
* **Highstate execution** - Runs all Salt states outlined in ``top.sls`` in a
single execution.
The top file maps which states should be applied to different minions in certain
environments. The following is an example of a simple top file:
.. code-block:: yaml
# File: /srv/salt/top.sls
base:
'*':
- all_server_setup
'01webserver':
- web_server_setup
In this example, ``base`` refers to the Salt environment, which is the default.
You can specify more than one environment as needed, such as prod, dev, QA, etc.
Groups of minions are specified under the environment, and states are listed for
each set of minions. This top file indicates that a state called
``all_server_setup`` should be applied to all minions ``'*'`` and the state
called ``web_server_setup`` should be applied to the ``01webserver`` minion.
To run the Salt command, you would use the ``state.highstate`` function:
.. code-block:: bash
    salt '*' state.highstate
This command applies the top file to the targeted minions.
Salt pillar
-----------
Salt’s pillar feature takes data defined on the Salt Master and distributes it
to minions as needed. Pillar is primarily used to store secrets or other highly
sensitive data, such as account credentials, cryptographic keys, or passwords.
Pillar is also useful for storing non-secret data that you don't want to place
directly in your state files, such as configuration data.
Salt pillar brings data into the cluster from the opposite direction from
grains. While grains are data generated by the minion, pillar is data
generated on the master.
Pillar files are organized much like state files in a pillar tree, where a
``top.sls`` file maps pillar data to environments and to the minions that
should receive it. When pillar data is transferred, a dictionary is generated
for the targeted minion and encrypted with that minion's key for secure data
transfer. Pillar data is encrypted on a per-minion basis, which makes it
useful for storing sensitive data specific to a particular minion.
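As a minimal sketch, a pillar tree might pair a top file with a data file
(file names and values here are illustrative):

.. code-block:: yaml

    # File: /srv/pillar/top.sls
    base:
      '*':
        - credentials

.. code-block:: yaml

    # File: /srv/pillar/credentials.sls
    db_password: s3cr3t

State files can then reference this data with Jinja, for example
``{{ pillar['db_password'] }}``, so the secret never appears in the state
file itself.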
Beacons and reactors
--------------------
The beacon system is a monitoring tool that can listen for a variety of system
processes on Salt Minions. Beacons can trigger reactors which can then help
implement a change or troubleshoot an issue. For example, if a service’s
response times out, the reactor system can restart the service.
Beacons are used for a variety of purposes, including:
* Automated reporting
* Error log delivery
* Microservice monitoring
* User shell activity
* Resource monitoring
When coupled with reactors, beacons can create automated pre-written responses
to infrastructure and application issues. Reactors expand Salt with automated
responses using pre-written remediation states.
Reactors can be applied in a variety of scenarios:
* Infrastructure scaling
* Notifying administrators
* Restarting failed applications
* Automatic rollback
When beacons and reactors are used together, you can create unique, automated
responses customized to your specific needs.
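As a sketch, a reactor mapping on the master ties an event tag to a
remediation state. The tag below follows the beacon event naming pattern, and
the file paths are illustrative:

.. code-block:: yaml

    # File: /etc/salt/master.d/reactor.conf
    reactor:
      - 'salt/beacon/*/inotify//etc/important_file':
        - /srv/reactor/revert_file.sls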
Salt runners and orchestration
------------------------------
Salt runners are convenience applications executed with the ``salt-run``
command. Salt runners work similarly to Salt execution modules. However, they
execute on the Salt Master instead of the Salt Minions. A Salt runner can be a
simple client call or a complex application.
Salt provides the ability to orchestrate system administrative tasks throughout
the enterprise. Orchestration makes it possible to coordinate the activities of
multiple machines from a central place. It has the added advantage of being able
to control the sequence of when certain configuration events occur.
Orchestration states execute on the master using the state runner module.
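A simple orchestration state might look like the following sketch, in which
the target and state names are illustrative:

.. code-block:: yaml

    # File: /srv/salt/orch/deploy.sls
    setup_webservers:
      salt.state:
        - tgt: 'web*'
        - sls:
          - web_server_setup

This could then be run from the master with
``salt-run state.orchestrate orch.deploy``.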
|
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/salt_system_architecture.rst
| 0.923178 | 0.824533 |
salt_system_architecture.rst
|
pypi
|
.. _release-2015-5-0:
==============================================
Salt 2015.5.0 Release Notes - Codename Lithium
==============================================
The 2015.5.0 feature release of Salt is focused on hardening Salt and mostly
on improving existing systems. A few major additions are present, primarily
the new Beacon system. Most enhancements have been focused around improving
existing features and interfaces.
As usual, the release notes are not exhaustive and primarily include the most
notable additions and improvements. Hundreds of bugs have been fixed and many
modules have been substantially updated and added.
.. warning::
In order to fix potential shell injection vulnerabilities in salt modules,
a change has been made to the various ``cmd`` module functions. These
functions now default to ``python_shell=False``, which means that the
commands will not be sent to an actual shell.
The largest side effect of this change is that "shellisms", such as pipes,
will not work by default. The modules shipped with salt have been audited
to fix any issues that might have arisen from this change. Additionally,
    the ``cmd`` state module is unaffected, and use of ``cmd.run`` in Jinja is
    also unaffected. ``cmd.run`` calls on the CLI will also allow shellisms.
However, custom execution modules which use shellisms in ``cmd`` calls
will break, unless you pass ``python_shell=True`` to these calls.
As a temporary workaround, you can set ``cmd_safe: False`` in your minion
and master configs. This will revert the default, but is also less secure,
as it will allow shell injection vulnerabilities to be written in custom
code. We recommend you only set this setting for as long as it takes to
resolve these issues in your custom code, then remove the override.
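For example, a custom execution module that relies on a pipe must now opt in
explicitly. This is a sketch; the command itself is illustrative:

.. code-block:: python

    # Inside a custom execution module: shell features such as pipes
    # now require python_shell=True to be passed explicitly.
    ret = __salt__['cmd.run']('dmesg | grep -i error', python_shell=True)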
.. note::
Starting in this version of salt, ``pillar_opts`` defaults to False instead
of True. This means that master opts will not be present in minion pillar,
and as a result, ``config.get`` calls will not include master opts.
We recommend pillar is used for configuration options which need to make it
to the minion.
Beacons
=======
The beacon system allows the minion to hook into system processes and
continually translate external events into the salt event bus. The primary
example of this is the :py:mod:`~salt.beacons.inotify` beacon. This beacon uses
inotify to watch configured files or directories on the minion for changes,
creation, deletion etc.
This allows for the changes to be sent up to the master where the reactor can
respond to changes.
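A minimal minion configuration for this beacon might look like the following
sketch (the watched paths are illustrative; see the beacon documentation for
the full set of options):

.. code-block:: yaml

    beacons:
      inotify:
        /etc/important_file: {}
        /opt: {}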
Sudo Minion Settings
====================
It is now possible to run the minion as a non-root user and for the minion to
execute commands via sudo. Simply add ``sudo_user: root`` to the minion config,
run the minion as a non-root user and grant that user sudo rights to execute
salt-call.
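A sketch of the relevant minion config entries (the user name is
illustrative):

.. code-block:: yaml

    # /etc/salt/minion
    user: saltdev
    sudo_user: root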
Lazy Loader
===========
The Lazy Loader is a significant overhaul of Salt's module loader system. The
Lazy Loader will lazily load modules on access instead of all on start. In
addition to a major performance improvement, this "sandboxes" modules so a
bad/broken import of a single module will only affect jobs that require
accessing the broken module. (:issue:`20274`)
Enhanced Active Directory Support
=================================
The eauth system for LDAP has been extended to support Microsoft Active
Directory out of the box. This includes Active Directory and LDAP group support
for eauth.
Salt LXC Enhancements
=====================
The LXC systems have been overhauled to be more consistent and to fix many
bugs.
This overhaul makes using LXC with Salt much easier and substantially improves
the underlying capabilities of Salt's LXC integration.
Salt SSH
========
- Additional configuration options and command line flags have been added to
configure the scan roster on the fly
- Added support for ``state.single`` in ``salt-ssh``
- Added support for ``publish.publish``, ``publish.full_data``, and
``publish.runner`` in ``salt-ssh``
- Added support for ``mine.get`` in ``salt-ssh``
New Windows Installer
=====================
The new Windows installer changes how Salt is installed on Windows.
The old installer used bbfreeze to create an isolated python environment to
execute in. This made adding modules and python libraries difficult. The new
installer sets up a more flexible python environment making it easy to manage
the python install and add python modules.
Instead of frozen packages, a full python implementation resides in the bin
directory (``C:\salt\bin``). By executing pip or easy_install from within the
Scripts directory (``C:\salt\bin\Scripts``) you can install any additional
python modules you may need for your custom environment.
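For example, installing an additional module might look like the following
(a sketch; the package name is illustrative):

.. code-block:: bat

    cd C:\salt\bin\Scripts
    pip install requests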
The .exe's that once resided at the root of the salt directory (``C:\salt``)
have been replaced by .bat files and should function the same way as the .exe's
in previous versions.
The new Windows Installer will not replace the minion config file and key if
they already exist on the target system. Only the salt program files will be
replaced. ``C:\salt\conf`` and ``C:\salt\var`` will remain unchanged.
Removed Requests Dependency
===========================
The hard dependency on the requests library has been removed. Requests is still
required by a number of cloud modules but is no longer required for normal Salt
operations.
This removal fixes issues that were introduced with requests and salt-ssh, as
well as issues users experienced from the many different packaging methods used
by requests package maintainers.
Python 3 Updates
================
While Salt does not YET run on Python 3, it has been updated to INSTALL on
Python 3, taking us one step closer. What remains is getting the test suite to
the point where it can run on Python 3 so that we can verify compatibility.
RAET Additions
==============
The RAET support continues to improve. RAET now supports multi-master and many
bugs and performance issues have been fixed. RAET is much closer to being a
first class citizen.
Modified File Detection
=======================
A number of functions have been added to the RPM-based package managers to
detect and diff files that are modified from the original package installs.
This can be found in the new pkg.modified functions.
Reactor Update
==============
Fix an infinite recursion problem for runner/wheel reactor jobs by passing a
"user" (Reactor) to all jobs that the reactor starts. The reactor skips all
events created by that username -- thereby only reacting to events not caused
by itself. Because of this, runner and wheel executions from the runner will
have user "Reactor" in the job cache.
Misc Fixes/Additions
====================
- SDB driver for etcd. (:issue:`22043`)
- Add ``only_upgrade`` argument to apt-based ``pkg.install`` to only install a
  package version if the package is already installed. (Great for security
  updates! See the example after this list.)
- Joyent now requires a ``keyname`` to be specified in the provider
configuration. This change was necessitated upstream by the 7.0+ API.
- Add ``args`` argument to ``cmd.script_retcode`` to match ``cmd.script`` in
  the :py:mod:`cmd module <salt.modules.cmdmod>`. (:issue:`21122`)
- Fixed bug where TCP keepalive was not being sent on the defined interval on
  the return port (4506) from minion to master. (:issue:`21465`)
- LocalClient may now optionally raise SaltClientError exceptions. If using
this class directly, checking for and handling this exception is recommended.
  (:issue:`21501`)
- The SAuth object is now a singleton, meaning authentication state is global
(per master) on each minion. This reduces sign-ins of minions from 3->1 per
startup.
- The nested outputter has been optimized; it is now much faster.
- Extensive fileserver backend updates.
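For example, the new ``only_upgrade`` argument can be passed directly on the
CLI (the package name is illustrative):

.. code-block:: bash

    salt '*' pkg.install vim only_upgrade=True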
Deprecations
============
- Removed ``parameter`` keyword argument from ``eselect.exec_action`` execution
module.
- Removed ``runas`` parameter from the following ``pip`` execution module
functions: ``install``, ``uninstall``, ``freeze``, ``list_``,
``list_upgrades``, ``upgrade_available``, ``upgrade``. Please migrate to
``user``.
- Removed ``runas`` parameter from the following ``pip`` state module
  functions: ``installed``, ``removed``, ``uptodate``. Please migrate to
``user``.
- Removed ``quiet`` option from all functions in ``cmdmod`` execution module.
Please use ``output_loglevel=quiet`` instead.
- Removed ``parameter`` argument from ``eselect.set_`` state. Please migrate to
``module_parameter`` or ``action_parameter``.
- The ``salt_events`` table schema has changed to include an additional field
called ``master_id`` to distinguish between events flowing into a database
from multiple masters. If ``event_return`` is enabled in the master config,
the database schema must first be updated to add the ``master_id`` field.
This alteration can be accomplished as follows:
``ALTER TABLE salt_events ADD master_id VARCHAR(255) NOT NULL;``
Known Issues
============
- In multi-master mode, a minion may become temporarily unresponsive if modules
or pillars are refreshed at the same time that one or more masters are down.
  This can be worked around by setting ``auth_timeout`` and ``auth_tries`` to
  shorter values, as shown below.
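For example, a minion config might shorten these settings as follows (the
values are illustrative only):

.. code-block:: yaml

    auth_timeout: 10
    auth_tries: 2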
|
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/releases/2015.5.0.rst
| 0.794982 | 0.764935 |
2015.5.0.rst
|
pypi
|
===========================
Salt 2016.3.5 Release Notes
===========================
Version 2016.3.5 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
Statistics
==========
- Total Merges: **190**
- Total Issue References: **112**
- Total PR References: **281**
- Contributors: **74** (`Ch3LL`_, `DmitryKuzmenko`_, `Firewire2002`_, `Mrten`_, `Talkless`_,
`TronPaul`_, `UtahDave`_, `aaronm-cloudtek`_, `alex-zel`_, `alexandr-orlov`_, `alexbleotu`_,
`attiasr`_, `basepi`_, `bdrung`_, `bshelton229`_, `cachedout`_, `calve`_, `clan`_, `clinta`_,
`cro`_, `dere`_, `dereckson`_, `dhaines`_, `dincamihai`_, `do3meli`_, `dragon788`_, `edgan`_,
`fedusia`_, `fj40crawler`_, `genuss`_, `gtmanfred`_, `haeac`_, `heewa`_, `hu-dabao`_,
`jeanpralo`_, `jfindlay`_, `jinm`_, `kevinquinnyo`_, `kontrolld`_, `laleocen`_, `lorengordon`_,
`m03`_, `mcalmer`_, `mchugh19`_, `meaksh`_, `mikejford`_, `moio`_, `multani`_, `nevins-b`_,
`pass-by-value`_, `rallytime`_, `rbjorklin`_, `siccrusher`_, `silenius`_, `sjmh`_, `sjorge`_,
`skizunov`_, `slinn0`_, `sofixa`_, `techhat`_, `tedski`_, `terminalmage`_, `thatch45`_,
`thusoy`_, `toanju`_, `tobithiel`_, `twangboy`_, `tyhunt99`_, `vutny`_, `wanparo`_, `whiteinge`_,
`xiaoanyunfei`_, `yhekma`_, `zwo-bot`_)
Security Fixes
==============
**CVE-2017-5192** local_batch client external authentication not respected
The ``LocalClient.cmd_batch()`` method does not accept ``external_auth``
credentials and so access to it from salt-api has been removed for now. This
vulnerability allows code execution for already-authenticated users and is only
in effect when running salt-api as the ``root`` user.
**CVE-2017-5200** Salt-api allows arbitrary command execution on a salt-master via
Salt's ssh_client
Users of Salt-API and salt-ssh could execute a command on the salt master via a
hole when both systems were enabled.
We recommend everyone on the 2016.3 branch upgrade to a patched release as soon
as possible.
Improved Checksum Handling in :py:func:`file.managed <salt.states.file.managed>`, :py:func:`archive.extracted <salt.states.archive.extracted>` States
=====================================================================================================================================================
When the ``source_hash`` argument for these states refers to a file containing
checksums, Salt now looks for checksums matching the name of the source URI, as
well as the file being managed. Prior releases only looked for checksums
matching the filename being managed. Additionally, a new argument
(``source_hash_name``) has been added, which allows the user to disambiguate
ambiguous matches when more than one matching checksum is found in the
``source_hash`` file.
A more detailed explanation of this functionality can be found in the
:py:func:`file.managed <salt.states.file.managed>` documentation, in the
section for the new ``source_hash_name`` argument.
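For illustration, a state using ``source_hash_name`` to pick one entry out of
a checksum file might look like the following sketch (the URL and paths are
hypothetical):

.. code-block:: yaml

    /etc/foo.conf:
      file.managed:
        - source: salt://foo.conf
        - source_hash: https://mydomain.tld/hashes/SHA256SUMS
        - source_hash_name: ./dir1/foo.conf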
Changelog for v2016.3.4..v2016.3.5
==================================
*Generated at: 2018-05-27 05:09:33 UTC*
* **PR** `#38833`_: (`Ch3LL`_) add 2016.3.5 changelog to release notes
@ *2017-01-19 23:27:26 UTC*
* a04ab86da1 Merge pull request `#38833`_ from Ch3LL/add_release_notes_2016.3.5
* 374dc1ab88 skip 2016.3.5 due to :doc: references
* 31f324c4ff add 2016.3.5 changelog to release notes
* **PR** `#38812`_: (`rallytime`_) Update pyobjects test to be a list
@ *2017-01-18 21:06:01 UTC*
* d14f0c64eb Merge pull request `#38812`_ from rallytime/pyobjects-test
* f3e84c1ab7 Update pyobjects test to be a list
* **ISSUE** `#36598`_: (`ikkaro`_) CloudClient vmware driver reusing SI bug (refs: `#38813`_)
* **PR** `#38813`_: (`gtmanfred`_) catch SIGPIPE in vmware connection
@ *2017-01-18 21:05:42 UTC*
* 50f03f8057 Merge pull request `#38813`_ from gtmanfred/2016.3
* ce3472cec2 catch SIGPIPE in vmware connection
* **PR** `#38809`_: (`twangboy`_) Fix get_hostname to handle longer computer names
@ *2017-01-18 19:32:00 UTC*
* 23b8b47258 Merge pull request `#38809`_ from twangboy/fix_hostname_2016.3
* d57a51f9f9 Fix tests for get_hostname
* 7ca3fd7484 Fix get_hostname to handle longer computer names
* **ISSUE** `#38388`_: (`johje349`_) No INFO logs in minion log file (refs: `#38808`_)
* **PR** `#38808`_: (`vutny`_) Fix `#38388`_
@ *2017-01-18 18:19:36 UTC*
* 1033bbdde8 Merge pull request `#38808`_ from vutny/fix-38388
* 9bd203ffcc Fix `#38388`_
* **ISSUE** `#38604`_: (`jsandas`_) Using "batch" with saltmod errors with "ValueError: need more than 2 values to unpack" (refs: `#38668`_)
* **PR** `#38668`_: (`terminalmage`_) Fix proposal for `#38604`_
@ *2017-01-18 17:53:09 UTC*
* f3ae3cd5c8 Merge pull request `#38668`_ from terminalmage/issue38604
* 0ea97cdad9 Merge pull request `#10`_ from cachedout/pr-38668
* db81afc035 Munge retcode into return data for batching
* a642a995dc Return the ret data from batch execution instead of raw data
* **ISSUE** `#38622`_: (`mikejford`_) Incorrect saltenv argument documentation in salt.modules.state (refs: `#38789`_)
* **PR** `#38789`_: (`rallytime`_) Update some saltenv refs to environment in salt.modules.state docs
@ *2017-01-18 15:39:22 UTC*
* c6a19a9e5a Merge pull request `#38789`_ from rallytime/fix-38622
* af41fe0c6e Update some saltenv refs to environment in salt.modules.state docs
* **PR** `#38790`_: (`cachedout`_) Fix typo in pyobjects test
@ *2017-01-18 15:38:57 UTC*
* e0bf700020 Merge pull request `#38790`_ from cachedout/fix_pyobjects_test_typo
* a66afb5f0f Fix typo in pyobjects test
* **ISSUE** `#38629`_: (`Arabus`_) Conflicting documentation about default value of pillar_opts (refs: `#38792`_)
* **PR** `#38792`_: (`rallytime`_) Update pillar tutorial lanuage regarding pillar_opts settings
@ *2017-01-18 15:38:19 UTC*
* 6e9785edea Merge pull request `#38792`_ from rallytime/fix-38629
* 1e125e2844 Update pillar tutorial lanuage regarding pillar_opts settings
* **PR** `saltstack/salt#38707`_: (`alexbleotu`_) Fixed prepending of root_dir override to the other paths (refs: `#38796`_)
* **PR** `#38796`_: (`cachedout`_) Revert "Fixed prepending of root_dir override to the other paths"
@ *2017-01-17 23:18:18 UTC*
* 3417adc617 Merge pull request `#38796`_ from saltstack/revert-38707-root_dir_fix-gh
* cb080f3bbe Revert "Fixed prepending of root_dir override to the other paths"
* **ISSUE** `#38524`_: (`rbjorklin`_) salt-api seems to ignore rest_timeout since 2016.11.0 (refs: `#38585`_, `#38527`_)
* **ISSUE** `#38479`_: (`tyeapple`_) api_logfile setting takes no effect (refs: `#38585`_)
* **PR** `#38585`_: (`rallytime`_) Follow up to PR `#38527`_
@ *2017-01-17 18:40:01 UTC*
* **PR** `#38570`_: (`rallytime`_) [2016.11] Merge forward from 2016.3 to 2016.11 (refs: `#38585`_)
* **PR** `#38560`_: (`Ch3LL`_) fix api logfile (refs: `#38585`_)
* **PR** `#38527`_: (`rbjorklin`_) salt-api no longer forces the default timeout (refs: `#38585`_)
* bab3479a3c Merge pull request `#38585`_ from rallytime/follow-up-38527
* 05587201b6 Pylint fix: add line at end of file
* fa01367599 Keep a copy of the DEFAULT_API_OPTS and restore them after the test run
* 2ad07634d9 Test clean up
* fd2ee7db30 Add some simple unit tests for salt.config.api_config function
* 3d2fefc83b Make sure the pidfile and log_file values are overridden by api opts
* 1f6b540e46 Make sure the pidfile and log_file values are overridden by api opts
* 04d307f917 salt-api no longer forces the default timeout
* **PR** `#38707`_: (`alexbleotu`_) Fixed prepending of root_dir override to the other paths
@ *2017-01-17 15:40:13 UTC*
* 0fb6bb7b77 Merge pull request `#38707`_ from alexbleotu/root_dir_fix-gh
* 0bac8c8be3 Fixed prepending of root_dir override to the other paths
* **PR** `#38774`_: (`vutny`_) DOCS: add C++ compiler installation on RHEL required for bundled 0mq
@ *2017-01-17 15:21:00 UTC*
* 96c9dc10f7 Merge pull request `#38774`_ from vutny/dev-test-docs
* 4620dc4afa DOCS: add C++ compiler installation on RHEL required for bundled 0mq
* **PR** `#38749`_: (`vutny`_) pkg build modules throw better exception message if keyid wasn't found
@ *2017-01-17 02:13:08 UTC*
* aedfbb7a43 Merge pull request `#38749`_ from vutny/pkg-build-better-exception-msg
* 53f2be5b21 pkg build modules throw better exception message if keyid wasn't found
* **PR** `#38743`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3
@ *2017-01-17 01:46:01 UTC*
* 8466b34e82 Merge pull request `#38743`_ from rallytime/merge-2016.3
* d24776f5e9 Merge branch '2015.8' into '2016.3'
* 6869621ed1 Merge pull request `#38731`_ from rallytime/merge-2015.8
* 9eb191b6ac Pylint fix
* b910499dbe Various follow up fixes
* e8309a6bbf Add release notes for 2015.8.13
* f881f366b7 Merge pull request `#20`_ from rallytime/2015.8.12_follow_up-batch-tests
* 34282322c0 Clean up tests and docs for batch execution
* c80b20b957 Merge pull request `#19`_ from whiteinge/batchclient
* 3d8f3d18f6 Remove batch execution from NetapiClient and Saltnado
* 97b0f64923 Lintfix
* d1516664f7 Add explanation comment
* 62f2c87080 Add docstring
* 9b0a786aeb Explain what it is about and how to configure that
* 5ea3579e10 Pick up a specified roster file from the configured locations
* 3a8614c5df Disable custom rosters in API
* c0e5a1171d Add roster disable flag
* e9c59e9b8f Merge pull request `#38602`_ from terminalmage/fix-boto-test
* 3424a108ac Fix failing unit.states.boto_vpc_test.BotoVpcRouteTableTestCase.test_present_with_routes
* **ISSUE** `#38674`_: (`jackywu`_) There is no code to use parameter 'event_publisher_pub_hwm' in saltstack-2016.3 (refs: `#38723`_)
* **PR** `#38723`_: (`rallytime`_) Remove "event_publisher_pub_hwm" and "salt_event_pub_hwm" from config/__init__.py
@ *2017-01-15 18:36:14 UTC*
* **PR** `#29294`_: (`skizunov`_) ZeroMQ no longer required when transport is TCP (refs: `#38723`_)
* a642cdef79 Merge pull request `#38723`_ from rallytime/fix-38674
* 706c885f55 Remove "event_publisher_pub_hwm" and "salt_event_pub_hwm" from config/__init__.py
* **PR** `#38669`_: (`rallytime`_) Update bootstrap script verstion to latest release
@ *2017-01-15 18:03:27 UTC*
* fc545af10b Merge pull request `#38669`_ from rallytime/update-bootstrap-script
* 78ba76e34c Update bootstrap script verstion to latest release
* **PR** `#38693`_: (`twangboy`_) Update jinja2 to 2.9.4
@ *2017-01-15 14:40:46 UTC*
* 50d417f267 Merge pull request `#38693`_ from twangboy/update_jinja
* e0c7e5549b Update jinja2 to 2.9.4
* **PR** `#38739`_: (`vutny`_) DOCS: correct examples of running test suite
@ *2017-01-15 14:35:47 UTC*
* f4233bb18d Merge pull request `#38739`_ from vutny/fix-runtests-doc
* b872bb63f6 DOCS: correct examples of running test suite
* **PR** `#38735`_: (`vutny`_) DOCS: add links to File State Backups page where necessary
* **PR** `#38720`_: (`dereckson`_) Proofread jinja_to_execution_module tutorial
* **ISSUE** `#36548`_: (`abonillasuse`_) openstack auth with nova driver (refs: `#38647`_)
* **PR** `#38647`_: (`gtmanfred`_) Allow novaclient to use keystoneauth1 sessions for authentication
@ *2017-01-10 17:48:26 UTC*
* 7b850d472d Merge pull request `#38647`_ from gtmanfred/nova
* 5be9b60851 add documentation about using keystoneauth for v3
* 7b657ca4ae add the ability to use keystone v2 and v3
* 5646ae1b34 add ability to use keystoneauth to authenticate in nova driver
* **ISSUE** `#38648`_: (`ericuldall`_) No release file error from PPA on Ubuntu (refs: `#38650`_)
* **ISSUE** `#38572`_: (`COLABORATI`_) ppa:saltstack/salt failure (refs: `#38650`_)
* **ISSUE** `#34504`_: (`AvinashDeluxeVR`_) Installation documentation for Ubuntu server and Windows minion leads the user to use different salt versions. (refs: `#38650`_)
* **PR** `#38650`_: (`rallytime`_) Remove the installation instructions for out-of-date community ppa
@ *2017-01-10 17:47:45 UTC*
* 383768d838 Merge pull request `#38650`_ from rallytime/remove-ubuntu-ppa-docs
* 30429b2e44 Remove the installation instructions for out-of-date community ppa
* **ISSUE** `#38087`_: (`UtahDave`_) The 'data' field in the return from a minion below a syndic is wrapped in an extra 'data' field. (refs: `#38657`_)
* **PR** `#38657`_: (`DmitryKuzmenko`_) Publish the 'data' field content for Syndic evets
@ *2017-01-10 16:59:33 UTC*
* 7d9f56e3b5 Merge pull request `#38657`_ from DSRCorporation/bugs/38087_syndic_event_format_fix
* 594c33f396 Publish the 'data' field content for Syndic evets
* **PR** `#38649`_: (`Ch3LL`_) fix unit.modules.file_test
@ *2017-01-10 16:44:45 UTC*
* 83987511fd Merge pull request `#38649`_ from Ch3LL/test_apply_template
* 47f8b68e0b fix unit.modules.file_test
* **ISSUE** `#37355`_: (`Firewire2002`_) salt-ssh - ImportError: No module named backports.ssl_match_hostname (refs: `#38626`_, `saltstack/salt#37358`_, `#37358`_)
* **ISSUE** `#34600`_: (`davidpsv17`_) Error trying a salt-ssh test.ping (refs: `saltstack/salt#37358`_, `#37358`_)
* **ISSUE** `#27355`_: (`jerob`_) salt ssh error with debian 7 on target (refs: `saltstack/salt#37358`_, `#37358`_)
* **PR** `saltstack/salt#37358`_: (`Firewire2002`_) Fix/workaround for issue `#37355`_ (refs: `#38626`_)
* **PR** `#38626`_: (`cachedout`_) Revert "Fix/workaround for issue `#37355`_"
@ *2017-01-06 21:28:09 UTC*
* 74ddc71be3 Merge pull request `#38626`_ from saltstack/revert-37358-2016.3.3_issue37355
* e912ac99c2 Revert "Fix/workaround for issue `#37355`_"
* **ISSUE** `#37355`_: (`Firewire2002`_) salt-ssh - ImportError: No module named backports.ssl_match_hostname (refs: `#38626`_, `saltstack/salt#37358`_, `#37358`_)
* **ISSUE** `#34600`_: (`davidpsv17`_) Error trying a salt-ssh test.ping (refs: `saltstack/salt#37358`_, `#37358`_)
* **ISSUE** `#27355`_: (`jerob`_) salt ssh error with debian 7 on target (refs: `saltstack/salt#37358`_, `#37358`_)
* **PR** `#37358`_: (`Firewire2002`_) Fix/workaround for issue `#37355`_
@ *2017-01-06 18:58:47 UTC*
* 5e58b32934 Merge pull request `#37358`_ from Firewire2002/2016.3.3_issue37355
* 910da18bfd fixed typo
* 4fbc5ddd06 fixed wrong renamed variable and spaces
* 92366e646c issue `#37355`_
* 7dc87ab7b8 issue `#37355`_
* 2878180405 issue `#37355`_
* **PR** `#35390`_: (`alexandr-orlov`_) Returns back missed proper grains dictionary for file module
@ *2017-01-06 18:02:13 UTC*
* 6c2fe615aa Merge pull request `#35390`_ from alexandr-orlov/2016.3
* cd5ae17e8d fxd missed proper grains dictionary
* **ISSUE** `#38558`_: (`multani`_) pillar.get("...", default=var, merge=true) updates default value (refs: `#38579`_)
* **PR** `#38618`_: (`rallytime`_) Back-port `#38579`_ to 2016.3
@ *2017-01-06 17:37:56 UTC*
* **PR** `#38579`_: (`zwo-bot`_) Fix `#38558`_ - pillar.get with default= ...,merge=true influence subsequent calls of pillar.get (refs: `#38618`_)
* 2579cfa42d Merge pull request `#38618`_ from rallytime/bp-38579
* 2052ecee2c Add copy import
* 2c8845aaa0 add test for pillar.get() + default value
* c2f98d2f04 ticket 38558: add unit test, deepcopy() only if necessary
* 30ae0a1958 added deepcopy of default if merge=True
* **PR** `#38601`_: (`terminalmage`_) pillar.get: Raise exception when merge=True and default is not a dict
@ *2017-01-05 23:15:51 UTC*
* da676cebd6 Merge pull request `#38601`_ from terminalmage/pillar-get
* 8613d7254d pillar.get: Raise exception when merge=True and default is not a dict
* **PR** `#38600`_: (`terminalmage`_) Avoid errors when sudo_user is set (2016.3 branch)
@ *2017-01-05 20:57:09 UTC*
* **PR** `#38598`_: (`terminalmage`_) Avoid errors when sudo_user is set (refs: `#38600`_)
* 224fc7712a Merge pull request `#38600`_ from terminalmage/issue38459-2016.3
* 8a45b13e76 Avoid errors when sudo_user is set
* **PR** `#38589`_: (`tobithiel`_) State Gem: fix incorrect warning about missing rvm/rbenv
@ *2017-01-05 20:12:15 UTC*
* a376970f88 Merge pull request `#38589`_ from tobithiel/fix_rvm_rbenv_warning
* 9ec470b4a5 State Gem: fix incorrect warning about missing rvm/rbenv
* **PR** `#38567`_: (`pass-by-value`_) Create queue if one doesn't exist
@ *2017-01-05 18:46:11 UTC*
* 02e6a78254 Merge pull request `#38567`_ from pass-by-value/pgjsonb_queue_changes_2016.3
* 67879ebe65 Create queue if one doesn't exist
* **ISSUE** `#37498`_: (`githubcdr`_) service.restart salt-minion fails on Ubuntu 14.04.5 LTS (refs: `#37748`_, `#38587`_)
* **PR** `#38587`_: (`rallytime`_) Change daemontools __virtualname__ from service to daemontools
@ *2017-01-05 18:06:01 UTC*
* 0889cbdb31 Merge pull request `#38587`_ from rallytime/fix-37498
* 2a5880966f Change daemontools __virtualname__ from service to daemontools
* **PR** `#38562`_: (`rallytime`_) Update arch installation docs with correct package name
@ *2017-01-04 20:04:28 UTC*
* 7b74436d13 Merge pull request `#38562`_ from rallytime/arch-install-docs
* 8b1897ace9 Update arch installation docs with correct package name
* **PR** `#38560`_: (`Ch3LL`_) fix api logfile (refs: `#38585`_)
@ *2017-01-04 19:03:17 UTC*
* 01860702cb Merge pull request `#38560`_ from Ch3LL/fix_api_log
* 1b45e9670b fix api logfile
* **PR** `#38531`_: (`rallytime`_) Back-port `#33601`_ to 2016.3
@ *2017-01-04 16:56:53 UTC*
* **PR** `#33601`_: (`mchugh19`_) Fix slack engine to run on python2.6 (refs: `#38531`_)
* 0056620a53 Merge pull request `#38531`_ from rallytime/bp-33601
* c36cb39825 remove the unnecessary double trigger
* 38414493bf fix spacing lint error
* 8c1defc710 Remove uncessary type from alias commands. Deduplicate alias handling to autodetect function selection. Add error reporting to slack connectivty problems. Cleanup slack's unicode conversion
* c2f23bc45e Fix slack engine to run on python2.6
* **ISSUE** `#38187`_: (`curiositycasualty`_) username/password saved as cleartext when using URIs with user:pass@ format (refs: `#38541`_)
* **PR** `#38541`_: (`techhat`_) Strip user:pass from cached URLs
@ *2017-01-04 15:39:57 UTC*
* 50242c7f17 Merge pull request `#38541`_ from techhat/issue38187
* eae3a435dd Strip user:pass from cached URLs
* **ISSUE** `#30454`_: (`favoretti`_) Using yaml serializer inside jinja template results in unicode being prepended by '!!python/unicode' (refs: `#30481`_, `#38554`_)
* **PR** `#38554`_: (`multani`_) Fix YAML deserialization of unicode
@ *2017-01-04 15:31:16 UTC*
* **PR** `#30481`_: (`basepi`_) Add yaml_safe jinja filter (refs: `#38554`_)
* 325dc56e59 Merge pull request `#38554`_ from multani/fix/30454
* 2e7f743371 yaml: support unicode serialization/deserialization
* df76113c5c jinja: test the "yaml" filter with ordered dicts
* f7712d417f Revert "Add yaml_safe filter"
* **PR** `#38536`_: (`UtahDave`_) add note about pyVmomi locale workaround
* **ISSUE** `#38353`_: (`Ch3LL`_) salt-cloud gce specifying (refs: `#38542`_)
* **PR** `#38542`_: (`Ch3LL`_) fix gce image bug
* **ISSUE** `#38449`_: (`swalladge`_) Parsing issues in `list_tab` (salt/modules/cron.py) (refs: `#38487`_)
* **PR** `#38487`_: (`gtmanfred`_) Fix crontab issues with spaces
@ *2017-01-01 20:33:29 UTC*
* ec60f9c721 Merge pull request `#38487`_ from gtmanfred/2016.3
* 048b9f6b9d add test
* c480c11528 allow spaces in cron env
* c529ec8c34 allow crons to have multiple spaces
* **ISSUE** `#37684`_: (`thusoy`_) State execution duration is timezone-dependent (refs: `#38491`_)
* **PR** `#38491`_: (`gtmanfred`_) Use UTC for timing in case timezone changes
@ *2017-01-01 20:30:57 UTC*
* c5ba11b5e0 Merge pull request `#38491`_ from gtmanfred/timing
* 79368c7528 Use UTC for timing in case timezone changes
* **ISSUE** `#38472`_: (`jinm`_) file.managed Unable to manage file: 'hash_type' (2016.3.4) (refs: `#38503`_)
* **PR** `#38503`_: (`jinm`_) Hash type fallback for file management
@ *2017-01-01 17:36:51 UTC*
* 86f0aa0bb3 Merge pull request `#38503`_ from jinm/issue_38472_jinm
* 0cd9df299f Hash type fallback for file management
* **PR** `#38457`_: (`bshelton229`_) Stops git.latest checking for local changes in a bare repo
@ *2016-12-30 14:28:47 UTC*
* ed2ba4bd1b Merge pull request `#38457`_ from bshelton229/git-latest-head-bug
* 558e7a771a Stops git.latest checking for local changes in a bare repo
* **PR** `#38385`_: (`dragon788`_) Use unambigous long names with double dashes
@ *2016-12-29 17:10:48 UTC*
* 36e21b22cb Merge pull request `#38385`_ from dragon788/2016.3-double-dash
* 86c4b56f47 Newline for lint compat
* 9d9b686057 Address review comments, consistency of quotes
* df9bd5e7f9 Use unambigous long names with double dashes
* **ISSUE** `#38209`_: (`limited`_) Accepting a minion causes tornado to exit (refs: `#38474`_)
* **PR** `#38474`_: (`cachedout`_) Allow an existing ioloop to be passed to salt-key
@ *2016-12-29 16:28:51 UTC*
* 59f2560d88 Merge pull request `#38474`_ from cachedout/key_loop
* de504538e1 Allow an existing ioloop to be passed to salt-key
* **ISSUE** `#38438`_: (`jf`_) file.line with mode=delete breaks on empty file (refs: `#38467`_)
* **PR** `#38467`_: (`gtmanfred`_) file.line fail with mode=delete
@ *2016-12-28 20:00:33 UTC*
* 3d0c752acd Merge pull request `#38467`_ from gtmanfred/2016.3
* 7b7c6b3878 file.line fail with mode=delete
* **PR** `#38434`_: (`slinn0`_) Make sysctl.persist fail when failing to set a value into the running kernel
@ *2016-12-27 15:37:53 UTC*
* 940025d5c4 Merge pull request `#38434`_ from slinn0/issue_38433_fixes
* 22af87a3fc Fixes for https://github.com/saltstack/salt/issues/38433
* **PR** `#38421`_: (`rallytime`_) Update deprecation notices to the correct version
* **PR** `#38420`_: (`rallytime`_) Removed various deprecation notices from salt/modules/* files (refs: `#38421`_)
* **ISSUE** `#38282`_: (`sash-kan`_) file.managed fails when file (which contains utf-characters in the name) exists (refs: `#38415`_)
* **PR** `#38415`_: (`terminalmage`_) file.managed: Fix failure when filename contains unicode chars
* **PR** `#38419`_: (`Ch3LL`_) fix scsci docs example
@ *2016-12-22 18:57:51 UTC*
* 2cdb59d055 Merge pull request `#38419`_ from Ch3LL/fix_doc_scsi
* 234043b8bb fix scsci docs example
* **PR** `#38407`_: (`terminalmage`_) Improve pillar documentation
* **ISSUE** `#38372`_: (`fanirama`_) Issue with cron.file. Source: salt://path/to/crontab_file not found (refs: `#38398`_)
* **PR** `#38398`_: (`terminalmage`_) Fix call to file.get_managed in cron.file state
@ *2016-12-22 16:46:14 UTC*
* 423b1fddff Merge pull request `#38398`_ from terminalmage/issue38372
* c80dbaa914 Fix call to file.get_managed in cron.file state
* **PR** `#38382`_: (`heewa`_) Fix http.query when result has no text
* **PR** `#38390`_: (`meaksh`_) Add "try-restart" to fix autorestarting on SUSE systems
@ *2016-12-21 16:06:24 UTC*
* b74b5c7d38 Merge pull request `#38390`_ from meaksh/2016.3-fix-try-restart-for-autorestarting-on-SUSE-systems
* de6ec05ec0 add try-restart to fix autorestarting on SUSE systems
* **PR** `#38221`_: (`UtahDave`_) Fix default returner
@ *2016-12-20 20:34:36 UTC*
* 2c3a39760a Merge pull request `#38221`_ from UtahDave/fix_default_returner
* 385640765b remove a blank line to satisfy linter
* 9c248aa14c validate return opt, remove default.
* 8bb37f9fe7 specify allowed types and default for "returner"
* 11863a4bfe add examples of default minion returners
* e7c6012655 add support for default returners using `return`
* **PR** `#38288`_: (`terminalmage`_) archive.extracted: don't try to cache local sources (2016.3 branch)
@ *2016-12-18 13:07:11 UTC*
* 09d9cff992 Merge pull request `#38288`_ from terminalmage/archive-extracted-local-source-2016.3
* 845e3d0e75 Update tests to reflect change in cache behavior
* 5a08d7c70a archive.extracted: don't try to cache local sources (2016.3 branch)
* **PR** `#38312`_: (`cro`_) Backport feature allowing proxy config to live in pillar OR /etc/salt/proxy
@ *2016-12-18 12:39:01 UTC*
* bf37667f8a Merge pull request `#38312`_ from cro/proxy_config_in_cfg
* 2006c4000e Typo
* 689d95b10f Backport feature allowing proxy config to live in pillar OR /etc/salt/proxy.
* **ISSUE** `#12788`_: (`whiteinge`_) Comb through docs to replace :doc: roles with :ref: (refs: `#38320`_)
* **PR** `#38320`_: (`rallytime`_) Cleanup doc internal markup references
@ *2016-12-18 12:31:28 UTC*
* c83db5a785 Merge pull request `#38320`_ from rallytime/cleanup-doc-refs
* 62978cb7a0 Don't check the doc/conf.py file for doc markup refs
* 770e732d76 Add a unit test to search for new doc markup refs
* 5c42a361a0 Remove ":doc:" references from all doc/topics/installation/* files
* 23bce1c929 Remove ":doc:" references from all doc/topics/releases/* files
* 4aafa41d22 Remove ":doc:" references from a bunch of doc/* files
* 02bfe7912c Remove more ":doc:" references from doc/* files
* 6e32267d0c Remove ":doc:" references in salt/* files
* **PR** `#38281`_: (`mikejford`_) Add nick to args for create_multi
* **ISSUE** `#38290`_: (`dragon788`_) Need to use machine automation friendly output (refs: `#38313`_)
* **PR** `#38313`_: (`dragon788`_) 2016.3 chocolatey fix
@ *2016-12-16 17:20:39 UTC*
* 235682b1e6 Merge pull request `#38313`_ from dragon788/2016.3-chocolatey-fix
* 1f5fc17551 Use machine readable output for list
* cdbd2fbe3c Added limit-output to eliminate false packages
* **ISSUE** `#38174`_: (`NickDubelman`_) [syndic] Why can't a syndic node signal when all of its minions have returned? (refs: `#38279`_)
* **ISSUE** `#32400`_: (`rallytime`_) Document Default Config Values (refs: `#38279`_)
* **PR** `#38279`_: (`rallytime`_) Add docs for syndic_wait setting
@ *2016-12-15 18:30:31 UTC*
* 9e78ddc80e Merge pull request `#38279`_ from rallytime/fix-38174
* 4a62d01577 Add docs for syndic_wait setting
* **PR** `#38248`_: (`meaksh`_) Successfully exit of salt-api child processes when SIGTERM is received
@ *2016-12-15 09:16:27 UTC*
* fc9e1dff35 Merge pull request `#38248`_ from meaksh/salt-api-successfully-close-child-processes
* ee6eae9855 Successfully exit of salt-api child processes when SIGTERM.
* **PR** `#38254`_: (`terminalmage`_) Also check if pillarenv is in opts
@ *2016-12-15 09:10:24 UTC*
* 3c718ed35e Merge pull request `#38254`_ from terminalmage/check-pillarenv
* fa9ad311c6 Also check if pillarenv is in opts
* **PR** `#38256`_: (`rallytime`_) [2016.3] Bump latest release version to 2016.11.1
* **PR** `#38198`_: (`vutny`_) Add missing requirements for running unit tests: libcloud and boto3
@ *2016-12-13 14:12:20 UTC*
* 004e46afe7 Merge pull request `#38198`_ from vutny/unit-tests-require-libcloud-boto3
* a6098bac1a Remove note about SaltTesting installation, now it is in the requirements
* 004bff113e Add missing requirements for running unit tests: libcloud and boto3
* **PR** `#38213`_: (`rallytime`_) Skip test_cert_info tls unit test on pyOpenSSL upstream errors
@ *2016-12-13 12:05:01 UTC*
* 9d497bc74c Merge pull request `#38213`_ from rallytime/skip-tls-test
* bdb807fc7c Skip test_cert_info tls unit test on pyOpenSSL upstream errors
* **PR** `#38224`_: (`whiteinge`_) Allow CORS OPTIONS requests to be unauthenticated
@ *2016-12-13 12:02:30 UTC*
* 203109dd17 Merge pull request `#38224`_ from whiteinge/cors-options-unauthed
* de4d3227ab Allow CORS OPTIONS requests to be unauthenticated
* **PR** `#38223`_: (`whiteinge`_) Add root_dir to salt-api file paths
@ *2016-12-13 07:44:19 UTC*
* **PR** `#37272`_: (`vutny`_) Get default logging level and log file from default opts dict (refs: `#38223`_)
* 721a5feccd Merge pull request `#38223`_ from whiteinge/salt-api-root_dirs
* bfbf390c0e Add root_dir to salt-api file paths
* **ISSUE** `#38162`_: (`747project`_) git_pillar does not detect changes to remote repository when told to update (refs: `#38191`_)
* **PR** `#38191`_: (`terminalmage`_) Clarify the fact that git_pillar.update does not fast-forward
@ *2016-12-12 09:45:48 UTC*
* 70f7d22ad6 Merge pull request `#38191`_ from terminalmage/issue38162
* 1ae543a98a Clarify the fact that git_pillar.update does not fast-forward
* **PR** `#38194`_: (`vutny`_) Document the requirements for running ZeroMQ-based integration tests
@ *2016-12-12 09:42:11 UTC*
* 28171cbfc5 Merge pull request `#38194`_ from vutny/integration-test-requirements-doc
* e9f419ff64 Document the requirements for running ZeroMQ-based integration tests
* **PR** `#38185`_: (`rallytime`_) Back-port `#38181`_ to 2016.3
@ *2016-12-09 22:27:44 UTC*
* **PR** `#38181`_: (`rallytime`_) Reset socket default timeout to None (fixes daemons_tests failures) (refs: `#38185`_)
* a4ef037ab1 Merge pull request `#38185`_ from rallytime/bp-38181
* 609f814454 Reset socket default timeout to None (fixes daemons_tests failures)
* **PR** `#38163`_: (`Ch3LL`_) enabled ec2 cloud tests
@ *2016-12-09 18:01:57 UTC*
* 65b2ad7b14 Merge pull request `#38163`_ from Ch3LL/enabled_ec2_cloud
* be74c45463 enabled ec2 cloud tests
* **PR** `#38177`_: (`vutny`_) Correct `cp.get_file_str` docstring and add integration tests
@ *2016-12-09 16:55:35 UTC*
* b63f74e034 Merge pull request `#38177`_ from vutny/fix-cp-get-file-str
* a449980672 Correct `cp.get_file_str` docstring and add integration tests
* **PR** `#38153`_: (`vutny`_) Master config includes may contain errors and be safely skipped
@ *2016-12-08 17:43:34 UTC*
* 7596313be0 Merge pull request `#38153`_ from vutny/master-includes-error-tolerance
* cd0154ee93 Master config includes may contain errors and be safely skipped
* **PR** `#38134`_: (`rallytime`_) Skip daemon unit tests when running on Python 2.6
* **ISSUE** `#38091`_: (`tjyang`_) [WARNING ] salt.loaded.int.module.zenoss.__virtual__() is wrongly returning `None`. (refs: `#38102`_)
* **PR** `#38102`_: (`rallytime`_) Add False + msg tuple return if requests is missing for zenoss module
@ *2016-12-07 13:24:37 UTC*
* d3d98fd4eb Merge pull request `#38102`_ from rallytime/fix-38091
* 4f79d5a0d1 Add False + msg tuple return if requests is missing for zenoss module
* **ISSUE** `#36707`_: (`do3meli`_) slow FreeBSD sysctl module with test=true (refs: `#36794`_)
* **PR** `#38104`_: (`rallytime`_) Back-port `#36794`_ to 2016.3
@ *2016-12-07 13:23:48 UTC*
* **PR** `#36794`_: (`do3meli`_) FreeBSD sysctl module now handels config_file parameter in show method (refs: `#38104`_)
* 8c8cbc2734 Merge pull request `#38104`_ from rallytime/bp-36794
* c906c8a0d5 Pylint fixes
* da3ebf83e6 FreeBSD sysctl module now handels config_file parameter in show method
* **ISSUE** `#35342`_: (`morganwillcock`_) win_pkg: refresh_db doesn't remove cached items which have been renamed or removed (refs: `#38083`_)
* **PR** `#38083`_: (`twangboy`_) Only delete .sls files from winrepo-ng [DO NOT MERGE FORWARD]
@ *2016-12-06 14:13:35 UTC*
* fbc87769b9 Merge pull request `#38083`_ from twangboy/fix_refresh_db
* 978af6d83c Remove only .sls files from the cached winrepo-ng
* **PR** `#38059`_: (`rallytime`_) Call exec_test for the Syndic daemon in tests.unit.daemons_test.py
@ *2016-12-04 04:18:41 UTC*
* **PR** `#38057`_: (`rallytime`_) [2016.11] Merge forward from 2016.3 to 2016.11 (refs: `#38059`_)
* **PR** `#38034`_: (`cachedout`_) Modify daemons test to use multiprocessing (refs: `#38059`_)
* 9dcfdeef6b Merge pull request `#38059`_ from rallytime/daemons-test-fix
* eb372b27d8 Add missing "not" statement: The last syndic test should assertFalse()
* 4e10f8e018 Call exec_test for the Syndic daemon in tests.unit.daemons_test.py
* **ISSUE** `#37939`_: (`Talkless`_) file.comment always report changes in test=True mode (refs: `#38039`_)
* **PR** `#38039`_: (`rallytime`_) Check to see if a line is already commented before moving on
@ *2016-12-02 20:08:35 UTC*
* 9cd42b9b3f Merge pull request `#38039`_ from rallytime/fix-37939
* 1da7aacfbe Update unit tests to account for additional file.search call
* 8a685b1820 Check to see if a line is already commented before moving on
* f2c045520d Write an integration test demonstrating the issue
* **ISSUE** `#38037`_: (`dmurphy18`_) pkg.latest and yumpkg.latest_version return incorrect package versions 2016.3 and 2016.11 (refs: `#38045`_)
* **PR** `#38045`_: (`terminalmage`_) yumpkg.py: don't include non-upgrade versions found by "yum list available"
@ *2016-12-02 20:07:38 UTC*
* a34a763984 Merge pull request `#38045`_ from terminalmage/issue38037
* 65289503d9 Simplify logic for matching desired pkg arch with actual pkg arch
* 3babbcda94 yumpkg.py: don't include non-upgrade versions found by "yum list available"
* **PR** `#38034`_: (`cachedout`_) Modify daemons test to use multiprocessing (refs: `#38059`_)
* **PR** `#37995`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3
@ *2016-11-30 20:12:55 UTC*
* 6942d5d95b Merge pull request `#37995`_ from rallytime/merge-2016.3
* b44e17921c Merge branch '2015.8' into '2016.3'
* 7a7e36728f Merge pull request `#37978`_ from terminalmage/ext_pillar_first-docs
* 61ed9a8657 Add clarifying language to ext_pillar_first docs
* **PR** `#38002`_: (`laleocen`_) fix broken yaml code block
* **ISSUE** `#35088`_: (`Modulus`_) salt/cloud/ec2.py encoding problems. (refs: `#37912`_)
* **PR** `#37912`_: (`attiasr`_) fix encoding problem aws responses
@ *2016-11-30 18:10:30 UTC*
* 3dd45fbedf Merge pull request `#37912`_ from attiasr/fix_aws_response_encoding
* ba4ec4e7f1 use Requests result encoding to encode the text
* abe4eb3b98 fix encoding problem aws responses
* **PR** `#37950`_: (`vutny`_) Set default Salt Master address for a Syndic (like for a Minion)
@ *2016-11-30 18:09:04 UTC*
* 69a74a4d2d Merge pull request `#37950`_ from vutny/fix-starting-up-syndic
* 7d9bc9abce syndic_master: correct default value, documentation and example config
* 92a7c7ed1b Set default Salt Master address for a Syndic (like for a Minion)
* **PR** `#37964`_: (`terminalmage`_) Add clarification on expr_form usage and future deprecation
* **ISSUE** `#37867`_: (`tobiasBora`_) Bug into lsb_release that crash salt (refs: `#37962`_)
* **PR** `#37962`_: (`cachedout`_) Catch possible exception from lsb_release
* **ISSUE** `#37945`_: (`gstachowiak`_) Missing exception handling in salt.master.Maintenance. Process never completes. (refs: `#37961`_)
* **PR** `#37961`_: (`cachedout`_) Handle empty tokens safely
* **PR** `#37272`_: (`vutny`_) Get default logging level and log file from default opts dict (refs: `#38223`_)
@ *2016-11-28 23:04:20 UTC*
* ea46639ce7 Merge pull request `#37272`_ from vutny/fix-getting-default-logging-opts
* e5ce52388a Fix description in the Salt Syndic usage info
* 518a3dd7ee Add unit tests for Salt parsers processing logging options
* 83d6a44254 Add `ssh_log_file` option to master config and documentation
* c8a0915460 Fix configuration example and documentation for `syndic_log_file` option
* e64dd3ed6b Correct default attributes for various parser classes
* 82a2e216b3 Fix default usage string for Salt command line programs
* 45dffa292f Fix readding and updating logfile and pidfile config options for Salt API
* f47253c21b Fix reading and applying Salt Cloud default configuration
* fad5bec936 Work with a copy of default opts dictionaries
* b7c24811e5 Fix `log_level_logfile` config value type
* 1bd76a1d96 Fix setting temporary log level if CLI option omitted
* 121848cc77 Fix obtaining `log_granular_levels` config setting
* 44cf07fec2 Make CLI options take precedence for setting up logfile_logger
* 61afaf1792 Fix setting option attributes when processing `log_level` and `log_file`
* 3c60e2388e Fix processing of `log_level_logfile` config setting
* 55a0af5bbd Use attribute functions for getting/setting options and config values
* c25f2d091e Fix getting Salt API default logfile option
* f2422373c1 Remove processing of unused and undocumented `cli_*_log_*` config options
* 2065e8311c Get default logging level and file from default opts dict
* **PR** `#37925`_: (`kontrolld`_) Fix missing ipv6 options centos network
@ *2016-11-28 22:38:43 UTC*
* f2f957da6c Merge pull request `#37925`_ from kontrolld/add-ipv6-centos-network
* ac2b477412 Adding IPv6 functionality for CentOS /etc/sysconfig/network
* **ISSUE** `#37059`_: (`basepi`_) Beacon fileserver operations cause scheduled jobs with fileserver operations to hang (refs: `#37899`_)
* **PR** `#37899`_: (`DmitryKuzmenko`_) Clear functions context in schedule tasks for ZeroMQ.
@ *2016-11-28 22:23:45 UTC*
* c07ad11279 Merge pull request `#37899`_ from DSRCorporation/bugs/37059_schedule_task_hang
* 9497748546 Clear functions context in schedule tasks for ZeroMQ.
* **ISSUE** `#37737`_: (`b-harper`_) python client api CloudClient multiple calls needed (refs: `#37928`_)
* **PR** `#37928`_: (`techhat`_) Don't modify self.opts directly
@ *2016-11-28 21:07:40 UTC*
* a55519db40 Merge pull request `#37928`_ from techhat/issue37737
* a09a60e89b Don't modify self.opts directly
* **PR** `#37929`_: (`gtmanfred`_) add list_nodes_min to nova driver
@ *2016-11-28 21:05:40 UTC*
* 9d17f1ce90 Merge pull request `#37929`_ from gtmanfred/2016.3
* c7d2c73503 add list_nodes_min to nova driver
* **PR** `#37926`_: (`kontrolld`_) Fixes no IPv6 functionality in /etc/sysconfig/network
@ *2016-11-28 20:40:00 UTC*
* 3bb743b59f Merge pull request `#37926`_ from kontrolld/fix-ipv6-centos-network
* 3ed42e5b44 updated
* 3b3bc4f239 Fixes no IPv6 functionality in /etc/sysconfig/network
* **PR** `#37921`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3
@ *2016-11-28 19:54:40 UTC*
* 271170a9f3 Merge pull request `#37921`_ from rallytime/merge-2016.3
* 523a67c422 Merge branch '2015.8' into '2016.3'
* 4cdc6cf5ec Update earlier release channels' docs with Carbon release notes (`#37914`_)
* d31491a7fe [2015.8] Update version numbers in doc config for 2016.11.0 release (`#37918`_)
* **PR** `#37924`_: (`cachedout`_) Update test for new gem ver
@ *2016-11-28 18:17:53 UTC*
* 6cd6429ac0 Merge pull request `#37924`_ from cachedout/fix_gem_states
* 894cca3427 Update test for new gem ver
* **PR** `#37916`_: (`rallytime`_) [2016.3] Update version numbers in doc config for 2016.11.0 release
@ *2016-11-28 17:09:08 UTC*
* c35ba1f390 Merge pull request `#37916`_ from rallytime/doc-update-2016.3
* bd40592289 [2016.3] Update version numbers in doc config for 2016.11.0 release
* **ISSUE** `#37287`_: (`aaronm-cloudtek`_) salt.states.ddns.present: 'NS' record type always returns as changed (refs: `#37785`_)
* **PR** `#37785`_: (`aaronm-cloudtek`_) respect trailing dot in ddns name parameter
@ *2016-11-28 14:02:10 UTC*
* e13a2488c8 Merge pull request `#37785`_ from Cloudtek/ddns-respect-trailing-dot
* 262e3b3697 respect trailing dot in ddns name parameter
* **ISSUE** `#37870`_: (`fj40crawler`_) salt.states.augeas.change returns None when test=True (refs: `#37895`_)
* **PR** `#37895`_: (`fj40crawler`_) Change return value for salt/states/augeas.py to be True instead of N…
@ *2016-11-28 13:49:27 UTC*
* c03b389422 Merge pull request `#37895`_ from fj40crawler/fix-augeas-return-for-test
* ddc238df36 Fixed augeas_test.py to match True v.s. None for test_change_in_test_mode
* ef75c459c0 Merge branch '2016.3' of github.com:saltstack/salt into fix-augeas-return-for-test
* b0fe0cd256 Change return value for salt/states/augeas.py to be True instead of None for cases where salt is run with test=True. Fixes `#37870`_
* **PR** `#37907`_: (`Talkless`_) Fix server trust in test run of svn.latest
@ *2016-11-28 13:47:39 UTC*
* fdbc31e8d8 Merge pull request `#37907`_ from Talkless/patch-2
* 072a319490 Fix server trust in test run of svn.latest
* **PR** `#37896`_: (`toanju`_) rh networking: add missing values
@ *2016-11-27 10:30:35 UTC*
* f39fdf443f Merge pull request `#37896`_ from toanju/2016.3
* c95304188e rh networking: add missing values
* **PR** `#37886`_: (`bdrung`_) Fix various spelling mistakes
@ *2016-11-25 02:59:36 UTC*
* ea935c5a91 Merge pull request `#37886`_ from bdrung/fix-typos
* 9a51ba5c5b Fix various spelling mistakes
* **ISSUE** `#37732`_: (`dhaines`_) list_semod() (from modules/selinux.py) incompatible with policycoreutils-2.5 (RHEL 7.3) (refs: `#37736`_)
* **PR** `#37736`_: (`dhaines`_) handle semodule version >=2.4 (`#37732`_) and fix typo
@ *2016-11-24 01:44:20 UTC*
* 371b0a86d9 Merge pull request `#37736`_ from dhaines/issue-37732
* 7ef590a505 Update selinux.py
* 516a67e6a3 fix indexing error
* 4e49c1e991 fix typo
* b16f2d8400 handle semodule version >=2.4 (`#37732`_) and fix typo
* **PR** `#37797`_: (`clan`_) check count of columns after split
@ *2016-11-24 01:28:59 UTC*
* 87aeb66fbf Merge pull request `#37797`_ from clan/extfs
* acf0f960ef check count of columns after split
* **PR** `#37762`_: (`twangboy`_) Add pre_versions to chocolatey.installed
@ *2016-11-24 01:27:29 UTC*
* f7c7109152 Merge pull request `#37762`_ from twangboy/fix_chocolatey_state
* 9696b6dfa5 Use keyword args instead of relying on ordering
* 398eaa074d Add pre_versions to the available arguments
* **PR** `#37866`_: (`meaksh`_) Backport `#37149`_ `#36938`_ and `#36784`_ to 2016.3
@ *2016-11-23 21:54:17 UTC*
* **PR** `#37857`_: (`meaksh`_) Backport `#37149`_ and `#36938`_ to 2015.8 (refs: `#37866`_)
* **PR** `#37856`_: (`meaksh`_) Backport `#36784`_ to 2015.8 (refs: `#37866`_)
* **PR** `#37149`_: (`dincamihai`_) Fix pkg.latest_version when latest already installed (refs: `#37857`_, `#37866`_)
* **PR** `#36938`_: (`wanparo`_) acl.delfacl: fix position of -X option to setfacl (refs: `#37857`_, `#37866`_)
* **PR** `#36784`_: (`moio`_) OS grains for SLES Expanded Support (refs: `#37856`_, `#37866`_)
* 56baa92d55 Merge pull request `#37866`_ from meaksh/2016.3-bp-37149-36938-36784
* 9d8d578109 Fix pkg.latest_version when latest already installed
* ffca0d491c - acl.delfacl: fix position of -X option to setfacl
* 3dfed6b841 Adjust linux_acl unit test argument ordering
* f185ecdde1 core.py: quote style fixed
* 8404d13424 Setting up OS grains for SLES Expanded Support (SUSE's Red Hat compatible platform)
* **ISSUE** `#32829`_: (`tyhunt99`_) Dockerng appears to not be using docker registries pillar data (refs: `#36893`_)
* **PR** `#37863`_: (`rallytime`_) Back-port `#36893`_ to 2016.3
@ *2016-11-23 17:09:09 UTC*
* **PR** `#36893`_: (`tyhunt99`_) add option to force a reauth for a docker registry (refs: `#37863`_)
* d0cc7f0d56 Merge pull request `#37863`_ from rallytime/bp-36893
* 4c70534991 Add versionadded to reauth option in dockerng module
* 5ca2c388c2 Added documentation for the new reauth option in docker registry configuration
* 5b0c11ab47 add option to force a reauth for a docker registry
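
  A hedged sketch of a docker registry pillar using the new ``reauth`` flag (the registry URL and credentials are illustrative, and the surrounding ``docker-registries`` layout is assumed from the usual dockerng convention):

  .. code-block:: yaml

      docker-registries:
        https://index.docker.io/v1/:
          username: user
          password: s3cr3t
          email: user@example.com
          reauth: True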
* **PR** `#37847`_: (`laleocen`_) add multiline encryption documentation to nacl
* **ISSUE** `#37787`_: (`elyulka`_) user.present state fails to change loginclass on FreeBSD (refs: `#37827`_)
* **PR** `#37827`_: (`silenius`_) add missing chloginclass
* **PR** `#37826`_: (`rallytime`_) Update branch refs to more relevant branch
* **PR** `#37822`_: (`laleocen`_) add documentation for multiline encryption using nacl (refs: `#37826`_)
* **ISSUE** `#19269`_: (`markuskramerIgitt`_) Undocumented feature `names:` of `file.directory` (refs: `#37823`_)
* **PR** `#37823`_: (`rallytime`_) Add "names" option to file state docs: point users to highstate doc examples
* **ISSUE** `#15697`_: (`arthurlogilab`_) keystone.user_present should not re-set the password when user exists (refs: `#37821`_)
* **PR** `#37821`_: (`rallytime`_) Clarify keystone.user_present password state docs with default behavior
* **ISSUE** `#5999`_: (`pille`_) libvirt.keys does not work (refs: `#37820`_)
* **PR** `#37820`_: (`rallytime`_) Add some dependency documentation to libvirt docs
* **PR** `#37772`_: (`bdrung`_) Support initializing OpenSSL 1.1
@ *2016-11-21 20:28:51 UTC*
* 485270f74e Merge pull request `#37772`_ from bdrung/openssl1.1
* 819c9658ed Support initializing OpenSSL 1.1
* **ISSUE** `#37383`_: (`edwardsdanielj`_) Orchestration arguments (kwarg) not being interpreted / How I learned to stop worrying about documentation and love experimenting (refs: `#37817`_)
* **PR** `#37817`_: (`rallytime`_) Update orchestrate runner file.copy doc example
* **ISSUE** `#37653`_: (`gravyboat`_) Salt.cron docs don't wrap @hourly and @daily correctly in quotes for the examples (refs: `#37816`_)
* **ISSUE** `#31953`_: (`sjorge`_) Documentation for salt.states.cron is incorrect (refs: `#32157`_)
* **PR** `#37816`_: (`rallytime`_) Back-port `#32157`_ to 2016.3
@ *2016-11-21 20:22:27 UTC*
* **PR** `#32157`_: (`cachedout`_) Add quotes to cron doc (refs: `#37816`_)
* c5d3d8b66a Merge pull request `#37816`_ from rallytime/bp-32157
* d9c297119e Add quotes to cron doc
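
  A minimal sketch of the quoting the doc fix describes: special declarations such as ``@hourly`` must be quoted in SLS, since ``@`` is a reserved indicator character in YAML and cannot start a plain scalar (the job command is illustrative):

  .. code-block:: yaml

      date > /tmp/crontest:
        cron.present:
          - user: root
          - special: "@hourly"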
* **PR** `#37812`_: (`rallytime`_) Back-port `#37790`_ to 2016.3
@ *2016-11-21 18:46:40 UTC*
* **PR** `#37790`_: (`sofixa`_) Update cloud/proxmox.rst with more options and LXC (refs: `#37812`_)
* 97e6b6aabe Merge pull request `#37812`_ from rallytime/bp-37790
* ca3b6e7874 Update proxmox.rst with more options and LXC
* **ISSUE** `#37751`_: (`freach`_) Documentation salt.states.dockerng.running: "privileged" property undocumented (refs: `#37789`_)
* **PR** `#37811`_: (`rallytime`_) Back-port `#37789`_ to 2016.3
@ *2016-11-21 18:46:21 UTC*
* **PR** `#37789`_: (`fedusia`_) issue: 37751 (refs: `#37811`_)
* 27703c54bc Merge pull request `#37811`_ from rallytime/bp-37789
* ba3fef48e1 fix comment
* a021f76a9b issue: 37751 Add documentation for option privileged
* **PR** `#37810`_: (`rallytime`_) Back-port `#37775`_ to 2016.3
@ *2016-11-21 18:45:53 UTC*
* **PR** `#37775`_: (`calve`_) Document `python` argument in `salt.states.virtualenv_mod` (refs: `#37810`_)
* adac9d7c0c Merge pull request `#37810`_ from rallytime/bp-37775
* 2bed91437b Document `python` argument in `salt.states.virtualenv_mod`
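
  A hypothetical sketch of the now-documented ``python`` argument to ``virtualenv.managed`` (the venv path and interpreter path are illustrative):

  .. code-block:: yaml

      /opt/venvs/app:
        virtualenv.managed:
          - python: /usr/bin/python2.7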
* **ISSUE** `#37742`_: (`blaketmiller`_) Cannot match on nodegroup when checking minions (refs: `#37763`_)
* **PR** `#37763`_: (`cachedout`_) Add nodegroup check to ckminions
* **ISSUE** `#37725`_: (`secumod`_) salt-call incorrectly parses master hostname:port from minion config (refs: `#37766`_)
* **PR** `#37766`_: (`cachedout`_) Fix ip/port issue with salt-call
* **ISSUE** `#33709`_: (`msummers42`_) Any/All Salt-SSH invocations in 2016.3.0 Fails with AttributeError: 'module' object has no attribute 'BASE_THORIUM_ROOTS_DIR' (refs: `#37767`_)
* **PR** `#37767`_: (`cachedout`_) Add thorium path to syspaths
* **PR** `#37760`_: (`hu-dabao`_) Fix couchbase returner and add couple of more features
@ *2016-11-18 00:28:23 UTC*
* bff949f4e9 Merge pull request `#37760`_ from hu-dabao/fix_cb_returner
* de372f277e 1. The returner no longer needs to check whether the jid exists for the external job cache setup. 2. Add full_ret to the return doc so that the document is informative. 3. Make ttl a config attribute, because salt-minion does not have a keep_jobs attribute. 4. Add password to the config attributes. 5. Update the documents accordingly.
* **ISSUE** `#36629`_: (`yhekma`_) The pillar run module does not honor saltenv (refs: `#37738`_)
* **PR** `#37738`_: (`terminalmage`_) Allow pillar.get to retrieve fresh pillar data when saltenv passed
@ *2016-11-17 23:13:04 UTC*
* 1f976ac212 Merge pull request `#37738`_ from terminalmage/issue36629
* da46678c51 Allow pillar.get to retrieve fresh pillar data when saltenv passed
* **PR** `#37745`_: (`cro`_) Switch default filter tag for ONE resources from user only to all resources
* **ISSUE** `#37498`_: (`githubcdr`_) service.restart salt-minion fails on Ubuntu 14.04.5 LTS (refs: `#37748`_, `#38587`_)
* **PR** `#37748`_: (`silenius`_) check for SERVICE_DIR in __virtual__ in salt.modules.daemontools
* **ISSUE** `#37734`_: (`Ch3LL`_) Joyent Cloud Size Issue (refs: `#37735`_)
* **PR** `#37735`_: (`Ch3LL`_) change size and image of joyent profile
@ *2016-11-16 21:07:52 UTC*
* fa7883115e Merge pull request `#37735`_ from Ch3LL/fix_joyent_profile
* 9ef41dcdfc change size and image of joyent profile
* **PR** `#37731`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3
@ *2016-11-16 17:13:02 UTC*
* 98e25c68aa Merge pull request `#37731`_ from rallytime/merge-2016.3
* ec1389711f Merge branch '2015.8' into '2016.3'
* f417dbbe99 Merge pull request `#37718`_ from terminalmage/docs
* 23b8b2a3f4 Fix incorrectly-formatted RST
* **PR** `#37724`_: (`cachedout`_) Warn on AES test for systems with > 1 core
* **PR** `#37721`_: (`terminalmage`_) Fix for pillar setting 'environment' key in __gen_opts()
@ *2016-11-16 16:04:53 UTC*
* 35655d521f Merge pull request `#37721`_ from terminalmage/zd909
* acdd5513da Update git_pillar docs to reflect info from bugfix
* 433737d2dc Fix for pillar setting 'environment' key in __gen_opts()
* **PR** `#37719`_: (`terminalmage`_) Fix incorrectly-formatted RST (2016.3 branch)
@ *2016-11-16 08:20:53 UTC*
* 99cda7c003 Merge pull request `#37719`_ from terminalmage/docs-2016.3
* f163b4c724 Fix incorrectly-formatted RST
* **PR** `#37694`_: (`cachedout`_) Catch differences in git URLs in npm state
@ *2016-11-16 01:56:18 UTC*
* 8dea695c7c Merge pull request `#37694`_ from cachedout/npm_git
* 0e3bc2366a Catch differences in git URLs in npm state
* **ISSUE** `#37665`_: (`kluoto`_) boto_elb state fails as key is overwritten by the code (refs: `#37705`_)
* **PR** `#37705`_: (`rallytime`_) Don't overwrite the "key" variable passed in to _listeners_present func
@ *2016-11-15 21:26:37 UTC*
* 329448ccd7 Merge pull request `#37705`_ from rallytime/fix-37665
* 3b7e9c5e3b Don't overwrite the "key" variable passed in to _listeners_present func
* **PR** `#37707`_: (`Ch3LL`_) add timeout increase on azure tests
@ *2016-11-15 21:24:25 UTC*
* **PR** `#37239`_: (`Ch3LL`_) Fix cloud tests timeout (refs: `#37707`_)
* ac9a316b50 Merge pull request `#37707`_ from Ch3LL/fix_timeout_azure
* 363122c675 add timeout increase on azure tests
* **PR** `#37704`_: (`twangboy`_) Fix test disabled 2016.3 [DO NOT MERGE FORWARD]
@ *2016-11-15 16:48:52 UTC*
* 1ece265354 Merge pull request `#37704`_ from twangboy/fix_test_disabled_2016.3
* a0429cf839 Use nfsd instead of apsd for test_disabled
* **PR** `#37690`_: (`twangboy`_) Update pyzmq to 15.3.0 for 2016.3 [DO NOT MERGE FORWARD]
@ *2016-11-15 03:10:36 UTC*
* 44f05acbff Merge pull request `#37690`_ from twangboy/update_pyzmq_2016.3
* cf55342150 Update pyzmq to version 15.3.0
* **PR** `#37680`_: (`rallytime`_) Back-port `#32965`_ to 2016.3
@ *2016-11-15 02:56:46 UTC*
* **PR** `#32965`_: (`kevinquinnyo`_) Fix 'present' option when used without 'key_type' (refs: `#37680`_)
* a743d8b5e6 Merge pull request `#37680`_ from rallytime/bp-32965
* 1865b13645 Fix 'present' option when used without 'key_type'
* **ISSUE** `#35964`_: (`edgan`_) salt-ssh doesn't set the return code to non-zero on highstate rendering error (refs: `#35965`_)
* **PR** `#37681`_: (`rallytime`_) Back-port `#35965`_ to 2016.3
@ *2016-11-14 21:19:22 UTC*
* **PR** `#35965`_: (`edgan`_) Set the return code to 1 on salt-ssh highstate errors (refs: `#37681`_)
* 1c2d6ff293 Merge pull request `#37681`_ from rallytime/bp-35965
* 700f3fa57f Set the return code to 1 on salt-ssh highstate errors
* **PR** `#37668`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3
@ *2016-11-14 15:43:25 UTC*
* 1b456b55dc Merge pull request `#37668`_ from rallytime/merge-2016.3
* ef684c6b02 Merge branch '2015.8' into '2016.3'
* a01b66556f Add docs for rotate_aes_key (`#37641`_)
* **ISSUE** `#37492`_: (`JensRantil`_) Failing `salt -b 1 minion state.highstate` has wrong exit code (refs: `#37625`_)
* **PR** `#37625`_: (`cachedout`_) Return with proper retcodes in batch mode
@ *2016-11-12 20:29:09 UTC*
* 305e51d1c0 Merge pull request `#37625`_ from cachedout/issue_37492
* b6031524e5 Return with proper retcodes in batch mode
* **ISSUE** `#34547`_: (`sebw`_) salt-cloud deployment fails when deploy: True (refs: `#37607`_)
* **PR** `#37639`_: (`rallytime`_) Back-port `#37607`_ to 2016.3
@ *2016-11-11 20:29:20 UTC*
* **PR** `#37607`_: (`techhat`_) Try the connection again, in case it's been reset (refs: `#37639`_)
* **PR** `#35673`_: (`cro`_) Proxies don't handle reusing the SmartConnect instances very well. D… (refs: `#37607`_)
* **PR** `#34059`_: (`alexbleotu`_) Vmware common gh (refs: `#37607`_)
* 7510cd4da9 Merge pull request `#37639`_ from rallytime/bp-37607
* 9914c93bc4 Pylint: Remove kwargs that are not in the 2016.3 branch
* d941e9354d Disable pylint warning
* 940ee49a0b Lint fix
* 69893f0c38 Try the connection again, in case it's been reset
* **ISSUE** `saltstack/salt#37118`_: (`gtmanfred`_) group in file.find module unable to be a list (refs: `#37349`_)
* **ISSUE** `#37118`_: (`gtmanfred`_) group in file.find module unable to be a list (refs: `#37349`_)
* **PR** `#37638`_: (`rallytime`_) Back-port `#37349`_ to 2016.3
@ *2016-11-11 20:29:01 UTC*
* **PR** `#37349`_: (`haeac`_) Pull request for Bug `#37118`_ (refs: `#37638`_)
* 24ca96010d Merge pull request `#37638`_ from rallytime/bp-37349
* ba2105bc39 Fix for Bug `#37118`_: the wrong parameter was being used to convert the group name to group id.
* **ISSUE** `#37643`_: (`Ch3LL`_) digital ocean list_keypairs limits to 20 keys (refs: `#37644`_)
* **PR** `#37644`_: (`Ch3LL`_) digital ocean list_keypairs: increase limit for ssh keys parsed
@ *2016-11-11 20:28:46 UTC*
* e1e8b81d16 Merge pull request `#37644`_ from Ch3LL/fix_37643
* c02961a2f5 list_keypairs: increase limit for ssh keys parsed
* **ISSUE** `#37541`_: (`yhekma`_) salt-minion does not clean up temp files for templates (refs: `#37540`_, `#37640`_)
* **PR** `#37640`_: (`rallytime`_) Add known issue `#37541`_ to 2016.3.4 release notes
@ *2016-11-11 20:28:12 UTC*
* a97c2ad34b Merge pull request `#37640`_ from rallytime/update-release-notes
* 6d6de12aff Grammatical fix
* 24d7f20e16 Add known issue `#37541`_ to 2016.3.4 release notes
* **PR** `#37642`_: (`cro`_) Forward-port change from 2015.8 adding release note for rotate_aes_key
@ *2016-11-11 20:27:07 UTC*
* fab3eaa237 Merge pull request `#37642`_ from cro/rotate_aes_doc
* 1ca5b958c6 Forward-port change from 2015.8 adding release note for rotate_aes_key
* **ISSUE** `#37628`_: (`TronPaul`_) [git 2016.3] Refreshing of an s3 file server results in an exception (refs: `#37629`_)
* **PR** `#37629`_: (`TronPaul`_) fix __opts__ and provider being None in salt.utils.aws:get_location
@ *2016-11-11 09:49:47 UTC*
* 4c07b3534a Merge pull request `#37629`_ from TronPaul/fix-s3fs-opts
* a452cded20 fix __opts__ and provider being None issue
* **PR** `#37481`_: (`thatch45`_) Raet internal client reference fix
@ *2016-11-11 04:39:41 UTC*
* 200d9fcb6e Merge pull request `#37481`_ from thatch45/raet_client
* 50d911160b Attempted fix, needs user verification
* **PR** `#37611`_: (`jeanpralo`_) Fix cmd batch raw
@ *2016-11-11 02:53:58 UTC*
* b14faf1c68 Merge pull request `#37611`_ from jeanpralo/fix-cmd-batch-raw
* 4f16840ef1 add integration test for salt.client.LocalClient.cmd_batch
* ead47e4bba update ret dict to avoid hanging
* 0a2f153b6e fix dict key for raw support to avoid exception
* **PR** `#37614`_: (`gtmanfred`_) remove redundant code
@ *2016-11-11 02:49:13 UTC*
* 35c8333d04 Merge pull request `#37614`_ from gtmanfred/2016.3
* 71c2df89a9 remove redundant code
* **PR** `#37627`_: (`cachedout`_) Exempt pip.iteritems from test_valid_docs test
@ *2016-11-11 02:48:37 UTC*
* 4fab707bdd Merge pull request `#37627`_ from cachedout/pr-36706
* 94df2f8e6f Exempt pip.iteritems from test_valid_docs test
* **ISSUE** `#36644`_: (`b1naryth1ef`_) env_vars not properly validated/casted to strings w/ virtualenv.manage/pip.install (refs: `#36706`_)
* **PR** `#36706`_: (`siccrusher`_) Add basic sanity checks for env_vars in pip.install function
@ *2016-11-11 02:47:16 UTC*
* ee74f3116e Merge pull request `#36706`_ from siccrusher/fix_env_var_validation
* fb27f8b69e Revert change
* 79f3e83f8d Use fully-qualified path for six
* 0ca1222833 Update pip.py
* b15de371c1 * Ensure src is python3 compatible
* 0976a2d1ae * Before passing on the env_vars dictionary ensure all values are strings. Fixes `#36644`_
* **ISSUE** `#37491`_: (`JensRantil`_) "Failed to authenticate! ..." error should exit non-zero (refs: `#37626`_)
* **PR** `#37626`_: (`cachedout`_) Exit with proper retcode on hard client failures
@ *2016-11-11 02:38:47 UTC*
* 902a97575e Merge pull request `#37626`_ from cachedout/issue_37491
* bab9a729b1 Exit with proper retcode on hard client failures
* **PR** `#37617`_: (`terminalmage`_) Clarify docs for git_pillar dynamic env feature
@ *2016-11-11 01:52:52 UTC*
* 845f835177 Merge pull request `#37617`_ from terminalmage/git_pillar-docs
* 8cdf5dbb51 Clarify docs for git_pillar dynamic env feature
* **PR** `#36627`_: (`sjmh`_) Skip rest of loop on failed func match
@ *2016-11-10 23:47:12 UTC*
* 3079d78332 Merge pull request `#36627`_ from sjmh/fix/auth_skip_nomatch
* b3baaf30d0 Skip rest of loop on failed func match
* **PR** `#37600`_: (`mcalmer`_) change TIMEZONE on SUSE systems (bsc#1008933)
@ *2016-11-10 21:54:04 UTC*
* a71e7c77b3 Merge pull request `#37600`_ from mcalmer/fix-timezone-on-SUSE
* 3530b542f0 change TIMEZONE on SUSE systems (bsc#1008933)
* **ISSUE** `#37238`_: (`cmclaughlin`_) Restarting master causes minion to hang (refs: `#37438`_, `#37602`_)
* **ISSUE** `#37018`_: (`tsaridas`_) get events from python (refs: `#37438`_, `#37602`_)
* **PR** `#37602`_: (`DmitryKuzmenko`_) Handle master restart in appropriate places using `salt.event` listener.
@ *2016-11-10 21:53:20 UTC*
* **PR** `#37438`_: (`DmitryKuzmenko`_) Fix for `#37238`_ salt hang on master restart (refs: `#37602`_)
* 39b75878cf Merge pull request `#37602`_ from DSRCorporation/bugs/37238_salt_hang_on_master_restart
* d3d987b19c Handle master restart in appropriate places using `salt.event` listener.
* **PR** `#37608`_: (`gtmanfred`_) allow multiline returns from docker for mac
@ *2016-11-10 21:48:59 UTC*
* 019e1a721b Merge pull request `#37608`_ from gtmanfred/2016.3
* 74aee1e372 allow multiline returns from docker for mac
* **ISSUE** `#37592`_: (`craigafinch`_) State git.latest does not work with SSH (refs: `#37604`_)
* **ISSUE** `#37551`_: (`viict`_) git.latest "Not a valid commit name" (refs: `#37604`_, `#37571`_)
* **PR** `#37604`_: (`terminalmage`_) Documentation improvements and corrections
* **PR** `#37579`_: (`pass-by-value`_) Use existing VM's VDD size if not specified in the cloud profile
* **ISSUE** `#37541`_: (`yhekma`_) salt-minion does not clean up temp files for templates (refs: `#37540`_, `#37640`_)
* **PR** `#37540`_: (`yhekma`_) Added prefix to tempfile for template
@ *2016-11-10 00:37:18 UTC*
* fdd13b4145 Merge pull request `#37540`_ from yhekma/2016.3
* 93a59f8034 Added prefix to tempfile for template
* **ISSUE** `#37084`_: (`aaronm-cloudtek`_) x509.certificate_managed does not work with m2crypto >=0.25 (refs: `#37578`_)
* **PR** `#37578`_: (`clinta`_) Update for m2crypto changes removing lhash
* **PR** `#37584`_: (`clinta`_) Fix eauth example for limiting args
* **ISSUE** `#37551`_: (`viict`_) git.latest "Not a valid commit name" (refs: `#37604`_, `#37571`_)
* **PR** `#37571`_: (`terminalmage`_) Add a test to ensure we don't check for fast-forward before fetching
* **ISSUE** `#33645`_: (`ketzacoatl`_) saltutil.sync_all does not sync custom pillar modules to masterless minions (refs: `#33833`_)
* **ISSUE** `#25297`_: (`Akilesh1597`_) perform 'refresh_pillar' as a part of 'sync_all' (refs: `#25361`_, `#37521`_)
* **PR** `#37553`_: (`rallytime`_) Back-port `#37521`_ to 2016.3
@ *2016-11-08 23:11:07 UTC*
* **PR** `#37521`_: (`genuss`_) refresh_pillar() should always be called with refresh=True during saltutil.sync_all (refs: `#37553`_)
* **PR** `#33833`_: (`terminalmage`_) Support syncing pillar modules to masterless minions (refs: `#37521`_)
* **PR** `#25361`_: (`tedski`_) perform `refresh_pillar` as part of `sync_all` when `refresh=True` (refs: `#37521`_)
* b01c247ea9 Merge pull request `#37553`_ from rallytime/bp-37521
* 30f92b05f4 refresh_pillar() should always be called
* **PR** `saltstack/salt#37549`_: (`Mrten`_) sqlite is not found in 2015.8 (refs: `#37565`_)
* **PR** `#37565`_: (`rallytime`_) Back-port `#37549`_ to 2016.3
@ *2016-11-08 23:10:25 UTC*
* **PR** `#37549`_: (`Mrten`_) sqlite is not found in 2015.8 (refs: `#37565`_)
* 694df30d40 Merge pull request `#37565`_ from rallytime/bp-37549
* c92a90b8e5 Update sqlite3.py
* fb76557a2a sqlite is not found in 2015.8
* **ISSUE** `#37511`_: (`jdelic`_) service.dead now only operates if the service file exists (refs: `#37562`_)
* **PR** `#37562`_: (`terminalmage`_) Fix regression in service.dead state
* **ISSUE** `#37554`_: (`sjmh`_) salt-api doesn't dynamically re-read nodegroups configuration (refs: `#37560`_)
* **PR** `#37560`_: (`whiteinge`_) Skip config type checking for sdb values
* **PR** `#37556`_: (`rallytime`_) Don't pass the vpc id to boto.vpc.create_internet_gateway func
* **PR** `#37543`_: (`multani`_) Documentation rendering fixes
* **ISSUE** `saltstack/salt#31081`_: (`JensRantil`_) salt.modules.file.line documentation unclarities (refs: `#37457`_)
* **PR** `#37457`_: (`rallytime`_) Fixup file.line docs to be more clear and consistent
@ *2016-11-08 00:29:20 UTC*
* 96b8b9a849 Merge pull request `#37457`_ from rallytime/fix-31081
* 25821bb8db Clarify which modes use "before", "after", and "indent" options
* 8b2d2b9e7b Clarify file.line state docs as well
* b2615892eb Move note about using mode=insert with location options to mode section
* db0b0cefb8 Fixup file.line docs to be more clear and consistent
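
  A minimal sketch of the relationship the clarified docs spell out: the ``location`` option only takes effect with ``mode=insert`` (the file path and content are illustrative):

  .. code-block:: yaml

      /etc/example.conf:
        file.line:
          - content: "enabled = true"
          - mode: insert
          - location: end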
* **ISSUE** `#35799`_: (`davegiles`_) dsc.apply_config hangs (no error) on empty directory on target (refs: `#37526`_)
* **PR** `#37526`_: (`twangboy`_) Remove loop from dsc.apply_config
@ *2016-11-08 00:23:11 UTC*
* 7de790ffed Merge pull request `#37526`_ from twangboy/fix_35799
* fc4260911c Remove unnecessary format
* c934a2bfa7 Remove the loop from apply_config
* **PR** `saltstack/salt#37515`_: (`rallytime`_) [carbon] Merge forward from 2016.3 to carbon (refs: `#37534`_)
* **PR** `#37534`_: (`rallytime`_) Back-port fix needed from `#37515`_
@ *2016-11-08 00:14:46 UTC*
* **PR** `#37515`_: (`rallytime`_) [carbon] Merge forward from 2016.3 to carbon (refs: `#37534`_)
* 94811df2ea Merge pull request `#37534`_ from rallytime/bp-merge-foward-fix
* d1b2af1d69 Add missing source_hash_name args to a couple funcs
* **PR** `#37533`_: (`whiteinge`_) Return a 504 response instead of 500 for Salt timeouts
@ *2016-11-08 00:14:15 UTC*
* 17adbb0c9f Merge pull request `#37533`_ from whiteinge/salt-api-504-timeouts
* 63226aeda6 Return a 504 response instead of 500 for Salt timeouts
* **ISSUE** `saltstack/salt#36679`_: (`lorengordon`_) Command 'Import-Module ServerManager' failed with return code: 1 (refs: `saltstack/salt#36736`_, `#36736`_)
* **PR** `saltstack/salt#36736`_: (`m03`_) Fix issue 36679 win_servermanager error (refs: `#37529`_)
* **PR** `#37529`_: (`lorengordon`_) Backport: PR 36736 to 2016.3
@ *2016-11-08 00:04:10 UTC*
* **PR** `#36736`_: (`m03`_) Fix issue 36679 win_servermanager error
* a9f03eee6f Merge pull request `#37529`_ from lorengordon/bp-36736
* 21c2664b6a Fix issue 36679 win_servermanager failure
* **ISSUE** `#37444`_: (`Tanoti`_) Returning False from __virtual__ in a returner does not return expected error (refs: `saltstack/salt#37502`_, `#37519`_, `#37502`_)
* **PR** `saltstack/salt#37502`_: (`cachedout`_) Log proper message on returners that cannot be loaded (refs: `#37519`_)
* **PR** `#37519`_: (`rallytime`_) Update returner __virtual__() return messages for loader
@ *2016-11-07 23:06:23 UTC*
* 19475aada6 Merge pull request `#37519`_ from rallytime/returner-load-errors
* fb261a31f3 Update returner __virtual__() return messages for loader
* **ISSUE** `#35016`_: (`pingangit`_) TypeError: save_minions() got an unexpected keyword argument 'syndic_id' (refs: `#37527`_)
* **PR** `#37527`_: (`rallytime`_) Add syndic_id=None kwarg to save_minions funcs in returners
@ *2016-11-07 23:04:03 UTC*
* fefdfab850 Merge pull request `#37527`_ from rallytime/fix-35016
* 2944b244aa Add syndic_id=None kwarg to save_minions funcs in returners
* **PR** `#37530`_: (`gtmanfred`_) fix Lithium to 2015.5.0
* **PR** `#37514`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3
@ *2016-11-07 16:51:06 UTC*
* 743164844d Merge pull request `#37514`_ from rallytime/merge-2016.3
* 41166aede4 Merge branch '2015.8' into '2016.3'
* c505a059ef [2015.8] Doc version updated to 2016.3.4 (`#37482`_)
* **ISSUE** `#36713`_: (`Tanoti`_) ExtraData: unpack(b) received extra data after upgrading to 2016.3.3 (refs: `#37503`_)
* **PR** `#37503`_: (`cachedout`_) Catch loader error on returners without save_load
@ *2016-11-07 09:33:57 UTC*
* 2d924d0820 Merge pull request `#37503`_ from cachedout/issue_36713
* 5f7f971b2c Catch loader error on returners without save_load
* **ISSUE** `#37448`_: (`alisson276`_) In 'salt/key' events there are acts that never happen (refs: `#37499`_)
* **PR** `#37499`_: (`cachedout`_) Clarify docs on salt-key events
@ *2016-11-07 09:33:20 UTC*
* d95bf59f97 Merge pull request `#37499`_ from cachedout/key_docs_clarify
* 2758e74785 Clarify docs on salt-key events
* **PR** `#37500`_: (`cachedout`_) Remove unused flag
@ *2016-11-07 09:33:04 UTC*
* 1dd1408ae6 Merge pull request `#37500`_ from cachedout/remove_include_errors
* 6c705b11e0 Remove unused flag
* **ISSUE** `#37444`_: (`Tanoti`_) Returning False from __virtual__ in a returner does not return expected error (refs: `saltstack/salt#37502`_, `#37519`_, `#37502`_)
* **PR** `#37502`_: (`cachedout`_) Log proper message on returners that cannot be loaded
@ *2016-11-07 09:32:45 UTC*
* 4b6f1ab1c4 Merge pull request `#37502`_ from cachedout/issue_37444
* 4c5ab057ce Remove debugging
* 17d01e4f4c Log proper message on returners that cannot be loaded
* **ISSUE** `#37389`_: (`d101nelson`_) Some core grains are inaccurate or incomplete for Solaris (refs: `#37472`_)
* **PR** `#37494`_: (`sjorge`_) Forgot to update os_family map in `#37472`_
@ *2016-11-06 22:18:54 UTC*
* **PR** `#37472`_: (`sjorge`_) 2016.3 solaris grains improvements (refs: `#37494`_)
* 2422dafd52 Merge pull request `#37494`_ from sjorge/2016.3-osfam_map
* 96ba545492 Forgot to update os_family map in `#37472`_
* **PR** `#37496`_: (`mcalmer`_) fix status handling in sysv init scripts
@ *2016-11-06 22:18:00 UTC*
* 41bd8e3f52 Merge pull request `#37496`_ from mcalmer/fix-status-handling-in-sysv-init-scripts
* 1fb2c4dfcf fix status handling in sysv init scripts
* **PR** `#37497`_: (`terminalmage`_) Update 2016.3.5 release notes with source_hash_name explanation
@ *2016-11-06 22:17:40 UTC*
* e741a773a5 Merge pull request `#37497`_ from terminalmage/release_notes
* c08038d9ea Update 2016.3.5 release notes with source_hash_name explanation
* **PR** `#37486`_: (`twangboy`_) Add requirement for PowerShell 3 on Windows
@ *2016-11-06 06:01:07 UTC*
* f4426c2233 Merge pull request `#37486`_ from twangboy/fix_win_docs
* 9e0631a1ae Add docs denoting the requirement for at least PowerShell 3
* **PR** `#37493`_: (`cachedout`_) Add sdb support to minion and master configs
@ *2016-11-06 06:00:18 UTC*
* a1f355a569 Merge pull request `#37493`_ from cachedout/minion_master_sdb
* 9761a462c2 Add sdb support to minion and master configs
* **ISSUE** `#31135`_: (`jeffreyctang`_) file.line mode=replace breaks on empty file. (refs: `#37452`_)
* **PR** `#37452`_: (`rallytime`_) file.line with mode=replace on an empty file should return False, not stacktrace
@ *2016-11-06 01:55:11 UTC*
* be93710fee Merge pull request `#37452`_ from rallytime/fix-31135
* c792f76d2f Bump log level from debug to warning on empty file
* 5f181cf00d file.line with mode=replace on an empty file should return False
* 94a00c66eb Write a unit test demonstrating stack trace in `#31135`_
* **ISSUE** `#37001`_: (`phil123456`_) URGENT: archive.extracted does not work anymore (refs: `#37081`_, `saltstack/salt#37081`_)
* **ISSUE** `#29010`_: (`The-Loeki`_) file.managed download failing checksum testing for Ubuntu initrd w/source_hash (refs: `#37469`_)
* **PR** `saltstack/salt#37081`_: (`terminalmage`_) Fix archive.extracted remote source_hash verification (refs: `#37469`_)
* **PR** `#37469`_: (`terminalmage`_) Rewrite file.extract_hash to improve its matching ability
@ *2016-11-06 01:50:01 UTC*
* **PR** `#37081`_: (`terminalmage`_) Fix archive.extracted remote source_hash verification (refs: `#37469`_)
* 129b0387e6 Merge pull request `#37469`_ from terminalmage/issue29010
* a3f38e5a9f Update file.extract_hash unit tests
* b26b528f79 Add the source_hash_name param to file.managed states
* 52fe72d402 Rewrite file.extract_hash
* **ISSUE** `#37389`_: (`d101nelson`_) Some core grains are inaccurate or incomplete for Solaris (refs: `#37472`_)
* **PR** `#37472`_: (`sjorge`_) 2016.3 solaris grains improvements (refs: `#37494`_)
@ *2016-11-06 01:46:10 UTC*
* 9426b9d5c4 Merge pull request `#37472`_ from sjorge/2016.3-solaris-grains
* 2958f5ce52 detect and properly handle OmniOS
* 37c3a7f5ab handle Oracle Solaris better
* 69706d32be parse minorrelease if it has a / in it
* d1cf4a0e56 improve regex for parsing /etc/release using files from Solaris 8 SPARC and Solaris 10
* 88eddef765 some more cleanup for smartos
* d3ff39f09c improve smartos os version grains
* **PR** `#37478`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3
@ *2016-11-04 20:30:08 UTC*
* 4ba63aba48 Merge pull request `#37478`_ from rallytime/merge-2016.3
* 3483a445f2 Merge branch '2015.8' into '2016.3'
* 35888c2e30 Merge pull request `#37408`_ from terminalmage/issue37286
* 4e4a05731e Strip slashes from gitfs mountpoints
* b6c57c6c8d Merge pull request `#37418`_ from terminalmage/issue36849
* 740bc54239 Do not use compression in tornado httpclient requests
* 7fba8aaa7e Merge pull request `#37441`_ from rallytime/bp-37428
* 6fe3ef49de Fix incorrect reference of __utils__ in salt.utils
* **PR** `#37485`_: (`rallytime`_) Get release notes started for 2016.3.5
* **PR** `#37483`_: (`rallytime`_) [2016.3] Doc version updated to 2016.3.4
* **ISSUE** `#37123`_: (`nevins-b`_) file.recurse state doesn't support pulling from other environments (refs: `#37121`_)
* **PR** `#37121`_: (`nevins-b`_) allow the file.recurse state to support saltenv
@ *2016-11-04 05:59:28 UTC*
* 580eca709b Merge pull request `#37121`_ from nevins-b/2016.3
* 99d2c360ed making messaging in tests match new return
* bc4b0e7cda adding test for saltenv in file.recurse source url
* 3315b67075 fixing saltenv if not set in url
* a9683cbbd8 allow the file.recurse state to support saltenv (salt://example/dir?saltenv=dev)
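
  A minimal sketch of the new query-parameter form in a ``file.recurse`` source (the target path is illustrative; the source URL is the one from the change):

  .. code-block:: yaml

      /srv/example:
        file.recurse:
          - source: salt://example/dir?saltenv=dev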
* **PR** `#37426`_: (`jfindlay`_) Wait for macOS to change system settings
@ *2016-11-04 04:35:52 UTC*
* **PR** `#37351`_: (`jfindlay`_) modules.mac_power: give macOS time to change setting (refs: `#37426`_)
* 766b1437c2 Merge pull request `#37426`_ from jfindlay/mac_sleep
* 43a8e199bf modules.mac_power: wait for system to make change
* feabca6e0b modules.mac_system: wait for system to make change
* 0213eb9a07 utils.mac_utils: add confirm_updated
* **ISSUE** `#37238`_: (`cmclaughlin`_) Restarting master causes minion to hang (refs: `#37438`_, `#37602`_)
* **ISSUE** `#37018`_: (`tsaridas`_) get events from python (refs: `#37438`_, `#37602`_)
* **PR** `#37438`_: (`DmitryKuzmenko`_) Fix for `#37238`_ salt hang on master restart (refs: `#37602`_)
@ *2016-11-04 04:10:51 UTC*
* 9eab5c8f71 Merge pull request `#37438`_ from DSRCorporation/bugs/37238_salt_hang_on_master_restart
* f253d3ce4a Auto reconnect `salt` to master if the connection was lost.
* **PR** `saltstack/salt#31207`_: (`thusoy`_) Remove error logging of missing boto libraries (refs: `#37440`_)
* **PR** `#37440`_: (`rallytime`_) Back-port `#31207`_ to 2016.3
@ *2016-11-04 04:09:33 UTC*
* **PR** `#31207`_: (`thusoy`_) Remove error logging of missing boto libraries (refs: `#37440`_)
* 9aa7073f70 Merge pull request `#37440`_ from rallytime/bp-31207
* c71ae61271 Remove error logging of missing boto libraries
* **PR** `#37442`_: (`twangboy`_) Create paths.d directory
@ *2016-11-04 04:07:19 UTC*
* edbfadca21 Merge pull request `#37442`_ from twangboy/fix_osx_postinstall
* 8091a3065e Create paths.d directory
* **PR** `#37445`_: (`twangboy`_) Check for Server os before checking [DO NOT MERGE FORWARD]
@ *2016-11-04 04:04:49 UTC*
* afb1b3cee5 Merge pull request `#37445`_ from twangboy/fix_import_error_2016.3
* c0d5ebdd8a Check for Server os before checking
* **PR** `#37446`_: (`twangboy`_) Detect VC++ for Python on Win32
@ *2016-11-04 04:04:02 UTC*
* 7a9f95ab3b Merge pull request `#37446`_ from twangboy/fix_build_32
* 2de69f48f8 Detect VC for Python correctly on 32bit Windows
* **ISSUE** `saltstack/salt#36961`_: (`nullify005`_) boto_secgroup assumes a string when checking ip_protocol validity when not tcp|udp|all|-1 (refs: `#37447`_)
* **PR** `#37447`_: (`rallytime`_) Cast ip_protocol rule as a str() in boto_secgroup.present
@ *2016-11-04 04:03:45 UTC*
* 651e0f728f Merge pull request `#37447`_ from rallytime/fix-36961
* 6b930ac7aa Cast ip_protocol rule as a str() in boto_secgroup.present
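
  A hedged sketch of the case the fix covers: a numeric ``ip_protocol`` (anything outside tcp|udp|all|-1, e.g. 112 for VRRP) is now cast to a string before validation (group name, description, and CIDR are illustrative):

  .. code-block:: yaml

      vrrp_allow:
        boto_secgroup.present:
          - name: vrrp_allow
          - description: allow VRRP traffic
          - rules:
            - ip_protocol: 112
              from_port: -1
              to_port: -1
              cidr_ip: 10.0.0.0/8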
* **ISSUE** `#36446`_: (`whiteinge`_) Custom salt-api config problem (refs: `#37455`_)
* **PR** `saltstack/salt#36386`_: (`xiaoanyunfei`_) fix salt-api's default opts were covered by salt-master `#35734`_ (refs: `#37455`_)
* **PR** `#37455`_: (`techhat`_) Make api opts respect correct root_dir
@ *2016-11-04 03:25:40 UTC*
* **PR** `#35734`_: (`xiaoanyunfei`_) fix salt-api's default opts were covered by salt-master (refs: `saltstack/salt#36386`_)
* a51d944c7c Merge pull request `#37455`_ from techhat/issue36446
* 7eff90d61d Make api opts respect correct root_dir
* **PR** `#37459`_: (`twangboy`_) Fix error message when ConvertTo-Json not supported [DO NOT MERGE FORWARD]
@ *2016-11-04 03:22:31 UTC*
* 3591bf0f58 Merge pull request `#37459`_ from twangboy/fix_dsc_json_msg_2016.3
* 949b70913d Use cmd.run_all instead of cmd.shell
* **PR** `#37430`_: (`meaksh`_) Including resolution parameters in the Zypper debug-solver call during a dry-run dist-upgrade (2016.3)
@ *2016-11-03 14:35:46 UTC*
* **PR** `#37353`_: (`meaksh`_) Including resolution parameters in the Zypper debug-solver call during a dry-run dist-upgrade (refs: `#37430`_)
* 80a99c4cc5 Merge pull request `#37430`_ from meaksh/zypper-dist-upgrade-debug-solver-fix-2016.3
* ffc596f215 Including resolver params for Zypper debug-solver
* **ISSUE** `#37388`_: (`tyhunt99`_) [2016.3.4] Refreshing of an s3 file server results in an exception. (refs: `#37428`_)
* **PR** `#37428`_: (`cachedout`_) Fix incorrect reference of __utils__ in salt.utils (refs: `#37441`_)
* **PR** `#37419`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3
@ *2016-11-02 21:40:04 UTC*
* 7864f9b79d Merge pull request `#37419`_ from rallytime/merge-2016.3
* bce47c9175 Merge branch '2015.8' into '2016.3'
* 7b1d3b5562 Merge pull request `#37392`_ from rallytime/bp-33190
* 4063bae5de catch None cases for comments in jboss7 state module
* **PR** `#37416`_: (`terminalmage`_) Fix regression in output for Ctrl-c'ed CLI jobs
* **PR** `#37414`_: (`pass-by-value`_) Add unit tests for cloning from snapshot
* **PR** `#37350`_: (`pass-by-value`_) Add handling for full and linked clone (refs: `#37414`_)
* **PR** `saltstack/salt#37401`_: (`cachedout`_) Bootstrap delay option for salt-cloud (refs: `#37404`_)
* **PR** `#37404`_: (`cachedout`_) Revert "Bootstrap delay option for salt-cloud"
@ *2016-11-02 09:48:53 UTC*
* ecd794a233 Merge pull request `#37404`_ from saltstack/revert-37401-bootstrap_delay
* e864de8f03 Revert "Bootstrap delay option for salt-cloud"
* **PR** `#37401`_: (`cachedout`_) Bootstrap delay option for salt-cloud
@ *2016-11-02 09:02:13 UTC*
* 2eb44fbd11 Merge pull request `#37401`_ from cachedout/bootstrap_delay
* 6e42b0e157 Bootstrap delay option for salt-cloud
* **PR** `#37350`_: (`pass-by-value`_) Add handling for full and linked clone (refs: `#37414`_)
@ *2016-11-02 08:02:29 UTC*
* 9446e48da0 Merge pull request `#37350`_ from pass-by-value/full_and_linked_clone_v1
* d8b1c9c777 Add handling for full and linked clone and commit disk mode additions
* **ISSUE** `#34841`_: (`Ch3LL`_) Wrong return when using `user.chgroups` on windows (refs: `#37386`_)
* **PR** `#37386`_: (`rallytime`_) Fix win_useradd.chgroups return when cmd.run_all retcode != 0
@ *2016-11-02 06:34:12 UTC*
* c7f4d7f76a Merge pull request `#37386`_ from rallytime/fix-34841
* c70492a1fe Fix win_useradd.chgroups return when cmd.run_all retcode != 0
* **ISSUE** `#34263`_: (`vernondcole`_) Use of dnsmasq.set_config injects unintentional text into the configuration file. (refs: `#37390`_)
* **PR** `#37390`_: (`rallytime`_) Don't insert __pub* keys into dnsmasq config file with set_config function
@ *2016-11-02 06:31:53 UTC*
* 34b6c6459a Merge pull request `#37390`_ from rallytime/fix-34263
* e082ff538b Fix failing test now that we're raising a CommandExecutionError
* c6a3476abb Filter out the __pub keys passed via \*\*kwargs for dnsmasq.set_config
* fd380c79b9 Add test case to reproduce dnsmasq.set_config failure in `#34263`_
* **ISSUE** `#35163`_: (`SolarisYan`_) salt file.mkdir (refs: `#35287`_, `#35189`_)
* **PR** `#37391`_: (`rallytime`_) Back-port `#35287`_ to 2016.3
@ *2016-11-02 06:18:26 UTC*
* **PR** `#35287`_: (`dere`_) 2016.3 (refs: `#37391`_)
* **PR** `#35189`_: (`dere`_) return value for file.mkdir instead of None (refs: `#35287`_)
* 798b2acbe3 Merge pull request `#37391`_ from rallytime/bp-35287
* 0e1ebea5a4 Simplify return value to "True".
* 13022c5cc4 return value for mkdir instead of None
* **ISSUE** `#37264`_: (`junster1`_) Parsing __grains__ with json.dumps in a module is returning an empty dict in 2016.3.3 (refs: `#37279`_)
* **PR** `#37279`_: (`gtmanfred`_) initialize super class of NamespacedDictWrapper
@ *2016-11-01 15:12:49 UTC*
* 1a4833b3a1 Merge pull request `#37279`_ from gtmanfred/2016.3
* 597f346d57 initialize super class of NamespacedDictWrapper
* **PR** `#37351`_: (`jfindlay`_) modules.mac_power: give macOS time to change setting (refs: `#37426`_)
@ *2016-10-31 19:15:40 UTC*
* 351175931c Merge pull request `#37351`_ from jfindlay/mac_set
* 0c58056d84 modules.mac_power: give macOS time to change setting
* **PR** `#37340`_: (`cachedout`_) SIGILL -> SIGKILL in process test
@ *2016-10-31 08:50:10 UTC*
* 25c987e33a Merge pull request `#37340`_ from cachedout/ill_kill_3
* a6b7417fe9 SIGILL -> SIGKILL in process test
* **ISSUE** `#35480`_: (`jelenak`_) 200 processes of salt-master (2016.3.2) (refs: `#37306`_)
* **PR** `#37306`_: (`DmitryKuzmenko`_) Don't use os.wait() on subprocesses managed by `multiprocessing`.
@ *2016-10-31 06:55:30 UTC*
* 7f1654894d Merge pull request `#37306`_ from DSRCorporation/bugs/35480_master_shutdown_no_process_error
* b6937ebaa8 Don't use os.wait() on subprocesses managed by `multiprocessing`.
* **ISSUE** `#34998`_: (`exowaucka`_) placementgroup parameter for salt-cloud is undocumented (refs: `#37314`_)
* **PR** `#37314`_: (`rallytime`_) Document the existence of placementgroup option in ec2 driver
@ *2016-10-31 06:42:33 UTC*
* bf8ba97d54 Merge pull request `#37314`_ from rallytime/fix-34998
* 39459ed30b Document the existence of placementgroup option in ec2 driver
* **ISSUE** `#36148`_: (`alex-zel`_) Eauth error with openLDAP groups (refs: `#37219`_)
* **PR** `#37219`_: (`alex-zel`_) Fix freeipa ldap groups
@ *2016-10-28 04:33:37 UTC*
* e0baf4b193 Merge pull request `#37219`_ from alex-zel/fix-freeipa-ldap-groups
* b5b2e7e097 Remove trailing whitespaces
* 32f906b020 Add support for FreeIPA
.. _`#10`: https://github.com/saltstack/salt/issues/10
.. _`#12788`: https://github.com/saltstack/salt/issues/12788
.. _`#15697`: https://github.com/saltstack/salt/issues/15697
.. _`#19269`: https://github.com/saltstack/salt/issues/19269
.. _`#19`: https://github.com/saltstack/salt/issues/19
.. _`#20`: https://github.com/saltstack/salt/issues/20
.. _`#25297`: https://github.com/saltstack/salt/issues/25297
.. _`#25361`: https://github.com/saltstack/salt/pull/25361
.. _`#27355`: https://github.com/saltstack/salt/issues/27355
.. _`#29010`: https://github.com/saltstack/salt/issues/29010
.. _`#29294`: https://github.com/saltstack/salt/pull/29294
.. _`#30454`: https://github.com/saltstack/salt/issues/30454
.. _`#30481`: https://github.com/saltstack/salt/pull/30481
.. _`#31135`: https://github.com/saltstack/salt/issues/31135
.. _`#31207`: https://github.com/saltstack/salt/pull/31207
.. _`#31953`: https://github.com/saltstack/salt/issues/31953
.. _`#32157`: https://github.com/saltstack/salt/pull/32157
.. _`#32400`: https://github.com/saltstack/salt/issues/32400
.. _`#32829`: https://github.com/saltstack/salt/issues/32829
.. _`#32965`: https://github.com/saltstack/salt/pull/32965
.. _`#33601`: https://github.com/saltstack/salt/pull/33601
.. _`#33645`: https://github.com/saltstack/salt/issues/33645
.. _`#33709`: https://github.com/saltstack/salt/issues/33709
.. _`#33833`: https://github.com/saltstack/salt/pull/33833
.. _`#34059`: https://github.com/saltstack/salt/pull/34059
.. _`#34263`: https://github.com/saltstack/salt/issues/34263
.. _`#34504`: https://github.com/saltstack/salt/issues/34504
.. _`#34547`: https://github.com/saltstack/salt/issues/34547
.. _`#34600`: https://github.com/saltstack/salt/issues/34600
.. _`#34841`: https://github.com/saltstack/salt/issues/34841
.. _`#34998`: https://github.com/saltstack/salt/issues/34998
.. _`#35016`: https://github.com/saltstack/salt/issues/35016
.. _`#35088`: https://github.com/saltstack/salt/issues/35088
.. _`#35163`: https://github.com/saltstack/salt/issues/35163
.. _`#35189`: https://github.com/saltstack/salt/pull/35189
.. _`#35287`: https://github.com/saltstack/salt/pull/35287
.. _`#35342`: https://github.com/saltstack/salt/issues/35342
.. _`#35390`: https://github.com/saltstack/salt/pull/35390
.. _`#35480`: https://github.com/saltstack/salt/issues/35480
.. _`#35673`: https://github.com/saltstack/salt/pull/35673
.. _`#35734`: https://github.com/saltstack/salt/pull/35734
.. _`#35799`: https://github.com/saltstack/salt/issues/35799
.. _`#35964`: https://github.com/saltstack/salt/issues/35964
.. _`#35965`: https://github.com/saltstack/salt/pull/35965
.. _`#36148`: https://github.com/saltstack/salt/issues/36148
.. _`#36446`: https://github.com/saltstack/salt/issues/36446
.. _`#36548`: https://github.com/saltstack/salt/issues/36548
.. _`#36598`: https://github.com/saltstack/salt/issues/36598
.. _`#36627`: https://github.com/saltstack/salt/pull/36627
.. _`#36629`: https://github.com/saltstack/salt/issues/36629
.. _`#36644`: https://github.com/saltstack/salt/issues/36644
.. _`#36706`: https://github.com/saltstack/salt/pull/36706
.. _`#36707`: https://github.com/saltstack/salt/issues/36707
.. _`#36713`: https://github.com/saltstack/salt/issues/36713
.. _`#36736`: https://github.com/saltstack/salt/pull/36736
.. _`#36784`: https://github.com/saltstack/salt/pull/36784
.. _`#36794`: https://github.com/saltstack/salt/pull/36794
.. _`#36893`: https://github.com/saltstack/salt/pull/36893
.. _`#36938`: https://github.com/saltstack/salt/pull/36938
.. _`#37001`: https://github.com/saltstack/salt/issues/37001
.. _`#37018`: https://github.com/saltstack/salt/issues/37018
.. _`#37059`: https://github.com/saltstack/salt/issues/37059
.. _`#37081`: https://github.com/saltstack/salt/pull/37081
.. _`#37084`: https://github.com/saltstack/salt/issues/37084
.. _`#37118`: https://github.com/saltstack/salt/issues/37118
.. _`#37121`: https://github.com/saltstack/salt/pull/37121
.. _`#37123`: https://github.com/saltstack/salt/issues/37123
.. _`#37149`: https://github.com/saltstack/salt/pull/37149
.. _`#37219`: https://github.com/saltstack/salt/pull/37219
.. _`#37238`: https://github.com/saltstack/salt/issues/37238
.. _`#37239`: https://github.com/saltstack/salt/pull/37239
.. _`#37264`: https://github.com/saltstack/salt/issues/37264
.. _`#37272`: https://github.com/saltstack/salt/pull/37272
.. _`#37279`: https://github.com/saltstack/salt/pull/37279
.. _`#37287`: https://github.com/saltstack/salt/issues/37287
.. _`#37306`: https://github.com/saltstack/salt/pull/37306
.. _`#37314`: https://github.com/saltstack/salt/pull/37314
.. _`#37340`: https://github.com/saltstack/salt/pull/37340
.. _`#37349`: https://github.com/saltstack/salt/pull/37349
.. _`#37350`: https://github.com/saltstack/salt/pull/37350
.. _`#37351`: https://github.com/saltstack/salt/pull/37351
.. _`#37353`: https://github.com/saltstack/salt/pull/37353
.. _`#37355`: https://github.com/saltstack/salt/issues/37355
.. _`#37358`: https://github.com/saltstack/salt/pull/37358
.. _`#37383`: https://github.com/saltstack/salt/issues/37383
.. _`#37386`: https://github.com/saltstack/salt/pull/37386
.. _`#37388`: https://github.com/saltstack/salt/issues/37388
.. _`#37389`: https://github.com/saltstack/salt/issues/37389
.. _`#37390`: https://github.com/saltstack/salt/pull/37390
.. _`#37391`: https://github.com/saltstack/salt/pull/37391
.. _`#37392`: https://github.com/saltstack/salt/pull/37392
.. _`#37401`: https://github.com/saltstack/salt/pull/37401
.. _`#37404`: https://github.com/saltstack/salt/pull/37404
.. _`#37408`: https://github.com/saltstack/salt/pull/37408
.. _`#37414`: https://github.com/saltstack/salt/pull/37414
.. _`#37416`: https://github.com/saltstack/salt/pull/37416
.. _`#37418`: https://github.com/saltstack/salt/pull/37418
.. _`#37419`: https://github.com/saltstack/salt/pull/37419
.. _`#37426`: https://github.com/saltstack/salt/pull/37426
.. _`#37428`: https://github.com/saltstack/salt/pull/37428
.. _`#37430`: https://github.com/saltstack/salt/pull/37430
.. _`#37438`: https://github.com/saltstack/salt/pull/37438
.. _`#37440`: https://github.com/saltstack/salt/pull/37440
.. _`#37441`: https://github.com/saltstack/salt/pull/37441
.. _`#37442`: https://github.com/saltstack/salt/pull/37442
.. _`#37444`: https://github.com/saltstack/salt/issues/37444
.. _`#37445`: https://github.com/saltstack/salt/pull/37445
.. _`#37446`: https://github.com/saltstack/salt/pull/37446
.. _`#37447`: https://github.com/saltstack/salt/pull/37447
.. _`#37448`: https://github.com/saltstack/salt/issues/37448
.. _`#37452`: https://github.com/saltstack/salt/pull/37452
.. _`#37455`: https://github.com/saltstack/salt/pull/37455
.. _`#37457`: https://github.com/saltstack/salt/pull/37457
.. _`#37459`: https://github.com/saltstack/salt/pull/37459
.. _`#37469`: https://github.com/saltstack/salt/pull/37469
.. _`#37472`: https://github.com/saltstack/salt/pull/37472
.. _`#37478`: https://github.com/saltstack/salt/pull/37478
.. _`#37481`: https://github.com/saltstack/salt/pull/37481
.. _`#37482`: https://github.com/saltstack/salt/pull/37482
.. _`#37483`: https://github.com/saltstack/salt/pull/37483
.. _`#37485`: https://github.com/saltstack/salt/pull/37485
.. _`#37486`: https://github.com/saltstack/salt/pull/37486
.. _`#37491`: https://github.com/saltstack/salt/issues/37491
.. _`#37492`: https://github.com/saltstack/salt/issues/37492
.. _`#37493`: https://github.com/saltstack/salt/pull/37493
.. _`#37494`: https://github.com/saltstack/salt/pull/37494
.. _`#37496`: https://github.com/saltstack/salt/pull/37496
.. _`#37497`: https://github.com/saltstack/salt/pull/37497
.. _`#37498`: https://github.com/saltstack/salt/issues/37498
.. _`#37499`: https://github.com/saltstack/salt/pull/37499
.. _`#37500`: https://github.com/saltstack/salt/pull/37500
.. _`#37502`: https://github.com/saltstack/salt/pull/37502
.. _`#37503`: https://github.com/saltstack/salt/pull/37503
.. _`#37511`: https://github.com/saltstack/salt/issues/37511
.. _`#37514`: https://github.com/saltstack/salt/pull/37514
.. _`#37515`: https://github.com/saltstack/salt/pull/37515
.. _`#37519`: https://github.com/saltstack/salt/pull/37519
.. _`#37521`: https://github.com/saltstack/salt/pull/37521
.. _`#37526`: https://github.com/saltstack/salt/pull/37526
.. _`#37527`: https://github.com/saltstack/salt/pull/37527
.. _`#37529`: https://github.com/saltstack/salt/pull/37529
.. _`#37530`: https://github.com/saltstack/salt/pull/37530
.. _`#37533`: https://github.com/saltstack/salt/pull/37533
.. _`#37534`: https://github.com/saltstack/salt/pull/37534
.. _`#37540`: https://github.com/saltstack/salt/pull/37540
.. _`#37541`: https://github.com/saltstack/salt/issues/37541
.. _`#37543`: https://github.com/saltstack/salt/pull/37543
.. _`#37549`: https://github.com/saltstack/salt/pull/37549
.. _`#37551`: https://github.com/saltstack/salt/issues/37551
.. _`#37553`: https://github.com/saltstack/salt/pull/37553
.. _`#37554`: https://github.com/saltstack/salt/issues/37554
.. _`#37556`: https://github.com/saltstack/salt/pull/37556
.. _`#37560`: https://github.com/saltstack/salt/pull/37560
.. _`#37562`: https://github.com/saltstack/salt/pull/37562
.. _`#37565`: https://github.com/saltstack/salt/pull/37565
.. _`#37571`: https://github.com/saltstack/salt/pull/37571
.. _`#37578`: https://github.com/saltstack/salt/pull/37578
.. _`#37579`: https://github.com/saltstack/salt/pull/37579
.. _`#37584`: https://github.com/saltstack/salt/pull/37584
.. _`#37592`: https://github.com/saltstack/salt/issues/37592
.. _`#37600`: https://github.com/saltstack/salt/pull/37600
.. _`#37602`: https://github.com/saltstack/salt/pull/37602
.. _`#37604`: https://github.com/saltstack/salt/pull/37604
.. _`#37607`: https://github.com/saltstack/salt/pull/37607
.. _`#37608`: https://github.com/saltstack/salt/pull/37608
.. _`#37611`: https://github.com/saltstack/salt/pull/37611
.. _`#37614`: https://github.com/saltstack/salt/pull/37614
.. _`#37617`: https://github.com/saltstack/salt/pull/37617
.. _`#37625`: https://github.com/saltstack/salt/pull/37625
.. _`#37626`: https://github.com/saltstack/salt/pull/37626
.. _`#37627`: https://github.com/saltstack/salt/pull/37627
.. _`#37628`: https://github.com/saltstack/salt/issues/37628
.. _`#37629`: https://github.com/saltstack/salt/pull/37629
.. _`#37638`: https://github.com/saltstack/salt/pull/37638
.. _`#37639`: https://github.com/saltstack/salt/pull/37639
.. _`#37640`: https://github.com/saltstack/salt/pull/37640
.. _`#37641`: https://github.com/saltstack/salt/pull/37641
.. _`#37642`: https://github.com/saltstack/salt/pull/37642
.. _`#37643`: https://github.com/saltstack/salt/issues/37643
.. _`#37644`: https://github.com/saltstack/salt/pull/37644
.. _`#37653`: https://github.com/saltstack/salt/issues/37653
.. _`#37665`: https://github.com/saltstack/salt/issues/37665
.. _`#37668`: https://github.com/saltstack/salt/pull/37668
.. _`#37680`: https://github.com/saltstack/salt/pull/37680
.. _`#37681`: https://github.com/saltstack/salt/pull/37681
.. _`#37684`: https://github.com/saltstack/salt/issues/37684
.. _`#37690`: https://github.com/saltstack/salt/pull/37690
.. _`#37694`: https://github.com/saltstack/salt/pull/37694
.. _`#37704`: https://github.com/saltstack/salt/pull/37704
.. _`#37705`: https://github.com/saltstack/salt/pull/37705
.. _`#37707`: https://github.com/saltstack/salt/pull/37707
.. _`#37718`: https://github.com/saltstack/salt/pull/37718
.. _`#37719`: https://github.com/saltstack/salt/pull/37719
.. _`#37721`: https://github.com/saltstack/salt/pull/37721
.. _`#37724`: https://github.com/saltstack/salt/pull/37724
.. _`#37725`: https://github.com/saltstack/salt/issues/37725
.. _`#37731`: https://github.com/saltstack/salt/pull/37731
.. _`#37732`: https://github.com/saltstack/salt/issues/37732
.. _`#37734`: https://github.com/saltstack/salt/issues/37734
.. _`#37735`: https://github.com/saltstack/salt/pull/37735
.. _`#37736`: https://github.com/saltstack/salt/pull/37736
.. _`#37737`: https://github.com/saltstack/salt/issues/37737
.. _`#37738`: https://github.com/saltstack/salt/pull/37738
.. _`#37742`: https://github.com/saltstack/salt/issues/37742
.. _`#37745`: https://github.com/saltstack/salt/pull/37745
.. _`#37748`: https://github.com/saltstack/salt/pull/37748
.. _`#37751`: https://github.com/saltstack/salt/issues/37751
.. _`#37760`: https://github.com/saltstack/salt/pull/37760
.. _`#37762`: https://github.com/saltstack/salt/pull/37762
.. _`#37763`: https://github.com/saltstack/salt/pull/37763
.. _`#37766`: https://github.com/saltstack/salt/pull/37766
.. _`#37767`: https://github.com/saltstack/salt/pull/37767
.. _`#37772`: https://github.com/saltstack/salt/pull/37772
.. _`#37775`: https://github.com/saltstack/salt/pull/37775
.. _`#37785`: https://github.com/saltstack/salt/pull/37785
.. _`#37787`: https://github.com/saltstack/salt/issues/37787
.. _`#37789`: https://github.com/saltstack/salt/pull/37789
.. _`#37790`: https://github.com/saltstack/salt/pull/37790
.. _`#37797`: https://github.com/saltstack/salt/pull/37797
.. _`#37810`: https://github.com/saltstack/salt/pull/37810
.. _`#37811`: https://github.com/saltstack/salt/pull/37811
.. _`#37812`: https://github.com/saltstack/salt/pull/37812
.. _`#37816`: https://github.com/saltstack/salt/pull/37816
.. _`#37817`: https://github.com/saltstack/salt/pull/37817
.. _`#37820`: https://github.com/saltstack/salt/pull/37820
.. _`#37821`: https://github.com/saltstack/salt/pull/37821
.. _`#37822`: https://github.com/saltstack/salt/pull/37822
.. _`#37823`: https://github.com/saltstack/salt/pull/37823
.. _`#37826`: https://github.com/saltstack/salt/pull/37826
.. _`#37827`: https://github.com/saltstack/salt/pull/37827
.. _`#37847`: https://github.com/saltstack/salt/pull/37847
.. _`#37856`: https://github.com/saltstack/salt/pull/37856
.. _`#37857`: https://github.com/saltstack/salt/pull/37857
.. _`#37863`: https://github.com/saltstack/salt/pull/37863
.. _`#37866`: https://github.com/saltstack/salt/pull/37866
.. _`#37867`: https://github.com/saltstack/salt/issues/37867
.. _`#37870`: https://github.com/saltstack/salt/issues/37870
.. _`#37886`: https://github.com/saltstack/salt/pull/37886
.. _`#37895`: https://github.com/saltstack/salt/pull/37895
.. _`#37896`: https://github.com/saltstack/salt/pull/37896
.. _`#37899`: https://github.com/saltstack/salt/pull/37899
.. _`#37907`: https://github.com/saltstack/salt/pull/37907
.. _`#37912`: https://github.com/saltstack/salt/pull/37912
.. _`#37914`: https://github.com/saltstack/salt/pull/37914
.. _`#37916`: https://github.com/saltstack/salt/pull/37916
.. _`#37918`: https://github.com/saltstack/salt/pull/37918
.. _`#37921`: https://github.com/saltstack/salt/pull/37921
.. _`#37924`: https://github.com/saltstack/salt/pull/37924
.. _`#37925`: https://github.com/saltstack/salt/pull/37925
.. _`#37926`: https://github.com/saltstack/salt/pull/37926
.. _`#37928`: https://github.com/saltstack/salt/pull/37928
.. _`#37929`: https://github.com/saltstack/salt/pull/37929
.. _`#37939`: https://github.com/saltstack/salt/issues/37939
.. _`#37945`: https://github.com/saltstack/salt/issues/37945
========================
Salt 0.9.8 Release Notes
========================
:release: 2012-03-21
Salt 0.9.8 is a big step forward, with many additions and enhancements, as
well as a number of precursors to advanced future developments.
This version of Salt adds much more power to the command line, making the
old hard timeout issues a thing of the past, and adds keyword argument
support. These additions are also available in the salt client API, making
the available API tools much more powerful.
The new pillar system allows for data to be stored on the master and
assigned to minions in a granular way similar to the state system. It also
allows flexibility for users who want to keep data out of their state tree
similar to 'external lookup' functionality in other tools.
A new way to extend requisites was added: the "requisite in" statement.
This makes adding require or watch statements to external state declarations
much easier.
Additions making requisites much more powerful have been added, as well as
improved error checking for sls files in the state system. A new provider
system has been added to allow for redirecting what modules run in the
background for individual states.
Support for openSUSE has been added and support for Solaris has begun
serious development. Windows support has been significantly enhanced as well.
The matcher and target systems have received a great deal of attention. The
default behavior of grain matching has changed slightly to reflect the rest
of salt and the compound matcher system has been refined.
A number of impressive features with keyword arguments have been added to both
the CLI and to the state system. This makes states much more powerful and
flexible while maintaining the simple configuration everyone loves.
The new batch size capability allows for executions to be rolled through a
group of targeted minions a percentage or specific number at a time. This
was added to prevent the "thundering herd" problem when targeting large
numbers of minions for things like service restarts or file downloads.
Upgrade Considerations
======================
Upgrade Issues
--------------
There was a previously missed oversight which could cause a newer minion to
crash an older master. That oversight has been resolved so the version
incompatibility issue will no longer occur. When upgrading to 0.9.8 make
sure to upgrade the master first, followed by the minions.
Debian/Ubuntu Packages
----------------------
The original Debian/Ubuntu packages were called salt and included all salt
applications. New packages in the ppa are split by function. If an old salt
package is installed then it should be manually removed and the new split
packages need to be freshly installed.
On the master:
.. code-block:: sh
# apt-get purge salt
# apt-get install salt-{master,minion}
On the minions:
.. code-block:: sh
# apt-get purge salt
# apt-get install salt-minion
And on any Syndics:
.. code-block:: sh
# apt-get install salt-syndic
The official Salt PPA for Ubuntu is located at:
https://launchpad.net/~saltstack/+archive/ubuntu/salt
Major Features
==============
Pillar
------
:ref:`Pillar <pillar>` offers an interface to declare variable data on the master that is then
assigned to the minions. The pillar data is made available to all modules,
states, sls files etc. It is compiled on the master and is declared using the
existing renderer system. This means that learning pillar should be fairly
trivial to those already familiar with salt states.
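As a rough sketch of how this can look, a pillar tree mirrors the state tree;
the file names and values below are illustrative only:

.. code-block:: yaml

    # /srv/pillar/top.sls
    base:
      '*':
        - data

    # /srv/pillar/data.sls
    app_user: deploy
    app_port: 8080

Minions can then reference values such as ``pillar['app_user']`` from states
and templates.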
CLI Additions
-------------
The ``salt`` command has received a serious overhaul and is more powerful
than ever. Data is returned to the terminal as it is received, and the salt
command will now wait for all running minions to return data before stopping.
This makes adding very large *--timeout* arguments completely unnecessary and
gets rid of long running operations returning empty ``{}`` when the timeout is
exceeded.
When calling salt via sudo, the user originally running salt is saved to the
log for auditing purposes. This makes it easy to see who ran what by just
looking through the minion logs.
The *salt-key* command gained the *-D* and *--delete-all* arguments for
removing all keys. Be careful with this one!
Running States Without a Master
-------------------------------
The addition of running states without a salt-master has been added
to 0.9.8. This feature allows for the unmodified salt state tree to be
read locally from a minion. The result is that the UNMODIFIED state tree
has just become portable, allowing minions to have a local copy of states
or to manage states without a master entirely.
This is accomplished via the new file client interface in Salt that allows
for the ``salt://`` URI to be redirected to custom interfaces. This means that
there are now two interfaces for the salt file server, calling the master
or looking in a local, minion defined ``file_roots``.
This new feature can be used by modifying the minion config to point to a
local ``file_roots`` and setting the ``file_client`` option to ``local``.
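A minimal sketch of the relevant minion config options (the paths shown are
illustrative):

.. code-block:: yaml

    # /etc/salt/minion
    file_client: local
    file_roots:
      base:
        - /srv/salt

States can then be run locally on the minion with ``salt-call``.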
Keyword Arguments and States
----------------------------
State modules now accept the ``**kwargs`` argument. This results in all data
in a sls file assigned to a state being made available to the state function.
This passes data in a transparent way back to the modules executing the logic.
In particular, this allows adding arguments to the ``pkg.install`` module that
enable more advanced and granular controls with respect to what the state is
capable of.
An example of this along with the new debconf module for installing ldap
client packages on Debian:
.. code-block:: yaml
ldap-client-packages:
pkg:
- debconf: salt://debconf/ldap-client.ans
- installed
- names:
- nslcd
- libpam-ldapd
- libnss-ldapd
Keyword Arguments and the CLI
-----------------------------
In the past it was required that all arguments be passed in the proper order to
the *salt* and *salt-call* commands. As of 0.9.8, keyword arguments can be
passed in the form of ``kwarg=argument``.
.. code-block:: sh
# salt -G 'type:dev' git.clone \
repository=https://github.com/saltstack/salt.git cwd=/tmp/salt user=jeff
Matcher Refinements and Changes
-------------------------------
A number of fixes and changes have been applied to the Matcher system. The
most noteworthy is the change in the grain matcher. The grain matcher used to
use a regular expression to match the passed data to a grain, but now defaults
to a shell glob like the majority of match interfaces in Salt. A new option,
``grain-pcre``, is available that still uses the old style regex matching
against grain data. To use regex matching in compound matches use the letter *P*.
For example, this would match any ArchLinux or Fedora minions:
.. code-block:: sh
# salt --grain-pcre 'os:(Arch|Fed).*' test.ping
And the associated compound matcher suitable for ``top.sls`` is *P*:
.. code-block:: sh
P@os:(Arch|Fed).*
**NOTE**: Changing the grains matcher from pcre to glob is backwards
incompatible.
Support has been added for matching minions with Yahoo's range library. This
is handled by passing range syntax with *-R* or *--range* arguments to salt.
More information at:
https://github.com/ytoolshed/range/wiki/%22yamlfile%22-module-file-spec
Requisite "in"
--------------
A new means of updating requisite statements has been added to make adding
watchers and requires to external states easier. Before 0.9.8 the only way
to extend the states that were watched by a state outside of the sls was to
use an extend statement:
.. code-block:: yaml
include:
- http
extend:
apache:
service:
- watch:
- pkg: tomcat
tomcat:
pkg:
- installed
But the new ``Requisite in`` statement allows for easier extends for
requisites:
.. code-block:: yaml
include:
- http
tomcat:
pkg:
- installed
- watch_in:
- service: apache
Requisite in is part of the extend system, so still remember to always include
the sls that is being extended!
Providers
---------
Salt predetermines what modules should be mapped to what uses based on the
properties of a system. These determinations are generally made for modules
that provide things like package and service management. The apt module
maps to pkg on Debian and the yum module maps to pkg on Fedora for instance.
Sometimes in states, it may be necessary for a non-default module to be used
for the desired functionality. For instance, an Arch Linux system may have
been set up with systemd support. Instead of using the default service module
detected for Arch Linux, the systemd module can be used:
.. code-block:: yaml
http:
service:
- running
- enable: True
- provider: systemd
Default providers can also be defined in the minion config file:
.. code-block:: yaml
providers:
service: systemd
When default providers are set in the minion config, those providers will be
applied to all functionality in Salt. This means that the functions called by
the minion, as well as states, will use these modules.
Requisite Glob Matching
-----------------------
Requisites can now be defined with glob expansion. This means that if there are
many requisites, they can be defined on a single line.
To watch all files in a directory:
.. code-block:: yaml
http:
service:
- running
- enable: True
- watch:
- file: /etc/http/conf.d/*
This example will watch all defined files that match the glob
``/etc/http/conf.d/*``.
Batch Size
----------
The new batch size option allows commands to be executed while ensuring that
only so many hosts are executing the command at one time. This option can
take a percentage or a finite number:
.. code-block:: bash
salt '*' -b 10 test.ping
salt -G 'os:RedHat' --batch-size 25% apache.signal restart
This will only run test.ping on 10 of the targeted minions at a time and then
restart apache on 25% of the minions matching ``os:RedHat`` at a time and work
through them all until the task is complete. This makes jobs like rolling web
server restarts behind a load balancer or doing maintenance on BSD firewalls
using carp much easier with salt.
Module Updates
--------------
This is a notable, but non-exhaustive, list of updates to new and existing
modules.
Windows support has seen a flurry of activity this release cycle. We've gained
all new :mod:`file <salt.modules.win_file>`,
:mod:`network <salt.modules.win_network>`, and
:mod:`shadow <salt.modules.win_shadow>` modules. Please note
that these are still a work in progress.
For our ruby users, new :mod:`rvm <salt.modules.rvm>` and
:mod:`gem <salt.modules.gem>` modules have been added along
with the :mod:`associated <salt.states.rvm>`
:mod:`states <salt.states.gem>`
The :mod:`virt <salt.modules.virt>` module gained basic Xen support.
The :mod:`yum <salt.modules.yumpkg>` module gained
Scientific Linux support.
The :mod:`pkg <salt.modules.aptpkg>` module on Debian, Ubuntu,
and derivatives forces apt to run in a non-interactive mode. This prevents
issues when package installation waits for confirmation.
A :mod:`pkg <salt.modules.zypper>` module for OpenSUSE's
zypper was added.
The :mod:`service <salt.modules.upstart>` module on Ubuntu
natively supports upstart.
A new :mod:`debconf <salt.modules.debconfmod>` module was
contributed by our community for more advanced control over deb package
deployments on Debian based distributions.
The :mod:`mysql.user <salt.states.mysql_user>` state and
:mod:`mysql <salt.modules.mysql>` module gained a
*password_hash* argument.
The :mod:`cmd <salt.modules.cmdmod>` module and state gained
a *shell* keyword argument for specifying a shell other than ``/bin/sh`` on
Linux / Unix systems.
New :mod:`git <salt.modules.git>` and
:mod:`mercurial <salt.modules.hg>` modules have been added
for fans of distributed version control.
In Progress Development
=======================
Master Side State Compiling
---------------------------
While we feel strongly that the advantages gained with minion side state
compiling are very critical, it does prevent certain features that may be
desired. 0.9.8 has support for initial master side state compiling, but many
more components still need to be developed; it is hoped that these can be
finished for 0.9.9.
The goal is that states can be compiled on both the master and the minion
allowing for compilation to be split between master and minion. Why will
this be great? It will allow storing sensitive data on the master and sending
it to some minions without all minions having access to it. This will be
good for handling ssl certificates on front-end web servers for instance.
Solaris Support
---------------
Salt 0.9.8 sees the introduction of basic Solaris support. The daemon runs
well, but grains and more of the modules need updating and testing.
Windows Support
---------------
Salt states on Windows are now much more viable thanks to contributions from
our community! States for file, service, local user, and local group management are more fully
fleshed out along with network and disk modules. Windows users can also now manage
registry entries using the new "reg" module.
========================
Salt 0.9.5 Release Notes
========================
:release: 2012-01-15
Salt 0.9.5 is one of the largest steps forward in the development of Salt.
0.9.5 comes with many milestones; this release has seen the community of
developers grow into an international team of 46 code contributors, and it brings
many feature additions, feature enhancements, bug fixes and speed improvements.
.. warning::
Be sure to :ref:`read the upgrade instructions <v0.9.5-msgpack>` about the
switch to msgpack before upgrading!
Community
=========
Nothing has proven to have more value to the development of Salt than the
outstanding community that has been growing at such a great pace around Salt.
This has proven not only that Salt has great value, but also that the
expandability of Salt is as exponential as I originally intended.
0.9.5 has received over 600 additional commits since 0.9.4 with a swath of new
committers. The following individuals have contributed to the development of
0.9.5:
* Aaron Bull Schaefer
* Antti Kaihola
* Bas Tichelaar
* Brad Barden
* Brian Wagner
* Byron Clark
* Chris Scheller
* Christer Edwards
* Clint Savage
* Corey Quinn
* David Boucha
* Eivind Uggedal
* Eric Poelke
* Evan Borgstrom
* Jed Glazner
* Jeff Schroeder
* Jeffrey C. Ollie
* Jonas Buckner
* Kent Tenney
* Martin Schnabel
* Maxim Burgerhout
* Mitch Anderson
* Nathaniel Whiteinge
* Seth House
* Thomas S Hatch
* Thomas Schreiber
* Tor Hveem
* lzyeval
* syphernl
This makes 21 new developers since 0.9.4 was released!
To keep up with the growing community follow Salt on Black Duck Open Hub
(https://www.openhub.net/p/salt), to join the Salt development community, fork
Salt on GitHub, and get coding (https://github.com/saltstack/salt)!
Major Features
==============
.. _v0.9.5-msgpack:
SPEED! Pickle to msgpack
------------------------
For a few months now we have been talking about moving away from Python
pickles for network serialization, but a preferred serialization format
had not yet been found. After an extensive performance testing period
involving everything from JSON to protocol buffers, a clear winner emerged.
Message Pack (https://msgpack.org/) proved to not only be the fastest and most
compact, but also the most "salt like". Message Pack is simple, and the code
involved is very small. The msgpack library for Python has been added directly
to Salt.
This move introduces a few changes to Salt. First off, Salt is no longer a
"noarch" package, since the msgpack lib is written in C. Salt 0.9.5 will also
have compatibility issues with 0.9.4 with the default configuration.
We have gone to great lengths to avoid backwards compatibility issues with
Salt, but changing the serialization medium was going to create issues
regardless. Salt 0.9.5 is somewhat backwards compatible with earlier minions. A
0.9.5 master can command older minions, but only if the :conf_master:`serial`
config value in the master is set to ``pickle``. This will tell the master to
publish messages in pickle format and will allow the master to receive messages
in both msgpack and pickle formats.
Therefore **the suggested methods for upgrading** are either to just upgrade
everything at once, or:
1. Upgrade the master to 0.9.5
2. Set :conf_master:`serial` to ``pickle`` in the master config
3. Upgrade the minions
4. Remove the ``serial`` option from the master config
Since pickles can be used as a security exploit the ability for a master to
accept pickles from minions at all will be removed in a future release.
C Bindings for YAML
--------------------
All of the YAML rendering is now done with the YAML C bindings. This speeds up
all of the sls files when running states.
Experimental Windows Support
----------------------------
David Boucha has worked tirelessly to bring initial support to Salt for
Microsoft Windows operating systems. Right now the Salt Minion can run as a
native Windows service and accept commands.
In the weeks and months to come Windows will receive the full treatment and
will have support for Salt States and more robust support for managing Windows
systems. This is a big step forward for Salt to move entirely outside of the
Unix world, and proves Salt is a viable cross platform solution. Big Thanks
to Dave for his contribution here!
Dynamic Module Distribution
---------------------------
Many Salt users have expressed the desire to have Salt distribute in-house
modules, states, renderers, returners, and grains. This support has been added
in a number of ways:
Modules via States
```````````````````
Now when salt modules are deployed to a minion via the state system as a file,
the modules will be automatically loaded into the active running minion
- no restart required - and into the active running state. So custom state
modules can be deployed and used in the same state run.
Modules via Module Environment Directories
```````````````````````````````````````````
Under the file_roots each environment can now have directories that are used
to deploy large groups of modules. These directories sync modules at the
beginning of a state run on the minion, or can be manually synced via the Salt
module :mod:`salt.modules.saltutil.sync_all`.
The directories are named:
* ``_modules``
* ``_states``
* ``_grains``
* ``_renderers``
* ``_returners``
The modules are pushed to their respective scopes on the minions.
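For example, a manual sync of everything under these directories can be
triggered from the master:

.. code-block:: bash

    # Push custom modules under _modules, _states, etc. to all minions
    salt '*' saltutil.sync_all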
Module Reloading
----------------
Modules can now be reloaded without restarting the minion; this is done by
calling the :mod:`salt.modules.sys.reload_modules` function.
But wait, there's more! Now when a salt module of any type is added via
states the modules will be automatically reloaded, allowing for modules to be
laid down with states and then immediately used.
Finally, all modules are reloaded when modules are dynamically distributed
from the salt master.
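A manual reload looks like this:

.. code-block:: bash

    salt '*' sys.reload_modules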
Enable / Disable Added to Service
---------------------------------
A great deal of demand has existed for adding the capability to set services
to be started at boot in the service module. This feature also comes with an
overhaul of the service modules and initial systemd support.
This means that the :mod:`service state <salt.states.service.running>` can now
accept ``- enable: True`` to make sure a service is enabled at boot, and ``-
enable: False`` to make sure it is disabled.
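A short sketch of what this looks like in an sls file (the service name is
illustrative):

.. code-block:: yaml

    sshd:
      service:
        - running
        - enable: True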
Compound Target
---------------
A new target type has been added to the lineup, the compound target. In
previous versions the desired minions could only be targeted via a single
specific target type, but now many target specifications can be declared.
These targets can also be separated by and/or operators, so certain properties
can be used to omit a node:
.. code-block:: bash
salt -C 'webserv* and G@os:Debian or E@db.*' test.ping
will match all minions with ids starting with webserv via a glob and minions
matching the ``os:Debian`` grain, or minions that match the ``db.*`` regular
expression.
Node Groups
-----------
Often the convenience of having a predefined group of minions to execute
targets on is desired. This can be accomplished with the new nodegroups
feature. Nodegroups allow for predefined compound targets to be declared in
the master configuration file:
.. code-block:: yaml
nodegroups:
group1: '[email protected],bar.domain.com,baz.domain.com and bl*.domain.com'
group2: 'G@os:Debian and foo.domain.com'
And then used via the ``-N`` option:
.. code-block:: bash
salt -N group1 test.ping
Minion Side Data Store
-----------------------
The data module introduces the initial approach to storing persistent data on
the minions, specific to the minions. This allows for data to be stored on
minions that can be accessed from the master or from the minion.
The Minion datastore is young, and will eventually provide an interface similar
to a more mature key/value pair server.
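As a sketch of basic usage (the key and value are illustrative, and the exact
function names in the data module may vary by release):

.. code-block:: bash

    # Store and retrieve a value in the minion-local datastore
    salt '*' data.update deployed True
    salt '*' data.getval deployed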
Major Grains Improvement
-------------------------
The Salt grains have been overhauled to include a massive amount of extra data.
This includes hardware data, OS data and salt specific data.
Salt -Q is Useful Now
---------------------
In the past the salt query system, which displays the data from recent
executions, would render its output as raw Python, and it was unreadable.
0.9.5 has added the outputter system to the ``-Q`` option, thus enabling the
salt query system to return readable output.
Packaging Updates
=================
Huge strides have been made in packaging Salt for distributions. These
additions are thanks to our wonderful community where the work to set up
packages has proceeded tirelessly.
FreeBSD
-------
Salt on FreeBSD? There's a port for that:
https://svnweb.freebsd.org/ports/head/sysutils/py-salt/
This port was developed and added by Christer Edwards. This also marks the
first time Salt has been included in an upstream packaging system!
Fedora and Red Hat Enterprise
------------------------------
Salt packages have been prepared for inclusion in the Fedora Project and in
EPEL for Red Hat Enterprise 5 and 6. These packages are the result of the
efforts made by Clint Savage (herlo).
Debian/Ubuntu
-------------
A team of many contributors have assisted in developing packages for Debian
and Ubuntu. Salt is still actively seeking inclusion in upstream Debian and
Ubuntu and the package data that has been prepared is being pushed through
the needed channels for inclusion.
These packages have been prepared with the help of:
* Corey
* Aaron Toponce
* and others
More to Come
------------
We are actively seeking inclusion in more distributions. Primarily getting
Salt into Gentoo, SUSE, OpenBSD, and preparing Solaris support are all turning
into higher priorities.
Refinement
==========
Salt continues to be refined into a faster, more stable and more usable
application. 0.9.5 comes with more debug logging, more bug fixes and more
complete support.
More Testing, More BugFixes
---------------------------
0.9.5 comes with more bugfixes due to more testing than any previous release.
The growing community and the introduction of a dedicated QA environment have
unearthed many issues that were hiding under the covers. This has further
refined and cleaned the state interface, taking care of things from minor
visual issues to repairing misleading data.
Custom Exceptions
-----------------
A custom exception module has been added to throw salt specific exceptions.
This allows Salt to give much more granular error information.
New Modules
-----------
:mod:`data <salt.modules.data>`
```````````````````````````````
The new data module manages a persistent datastore on the minion.
Big thanks to bastichelaar for his help refining this module
:mod:`freebsdkmod <salt.modules.freebsdkmod>`
`````````````````````````````````````````````
FreeBSD kernel modules can now be managed in the same way Salt handles Linux
kernel modules.
This module was contributed thanks to the efforts of Christer Edwards
:mod:`gentoo_service <salt.modules.gentoo_service>`
```````````````````````````````````````````````````
Support has been added for managing services in Gentoo. Now Gentoo services
can be started, stopped, restarted, enabled, disabled, and viewed.
:mod:`pip <salt.modules.pip>`
`````````````````````````````
The pip module introduces management for pip installed applications.
Thanks goes to whitinge for the addition of the pip module
:mod:`rh_service <salt.modules.rh_service>`
```````````````````````````````````````````
The rh_service module enables Red Hat and Fedora specific service management.
Now Red Hat like systems come with extensive management of the classic init
system used by Red Hat.
:mod:`saltutil <salt.modules.saltutil>`
```````````````````````````````````````
The saltutil module has been added as a place to hold functions used in the
maintenance and management of salt itself. Saltutil is used to salt the salt
minion. The saltutil module is presently used only to sync extension modules
from the master server.
:mod:`systemd <salt.modules.systemd>`
`````````````````````````````````````
Systemd support has been added to Salt; systems using this next generation
init system are now supported.
:mod:`virtualenv <salt.modules.virtualenv>`
```````````````````````````````````````````
The virtualenv module has been added to allow salt to create virtual Python
environments.
Thanks goes to whitinge for the addition of the virtualenv module
:mod:`win_disk <salt.modules.win_disk>`
```````````````````````````````````````
Support for gathering disk information on Microsoft Windows minions
The windows modules come courtesy of Utah_Dave
:mod:`win_service <salt.modules.win_service>`
`````````````````````````````````````````````
The win_service module adds service support to Salt for Microsoft Windows
services
:mod:`win_useradd <salt.modules.win_useradd>`
`````````````````````````````````````````````
Salt can now manage local users on Microsoft Windows Systems
:mod:`yumpkg5 <salt.modules.yumpkg5>`
`````````````````````````````````````
The yumpkg module, introduced in 0.9.4, uses the yum API to interact with the
yum package manager. Unfortunately, on Red Hat 5 systems salt does not have
access to the yum API because the yum API runs under Python 2.4 and Salt
needs to run under Python 2.6.
The yumpkg5 module bypasses this issue by shelling out to yum on systems where
the yum API is not available.
New States
-----------
:mod:`mysql_database <salt.states.mysql_database>`
``````````````````````````````````````````````````
The new mysql_database state adds the ability for systems running a mysql
server to manage the existence of mysql databases.
The mysql states are thanks to syphernl
:mod:`mysql_user <salt.states.mysql_user>`
``````````````````````````````````````````
The mysql_user state enables mysql user management.
:mod:`virtualenv <salt.states.virtualenv>`
``````````````````````````````````````````
The virtualenv state can manage the state of Python virtual environments.
Thanks to Whitinge for the virtualenv state
New Returners
-------------
:mod:`cassandra_returner <salt.returners.cassandra_return>`
```````````````````````````````````````````````````````````
A returner allowing Salt to send data to a cassandra server.
Thanks to Byron Clark for contributing this returner
=========================
Salt 0.11.0 Release Notes
=========================
:release: 2012-12-14
Salt 0.11.0 is here, with some highly sought after and exciting features.
These features include the new overstate system, the reactor system, a new
state run scope component called __context__, the beginning of the search
system (still needs a great deal of work), multiple package states, the MySQL
returner and a better system to arbitrarily reference outputters.
It is also noteworthy that we are changing how we mark release numbers. For the
life of the project we have been pushing every release with features and fixes
as point releases. We will now be releasing point releases for only bug fixes
on a more regular basis and major feature releases on a slightly less regular
basis. This means that the next release will be a bugfix only release with a
version number of 0.11.1. The next feature release will be named 0.12.0 and
will mark the end of life for the 0.11 series.
Major Features
==============
OverState
---------
The overstate system is a simple way to manage rolling state executions across
many minions. The overstate allows for a state to depend on the successful
completion of another state.
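A minimal overstate sls sketch, with illustrative stage names and targets,
might look like this; it would be executed with ``salt-run state.over``:

.. code-block:: yaml

    mysql:
      match: 'db*'
    webservers:
      match: 'web*'
      require:
        - mysql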
Reactor System
--------------
The new reactor system allows for a reactive logic engine to be created which
can respond to events within a salted environment. The reactor system uses sls
files to match events fired on the master with actions, enabling Salt
to react to problems in an infrastructure.
Your load-balanced group of webservers is under extra load? Spin up a new VM
and add it to the group. Your fileserver is filling up? Send a notification to
your sysadmin on call. The possibilities are endless!
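As a sketch, reactions are mapped to event tags in the master config; the
tag and sls path below are illustrative:

.. code-block:: yaml

    reactor:
      - 'salt/minion/*/start':
        - /srv/reactor/start.sls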
Module Context
--------------
A new component has been added to the module loader system. The module context
is a data structure that can hold objects for a given scope within the module.
This allows for components that are initialized to be stored in a persistent
context which can greatly speed up ongoing connections. Right now the best
example can be found in the ``cp`` execution module.
Multiple Package Management
---------------------------
A long desired feature has been added to package management. By definition Salt
States have always installed packages one at a time. On most platforms this is
not the fastest way to install packages. Erik Johnson, aka terminalmage, has
modified the package modules for many providers and added new capabilities to
install groups of packages. These package groups can be defined as a list of
packages available in repository servers:
.. code-block:: yaml
python_pkgs:
pkg.installed:
- pkgs:
- python-mako
- whoosh
- python-git
or specify based on the location of specific packages:
.. code-block:: yaml
python_pkgs:
pkg.installed:
- sources:
- python-mako: http://some-rpms.org/python-mako.rpm
- whoosh: salt://whoosh/whoosh.rpm
- python-git: ftp://companyserver.net/python-git.rpm
Search System
-------------
The bones to the search system have been added. This is a very basic interface
that allows for search backends to be added as search modules. The first
supported search module is the whoosh search backend. Right now only the basic
paths for the search system are in place, making this very experimental.
Further development will involve improving the search routines and index
routines for whoosh and other search backends.
The search system has been made to allow for searching through all of the state
and pillar files, configuration files and all return data from minion
executions.
Notable Changes
===============
All previous versions of Salt have shared many directories between the master
and minion. The default locations for keys, cached data and sockets have been
shared by master and minion. This has created serious problems with running a
master and a minion on the same system. 0.11.0 changes the defaults to be
separate directories. Salt will also attempt to migrate all of the old key data
into the correct new directories, but if it is not successful it may need to be
done manually. If your keys exhibit issues after updating make sure that they
have been moved from ``/etc/salt/pki`` to ``/etc/salt/pki/{master,minion}``.
The old setup will look like this:
.. code-block:: text
/etc/salt/pki
|-- master.pem
|-- master.pub
|-- minions
| `-- ragnarok.saltstack.net
|-- minions_pre
|-- minion.pem
|-- minion.pub
|-- minion_master.pub
`-- minions_rejected
With the accepted minion keys in ``/etc/salt/pki/minions``, the new setup
places the accepted minion keys in ``/etc/salt/pki/master/minions``.
.. code-block:: text
/etc/salt/pki
|-- master
| |-- master.pem
| |-- master.pub
| |-- minions
| | `-- ragnarok.saltstack.net
| |-- minions_pre
| `-- minions_rejected
|-- minion
| |-- minion.pem
| |-- minion.pub
| `-- minion_master.pub
=========================
Salt 0.15.0 Release Notes
=========================
:release: 2013-05-03
The many new features of Salt 0.15.0 have arrived! Salt 0.15.0 comes with many
smaller features and a few larger ones.
These features range from better debugging tools to the new Salt Mine system.
Major Features
==============
The Salt Mine
-------------
First there was the peer system, allowing for commands to be executed from a
minion to other minions to gather data live. Then there was the external job
cache for storing and accessing long term data. Now the middle ground is being
filled in with the Salt Mine. The Salt Mine is a system used to execute
functions on a regular basis on minions and then store only the most recent
data from the functions on the master; the data is then looked up via targets.
The mine caches data that is public to all minions, so when a minion posts
data to the mine all other minions can see it.
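A minimal sketch of enabling the mine in the minion config (the function and
interval shown are illustrative):

.. code-block:: yaml

    mine_functions:
      network.interfaces: []
    mine_interval: 60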
IPV6 Support
------------
0.13.0 saw the addition of initial IPV6 support but errors were encountered and
it needed to be stripped out. This time the code covers more cases and must be
explicitly enabled. But the support is much more extensive than before.
Copy Files From Minions to the Master
-------------------------------------
Minions have long been able to copy files down from the master file server, but
until now files could not be easily copied from the minion up to the master.
A new function called ``cp.push`` can push files from the minions up to the
master server. The uploaded files are then cached on the master in the master
cachedir for each minion.
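As a sketch (the minion id and path are illustrative; depending on
configuration the master may need to be set up to receive files):

.. code-block:: bash

    salt 'web1' cp.push /etc/nginx/nginx.conf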
Better Template Debugging
-------------------------
Template errors have long been a burden when writing states and pillar. 0.15.0
will now send the compiled template data to the debug log, which makes tracking
down errors in intermediate stage templates much easier. Running state.sls or
state.highstate with ``-l debug`` will now print out the rendered templates in
the debug output.
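For example:

.. code-block:: bash

    salt-call state.highstate -l debug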
State Event Firing
------------------
The state system is now more closely tied to the master's event bus. Now when
a state fails the failure will be fired on the master event bus so that the
reactor can respond to it.
Major Syndic Updates
--------------------
The Syndic system has been basically re-written. Now it runs in a completely
asynchronous way and functions primarily as an event broker. This means that
the events fired on the syndic are now pushed up to the higher level master
instead of via the old method, which waited for the client libraries to
return.
This makes the syndic much more accurate and powerful; it also means that
all events fired on the syndic master make it up the pipe as well, making a
reactor on the higher level master able to react to minions further
downstream.
Peer System Updates
-------------------
The Peer System has been updated to run using the client libraries instead
of firing directly over the publish bus. This makes the peer system much more
consistent and reliable.
Minion Key Revocation
---------------------
In the past when a minion was decommissioned the key needed to be manually
deleted on the master, but now a function on the minion can be used to revoke
the calling minion's key:
.. code-block:: bash
$ salt-call saltutil.revoke_auth
Function Return Codes
---------------------
Functions can now be assigned numeric return codes to determine if the function
executed successfully. While not all functions have been given return codes,
many have and it is an ongoing effort to fill out all functions that might
return a non-zero return code.
Functions in Overstate
----------------------
The overstate system was originally created to just manage the execution of
states, but with the addition of return codes to functions, requisite logic can
now be used with respect to the overstate. This means that an overstate stage
can now run single functions instead of just state executions.
Pillar Error Reporting
----------------------
Previously if errors surfaced in pillar, then the pillar would consist of only
an empty dict. Now all data that was successfully rendered stays in pillar and
the render error is also made available. If errors are found in the pillar,
states will refuse to run.
Using Cached State Data
-----------------------
Sometimes states are executed purely to maintain a specific state rather than
to update states with new configs. This is grounds for the new cached state
system. By adding ``cache=True`` to a state call the state will not be generated
fresh from the master but the last state data to be generated will be used.
If no previous state data is available then fresh data will be generated.
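A sketch of what this looks like on the CLI:

.. code-block:: bash

    salt '*' state.highstate cache=True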
Monitoring States
-----------------
The new monitoring states system has been started. This is very young but
allows for states to be used to configure monitoring routines. So far only one
monitoring state is available, the ``disk.status`` state. As more capabilities
are added to Salt UI the monitoring capabilities of Salt will continue to be
expanded.
========================
Salt 0.9.7 Release Notes
========================
:release: 2012-02-15
Salt 0.9.7 is here! The latest iteration of Salt brings more features and many
fixes. This release is a great refinement over 0.9.6, adding many conveniences
under the hood, as well as some features that make working with Salt much
better.
A few highlights include the new Job system, refinements to the requisite
system in states, the ``mod_init`` interface for states, external node
classification, search path to managed files in the file state, and refinements
and additions to dynamic module loading.
0.9.7 also introduces the long developed (and oft changed) unit test framework
and the initial unit tests.
Major Features
==============
Salt Jobs Interface
-------------------
The new jobs interface makes the management of running executions much cleaner
and more transparent. Building on the existing execution framework the jobs
system allows clear introspection into the active state of running executions.
The Jobs interface is centered in the new minion side proc system. The
minions now store msgpack serialized files under ``/var/cache/salt/proc``.
These files keep track of the active state of processes on the minion.
Functions in the saltutil Module
````````````````````````````````
A number of functions have been added to the saltutil module to manage and
view the jobs:
``running`` - Returns the data of all running jobs that are found in the proc
directory.
``find_job`` - Returns specific data about a certain job based on job id.
``signal_job`` - Allows for a given jid to be sent a signal.
``term_job`` - Sends a termination signal (``SIGTERM, 15``) to the process
controlling the specified job.
``kill_job`` - Sends a kill signal (``SIGKILL, 9``) to the process controlling the
specified job.
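A quick sketch of inspecting jobs from the master (the jid shown is
illustrative):

.. code-block:: bash

    salt '*' saltutil.running
    salt '*' saltutil.find_job 20120215150515702265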
The jobs Runner
---------------
A convenience runner front end and reporting system has been added as well.
The jobs runner contains a number of functions to make viewing data easier
and cleaner.
active
``````
The active function runs ``saltutil.running`` on all minions and formats the
return data about all running jobs in a much more usable and compact format.
The active function will also compare jobs that have returned and jobs that
are still running, making it easier to see what systems have completed a job
and what systems are still being waited on.
lookup_jid
``````````
When jobs are executed the return data is sent back to the master and cached.
By default it is cached for 24 hours, but this can be configured via the
``keep_jobs`` option in the master configuration.
Using the ``lookup_jid`` runner will display the same return data that the
initial job invocation with the salt command would display.
list_jobs
`````````
Before finding a historic job, it may be required to find the job id.
``list_jobs`` will parse the cached execution data and display all of the job
data for jobs that have already returned, in full or in part.
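A sketch of using the runner functions (the jid shown is illustrative):

.. code-block:: bash

    salt-run jobs.active
    salt-run jobs.list_jobs
    salt-run jobs.lookup_jid 20120215150515702265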
External Node Classification
----------------------------
Salt can now use external node classifiers like Cobbler's
``cobbler-ext-nodes``.
Salt uses specific data from the external node classifier. In particular the
classes value denotes which sls modules to run, and the environment value sets
the environment to use.
An external node classification can be set in the master configuration file via
the ``external_nodes`` option:
https://salt.readthedocs.io/en/latest/ref/configuration/master.html#external-nodes
External nodes are loaded in addition to the top files. If it is intended to
only use external nodes, do not deploy any top files.
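Enabling an external node classifier is a one-line master config option; the
command shown is illustrative:

.. code-block:: yaml

    external_nodes: cobbler-ext-nodes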
State Mod Init System
---------------------
An issue arose with the pkg state. Every time a package state was run Salt would
need to refresh the package database. This made systems with slower package
metadata refresh speeds much slower to work with. To alleviate this issue the
``mod_init`` interface has been added to salt states.
The ``mod_init`` interface is a function that can be added to a state file.
This function is called with the first state called. In the case of the pkg
state, the ``mod_init`` function sets up a tag which makes the package database
only refresh on the first attempt to install a package.
In a nutshell, the ``mod_init`` interface allows a state to run any command that
only needs to be run once, or can be used to set up an environment for working
with the state.
Source File Search Path
-----------------------
The file state continues to be refined, adding speed and capabilities. This
release adds the ability to pass a list to the source option. This list is then
iterated over until the source file is found, and the first found file is used.
The new syntax looks like this:
.. code-block:: yaml
/etc/httpd/conf/httpd.conf:
file:
- managed
- source:
- salt://httpd/httpd.conf
- http://myserver/httpd.conf: md5=8c1fe119e6f1fd96bc06614473509bf1
The source option can take sources in the list from the salt file server
as well as an arbitrary web source. If using an arbitrary web source the
checksum needs to be passed as well for file verification.
Refinements to the Requisite System
-----------------------------------
A few discrepancies were still lingering in the requisite system, in
particular, it was not possible to have a ``require`` and a ``watch`` requisite
declared in the same state declaration.
This issue has been alleviated, as well as making the requisite system run
more quickly.
Initial Unit Testing Framework
------------------------------
Because of the module system, and the need to test real scenarios, the
development of a viable unit testing system has been difficult, but unit
testing has finally arrived. Only a small amount of unit testing coverage
has been developed, much more coverage will be in place soon.
A huge thanks goes out to those who have helped with unit testing, and the
contributions that have been made to get us where we are. Without these
contributions unit tests would still be in the dark.
Compound Targets Expanded
-------------------------
Originally only support for ``and`` and ``or`` were available in the compound
target. 0.9.7 adds the capability to negate compound targets with ``not``.
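For example:

.. code-block:: bash

    salt -C 'web* and not G@os:Debian' test.ping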
Nodegroups in the Top File
--------------------------
Previously the nodegroups defined in the master configuration file could not
be used to match nodes for states. The nodegroups support has been expanded
and the nodegroups defined in the master configuration can now be used to
match minions in the top file.
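A sketch of matching a nodegroup in the top file (the group and sls names are
illustrative):

.. code-block:: yaml

    base:
      group1:
        - match: nodegroup
        - webserver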
.. _release-3005:
=============================================
Salt 3005 release notes - Codename Phosphorus
=============================================
Python 3.5 and 3.6 deprecation
------------------------------
This will be the last release in which we support Python versions 3.5 and 3.6.
In Salt release 3006, we will only support Python versions 3.7 and higher.
Going forward, our policy will be to align with Python's supported versions.
OS support end of life
----------------------
Debian and Raspbian 9 are now EOL, therefore we will no longer be building
packages for these platforms.
Raspberry Pi
------------
We will no longer build the Raspberry Pi packages after the 3005 release but will
provide open source project links in an updated announcement later. Please see the
announcement for more details:
https://saltproject.io/salt-project-announces-the-open-sourcing-of-several-saltstack-native-minions/
New packages available
----------------------
With the release of Salt 3005, we are pleased to announce that the new onedir
packages using pyinstaller are now out of beta and ready for production. These
new packages make the installation process easier. Onedir packages install Salt
with one directory that includes all the executables Salt needs to run
effectively, including the version of Python and the required dependencies that
Salt needs. These packages make it easier to use Salt out of the box without
installing Python first.
Going forward, any new OS platforms supported by the Salt Project from version
3005 can only be installed using onedir packages. For this release, this
includes Redhat 9, Ubuntu 22.04, and Photon OS 3. The Salt Project will phase
out the old ("classic") Salt package builds for currently supported operating
systems by 3006. See
`Upgrade to onedir <https://docs.saltproject.io/salt/install-guide/en/latest/topics/upgrade-to-onedir.html>`_
for more information.
On the day of the Phosphorus release, the onedir packages will be available on
https://repo.saltproject.io for each platform. The instructions for installing
onedir packages and the classic packages will be available on the new
`Salt Install Guide <https://docs.saltproject.io/salt/install-guide/en/latest/>`_.
If you want to test out the packages today, you can install them from
https://repo.saltproject.io/salt-dev/py3/ using the correct directory
for your platform. If you find any issues with the packages, please open an
issue on this repo: https://gitlab.com/saltstack/open/salt-pkg
Classic, non-onedir packaging support
-------------------------------------
The classic, non-onedir packaging system previously used for Salt will also be
provided for platforms supported in previous Salt versions. The classic
packaging will only be available for the 3005 release. The 3006 release and
all releases going forward will only provide the onedir packages.
Platform package support
------------------------
+--------------+---------------------+------------------------------+
| OS | New onedir packages | Classic, non-onedir packages |
+==============+=====================+==============================+
| RHEL 7 | yes | yes |
+--------------+---------------------+------------------------------+
| RHEL 8 | yes | yes |
+--------------+---------------------+------------------------------+
| RHEL 9 | yes | no |
+--------------+---------------------+------------------------------+
| Ubuntu 18.04 | yes | yes |
+--------------+---------------------+------------------------------+
| Ubuntu 20.04 | yes | yes |
+--------------+---------------------+------------------------------+
| Ubuntu 22.04 | yes | no |
+--------------+---------------------+------------------------------+
| Debian 10 | yes | yes |
+--------------+---------------------+------------------------------+
| Debian 11 | yes | yes |
+--------------+---------------------+------------------------------+
| Raspbian 10 | no | yes |
+--------------+---------------------+------------------------------+
| Raspbian 11 | no | yes |
+--------------+---------------------+------------------------------+
| Fedora 35 | yes | yes |
+--------------+---------------------+------------------------------+
| Fedora 36 | yes | yes |
+--------------+---------------------+------------------------------+
| MacOS | yes | yes |
+--------------+---------------------+------------------------------+
| Windows | yes | yes |
+--------------+---------------------+------------------------------+
Repo paths
----------
+----------+-----------------------------------------------+-----------------------------------------+
| OS | Onedir path | Classic, Non-onedir path |
+==========+===============================================+=========================================+
| RHEL | https://repo.saltproject.io/salt/py3/redhat/ | https://repo.saltproject.io/py3/redhat/ |
+----------+-----------------------------------------------+-----------------------------------------+
| Ubuntu | https://repo.saltproject.io/salt/py3/ubuntu/ | https://repo.saltproject.io/py3/ubuntu/ |
+----------+-----------------------------------------------+-----------------------------------------+
| Debian | https://repo.saltproject.io/salt/py3/debian/ | https://repo.saltproject.io/py3/debian/ |
+----------+-----------------------------------------------+-----------------------------------------+
| Raspbian | Not available | https://repo.saltproject.io/py3/debian/ |
+----------+-----------------------------------------------+-----------------------------------------+
| Fedora | Hosted on Fedora Repos | Hosted on Fedora Repos |
+----------+-----------------------------------------------+-----------------------------------------+
| MacOS | https://repo.saltproject.io/salt/py3/osx/ | https://repo.saltproject.io/osx/ |
+----------+-----------------------------------------------+-----------------------------------------+
| Windows | https://repo.saltproject.io/salt/py3/windows/ | https://repo.saltproject.io/windows/ |
+----------+-----------------------------------------------+-----------------------------------------+
Note that the onedir paths above will not be available until the day of the
Phosphorus release.
How do I migrate to the onedir packages?
----------------------------------------
The migration path from the classic, non-onedir packages to the onedir packages
will include:
* Repo File: You need to update your repo file to point to the new repo paths
for your platform. After the repo file is updated, upgrade your Salt packages.
* Pip packages: You need to ensure any 3rd party pip packages are installed in
the correct onedir path. This can be accomplished in two ways:
* ``salt-pip install <package name>``
* Using the ``pip.installed`` Salt state.
To install Python packages into the system Python environment, users must now
provide the ``pip_bin`` or ``bin_env`` to the pip state module.
For example:
.. code-block:: yaml
lib-foo:
pip.installed:
- pip_bin: /usr/bin/pip3
lib-bar:
pip.installed:
- bin_env: /usr/bin/python3
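Installing a third-party package directly into the onedir environment can be
done with ``salt-pip`` (``pycowsay`` is just a placeholder package name):

.. code-block:: bash

    salt-pip install pycowsay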
Known issues
------------
- To make use of Salt 3005 or later on a Salt master connected to SaltStack
Config, you must use SaltStack Config version 8.9.0 or later.
The root cause of the issue is a breaking change to
``AsyncClient._proc_function()`` in Salt, which is the function that the
raas-master uses to run ``salt-run`` commands. As this is a private API, there
is no expectation that it should remain backward-compatible.
It is recommended to upgrade SaltStack Config before upgrading your Salt
masters. However, if a Salt master is upgraded to version 3005 before
upgrading SaltStack Config, the upgrade can still be completed.
After upgrading SaltStack Config, including the SSC plugin on each Salt master,
restart the Salt masters.
- Salt does not currently support napalm 4. Users will need to install napalm 3.x to
ensure they do not run into issue #62468
Removed
-------
- Deprecated and removed salt-unity. (#56055)
- Removed support for macOS Mojave (#61130)
- Removed `salt.utils.MultiprocessingProcess` and `salt.utils.SignalHandlingMultiprocessingProcess`. Please use `salt.utils.Process` and `salt.utils.SignalHandlingProcess` instead. (#61573)
- Remove the grains.get_or_set_hash function. Please reference pillar and SDB documentation for secure ways to manage sensitive information. Grains are an insecure way to store secrets. (#61691)
- Removed the `telnet_port`, `serial_type` and `console` parameters in salt/modules/virt.py. Use the `serials` and `consoles` parameters instead. Use the `serials` parameter with a value like ``{{{{'type': 'tcp', 'protocol': 'telnet', 'port': {}}}}}`` instead and a similar `consoles` parameter. (#61693)
- Remove remove_lock in zypperpkg.py in favor of unhold.
Remove add_lock in zypperpkg.py in favor of hold. (#61694)
- Removed support for old-style Windows Group Policy names
Recommended policy names will be displayed in comments (#61696)
- Remove the feature flag feature.enable_slsvars_fixes and enable the fixes for `sls_path`, `tpl_file`, and `tpldir` by default.
  Enabling this behavior by default fixes the following:
  - tpldir: If your directory name and your SLS file name were the same, tpldir used to return a ``.``; now it returns the correct directory name.
  - slspath,slsdotpath,slscolonpath,sls_path: If an init.sls file was accessed by its explicit name path.to.init instead of path.to, init showed up as a directory in various SLS context parameters; now it only shows as a file.
  - tplfile: When using tplfile in an SLS file in the root directory of the file roots, it used to return an empty value. Now it returns the filename. (#61697)
- Remove SaltMessageServer.shutdown in favor of close.
Remove LoadBalancerWorker.stop in favor of close. (#61698)
- Removed the PyObjC dependency.
  This addresses problems with building a onedir build for macOS.
  It became problematic because, depending on the macOS version, it pulls in different dependencies, and we would either have to build a macOS onedir for each supported macOS release, or ship a crippled onedir (because it would be tied to the macOS version where the onedir was built).
  Since it's currently not being used, it's removed. (#62432)
Deprecated
----------
- In etcd_util, the recursive kwarg in the read and delete methods has been deprecated in favor of recurse for both client versions.
In etcd_util, the index kwarg in the watch method has been deprecated in favor of start_revision for both client versions.
In etcd_util, the waitIndex kwarg in the read method has been deprecated in favor of start_revision for both client versions.
The etcd API v2 implementation has been deprecated in favor of etcd API v3. (#60325)
- Deprecated transport kwarg inside salt.utils.event.get_event (#61275)
- Deprecated netmiko_conn and pyeapi_conn in napalm_mod.py as these functions should not be called from the CLI (#61566)
- Deprecate all Azure cloud modules (#62183)
- Deprecated ``defaults`` and ``preserve_context`` for ``salt.utils.functools.namespaced_function``.
Additionally, the behavior when ``preserve_namespace=True`` was passed is now the default in order not to require duplicating imports on the modules that are namespacing functions. (#62272)
- Added a pyinstaller hook that traverses the python used on the tiamat package to add all possible modules as hidden imports. (#62362)
- Fix use of random shuffle and sample functions as Jinja filters (#62372)
- All of the requirements provided in the requirements files are now included. The job of evaluating platform markers is not Salt's; it's pip's. (#62392)
- Update all platforms to use pycparser 2.21 or greater for Py 3.9 or higher, fixes fips fault with openssl v3.x (#62400)
- Due to changes in the Netmiko library for the exception paths, we need to check the version of the Netmiko python library and then import the exceptions from different locations depending on the result. (#62405)
- Deprecated the cassandra module in favor of the cassandra_cql module/returner. (#62327)
Changed
-------
- alternatives: Do not access /var/lib/dpkg/alternatives directly (#58745)
- Enhance logging when there are errors at loading beacons (#60402)
- Updated mysql cache module to also store updated timestamp, making it consistent with default cache module. Users of mysql cache should ensure database size before updating, as ALTER TABLE will add the timestamp column. (#61081)
- Changed linux_shadow to test success of commands using cmd.retcode instead of cmd.run (#61932)
- `zabbix.user_get` returns full user info with groups and medias
`zabbix.user_addmedia` returns error for Zabbix 4.0+ due to `user.addmedia` method removal
`zabbix.user_deletemedia` returns error for Zabbix 4.0+ due to `user.deletemedia` method removal (#62012)
- "Sign before ending the testrun in x509.create_certificate" (#62100)
Fixed
-----
- Fix salt-ssh using sudo with a password (#8882)
- Fix SSH password regex to not search for content after password:. (#25721)
- Addressing a few issues when having keep_symlinks set to True with file.recurse. Also allow symlinks that are outside the salt fileserver root to be discoverable as symlinks when fileserver_followsymlinks is set to False. (#29562)
- Serialize to JSON only non-string objects. (#35215)
- Fix archive.extracted doesn't set user/group ownership correctly (#38605)
- Make sys.argspec work on functions with annotations (#48735)
- Fixed pdbedit.list_users with Samba 4.8 (#49648)
- Fixes a scenario where ipv6 is enabled but the master is configured as an ipv4 IP address. (#49835)
- Ensure that NOTIFY_SOCKET is not passed to child processes created with cmdmod unless it's set explicitly for such call. (#50851)
- Remove escaping of dbname in mysql.alter_db function. (#51559)
- Fix runit module failing to find service if it is not symlinked. (#52759)
- Changed manage.versions to report minions offline if minion call fails. (#53513)
- Fixed events stream from /events endpoint not halting when auth token has expired. (#53742)
- Fixed user.present which was breaking when updating workphone, homephone, fullname and "other" fields in case an int was passed instead of a string (#53961)
- Fix error in webutil state module when attempting to grep a file that does not exist. (#53977)
- Fixed ability to modify the "Audit: Force audit policy subcategory settings..." policy (#54301)
- Fix timeout handling in netapi/saltnado. (#55394)
- Fixing REST auth so that we actually support using ACLs from the REST server like we said in the documentation. (#55654)
- Salt now correctly handles macOS after Py3.8 where python defaults to spawn instead of fork. (#55847)
- Factor out sum and sorting of permissions into separate functions.
Additionally, the same logic was applied to the rest_cherrypy netapi (#56495)
- Display packages that are marked NoRemove in pkg.list_pkgs for Windows platforms (#56864)
- Attempt to fix 56957 by detecting the broken recursion and stopping it. (#56957)
- Fixed bytes vs. text issue when using sqlite for sdb backend. (#57133)
- Ensure test is added to opts when using the state module with salt-ssh. (#57144)
- Fixed RuntimeError OrderedDict mutated in network.managed for Debian systems. (#57721)
- Improved the multiprocessing classes to better handle spawning platforms (#57742)
- Config options are enforced according to config type (#57873)
- Fixed multi-item KV v2 reads. (#57992)
- Fixed thread leak during FQDN lookup when DNS entries had malformed PTR records, or other similar issues. (#58141)
- Remove unnecessary dot in template that cause the bridge interface to fail on debian. Fixes #58195 (#58195)
- Update salt.module.schedule to check the job_args and job_kwargs for valid formatting. (#58329)
- Allow use of `roster` in salt.function state when using the SSH client. (#58662)
- Detect new and legacy styles of calling module.run and support them both. (#58763)
- Clean repo uri before checking if it's present, avoiding ghost change. (#58807)
- Fix error "'__opts__' is not defined" when using the boto v2 modules (#58934)
- hgfs: fix bytes vs str issues within hgfs. (#58963)
- Fixes salt-ssh error when targeting IPs or hostnames directly. (#59033)
- Allow for multiple configuration entries with keyword strict_config=False on yum-based systems (#59090)
- Fixed error when running legacy code in winrepo.update_git_repos (#59101)
- Clarify the persist argument in the scheduler module. Adding code in the list function to indicate if the schedule job is saved or not. (#59102)
- Swap ret["retcode"] for ret.get("retcode") in the event that there is no retcode, e.g. when a function is not passed with a module. (#59331)
- Fix race condition when caching vault tokens (#59361)
- The ssh module now accepts all ssh public key types as of openssh server version 8.7. (#59429)
- Set default transport and port settings for Napalm NXOS, if not set. (#59448)
- Use __salt_system_encoding__ when retrieving keystore certificate SHA1 str (#59503)
- Fix error being thrown on empty flags list given to file.replace (#59554)
- Update url for ez_setup.py script in virtualenv_mod.py (#59604)
- Changed yumpkg module to normalize versions to strings when they were ambiguously floats (example version=3005.0). (#59705)
- Fix pillar_roots.write on subdirectories broken after CVE-2021-25282 patch. (#59935)
- Improved performance of zfs.filesystem_present and zfs.volume_present. When
applying these states, only query specified ZFS properties rather than all
properties. (#59970)
- Fixed highstate outputter not displaying with salt.function in orchestration when module returns a dictionary. (#60029)
- Update docs where python-dateutil is required for schedule. (#60070)
- Send un-parsed username to LookupAccountName function (#60076)
- Fix ability to set propagation on a folder to "this_folder_only" (#60103)
- Fix name attribute access error in spm. (#60106)
- Fix zeromq stream.send exception message (#60228)
- Exit gracefully on ctrl+c. (#60242)
- Corrected import statement for redis_cache in cluster mode. (#60272)
- loader: Fix loading grains with annotations (#60285)
- Fix docker_network.present when com.docker.network.bridge.name is being used, as Unix systems cannot have a bridge of the same name (#60316)
- Fix exception in yumpkg.remove for not installed package on calling pkg.remove or pkg.removed (#60356)
- Batch runs now return proper retcodes in a tuple of the form (result, retcode) (#60361)
- Fixed issue with ansible roster __virtual__ when ansible is not installed. (#60370)
- Fixed error being thrown when None was passed as src/defaults or dest to defaults.update and defaults.merge (#60431)
- Allow for additional options for xmit hash policy in mode 4 NIC bonding on Redhat (#60583)
- Properly detect VMware grains on Windows Server 2019+ (#60593)
- Allow for minion failure to respond to job sent in batch mode (#60724)
- The mac assistive execution module no longer shells out to change the database. (#60819)
- Fix regression in win_timezone.get_zone which failed to resolve specific timezones that begin or end with d/s/t/o/f/_ characters (#60829)
- The TCP transport resets its unpacker on stream disconnects (#60831)
- Moving the call to the validate function earlier to ensure that beacons are in the right format before we attempt to do anything to the configuration. Adding a generic validation to ensure the beacon configuration is in the correct format when a validation function does not exist. (#60838)
- Update the mac installer welcome and conclusion page, add docs for the salt-config tool (#60858)
- Fixed external node classifier not callable due to wrong parameter (#60872)
- Adjust Debian/Ubuntu package use of name 'ifenslave-2.6' to 'ifenslave' (#60876)
- Clear and update the Pillar Cache when running saltutil.refresh_pillar. This only affects users
that have `pillar_cache` set to True. If you do not want to clear the cache you can pass the kwarg
`clean_cache=False` to `saltutil.refresh_pillar`. (#60897)
- Handle the situation when apt repo lines have or do not have trailing slashes properly. (#60907)
- Fixed Python 2 syntax for Python 3, allow for view objects returned by dictionary keys() function (#60909)
- Fix REST CherryPY append the default permissions every request (#60955)
- Do not consider "skipped" targets as failed for "ansible.playbooks" states (#60983)
- Fix behavior for internal "_netlink_tool_remote_on" to filter results based on requested end (#61017)
- schedule.job_status module: Convert datetime objects into formatted strings (#61043)
- virt: don't crash if console doesn't have service or type attribute (#61054)
- Fixed conflict between importlib_metada from Salt and importlib.metadata from Python 3.10 (#61062)
- sys.argspec now works with pillar.get, vault.read_secret, and vault.list_secrets (#61084)
- Set virtual grain on FreeBSD EC2 instances (#61094)
- Fixed v3004 windows minion failing to open log file at C:\ProgramData\Salt Project\Salt\var\log\salt\minion (#61113)
- Correct returned result to False when an error exception occurs for pip.installed (#61117)
- Fixed extend being too strict and wanting the system_type to exist when it is only needed for requisites. (#61121)
- Fixed bug where deserialization in script engine would throw an error after all output was read. (#61124)
- Adding missing import for salt.utils.beacons into beacons that were updated to use it. (#61135)
- Added exception catch to salt.utils.vt.terminal.isalive(). (#61160)
- Refactor transports to make them more pluggable (#61161)
- Remove max zeromq pinned version due to issues on FreeBSD (#61163)
- Fixing deltaproxy code to handle the situation where the control proxy is configured to control a proxy minion whose pillar data could not be loaded. (#61172)
- Prevent get_tops from performing a Set operation on a List (#61176)
- Make "state.highstate" to acts on concurrent flag.
Simplify "transactional_update" module to not use SSH wrapper and allow more flexible execution (#61188)
- Fix a failure with salt.utils.vault.make_request when namespace is not defined in the connection. (#61191)
- Fix race condition in `salt.utils.verify.verify_env` and ignore directories starting with dot (#61192)
- LGPO: Search for policies in a case-sensitive manner first, then fall back to non case-sensitive names (#61198)
- Fixed state includes in dynamic environments (#61200)
- Minimize the number of network connections from minions to the master (#61247)
- Fix salt-call event.event with pillar or grains (#61252)
- Fixed failing dcs.compile_config where a successful compile errored with `AttributeError: 'list' object has no attribute 'get'`. (#61261)
- Make the salt.utils.win_dacl.get_name() function include the "NT Security" prefix for Virtual Accounts. Virtual Accounts can only be added with the fully qualified name. (#61271)
- Fixed tracebacks and print helpful error message when proxy_return = True but no platform or primary_ip set in NetBox pillar. (#61277)
- Ensure opts is included in pack for minion_mods and config loads opts from the named_context. (#61297)
- Added prefix length info for IPv6 addresses in Windows (#61316)
- Handle MariaDB 10.5+ SLAVE MONITOR grant (#61331)
- Fix secondary ip addresses being added to ip4_interfaces and ip6_interfaces at the same time (#61370)
- Do not block the deltaproxy startup. Wrap the call to the individual proxy initialization functions in a try...except, catching the exception, logging an error and moving onto the next proxy minion. (#61377)
- show_instance of hetzner cloud provider should enforce an action like the other ones (#61392)
- Fix Hetzner Cloud config loading mechanism (#61399)
- Sets correctly the lvm grain even when lvm's command execution outputs a WARNING (#61412)
- Use net instead of sc in salt cloud when restarting the salt service (#61413)
- Fix use_etag support in fileclient by removing case sensitivity of expected header (#61440)
- Expand environment variables in the root_dir registry key (#61445)
- Use salt.utils.path.readlink everywhere instead of os.readlink (#61458)
- Fix state_aggregate minion option not respected (#61478)
- Fixed wua.installed and wua.uptodate to return all changes, failures, and supersedences (#61479)
- When running with test=True and there are no changes, don't show that there are changes. (#61483)
- Fix issue with certutil when there's a space in the path to the certificate (#61494)
- Fix cmdmod not respecting config for saltenv (#61507)
- Convert Py 2'isms to Python 3, and add tests for set_filesystems on AIX (#61509)
- Fix tracebacks caused by missing block device type and wrong mode used for gzip.open while calling inspector.export (#61530)
- win_wua: Titles no longer limited to 40 characters (#61533)
- Fixed error when using network module on RHEL 8 due to the name of the service changing from "network" to "NetworkManager". (#61538)
- Allow symlink to be created even if source is missing on Windows (#61544)
- Print jinja error context on `UndefinedError`. Previously `jinja2.exceptions.UndefinedError` resulted in a `SaltRenderError` without source file context, unlike all of the other Jinja exceptions handled in `salt/utils/templates.py`. (#61553)
- Fix uptime on AIX systems when less than 24 hours (#61557)
- Fix issue with state.show_state_usage when a saltenv is not referenced in any topfile (#61614)
- Making the retry state system feature available when parallel is set to True. (#61630)
- modules/aptpkg.SourceEntry: fix parsing lines with arbitrary comments in case HAS_APT=False (#61632)
- Fix file.comment incorrectly reporting changes in test mode (#61662)
- Fix improper master caching of file listing in multiple dynamic environments (#61738)
- When configured beacons are empty write an empty beacon configuration file. (#61741)
- Fix file.replace updating mtime with no changes (#61743)
- Fixed etcd_return being out of sync with the underlying etcd_util. (#61756)
- Fixing items, values, and keys functions in the data module. (#61812)
- Ensure that `salt://` URIs never contain backslashes, converting them to forward slashes instead. A specific situation to handle is caching files on Windows minions, where Jinja relative imports introduce a backslash into the path. (#61829)
- Do not raise a UnicodeDecodeError when pillar cache cannot decode binary data. (#61836)
- Don't rely on ``importlib.metadata``, even on Py3.10, use ``importlib_metadata`` instead. (#61839)
- Fix the reporting of errors for file.directory in test mode (#61846)
- Update Markup and contextfunction imports for jinja versions >=3.1. (#61848)
- Update states.chef for version 16.x and 17.x Chef Infra Client output. (#61891)
- Fixed some whitespace and ``pathlib.Path`` issues when not using the system ``aptsources`` package. (#61936)
- Fixed error when using a backslash literal in file.replace (#61944)
- Fix an issue where under spawning platforms, one could exhaust the available multiprocessing semaphores. (#61945)
- Fix salt-cloud sync_after_install functionality (#61946)
- Ensure that `common_prefix` matching only occurs if a directory name is identified (in the `archive.list` execution module function, which affects the `archive.extracted` state). (#61968)
- When states are running in parallel, ensure that the total run time produced by the highstate outputter takes that into account. (#61999)
- Temporary logging is now shutdown when logging has been configured. (#62005)
- modules/lxd.FilesManager: fix memory leak through pylxd.modules.container.Container.FilesManager (#62006)
- utils/jinja.SaltCacheLoader: fix leaking SaltCacheLoader through atexit.register (#62007)
- Fixed errors on calling `zabbix_user.admin_password_present` state, due to changed error message in Zabbix 6.0
Fixed `zabbix.host_update` not mapping group ids list to list of dicts in format `[{"groupid": groupid}, ...]`
Fixed `zabbix.user_update` not mapping usergroup id list to list of dicts in format `[{"usrgrpid": usrgrpid}, ...]` (#62012)
- utils/yamlloader and yamlloader_old: fix leaking DuplicateKeyWarning through a warnings module (#62021)
- Fix cache checking for Jinja templates (#62042)
- Fixed salt.states.file.managed() for follow_symlinks=True and test=True (#62066)
- Stop triggering the `GLIBC race condition <https://sourceware.org/bugzilla/show_bug.cgi?id=19329>`_ when parallelizing the resolution of the FQDNs. (#62071)
- Fix useradd functions hard-coded relative command name (#62087)
- Fix #62092: Catch zmq.error.ZMQError to set HWM for zmq >= 3.
Run ``git show 0be0941`` for more info. (#62092)
- Allow emitatstartup to work when delay option is setup. (#62095)
- Fix broken relative jinja includes in local mode bug introduced in #62043 (#62117)
- Fix broken file.comment functionality introduced in #62045 (#62121)
- Fixed an incompatibility preventing salt-cloud from deploying VMs on Proxmox VE 7 (#62154)
- Fix sysctl functions hard-coded relative command name (#62164)
- All of Salt's loaders now accept ``loaded_base_name`` as a keyword argument, allowing different namespacing of the loaded modules. (#62186)
- Only functions defined on the modules being loaded will be added to the lazy loader, functions imported from other modules, unless they are properly namespaced, are not included. (#62190)
- Fixes issue in postgresql privileges detection: privileges on views were never retrieved and always recreated. (#57690)
- Fix service.enabled error for unavailable service in test mode (#62258)
- Fix variable reuse causing requisite_in problems (#62264)
- Adding -G option to pkgdd cmd_prefix list when current_zone_only is True. (#62206)
- Don't expect ``lsof`` to be installed when trying to check which minions are connected. (#62303)
- Fixed urlparse typo in rpmbuild_pkgbuild.py (#62442)
- Fixing changes dict in pkg state to be consistent when installing and test=True. (#60995)
- Use fire_event_async when expecting a coroutine (#62453)
- Fixes import error under windows. (#62459)
- Account for the revision number in formulas to handle the difference between bottle and formula (#62466)
- Fixed stacktrace on Windows when running pkg.list_pkgs (#62479)
- Update sanitizing masking for Salt SSH to include additional password like strings. (#62483)
- Fixes an issue where the minion could not connect to a master after 2 failed attempts (#62489)
Added
-----
- Added ability to request VPC peering connections in different AWS regions (boto_vpc). (#50394)
- Added event return capability to Splunk returner (#50815)
- Added allow downgrades support to apt upgrade (#52977)
- Added a new metadata grain to handle Google's metadata differences (#53223)
- Added win_shortcut execution and state module that does not prepend the current working directory to paths. Use shortcut.create and shortcut.present instead of file.shortcut. (#53706)
- Add __env__ substitution inside file and pillar root paths (#55747)
- Added support cpu hot add/remove, memory hot add, and nested virtualization to VMware salt-cloud driver. (#56144)
- Add a consul state module with acl_present and acl_absent functions. (#58101)
- Added restconf module/states/proxy code for network device automation (#59006)
- Adds the ability to get version information from a file on Windows systems (#59702)
- Add aptkey=False kwarg option to the aptpkg.py module and pkgrepo state. Apt-key is on the path to be deprecated. This will allow users to not use apt-key to manage the repo keys. It will set aptkey=False automatically if it does not detect apt-key exists on the machine. (#59785)
- Added "Instant Clone" feature in the existing VMware Cloud module (#60004)
- Added support for etcd API v3 (#60325)
- Added `pkg.held` and `pkg.unheld` state functions for Zypper, YUM/DNF and APT. Improved `zypperpkg.hold` and `zypperpkg.unhold` functions. (#60432)
- Added suse_ip module allowing to manage network interfaces on SUSE based Linux systems (#60702)
- Support querying for JSON data in SQL external pillar (#60905)
- Added support for yum and dnf on AIX (#60912)
- Added percent success/failure of state runs in highstate summary output via new state_output_pct option (#60990)
- Add support for retrieving the IP address from the QEMU agent via salt-cloud on Proxmox (#61146)
- Added new shortcut execution and state module to better handle UNC shortcuts and to test more thoroughly (#61170)
- Added yamllint utils module and yaml execution modules (#61182)
- Add "--no-return-event" option to salt-call to prevent sending return event back to master. (#61188)
- Add Etag support for file.managed web sources (#61270)
- Adding the ability to add, delete, purge, and modify Salt scheduler jobs when the Salt minion is not running. (#61324)
- Added a force option to file.symlink to overwrite an existing symlink with the same name (#61326)
- `gpg_decrypt_must_succeed` config to prevent gpg renderer from failing silently (#61418)
- Do not load a private copy of `__grains__` and `__salt__` for the sentry log handler if it is disabled. (#61484)
- Add Jinja filters for itertools functions, flatten, and a state template workflow (#61502)
- Add feature to allow roll-up of duplicate IDs with different names in highstate output (#61549)
- Allow cp functions to derive saltenv from config if not explicitly set (#61562)
- Multiprocessing logging no longer uses multiprocessing queues which penalized performance.
Instead, each new process configures the terminal and file logging, and also any external logging handlers configured. (#61629)
- Add a function to the freezer module for comparison of packages and repos in two frozen states (#61682)
- Add grains_refresh_pre_exec option to allow grains to be refreshed before any operation (#61708)
- Add possibility to pass extra parameters to salt-ssh pre flight script with `ssh_pre_flight_args` (#61715)
- Add Etag support for archive.extracted web sources (#61763)
- Add regex exclusions, full path matching, symlink following, and mtime/ctime comparison to file.tidied (#61823)
- Add better handling for unit abbreviations and large values to salt.utils.stringutils.human_to_bytes (#61831)
- Provide PyInstaller hooks that provide some runtime adjustments when Salt is running from a onedir (PyInstaller) bundled package. (#61864)
- Add configurable onedir pip pypath location (#61937)
- Add CNAME record support to the dig exec module (#61991)
- Added support for changed user object in Zabbix 5.4+
Added compatibility with Zabbix 4.0+ for `zabbix.user_getmedia` method
Added support for setting medias in `zabbix.user_update` for Zabbix 3.4+ (#62012)
- Add ignore_missing parameter to file.comment state (#62044)
- General improvements to the "ansiblegate" module:
* Add "ansible.targets" method to gather Ansible inventory
* Add "ansible.discover_playbooks" method to help collecting playbooks
* Fix crash when running Ansible playbooks if ansible-playbook CLI output is not the expected JSON.
* Fix issues when processing inventory and there are groups with no members.
* Allow new types of targets for Ansible roster (#60056)
- Add sample and shuffle functions from random (#62225)
- Add "<tiamat> python" subcommand to allow execution or arbitrary scripts via bundled Python runtime (#62381)
.. _pillar:
=================================
Storing Static Data in the Pillar
=================================
Pillar is an interface for Salt designed to offer global values that can be
distributed to minions. Pillar data is managed in a similar way as
the Salt State Tree.
Pillar was added to Salt in version 0.9.8
.. note:: Storing sensitive data
Pillar data is compiled on the master. Additionally, pillar data for a
given minion is only accessible by the minion for which it is targeted in
the pillar configuration. This makes pillar useful for storing sensitive
data specific to a particular minion.
Declaring the Master Pillar
===========================
The Salt Master server maintains a :conf_master:`pillar_roots` setup that
matches the structure of the :conf_master:`file_roots` used in the Salt file
server. Like :conf_master:`file_roots`, the :conf_master:`pillar_roots` option
maps environments to directories. The pillar data is then mapped to minions
based on matchers in a top file which is laid out in the same way as the state
top file. Salt pillars can use the same matcher types as the standard :ref:`top
file <states-top>`.
:conf_master:`pillar_roots` is configured just like :conf_master:`file_roots`.
For example:
.. code-block:: yaml
pillar_roots:
base:
- /srv/pillar
This example configuration declares that the base environment will be located
in the ``/srv/pillar`` directory. It must not be in a subdirectory of the
state tree.
The top file used matches the name of the top file used for States,
and has the same structure:
``/srv/pillar/top.sls``
.. code-block:: yaml
base:
'*':
- packages
In the above top file, it is declared that in the ``base`` environment, the
glob matching all minions will have the pillar data found in the ``packages``
pillar available to it. Assuming the ``pillar_roots`` value of ``/srv/pillar``
taken from above, the ``packages`` pillar would be located at
``/srv/pillar/packages.sls``.
Any number of matchers can be added to the base environment. For example, here
is an expanded version of the Pillar top file stated above:
/srv/pillar/top.sls:
.. code-block:: yaml
base:
'*':
- packages
'web*':
- vim
In this expanded top file, minions that match ``web*`` will have access to the
``/srv/pillar/packages.sls`` file, as well as the ``/srv/pillar/vim.sls`` file.
Another example shows how to use other standard top matching types
to deliver specific salt pillar data to minions with different properties.
Here is an example using the ``grains`` matcher to target pillars to minions
by their ``os`` grain:
.. code-block:: yaml
dev:
'os:Debian':
- match: grain
- servers
Pillar definitions can also take a keyword argument ``ignore_missing``.
When the value of ``ignore_missing`` is ``True``, all errors for missing
pillar files are ignored. The default value for ``ignore_missing`` is
``False``.
Here is an example using the ``ignore_missing`` keyword parameter to ignore
errors for missing pillar files:
.. code-block:: yaml
base:
'*':
- servers
- systems
- ignore_missing: True
Assuming that the pillar ``servers`` exists in the fileserver backend
and the pillar ``systems`` doesn't, all pillar data from the ``servers``
pillar is delivered to minions and no error for the missing pillar
``systems`` is noted under the key ``_errors`` in the pillar data
delivered to minions.
Should the ``ignore_missing`` keyword parameter have the value ``False``,
an error for the missing pillar ``systems`` would produce the value
``Specified SLS 'systems' in environment 'base' is not available on the salt master``
under the key ``_errors`` in the pillar data delivered to minions.
``/srv/pillar/packages.sls``
.. code-block:: jinja
{% if grains['os'] == 'RedHat' %}
apache: httpd
git: git
{% elif grains['os'] == 'Debian' %}
apache: apache2
git: git-core
{% endif %}
company: Foo Industries
.. important::
See :ref:`Is Targeting using Grain Data Secure? <faq-grain-security>` for
important security information.
The above pillar sets two key/value pairs. If a minion is running RedHat, then
the ``apache`` key is set to ``httpd`` and the ``git`` key is set to the value
of ``git``. If the minion is running Debian, those values are changed to
``apache2`` and ``git-core`` respectively. All minions that have this pillar
targeting to them via a top file will have the key of ``company`` with a value
of ``Foo Industries``.
Consequently this data can be used from within modules, renderers, State SLS
files, and more via the shared pillar dictionary:
.. code-block:: jinja
apache:
pkg.installed:
- name: {{ pillar['apache'] }}
.. code-block:: jinja
git:
pkg.installed:
- name: {{ pillar['git'] }}
Finally, the above states can utilize the values provided to them via Pillar.
All pillar values targeted to a minion are available via the 'pillar'
dictionary. As seen in the above example, Jinja substitution can then be
utilized to access the keys and values in the Pillar dictionary.
Note that you cannot just list key/value information in ``top.sls``. Instead,
target a minion to a pillar file and then list the keys and values in the
pillar. Here is an example top file that illustrates this point:
.. code-block:: yaml
base:
'*':
- common_pillar
And the actual pillar file at '/srv/pillar/common_pillar.sls':
.. code-block:: yaml
foo: bar
boo: baz
.. note::
When working with multiple pillar environments, assuming that each pillar
environment has its own top file, the jinja placeholder ``{{ saltenv }}``
can be used in place of the environment name:
.. code-block:: jinja
{{ saltenv }}:
'*':
- common_pillar
Yes, this is ``{{ saltenv }}``, and not ``{{ pillarenv }}``. The reason for
this is because the Pillar top files are parsed using some of the same code
which parses top files when :ref:`running states <running-highstate>`, so
the pillar environment takes the place of ``{{ saltenv }}`` in the jinja
context.
Dynamic Pillar Environments
===========================
If environment ``__env__`` is specified in :conf_master:`pillar_roots`, all
environments that are not explicitly specified in :conf_master:`pillar_roots`
will map to the directories from ``__env__``. This allows one to use dynamic
git branch based environments for state/pillar files with the same file-based
pillar applying to all environments. For example:
.. code-block:: yaml
pillar_roots:
__env__:
- /srv/pillar
ext_pillar:
- git:
- __env__ https://example.com/git-pillar.git
.. versionadded:: 2017.7.5,2018.3.1
Taking it one step further, ``__env__`` can also be used in the ``pillar_roots``
filesystem path. It will be replaced with the actual ``pillarenv`` and searched
for Pillar data to provide to the minion. Note this substitution ONLY occurs for
the ``__env__`` environment. For instance, this configuration:
.. code-block:: yaml
pillar_roots:
__env__:
- /srv/__env__/pillar
is equivalent to this static configuration:
.. code-block:: yaml
pillar_roots:
dev:
- /srv/dev/pillar
test:
- /srv/test/pillar
prod:
- /srv/prod/pillar
.. versionadded:: 3005
Pillar Namespace Flattening
===========================
The separate pillar SLS files all merge down into a single dictionary of
key-value pairs. When the same key is defined in multiple SLS files, this can
result in unexpected behavior if care is not taken to how the pillar SLS files
are laid out.
For example, given a ``top.sls`` containing the following:
.. code-block:: yaml
base:
'*':
- packages
- services
with ``packages.sls`` containing:
.. code-block:: yaml
bind: bind9
and ``services.sls`` containing:
.. code-block:: yaml
bind: named
Then a request for the ``bind`` pillar key will only return ``named``. The
``bind9`` value will be lost, because ``services.sls`` was evaluated later.
.. note::
Pillar files are applied in the order they are listed in the top file.
Therefore conflicting keys will be overwritten in a 'last one wins' manner!
For example, in the above scenario conflicting key values in ``services``
will overwrite those in ``packages`` because it's at the bottom of the list.
It can be better to structure your pillar files with more hierarchy. For
example, the ``packages.sls`` file could be configured like so:
.. code-block:: yaml
packages:
bind: bind9
This would make the ``packages`` pillar key a nested dictionary containing a
``bind`` key.
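With that layout, the nested value can be retrieved using the colon-delimited
syntax of :py:func:`pillar.get <salt.modules.pillar.get>`:

.. code-block:: bash

    salt '*' pillar.get packages:bind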
Pillar Dictionary Merging
=========================
If the same pillar key is defined in multiple pillar SLS files, and the keys in
both files refer to nested dictionaries, then the content from these
dictionaries will be recursively merged.
For example, keeping the ``top.sls`` the same, assume the following
modifications to the pillar SLS files:
``packages.sls``:
.. code-block:: yaml
bind:
package-name: bind9
version: 9.9.5
``services.sls``:
.. code-block:: yaml
bind:
port: 53
listen-on: any
The resulting pillar dictionary will be:
.. code-block:: bash
$ salt-call pillar.get bind
local:
----------
listen-on:
any
package-name:
bind9
port:
53
version:
9.9.5
Since both pillar SLS files contained a ``bind`` key which contained a nested
dictionary, the pillar dictionary's ``bind`` key contains the combined contents
of both SLS files' ``bind`` keys.
.. _pillar-include:
Including Other Pillars
=======================
.. versionadded:: 0.16.0
Pillar SLS files may include other pillar files, similar to State files. Two
syntaxes are available for this purpose. The simple form includes the
additional pillar as if it were part of the same file:
.. code-block:: yaml
include:
- users
The full include form allows two additional options -- passing default values
to the templating engine for the included pillar file as well as an optional
key under which to nest the results of the included pillar:
.. code-block:: yaml
include:
- users:
defaults:
sudo: ['bob', 'paul']
key: users
With this form, the included file (users.sls) will be nested within the 'users'
key of the compiled pillar. Additionally, the 'sudo' value will be available
as a template variable to users.sls.
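As a sketch of how the pieces fit together, a hypothetical ``users.sls`` could
consume the ``sudo`` default as a template variable:

.. code-block:: jinja

    # /srv/pillar/users.sls (hypothetical)
    admins:
    {% for user in sudo %}
      - {{ user }}
    {% endfor %}

With the full include form above, the compiled pillar would contain
``users: {admins: [bob, paul]}``.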
.. _pillar-in-memory:
In-Memory Pillar Data vs. On-Demand Pillar Data
===============================================
Since compiling pillar data is computationally expensive, the minion will
maintain a copy of the pillar data in memory to avoid needing to ask the master
to recompile and send it a copy of the pillar data each time pillar data is
requested. This in-memory pillar data is what is returned by the
:py:func:`pillar.item <salt.modules.pillar.item>`, :py:func:`pillar.get
<salt.modules.pillar.get>`, and :py:func:`pillar.raw <salt.modules.pillar.raw>`
functions.
Also, for those writing custom execution modules, or contributing to Salt's
existing execution modules, the in-memory pillar data is available as the
``__pillar__`` dunder dictionary.
The in-memory pillar data is generated on minion start, and can be refreshed
using the :py:func:`saltutil.refresh_pillar
<salt.modules.saltutil.refresh_pillar>` function:
.. code-block:: bash
salt '*' saltutil.refresh_pillar
This function triggers the minion to asynchronously refresh the in-memory
pillar data and will always return ``None``.
In contrast to in-memory pillar data, certain actions trigger pillar data to be
compiled to ensure that the most up-to-date pillar data is available. These
actions include:
- Running states
- Running :py:func:`pillar.items <salt.modules.pillar.items>`
Performing these actions will *not* refresh the in-memory pillar data. So, if
pillar data is modified, and then states are run, the states will see the
updated pillar data, but :py:func:`pillar.item <salt.modules.pillar.item>`,
:py:func:`pillar.get <salt.modules.pillar.get>`, and :py:func:`pillar.raw
<salt.modules.pillar.raw>` will not see this data unless refreshed using
:py:func:`saltutil.refresh_pillar <salt.modules.saltutil.refresh_pillar>`.
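The distinction can be observed from the CLI (``mykey`` is a placeholder
pillar key):

.. code-block:: bash

    # Compiles fresh pillar data, but leaves the in-memory copy untouched
    salt '*' pillar.items
    # Still returns the old in-memory value until a refresh is triggered
    salt '*' pillar.item mykey
    # Asynchronously refresh the in-memory pillar data
    salt '*' saltutil.refresh_pillar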
If you are using the Pillar Cache and have set :conf_master:`pillar_cache` to `True`,
the pillar cache can be updated either when you run :py:func:`saltutil.refresh_pillar
<salt.modules.saltutil.refresh_pillar>`, or using the pillar runner function
:py:func:`pillar.clear_pillar_cache <salt.runners.pillar.clear_pillar_cache>`:
.. code-block:: bash
salt-run pillar.clear_pillar_cache 'minion'
The pillar cache will not be updated when running :py:func:`pillar.items
<salt.modules.pillar.items>` or a state, for example. If you are
using a Salt version before 3003, you would need to manually delete the cache
file, located in Salt's master cache. For example, on Linux the file would be
in this directory: ``/var/cache/salt/master/pillar_cache/``
.. _pillar-environments:
How Pillar Environments Are Handled
===================================
When multiple pillar environments are used, the default behavior is for the
pillar data from all environments to be merged together. The pillar dictionary
will therefore contain keys from all configured environments.
The :conf_minion:`pillarenv` minion config option can be used to force the
minion to only consider pillar configuration from a single environment. This
can be useful in cases where one needs to run states with alternate pillar
data, either in a testing/QA environment or to test changes to the pillar data
before pushing them live.
For example, assume that the following is set in the minion config file:
.. code-block:: yaml
pillarenv: base
This would cause that minion to ignore all other pillar environments besides
``base`` when compiling the in-memory pillar data. Then, when running states,
the ``pillarenv`` CLI argument can be used to override the minion's
:conf_minion:`pillarenv` config value:
.. code-block:: bash
salt '*' state.apply mystates pillarenv=testing
The above command will run the states with pillar data sourced exclusively from
the ``testing`` environment, without modifying the in-memory pillar data.
.. note::
When running states, the ``pillarenv`` CLI option does not require a
:conf_minion:`pillarenv` option to be set in the minion config file. When
:conf_minion:`pillarenv` is left unset, as mentioned above all configured
environments will be combined. Running states with ``pillarenv=testing`` in
this case would still restrict the states' pillar data to just that of the
``testing`` pillar environment.
Starting in the 2017.7.0 release, it is possible to pin the pillarenv to the
effective saltenv, using the :conf_minion:`pillarenv_from_saltenv` minion
config option. When this is set to ``True``, if a specific saltenv is specified
when running states, the ``pillarenv`` will be the same. This essentially makes
the following two commands equivalent:
.. code-block:: bash
salt '*' state.apply mystates saltenv=dev
salt '*' state.apply mystates saltenv=dev pillarenv=dev
However, if a pillarenv is specified, it will override this behavior. So, the
following command will use the ``qa`` pillar environment but source the SLS
files from the ``dev`` saltenv:
.. code-block:: bash
salt '*' state.apply mystates saltenv=dev pillarenv=qa
So, if a ``pillarenv`` is set in the minion config file,
:conf_minion:`pillarenv_from_saltenv` will be ignored, and passing a
``pillarenv`` on the CLI will temporarily override
:conf_minion:`pillarenv_from_saltenv`.
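Enabling this behavior in the minion config file is a single option:

.. code-block:: yaml

    pillarenv_from_saltenv: True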
Viewing Pillar Data
===================
To view pillar data, use the :mod:`pillar <salt.modules.pillar>` execution
module. This module includes several functions, each of them with their own
use. These functions include:
- :py:func:`pillar.item <salt.modules.pillar.item>` - Retrieves the value of
one or more keys from the :ref:`in-memory pillar data <pillar-in-memory>`.
- :py:func:`pillar.items <salt.modules.pillar.items>` - Compiles a fresh pillar
dictionary and returns it, leaving the :ref:`in-memory pillar data
<pillar-in-memory>` untouched. If pillar keys are passed to this function
however, this function acts like :py:func:`pillar.item
<salt.modules.pillar.item>` and returns their values from the :ref:`in-memory
pillar data <pillar-in-memory>`.
- :py:func:`pillar.raw <salt.modules.pillar.raw>` - Like :py:func:`pillar.items
<salt.modules.pillar.items>`, it returns the entire pillar dictionary, but
from the :ref:`in-memory pillar data <pillar-in-memory>` instead of compiling
fresh pillar data.
- :py:func:`pillar.get <salt.modules.pillar.get>` - Described in detail below.
The :py:func:`pillar.get <salt.modules.pillar.get>` Function
============================================================
.. versionadded:: 0.14.0
The :mod:`pillar.get <salt.modules.pillar.get>` function works much in the same
way as the ``get`` method in a python dict, but with an enhancement: nested
dictionaries can be traversed using a colon as a delimiter.
If a structure like this is in pillar:
.. code-block:: yaml
foo:
bar:
baz: qux
Extracting it from the raw pillar in an sls formula or file template is done
this way:
.. code-block:: jinja
{{ pillar['foo']['bar']['baz'] }}
Now, with the new :mod:`pillar.get <salt.modules.pillar.get>` function the data
can be safely gathered and a default can be set, allowing the template to fall
back if the value is not available:
.. code-block:: jinja
{{ salt['pillar.get']('foo:bar:baz', 'qux') }}
This makes handling nested structures much easier.
.. note:: ``pillar.get()`` vs ``salt['pillar.get']()``
It should be noted that within templating, the ``pillar`` variable is just
a dictionary. This means that calling ``pillar.get()`` inside of a
template will just use the default dictionary ``.get()`` function which
does not include the extra ``:`` delimiter functionality. It must be
called using the above syntax (``salt['pillar.get']('foo:bar:baz',
'qux')``) to get the salt function, instead of the default dictionary
behavior.
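A short contrast of the two calls inside a template, using the ``foo:bar:baz``
structure from above:

.. code-block:: jinja

    {# Plain dict .get(): no colon traversal, returns the whole 'foo' dict #}
    {{ pillar.get('foo', {}) }}

    {# Salt execution module: colon-delimited traversal with a default #}
    {{ salt['pillar.get']('foo:bar:baz', 'qux') }}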
Setting Pillar Data at the Command Line
=======================================
Pillar data can be set at the command line like the following example:
.. code-block:: bash
salt '*' state.apply pillar='{"cheese": "spam"}'
This will add a pillar key of ``cheese`` with its value set to ``spam``.
.. note::
Be aware that when sending sensitive data via pillar on the command-line
that the publication containing that data will be received by all minions
and will not be restricted to the targeted minions. This may represent
a security concern in some cases.
.. _pillar-encryption:
Pillar Encryption
=================
Salt's renderer system can be used to decrypt pillar data. This allows for
pillar items to be stored in an encrypted state, and decrypted during pillar
compilation.
Encrypted Pillar SLS
--------------------
.. versionadded:: 2017.7.0
Consider the following pillar SLS file:
.. code-block:: yaml
secrets:
vault:
foo: |
-----BEGIN PGP MESSAGE-----
hQEMAw2B674HRhwSAQgAhTrN8NizwUv/VunVrqa4/X8t6EUulrnhKcSeb8sZS4th
W1Qz3K2NjL4lkUHCQHKZVx/VoZY7zsddBIFvvoGGfj8+2wjkEDwFmFjGE4DEsS74
ZLRFIFJC1iB/O0AiQ+oU745skQkU6OEKxqavmKMrKo3rvJ8ZCXDC470+i2/Hqrp7
+KWGmaDOO422JaSKRm5D9bQZr9oX7KqnrPG9I1+UbJyQSJdsdtquPWmeIpamEVHb
VMDNQRjSezZ1yKC4kCWm3YQbBF76qTHzG1VlLF5qOzuGI9VkyvlMaLfMibriqY73
zBbPzf6Bkp2+Y9qyzuveYMmwS4sEOuZL/PetqisWe9JGAWD/O+slQ2KRu9hNww06
KMDPJRdyj5bRuBVE4hHkkP23KrYr7SuhW2vpe7O/MvWEJ9uDNegpMLhTWruGngJh
iFndxegN9w==
=bAuo
-----END PGP MESSAGE-----
bar: this was unencrypted already
baz: |
-----BEGIN PGP MESSAGE-----
hQEMAw2B674HRhwSAQf+Ne+IfsP2IcPDrUWct8sTJrga47jQvlPCmO+7zJjOVcqz
gLjUKvMajrbI/jorBWxyAbF+5E7WdG9WHHVnuoywsyTB9rbmzuPqYCJCe+ZVyqWf
9qgJ+oUjcvYIFmH3h7H68ldqbxaAUkAOQbTRHdr253wwaTIC91ZeX0SCj64HfTg7
Izwk383CRWonEktXJpientApQFSUWNeLUWagEr/YPNFA3vzpPF5/Ia9X8/z/6oO2
q+D5W5mVsns3i2HHbg2A8Y+pm4TWnH6mTSh/gdxPqssi9qIrzGQ6H1tEoFFOEq1V
kJBe0izlfudqMq62XswzuRB4CYT5Iqw1c97T+1RqENJCASG0Wz8AGhinTdlU5iQl
JkLKqBxcBz4L70LYWyHhYwYROJWjHgKAywX5T67ftq0wi8APuZl9olnOkwSK+wrY
1OZi
=7epf
-----END PGP MESSAGE-----
qux:
- foo
- bar
- |
-----BEGIN PGP MESSAGE-----
hQEMAw2B674HRhwSAQgAg1YCmokrweoOI1c9HO0BLamWBaFPTMblOaTo0WJLZoTS
ksbQ3OJAMkrkn3BnnM/djJc5C7vNs86ZfSJ+pvE8Sp1Rhtuxh25EKMqGOn/SBedI
gR6N5vGUNiIpG5Tf3DuYAMNFDUqw8uY0MyDJI+ZW3o3xrMUABzTH0ew+Piz85FDA
YrVgwZfqyL+9OQuu6T66jOIdwQNRX2NPFZqvon8liZUPus5VzD8E5cAL9OPxQ3sF
f7/zE91YIXUTimrv3L7eCgU1dSxKhhfvA2bEUi+AskMWFXFuETYVrIhFJAKnkFmE
uZx+O9R9hADW3hM5hWHKH9/CRtb0/cC84I9oCWIQPdI+AaPtICxtsD2N8Q98hhhd
4M7I0sLZhV+4ZJqzpUsOnSpaGyfh1Zy/1d3ijJi99/l+uVHuvmMllsNmgR+ZTj0=
=LrCQ
-----END PGP MESSAGE-----
When the pillar data is compiled, the results will be decrypted:
.. code-block:: bash
# salt myminion pillar.items
myminion:
----------
secrets:
----------
vault:
----------
bar:
this was unencrypted already
baz:
rosebud
foo:
supersecret
qux:
- foo
- bar
- baz
Salt must be told what portions of the pillar data to decrypt. This is done
using the :conf_master:`decrypt_pillar` config option:
.. code-block:: yaml
decrypt_pillar:
- 'secrets:vault': gpg
The notation used to specify the pillar item(s) to be decrypted is the same as
the one used in the :py:func:`pillar.get <salt.modules.pillar.get>` function.
If a different delimiter is needed, it can be specified using the
:conf_master:`decrypt_pillar_delimiter` config option:
.. code-block:: yaml
decrypt_pillar:
- 'secrets|vault': gpg
decrypt_pillar_delimiter: '|'
The name of the renderer used to decrypt a given pillar item can be omitted,
and if so it will fall back to the value specified by the
:conf_master:`decrypt_pillar_default` config option, which defaults to ``gpg``.
So, the first example above could be rewritten as:
.. code-block:: yaml
decrypt_pillar:
- 'secrets:vault'
Encrypted Pillar Data on the CLI
--------------------------------
.. versionadded:: 2016.3.0
The following functions support passing pillar data on the CLI via the
``pillar`` argument:
- :py:func:`pillar.items <salt.modules.pillar.items>`
- :py:func:`state.apply <salt.modules.state.apply_>`
- :py:func:`state.highstate <salt.modules.state.highstate>`
- :py:func:`state.sls <salt.modules.state.sls>`
Triggering decryption of this CLI pillar data can be done in one of two ways:
1. Using the ``pillar_enc`` argument:
.. code-block:: bash
# salt myminion pillar.items pillar_enc=gpg pillar='{foo: "-----BEGIN PGP MESSAGE-----\n\nhQEMAw2B674HRhwSAQf+OvPqEdDoA2fk15I5dYUTDoj1yf/pVolAma6iU4v8Zixn\nRDgWsaAnFz99FEiFACsAGDEFdZaVOxG80T0Lj+PnW4pVy0OXmXHnY2KjV9zx8FLS\nQxfvmhRR4t23WSFybozfMm0lsN8r1vfBBjbK+A72l0oxN78d1rybJ6PWNZiXi+aC\nmqIeunIbAKQ21w/OvZHhxH7cnIiGQIHc7N9nQH7ibyoKQzQMSZeilSMGr2abAHun\nmLzscr4wKMb+81Z0/fdBfP6g3bLWMJga3hSzSldU9ovu7KR8rDJI1qOlENj3Wm8C\nwTpDOB33kWIKMqiAjY3JFtb5MCHrafyggwQL7cX1+tI+AbSO6kZpbcDfzetb77LZ\nxc5NWnnGK4pGoqq4MAmZshw98RpecSHKMosto2gtiuWCuo9Zn5cV/FbjZ9CTWrQ=\n=0hO/\n-----END PGP MESSAGE-----"}'
The newlines in this example are specified using a literal ``\n``. Newlines
can be replaced with a literal ``\n`` using ``sed``:
.. code-block:: bash
$ echo -n bar | gpg --armor --trust-model always --encrypt -r [email protected] | sed ':a;N;$!ba;s/\n/\\n/g'
.. note::
Using ``pillar_enc`` will perform the decryption minion-side, so for
this to work it will be necessary to set up the keyring in
``/etc/salt/gpgkeys`` on the minion just as one would typically do on
the master. The easiest way to do this is to first export the keys from
the master:
.. code-block:: bash
# gpg --homedir /etc/salt/gpgkeys --export-secret-key -a [email protected] >/tmp/keypair.gpg
Then, copy the file to the minion, setup the keyring, and import:
.. code-block:: bash
# mkdir -p /etc/salt/gpgkeys
# chmod 0700 /etc/salt/gpgkeys
# gpg --homedir /etc/salt/gpgkeys --list-keys
# gpg --homedir /etc/salt/gpgkeys --import --allow-secret-key-import keypair.gpg
The ``--list-keys`` command is run to create a keyring in the newly-created
directory.
Pillar data which is decrypted minion-side will still be securely
transferred to the master, since the data sent between minion and master is
encrypted with the master's public key.
2. Use the :conf_master:`decrypt_pillar` option. This is less flexible in that
the pillar key passed on the CLI must be pre-configured on the master, but
it doesn't require a keyring to be set up on the minion. One other caveat to
this method is that pillar decryption on the master happens at the end of
pillar compilation, so if the encrypted pillar data being passed on the CLI
needs to be referenced by pillar or ext_pillar *during pillar compilation*,
it *must* be decrypted minion-side.
Adding New Renderers for Decryption
-----------------------------------
Those looking to add new renderers for decryption should look at the :mod:`gpg
<salt.renderers.gpg>` renderer for an example of how to do so. The function
that performs the decryption should be recursive and be able to traverse a
mutable type such as a dictionary, and modify the values in-place.
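As a rough illustration, the recursive walk might look like the following
sketch; ``_decrypt_ciphertext`` here is a hypothetical stand-in for whatever
call actually performs the decryption:

.. code-block:: python

    def _decrypt_object(obj):
        # Recursively walk mutable containers, replacing encrypted string
        # values with their decrypted equivalents in-place.
        if isinstance(obj, str):
            return _decrypt_ciphertext(obj)  # hypothetical decryption call
        if isinstance(obj, dict):
            for key in obj:
                obj[key] = _decrypt_object(obj[key])
        elif isinstance(obj, list):
            for idx, item in enumerate(obj):
                obj[idx] = _decrypt_object(item)
        return obj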
Once the renderer has been written, :conf_master:`decrypt_pillar_renderers`
should be modified so that Salt allows it to be used for decryption.
If the renderer is being submitted upstream to the Salt project, the renderer
should be added in `salt/renderers/`_. Additionally, the following should be
done:
- Both occurrences of :conf_master:`decrypt_pillar_renderers` in
`salt/config/__init__.py`_ should be updated to include the name of the new
renderer so that it is included in the default value for this config option.
- The documentation for the :conf_master:`decrypt_pillar_renderers` config
option in the `master config file`_ and `minion config file`_ should be
updated to show the correct new default value.
- The commented example for the :conf_master:`decrypt_pillar_renderers` config
option in the `master config template`_ should be updated to show the correct
new default value.
.. _`salt/renderers/`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/salt/renderers/
.. _`salt/config/__init__.py`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/salt/config/__init__.py
.. _`master config file`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/doc/ref/configuration/master.rst
.. _`minion config file`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/doc/ref/configuration/minion.rst
.. _`master config template`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/conf/master
Binary Data in the Pillar
=========================
Salt has partial support for binary pillar data.
.. note::
There are some situations (such as salt-ssh) where only text (ASCII or
Unicode) is allowed.
The simplest way to embed binary data in your pillar is to make use of YAML's
built-in binary data type, which requires base64 encoded data.
.. code-block:: yaml
salt_pic: !!binary
iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAMAAAC67D+PAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAA
Then you can use it as a ``contents_pillar`` in a state:
.. code-block:: yaml
/tmp/salt.png:
file.managed:
- contents_pillar: salt_pic
It is also possible to add ASCII-armored encrypted data to pillars, as
mentioned in the Pillar Encryption section.
Master Config in Pillar
=======================
For convenience the data stored in the master configuration file can be made
available in all minion's pillars. This makes global configuration of services
and systems very easy but may not be desired if sensitive data is stored in the
master configuration. This option is disabled by default.
To allow the master config to be added to the pillar, set
:conf_minion:`pillar_opts` to ``True`` in the minion config file:
.. code-block:: yaml
pillar_opts: True
Minion Config in Pillar
=======================
Minion configuration options can be set via pillar. Any option that you want
to modify should be set at the top level of the pillar, in the same way you set
the options in the config file. For example, to configure the MySQL root
password to be used by MySQL Salt execution module, set the following pillar
variable:
.. code-block:: yaml
mysql.pass: hardtoguesspassword
Master Provided Pillar Error
============================
By default if there is an error rendering a pillar, the detailed error is
hidden and replaced with:
.. code-block:: bash
Rendering SLS 'my.sls' failed. Please see master log for details.
The detailed error is hidden because it may contain templating data
which would give that minion information it shouldn't know, like a password!
To have the master provide the detailed error that could potentially carry
protected data set ``pillar_safe_render_error`` to ``False``:
.. code-block:: yaml
pillar_safe_render_error: False
.. toctree::
../tutorials/pillar
|
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/pillar/index.rst
| 0.892545 | 0.750324 |
index.rst
|
pypi
|
.. _external-job-cache:
=========================================
Storing Job Results in an External System
=========================================
After a job executes, job results are returned
to the Salt Master by each Salt Minion. These results are stored in the
:ref:`Default Job Cache <default_job_cache>`.
In addition to the Default Job Cache, Salt provides two additional
mechanisms to send job results to other systems (databases, local syslog,
and others):
* External Job Cache
* Master Job Cache
The major difference between these two mechanisms is where the results are
returned from (the Salt Master or the Salt Minion). Configuring either of these
options will also cause the :py:mod:`Jobs Runner functions <salt.runners.jobs>`
to automatically query the remote stores for information.
External Job Cache - Minion-Side Returner
-----------------------------------------
When an External Job Cache is configured, data is returned to the Default Job
Cache on the Salt Master like usual, and then results are also sent to an
External Job Cache using a Salt returner module running on the Salt Minion.
.. image:: /_static/external-job-cache.png
:align: center
* Advantages: Data is stored without placing additional load on the Salt Master.
* Disadvantages: Each Salt Minion connects to the external job cache, which can
result in a large number of connections. Also requires additional configuration to
get returner module settings on all Salt Minions.
Master Job Cache - Master-Side Returner
---------------------------------------
.. versionadded:: 2014.7.0
Instead of configuring an External Job Cache on each Salt Minion, you can
configure the Master Job Cache to send job results from the Salt Master
instead. In this configuration, Salt Minions send data to the Default Job Cache
as usual, and then the Salt Master sends the data to the external system using
a Salt returner module running on the Salt Master.
.. image:: /_static/master-job-cache.png
:align: center
* Advantages: A single connection is required to the external system. This is
preferred for databases and similar systems.
* Disadvantages: Places additional load on your Salt Master.
Configure an External or Master Job Cache
-----------------------------------------
Step 1: Understand Salt Returners
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before you configure a job cache, it is essential to understand Salt returner
modules ("returners"). Returners are pluggable Salt Modules that take the data
returned by jobs, and then perform any necessary steps to send the data to an
external system. For example, a returner might establish a connection,
authenticate, and then format and transfer data.
The Salt Returner system provides the core functionality used by the External
and Master Job Cache systems, and the same returners are used by both systems.
Salt currently provides many different returners that let you connect to a
wide variety of systems. A complete list is available at
:ref:`all Salt returners <all-salt.returners>`.
Each returner is configured differently, so make sure you read and follow the
instructions linked from that page.
For example, the MySQL returner requires:
* A database created using the provided schema (its structure is available at
:mod:`MySQL returner <salt.returners.mysql>`)
* A user created with privileges to the database
* Optional SSL configuration
A simpler returner, such as Slack or HipChat, requires:
* An API key/version
* The target channel/room
* The username that should be used to send the message
Step 2: Configure the Returner
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After you understand the configuration and have the external system ready, the
configuration requirements must be declared.
External Job Cache
""""""""""""""""""
The returner configuration settings can be declared in the Salt Minion
configuration file, the Minion's pillar data, or the Minion's grains.
If ``external_job_cache`` configuration settings are specified in more than
one place, the options are retrieved in the following order. The first
configuration location that is found is the one that will be used.
- Minion configuration file
- Minion's grains
- Minion's pillar data
Master Job Cache
""""""""""""""""
The returner configuration settings for the Master Job Cache should be
declared in the Salt Master's configuration file.
Configuration File Examples
"""""""""""""""""""""""""""
MySQL requires:
.. code-block:: yaml
mysql.host: 'salt'
mysql.user: 'salt'
mysql.pass: 'salt'
mysql.db: 'salt'
mysql.port: 3306
Slack requires:
.. code-block:: yaml
slack.channel: 'channel'
slack.api_key: 'key'
slack.from_name: 'name'
After you have configured the returner and added settings to the configuration
file, you can enable the External or Master Job Cache.
Step 3: Enable the External or Master Job Cache
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configuration is a single line that specifies an
already-configured returner to use to send all job data to an external system.
External Job Cache
""""""""""""""""""
To enable a returner as the External Job Cache (Minion-side), add the following
line to the Salt Master configuration file:
.. code-block:: yaml
ext_job_cache: <returner>
For example:
.. code-block:: yaml
ext_job_cache: mysql
.. note::
When configuring an External Job Cache (Minion-side), the returner settings are
added to the Minion configuration file, but the External Job Cache setting
is configured in the Master configuration file.
Master Job Cache
""""""""""""""""
To enable a returner as a Master Job Cache (Master-side), add the following
line to the Salt Master configuration file:
.. code-block:: yaml
master_job_cache: <returner>
For example:
.. code-block:: yaml
master_job_cache: mysql
Verify that the returner configuration settings are in the Master configuration
file, and be sure to restart the salt-master service after you make
configuration changes. (``service salt-master restart``).
|
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/jobs/external_cache.rst
| 0.886445 | 0.696894 |
external_cache.rst
|
pypi
|
.. _spm-development:
=====================
SPM Development Guide
=====================
This document discusses developing additional code for SPM.
SPM-Specific Loader Modules
===========================
SPM was designed to behave like traditional package managers, which apply files
to the filesystem and store package metadata in a local database. However,
because modern infrastructures often extend beyond those use cases, certain
parts of SPM have been broken out into their own set of modules.
Each function that accepts arguments has a set of required and optional
arguments. Take note that SPM will pass all arguments in, and therefore each
function must accept each of those arguments. However, arguments that are
marked as required are crucial to SPM's core functionality, while arguments that
are marked as optional are provided as a benefit to the module, if it needs to
use them.
.. _spm-development-pkgdb:
Package Database
----------------
By default, the package database is stored using the ``sqlite3`` module. This
module was chosen because support for SQLite3 is built into Python itself.
Modules for managing the package database are stored in the ``salt/spm/pkgdb/``
directory. A number of functions must exist to support database management.
init()
``````
Get a database connection, and initialize the package database if necessary.
This function accepts no arguments. If a database is used which supports a
connection object, then that connection object is returned. For instance, the
``sqlite3`` module returns a ``connect()`` object from the ``sqlite3`` library:
.. code-block:: python
    import sqlite3


    def init():
        # The connection object is handed as-is to the other functions.
        conn = sqlite3.connect(__opts__["spm_db"], isolation_level=None)
        ...
        return conn
SPM itself will not use this connection object; it will be passed in as-is to
the other functions in the module. Therefore, when you set up this object, make
sure to do so in a way that is easily usable throughout the module.
info()
``````
Return information for a package. This generally consists of the information
that is stored in the ``FORMULA`` file in the package.
The arguments that are passed in, in order, are ``package`` (required) and
``conn`` (optional).
``package`` is the name of the package, as specified in the ``FORMULA``.
``conn`` is the connection object returned from ``init()``.
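A minimal sqlite3-backed sketch might look like the following; the
``packages`` table and its columns are hypothetical, not the real schema:

.. code-block:: python

    import json


    def info(package, conn=None):
        # Look up the stored FORMULA metadata for a single package.
        row = conn.execute(
            "SELECT formula_def FROM packages WHERE package = ?", (package,)
        ).fetchone()
        return json.loads(row[0]) if row else None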
list_files()
````````````
Return a list of files for an installed package. Only the filename should be
returned, and no other information.
The arguments that are passed in, in order, are ``package`` (required) and
``conn`` (optional).
``package`` is the name of the package, as specified in the ``FORMULA``.
``conn`` is the connection object returned from ``init()``.
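Again assuming a hypothetical ``files`` table, a sketch could be as simple as:

.. code-block:: python

    def list_files(package, conn=None):
        # Return only the recorded filenames, nothing else.
        return [
            row[0]
            for row in conn.execute(
                "SELECT path FROM files WHERE package = ?", (package,)
            )
        ]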
register_pkg()
``````````````
Register a package in the package database. Nothing is expected to be returned
from this function.
The arguments that are passed in, in order, are ``name`` (required),
``formula_def`` (required), and ``conn`` (optional).
``name`` is the name of the package, as specified in the ``FORMULA``.
``formula_def`` is the contents of the ``FORMULA`` file, as a ``dict``. ``conn``
is the connection object returned from ``init()``.
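A sketch using the same hypothetical ``packages`` table as above:

.. code-block:: python

    import json


    def register_pkg(name, formula_def, conn=None):
        # Persist the package name alongside its FORMULA metadata.
        conn.execute(
            "INSERT INTO packages (package, formula_def) VALUES (?, ?)",
            (name, json.dumps(formula_def)),
        )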
register_file()
```````````````
Register a file in the package database. Nothing is expected to be returned
from this function.
The arguments that are passed in are ``name`` (required), ``member`` (required),
``path`` (required), ``digest`` (optional), and ``conn`` (optional).
``name`` is the name of the package.
``member`` is a ``tarfile`` object for the
package file. It is included, because it contains most of the information for
the file.
``path`` is the location of the file on the local filesystem.
``digest`` is the SHA1 checksum of the file.
``conn`` is the connection object returned from ``init()``.
unregister_pkg()
````````````````
Unregister a package from the package database. This usually only involves
removing the package's record from the database. Nothing is expected to be
returned from this function.
The arguments that are passed in, in order, are ``name`` (required) and
``conn`` (optional).
``name`` is the name of the package, as specified in the ``FORMULA``. ``conn``
is the connection object returned from ``init()``.
unregister_file()
`````````````````
Unregister a file from the package database. This usually only involves
removing the file's record from the database. Nothing is expected to be
returned from this function.
The arguments that are passed in, in order, are ``name`` (required), ``pkg``
(optional) and ``conn`` (optional).
``name`` is the path of the file, as it was installed on the filesystem.
``pkg`` is the name of the package that the file belongs to.
``conn`` is the connection object returned from ``init()``.
db_exists()
```````````
Check to see whether the package database already exists. This function will
return ``True`` or ``False``.
The only argument that is expected is ``db_``, which is the path to the
package database file.
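For the default ``sqlite3`` backend this can be little more than a filesystem
check (a sketch, assuming the database is a single file on disk):

.. code-block:: python

    import os


    def db_exists(db_):
        # The sqlite3 package database is a single file on disk.
        return os.path.exists(db_)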
.. _spm-development-pkgfiles:
Package Files
-------------
By default, package files are installed using the ``local`` module. This module
applies files to the local filesystem, on the machine that the package is
installed on.
Modules for managing the package database are stored in the
``salt/spm/pkgfiles/`` directory. A number of functions must exist to support
file management.
init()
``````
Initialize the installation location for the package files. Normally these will
be directory paths, but other external destinations such as databases can be
used. For this reason, this function will return a connection object, which can
be a database object. However, in the default ``local`` module, this object is a
dict containing the paths. This object will be passed into all other functions.
Three directories are used for the destinations: ``formula_path``,
``pillar_path``, and ``reactor_path``.
``formula_path`` is the location of most of the files that will be installed.
The default is specific to the operating system, but is normally ``/srv/salt/``.
``pillar_path`` is the location that the ``pillar.example`` file will be
installed to. The default is specific to the operating system, but is normally
``/srv/pillar/``.
``reactor_path`` is the location that reactor files will be installed to. The
default is specific to the operating system, but is normally ``/srv/reactor/``.
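A sketch for a ``local``-style backend, where the connection object is just a
dict of destination paths (the defaults shown here are illustrative):

.. code-block:: python

    def init():
        # The returned dict is passed as ``conn`` to the other functions.
        return {
            "formula_path": __opts__.get("formula_path", "/srv/salt"),
            "pillar_path": __opts__.get("pillar_path", "/srv/pillar"),
            "reactor_path": __opts__.get("reactor_path", "/srv/reactor"),
        }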
check_existing()
````````````````
Check the filesystem for existing files. All files for the package will be
checked, and if any exist, then this function will normally state that
SPM will refuse to install the package.
This function returns a list of the files that exist on the system.
The arguments that are passed into this function are, in order: ``package``
(required), ``pkg_files`` (required), ``formula_def`` (required), and
``conn`` (optional).
``package`` is the name of the package that is to be installed.
``pkg_files`` is a list of the files to be checked.
``formula_def`` is a copy of the information that is stored in the ``FORMULA``
file.
``conn`` is the file connection object.
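A sketch, assuming ``conn`` is the dict of paths returned by the ``init()``
sketch above and that ``pkg_files`` holds ``tarfile`` members:

.. code-block:: python

    import os


    def check_existing(package, pkg_files, formula_def, conn=None):
        # Collect any of the package's files already present on disk; a
        # non-empty result normally makes SPM refuse the install.
        existing = []
        for member in pkg_files:
            out_path = os.path.join(conn["formula_path"], member.name)
            if os.path.exists(out_path):
                existing.append(out_path)
        return existing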
install_file()
``````````````
Install a single file to the destination (normally on the filesystem). This
function returns the final location that the file was installed to.
The arguments that are passed into this function are, in order, ``package``
(required), ``formula_tar`` (required), ``member`` (required), ``formula_def``
(required), and ``conn`` (optional).
``package`` is the name of the package that is to be installed.
``formula_tar`` is the tarfile object for the package. This is passed in so that
the function can call ``formula_tar.extract()`` for the file.
``member`` is the tarfile object which represents the individual file. This may
be modified as necessary, before being passed into ``formula_tar.extract()``.
``formula_def`` is a copy of the information from the ``FORMULA`` file.
``conn`` is the file connection object.
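A simplified sketch; a real implementation adjusts the destination per file
type (pillar files, reactor files, and so on):

.. code-block:: python

    def install_file(package, formula_tar, member, formula_def, conn=None):
        # Extract one member of the package tarball into the formula path
        # and report where it was installed.
        out_path = conn["formula_path"]
        formula_tar.extract(member, out_path)
        return out_path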
remove_file()
`````````````
Remove a single file from the filesystem. Normally this will be little more than an
``os.remove()``. Nothing is expected to be returned from this function.
The arguments that are passed into this function are, in order, ``path``
(required) and ``conn`` (optional).
``path`` is the absolute path to the file to be removed.
``conn`` is the file connection object.
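A sketch for the ``local`` backend, where this really is little more than an
``os.remove()``:

.. code-block:: python

    import os


    def remove_file(path, conn=None):
        if os.path.exists(path):
            os.remove(path)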
hash_file()
```````````
Returns the hexdigest hash value of a file.
The arguments that are passed into this function are, in order, ``path``
(required), ``hashobj`` (required), and ``conn`` (optional).
``path`` is the absolute path to the file.
``hashobj`` is a reference to ``hashlib.sha1()``, which is used to pull the
``hexdigest()`` for the file.
``conn`` is the file connection object.
This function will not generally be more complex than:
.. code-block:: python
    import salt.utils.files


    def hash_file(path, hashobj, conn=None):
        # Open in binary mode so hashobj.update() receives bytes.
        with salt.utils.files.fopen(path, "rb") as f:
            hashobj.update(f.read())
        return hashobj.hexdigest()
path_exists()
`````````````
Check to see whether the file already exists on the filesystem. Returns ``True``
or ``False``.
This function expects a ``path`` argument, which is the absolute path to the
file to be checked.
path_isdir()
````````````
Check to see whether the path specified is a directory. Returns ``True`` or
``False``.
This function expects a ``path`` argument, which is the absolute path to be
checked.
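For the ``local`` backend, both of these checks reduce to thin ``os.path``
wrappers (a sketch):

.. code-block:: python

    import os


    def path_exists(path):
        return os.path.exists(path)


    def path_isdir(path):
        return os.path.isdir(path)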
|
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/spm/dev.rst
| 0.833833 | 0.855248 |
dev.rst
|
pypi
|
.. _spm-formula:
============
FORMULA File
============
In addition to the formula itself, a ``FORMULA`` file must exist which
describes the package. An example of this file is:
.. code-block:: yaml
name: apache
os: RedHat, Debian, Ubuntu, SUSE, FreeBSD
os_family: RedHat, Debian, Suse, FreeBSD
version: 201506
release: 2
summary: Formula for installing Apache
description: Formula for installing Apache
Required Fields
```````````````
This file must contain at least the following fields:
name
~~~~
The name of the package, as it will appear in the package filename, in the
repository metadata, and the package database. Even if the source formula has
``-formula`` in its name, this name should probably not include that. For
instance, when packaging the ``apache-formula``, the name should be set to
``apache``.
os
~~
The value of the ``os`` grain that this formula supports. This is used to
help users know which operating systems can support this package.
os_family
~~~~~~~~~
The value of the ``os_family`` grain that this formula supports. This is used to
help users know which operating system families can support this package.
version
~~~~~~~
The version of the package. While it is up to the organization that manages this
package, it is suggested that this version is specified in a ``YYYYMM`` format.
For instance, if this version was released in June 2015, the package version
should be ``201506``. If multiple releases are made in a month, the ``release``
field should be used.
minimum_version
~~~~~~~~~~~~~~~
Minimum recommended version of Salt to use this formula. Not currently enforced.
release
~~~~~~~
This field refers primarily to a release of a version, but also to multiple
versions within a month. In general, if a version has been made public, and
immediate updates need to be made to it, this field should also be updated.
summary
~~~~~~~
A one-line description of the package.
description
~~~~~~~~~~~
A more detailed description of the package which can contain more than one line.
Optional Fields
```````````````
The following fields may also be present.
top_level_dir
~~~~~~~~~~~~~
This field is optional, but highly recommended. If it is not specified, the
package name will be used.
Formula repositories typically do not store ``.sls`` files in the root of the
repository; instead they are stored in a subdirectory. For instance, an
``apache-formula`` repository would contain a directory called ``apache``, which
would contain an ``init.sls``, plus a number of other related files. In this
instance, the ``top_level_dir`` should be set to ``apache``.
Files outside the ``top_level_dir``, such as ``README.rst``, ``FORMULA``, and
``LICENSE`` will not be installed. The exceptions to this rule are files that
are already treated specially, such as ``pillar.example`` and ``_modules/``.
dependencies
~~~~~~~~~~~~
A comma-separated list of packages that must be installed along with this
package. When this package is installed, SPM will attempt to discover and
install these packages as well. If it is unable to, then it will refuse to
install this package.
This is useful for creating packages which tie together other packages. For
instance, a package called wordpress-mariadb-apache would depend upon
wordpress, mariadb, and apache.
optional
~~~~~~~~
A comma-separated list of packages which are related to this package, but are
neither required nor necessarily recommended. This list is displayed in an
informational message when the package is installed to SPM.
recommended
~~~~~~~~~~~
A comma-separated list of optional packages that are recommended to be
installed with the package. This list is displayed in an informational message
when the package is installed to SPM.
files
~~~~~
A files section can be added, to specify a list of files to add to the SPM.
Such a section might look like:
.. code-block:: yaml
files:
- _pillar
- FORMULA
- _runners
- d|mymodule/index.rst
- r|README.rst
When ``files`` are specified, then only those files will be added to the SPM,
regardless of what other files exist in the directory. They will also be added
in the order specified, which is useful if you have a need to lay down files in
a specific order.
As can be seen in the example above, you may also tag files as being a specific
type. This is done by prepending a filename with its type, followed by a pipe
(``|``) character. The above example contains a document file and a readme. The
available file types are:
* ``c``: config file
* ``d``: documentation file
* ``g``: ghost file (i.e. the file contents are not included in the package payload)
* ``l``: license file
* ``r``: readme file
* ``s``: SLS file
* ``m``: Salt module
The first 5 of these types (``c``, ``d``, ``g``, ``l``, ``r``) will be placed in
``/usr/share/salt/spm/`` by default. This can be changed by setting an
``spm_share_dir`` value in your ``/etc/salt/spm`` configuration file.
The last two types (``s`` and ``m``) are currently ignored, but they are
reserved for future use.
Pre and Post States
-------------------
It is possible to run Salt states before and after installing a package by
using pre and post states. The following sections may be declared in a
``FORMULA``:
* ``pre_local_state``
* ``pre_tgt_state``
* ``post_local_state``
* ``post_tgt_state``
Sections with ``pre`` in their name are evaluated before a package is installed
and sections with ``post`` are evaluated after a package is installed. ``local``
states are evaluated before ``tgt`` states.
Each of these sections needs to be evaluated as text, rather than as YAML.
Consider the following block:
.. code-block:: yaml
pre_local_state: >
echo test > /tmp/spmtest:
cmd:
- run
Note that this declaration uses ``>`` after ``pre_local_state``. This is a YAML
marker that marks the next multi-line block as text, including newlines. It is
important to use this marker whenever declaring ``pre`` or ``post`` states, so
that the text following it can be evaluated properly.
local States
~~~~~~~~~~~~
``local`` states are evaluated locally; this is analogous to issuing a state
run using a ``salt-call --local`` command. These commands will be issued on the
local machine running the ``spm`` command, whether that machine is a master or
a minion.
``local`` states do not require any special arguments, but they must still use
the ``>`` marker to denote that the state is evaluated as text, not a data
structure.
.. code-block:: yaml
pre_local_state: >
echo test > /tmp/spmtest:
cmd:
- run
tgt States
~~~~~~~~~~
``tgt`` states are issued against a remote target. This is analogous to issuing
a state using the ``salt`` command. As such it requires that the machine that
the ``spm`` command is running on is a master.
Because ``tgt`` states require that a target be specified, their code blocks
are a little different. Consider the following state:
.. code-block:: yaml
pre_tgt_state:
tgt: '*'
data: >
echo test > /tmp/spmtest:
cmd:
- run
With ``tgt`` states, the state data is placed under a ``data`` section, inside
the ``*_tgt_state`` code block. The target is of course specified as a ``tgt``
and you may also optionally specify a ``tgt_type`` (the default is ``glob``).
You still need to use the ``>`` marker, but this time it follows the ``data``
line, rather than the ``*_tgt_state`` line.
Templating States
~~~~~~~~~~~~~~~~~
The reason that state data must be evaluated as text rather than a data
structure is because that state data is first processed through the rendering
engine, as it would be with a standard state run.
This means that you can use Jinja or any other supported renderer inside of
Salt. All formula variables are available to the renderer, so you can reference
``FORMULA`` data inside your state if you need to:
.. code-block:: yaml
pre_tgt_state:
tgt: '*'
data: >
echo {{ name }} > /tmp/spmtest:
cmd:
- run
You may also declare your own variables inside the ``FORMULA``. If SPM doesn't
recognize them then it will ignore them, so there are no restrictions on
variable names, outside of avoiding reserved words.
By default the renderer is set to ``jinja|yaml``. You may change this by
changing the ``renderer`` setting in the ``FORMULA`` itself.
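For example, to render the state blocks with Jinja and then parse them as JSON
instead (a sketch; any renderer pipeline Salt supports should work here):

.. code-block:: yaml

    renderer: jinja|json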
Building a Package
------------------
Once a ``FORMULA`` file has been created, it is placed into the root of the
formula that is to be turned into a package. The ``spm build`` command is
used to turn that formula into a package:
.. code-block:: bash
spm build /path/to/saltstack-formulas/apache-formula
The resulting file will be placed in the build directory. By default this
directory is located at ``/srv/spm/``.
Loader Modules
==============
When an execution module is placed in ``<file_roots>/_modules/`` on the master,
it will automatically be synced to minions, the next time a sync operation takes
place. Other modules are also propagated this way: state modules can be placed
in ``_states/``, and so on.
When SPM detects a file in a package which resides in one of these directories,
that directory will be placed in ``<file_roots>`` instead of in the formula
directory with the rest of the files.
Removing Packages
=================
Packages may be removed once they are installed using the ``spm remove``
command.
.. code-block:: bash
spm remove apache
If files have been modified, they will not be removed. Empty directories will
also be removed.
Technical Information
=====================
Packages are built using BZ2-compressed tarballs. By default, the package
database is stored using the ``sqlite3`` driver (see Loader Modules below).
Support for both is built into Python, and so no external dependencies are
needed.
All other files belonging to SPM use YAML, for portability and ease of use and
maintainability.
SPM-Specific Loader Modules
===========================
SPM was designed to behave like traditional package managers, which apply files
to the filesystem and store package metadata in a local database. However,
because modern infrastructures often extend beyond those use cases, certain
parts of SPM have been broken out into their own set of modules.
Package Database
----------------
By default, the package database is stored using the ``sqlite3`` module. This
module was chosen because support for SQLite3 is built into Python itself.
Please see the SPM Development Guide for information on creating new modules
for package database management.
Package Files
-------------
By default, package files are installed using the ``local`` module. This module
applies files to the local filesystem, on the machine that the package is
installed on.
Please see the :ref:`SPM Development Guide <spm-development>` for information
on creating new modules for package file management.
Types of Packages
=================
SPM supports different types of formula packages. The function of each package
is denoted by its name. For instance, packages which end in ``-formula`` are
considered to be Salt States (the most common type of formula). Packages which
end in ``-conf`` contain configuration which is to be placed in the
``/etc/salt/`` directory. Packages which do not contain one of these names are
treated as if they have a ``-formula`` name.
formula
-------
By default, most files from this type of package live in the ``/srv/spm/salt/``
directory. The exception is the ``pillar.example`` file, which will be renamed
to ``<package_name>.sls`` and placed in the pillar directory (``/srv/spm/pillar/``
by default).
reactor
-------
By default, files from this type of package live in the ``/srv/spm/reactor/``
directory.
conf
----
The files in this type of package are configuration files for Salt, which
normally live in the ``/etc/salt/`` directory. Configuration files for packages
other than Salt can and should be handled with a Salt State (using a ``formula``
type of package).
|
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/spm/spm_formula.rst
| 0.759761 | 0.802285 |
spm_formula.rst
|
pypi
|
.. _pull_requests:
Pull Requests
=============
Salt is a large software project with many developers working together. We
encourage all Salt users to contribute new features, bug fixes and
documentation fixes. For those who haven't contributed to a large software
project before we encourage you to consider the following questions when
preparing a pull request.
This isn't an exhaustive list and these aren't necessarily hard and fast rules,
but these are things we consider when reviewing a pull request.
* Does this change work on all platforms? In cases where it does not, is an
appropriate and easy-to-understand reason presented to the user? Is it
documented as-such? Have we thought about all the possible ways this code
might be used and accounted as best we can for them?
* Will this code work on all versions of Python we support? Will it work on
future versions?
* Are Python reserved keywords used? Are variables named in a way that will
make it easy for the next person to understand what's going on?
* Does this code present a security risk in any way? What is the worst possible
thing that an attacker could do with this code? If dangerous cases are
possible, is it appropriate to document them? If so, has this been done?
Would this change pass muster with a professional security audit? Is it
obvious to a person using this code what the risks are?
* Is it readable? Does it conform to our `style guide`_? Is the code documented
such that the next person who comes along will be able to read and understand
it? Most especially, are edge-cases documented to avoid regressions? Will it
be immediately evident to the next person who comes along why this change was
made?
.. _`style guide`: https://docs.saltproject.io/en/latest/topics/development/conventions/style.html
* If appropriate, has the person who wrote the code which is being modified
been notified and included in the process?
* What are the performance implications of this change? Is there a more
efficient way to structure the logic and if so, does making the change
balance itself against readability in a sensible way? Do the performance
characteristics of the code change based on the way it is being invoked
(i.e., through an API or various command-line tools.) Will it be easy to
profile this change if it might be a problem?
* Are caveats considered and documented in the change?
* Will the code scale? More critically, will it scale in *both* directions?
Salt runs in data-centers and on Raspberry Pi installations in the Sahara. It
needs to work on big servers and tiny devices.
* Is appropriate documentation written both in public-facing docs and in-line?
How will the user know how to use this? What will they do if it doesn't work
as expected? Is this something a new user will understand? Can a user know
all they need to about this functionality by reading the public docs?
* Is this a change in behavior? If so, is it in the appropriate branch? Are
deprecation warnings necessary? Have those changes been fully documented?
Have we fully thought through what implications a change in behavior might
have?
* How has the code been tested? If appropriate are there automated tests which
cover this? Is it likely to regress? If so, how has the potential of that
regression been mitigated? What is the plan for ensuring that this code works
going forward?
* If it's asynchronous code, what is the potential for a race condition?
* Is this code an original work? If it's borrowed from another project or found
online are the appropriate licensing/attribution considerations handled?
* Is the reason for the change fully explained in the PR? If not for review,
this is necessary so that somebody in the future can go back and figure out
why it was necessary.
* Is the intended behavior of the change clear? How will that behavior be known
to future contributors and to users?
* Does this code handle errors in a reasonable way? Have we gone back through
the stack as much as possible to make sure that an error cannot be raised
that we do not account for? Are errors tested for as well as proper
functionality?
* If the code relies on external libraries, do we properly handle old versions
of them? Do we require a specific version and if so is this version check
implemented? Is the library available on the same platforms that module in
question claims to support? If the code was written and tested against a
particular library, have we documented that fact?
* Can this code freeze/hang/crash a running daemon? Can it stall a state run?
Are there infinite loops? Are appropriate timeouts implemented?
* Is the function interface well documented? If argument types can not be
inferred by introspection, are they documented?
* Are resources such as file-handles cleaned-up after they are used?
* Is it possible that a reference-cycle exists between objects that will leak
memory?
* Has the code been linted and does it pass all tests?
* Does the change fully address the problem or is it limited to a small surface
area? By this, I mean that it should be clear that the submitter has looked
for other cases in the function or module where the given case might also be
addressed. If additional changes are necessary are they documented in the
code as a FIXME, in the PR, and in GitHub as an issue to be tracked?
* Will the code throw errors/warnings/stacktraces to the console during normal
operation?
* Has all the debugging been removed?
* Does the code log any sensitive data? Does it show sensitive data in process
lists? Does it store sensitive data to disk and if so, does it do so in a
secure manner? Are there potential race conditions in between writing the
data to disk and setting the appropriate permissions?
* Is it clear from the solution that the problem is well-understood? How can
somebody who has never seen the problem feel confident that this proposed
change is the best one?
* What's hard-coded that might not need to be? Are we making sensible decisions
for the user and allowing them to tune and change things where appropriate?
* Are utility functions used where appropriate? Does this change re-implement
something we already have code for?
* Is the right thing being fixed? There are cases where it's appropriate to fix
a test and cases where it's appropriate to fix the code that's under test.
Which is best for the user? Is this change a shortcut or a solution that will
be solid in the months and years to come?
* How will this code react to changes elsewhere in the code base? What is it
coupled to and have we fully thought through how best to present a coherent
interface to consumers of a given function or method?
* Does this PR try to fix too many bugs/problems at once?
* Should this be split into multiple PRs to make them easier to test and reason
about?
Pull Request Requirements
=========================
The following outlines what is required before a pull request can be merged into
the salt project. For each of these requirements, an exception can be made
that requires 3 approvals before merge. The exceptions are detailed below.
All PR requirements
-------------------
* Approval Required: an approval review from a core team member OR 1 approval
  review from the captain of a working group
* You cannot merge your own PR until 1 reviewer from the list defined above,
  who is not the author, approves.
* All Tests Pass
Bug Fix PR requirements
-----------------------
* Test Coverage: regression test written to cover bug fix. Contributors only need
to write test coverage for their specific changes.
* Point to the issue the PR is resolving. If there is not an issue, one will need
  to be created.
Feature PR requirements
-----------------------
* Test Coverage: tests written to cover new feature. Contributors only need to write
test coverage for their specific changes.
* Release Notes: Add note in release notes of new feature for relative release.
* Add ``.. versionadded:: <release>`` to the module's documentation. If you are
  not certain which release your fix will be included in, you can use TBD and the
  PR reviewer will let you know the correct release name to use in the
  ``versionadded`` directive.
Exceptions to all requirements
------------------------------
As previously stated, all of the above requirements can be bypassed with 3 approvals.
PRs that do not require tests include:
* documentation
* cosmetic changes (for example changing from log.debug to log.trace)
* fixing tests
* pylint
* changes outside of the salt directory
|
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/development/pull_requests.rst
| 0.604632 | 0.740937 |
pull_requests.rst
|
pypi
|
================================
Using the Salt Modules for Cloud
================================
In addition to the ``salt-cloud`` command, Salt Cloud can be called from Salt,
in a variety of different ways. Most users will be interested in either the
execution module or the state module, but it is also possible to call Salt Cloud
as a runner.
Because the actual work will be performed on a remote minion, the normal Salt
Cloud configuration must exist on any target minion that needs to execute a Salt
Cloud command. Because Salt Cloud now supports breaking out configuration into
individual files, the configuration is easily managed using Salt's own
``file.managed`` state function. For example, the following directories allow
this configuration to be managed easily:
.. code-block:: yaml
/etc/salt/cloud.providers.d/
/etc/salt/cloud.profiles.d/
Minion Keys
-----------
Keep in mind that when creating minions, Salt Cloud will create public and
private minion keys, upload them to the minion, and place the public key on the
machine that created the minion. It will *not* attempt to place any public
minion keys on the master, unless the minion which was used to create the
instance is also the Salt Master. This is because granting arbitrary minions
access to modify keys on the master is a serious security risk, and must be
avoided.
Execution Module
----------------
The ``cloud`` module is available to use from the command line. At the moment,
almost every standard Salt Cloud feature is available to use. The following
commands are available:
list_images
~~~~~~~~~~~
This command is designed to show images that are available to be used to create
an instance using Salt Cloud. In general they are used in the creation of
profiles, but may also be used to create an instance directly (see below).
Listing images requires a provider to be configured, and specified:
.. code-block:: bash
salt myminion cloud.list_images my-cloud-provider
list_sizes
~~~~~~~~~~
This command is designed to show sizes that are available to be used to create
an instance using Salt Cloud. In general they are used in the creation of
profiles, but may also be used to create an instance directly (see below). This
command is not available for all cloud providers; see the provider-specific
documentation for details. Listing sizes requires a provider to be configured,
and specified:
.. code-block:: bash
salt myminion cloud.list_sizes my-cloud-provider
list_locations
~~~~~~~~~~~~~~
This command is designed to show locations that are available to be used to
create an instance using Salt Cloud. In general they are used in the creation of
profiles, but may also be used to create an instance directly (see below). This
command is not available for all cloud providers; see the provider-specific
documentation for details. Listing locations requires a provider to be
configured, and specified:
.. code-block:: bash
salt myminion cloud.list_locations my-cloud-provider
query
~~~~~
This command is used to query all configured cloud providers, and display all
instances associated with those accounts. By default, it will run a standard
query, returning the following fields:
``id``
The name or ID of the instance, as used by the cloud provider.
``image``
The disk image that was used to create this instance.
``private_ips``
    Any private IP addresses currently assigned to this instance.
``public_ips``
    Any public IP addresses currently assigned to this instance.
``size``
The size of the instance; can refer to RAM, CPU(s), disk space, etc.,
depending on the cloud provider.
``state``
The running state of the instance; for example, ``running``, ``stopped``,
``pending``, etc. This state is dependent upon the provider.
This command may also be used to perform a full query or a select query, as
described below. The following usages are available:
.. code-block:: bash
salt myminion cloud.query
salt myminion cloud.query list_nodes
salt myminion cloud.query list_nodes_full
full_query
~~~~~~~~~~
This command behaves like the ``query`` command, but lists all information
concerning each instance as provided by the cloud provider, in addition to the
fields returned by the ``query`` command.
.. code-block:: bash
salt myminion cloud.full_query
select_query
~~~~~~~~~~~~
This command behaves like the ``query`` command, but only returns select
fields as defined in the ``/etc/salt/cloud`` configuration file. A sample
configuration for this section of the file might look like:
.. code-block:: yaml
query.selection:
- id
- key_name
This configuration would only return the ``id`` and ``key_name`` fields, for
those cloud providers that support those two fields. This would be called using
the following command:
.. code-block:: bash
salt myminion cloud.select_query
profile
~~~~~~~
This command is used to create an instance using a profile that is configured
on the target minion. Please note that the profile must be configured before
this command can be used with it.
.. code-block:: bash
salt myminion cloud.profile ec2-centos64-x64 my-new-instance
Please note that the execution module does *not* run in parallel mode. Using
multiple minions to create instances can effectively perform parallel instance
creation.
create
~~~~~~
This command is similar to the ``profile`` command, in that it is used to create
a new instance. However, it does not require a profile to be pre-configured.
Instead, all of the options that are normally configured in a profile are passed
directly to Salt Cloud to create the instance:
.. code-block:: bash
salt myminion cloud.create my-ec2-config my-new-instance \
image=ami-1624987f size='t1.micro' ssh_username=ec2-user \
securitygroup=default delvol_on_destroy=True
Please note that the execution module does *not* run in parallel mode. Using
multiple minions to create instances can effectively perform parallel instance
creation.
destroy
~~~~~~~
This command is used to destroy an instance or instances. This command will
search all configured providers and remove any instance(s) which matches the
name(s) passed in here. The results of this command are *non-reversible* and
should be used with caution.
.. code-block:: bash
salt myminion cloud.destroy myinstance
salt myminion cloud.destroy myinstance1,myinstance2
action
~~~~~~
This command implements both the ``action`` and the ``function`` commands
used in the standard ``salt-cloud`` command. If one of the standard ``action``
commands is used, an instance name must be provided. If one of the standard
``function`` commands is used, a provider configuration must be named.
.. code-block:: bash
salt myminion cloud.action start instance=myinstance
salt myminion cloud.action show_image provider=my-ec2-config \
image=ami-1624987f
The actions available are largely dependent upon the module for the specific
cloud provider. The following actions are available for all cloud providers:
``list_nodes``
This is a direct call to the ``query`` function as described above, but is
only performed against a single cloud provider. A provider configuration
must be included.
``list_nodes_full``
This is a direct call to the ``full_query`` function as described above, but
is only performed against a single cloud provider. A provider configuration
must be included.
``list_nodes_select``
This is a direct call to the ``select_query`` function as described above,
but is only performed against a single cloud provider. A provider
configuration must be included.
``show_instance``
This is a thin wrapper around ``list_nodes``, which returns the full
information about a single instance. An instance name must be provided.
State Module
------------
A subset of the execution module is available through the ``cloud`` state
module. Not all functions are currently included, because there is
insufficient code for them to perform statefully. For example, a command to
create an instance may be issued with a series of options, but those options
cannot currently be statefully managed. Additional states to manage these
options will be released at a later time.
cloud.present
~~~~~~~~~~~~~
This state will ensure that an instance is present inside a particular cloud
provider. Any option that is normally specified in the ``cloud.create``
execution module and function may be declared here, but only the actual
presence of the instance will be managed statefully.
.. code-block:: yaml
my-instance-name:
cloud.present:
- cloud_provider: my-ec2-config
- image: ami-1624987f
- size: 't1.micro'
- ssh_username: ec2-user
- securitygroup: default
- delvol_on_destroy: True
cloud.profile
~~~~~~~~~~~~~
This state will ensure that an instance is present inside a particular cloud
provider. This function calls the ``cloud.profile`` execution module and
function, but as with ``cloud.present``, only the actual presence of the
instance will be managed statefully.
.. code-block:: yaml
my-instance-name:
cloud.profile:
- profile: ec2-centos64-x64
cloud.absent
~~~~~~~~~~~~
This state will ensure that an instance (identified by name) does not exist in
any of the cloud providers configured on the target minion. Please note that
this state is *non-reversible* and may be considered especially destructive when
issued as a cloud state.
.. code-block:: yaml
my-instance-name:
cloud.absent
Runner Module
-------------
The ``cloud`` runner module is executed on the master, and performs actions
using the configuration and Salt modules on the master itself. This means that
any public minion keys will also be properly accepted by the master.
Using the functions in the runner module is no different than using those in
the execution module, outside of the behavior described in the above paragraph.
The following functions are available inside the runner:
- list_images
- list_sizes
- list_locations
- query
- full_query
- select_query
- profile
- destroy
- action
Outside of the standard usage of ``salt-run`` itself, commands are executed as
usual:
.. code-block:: bash
salt-run cloud.profile ec2-centos64-x86_64 my-instance-name
CloudClient
-----------
The execution, state, and runner modules ultimately all use the CloudClient
library that ships with Salt. To use the CloudClient library locally (either on
the master or a minion), create a client object and issue a command against it:
.. code-block:: python
import salt.cloud
import pprint
client = salt.cloud.CloudClient("/etc/salt/cloud")
nodes = client.query()
pprint.pprint(nodes)
Reactor
-------
Examples of using the reactor with Salt Cloud are available in the
:formula_url:`ec2-autoscale-reactor <ec2-autoscale-reactor>` and
:formula_url:`salt-cloud-reactor <salt-cloud-reactor>` formulas.
|
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/cloud/salt.rst
| 0.903784 | 0.686817 |
salt.rst
|
pypi
|
.. _salt-cloud-feature-matrix:
==============
Feature Matrix
==============
A number of features are available in most cloud hosts, but not all are
available everywhere. This may be because the feature isn't supported by the
cloud host itself, or it may only be that the feature has not yet been
added to Salt Cloud. In a handful of cases, it is because the feature does not
make sense for a particular cloud provider (Saltify, for instance).
This matrix shows which features are available in which cloud hosts, as far
as Salt Cloud is concerned. This is not a comprehensive list of all features
available in all cloud hosts, and should not be used to make business
decisions concerning choosing a cloud host. In most cases, adding support
for a feature to Salt Cloud requires only a little effort.
Legacy Drivers
==============
Both AWS and Rackspace are listed as "Legacy". This is because those drivers
have been replaced by other drivers, which are generally the preferred method
for working with those hosts.
The EC2 driver should be used instead of the AWS driver, when possible. The
OpenStack driver should be used instead of the Rackspace driver, unless the user
is dealing with instances in "the old cloud" in Rackspace.
Note for Developers
===================
When adding new features to a particular cloud host, please make sure to
add the feature to this table. Additionally, if you notice a feature that is not
properly listed here, pull requests to fix it are appreciated.
Standard Features
=================
These are features that are available for almost every cloud host.
.. container:: scrollable
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
| |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Vagrant|Softlayer|Softlayer|Aliyun|Tencent|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | | | |Hardware | |Cloud |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=======+=========+=========+======+=======+
|Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
[1] Yes, if salt-api is enabled.
[2] Always returns `{}`.
Actions
=======
These are features that are performed on a specific instance, and require an
instance name to be passed in. For example:
.. code-block:: bash
# salt-cloud -a attach_volume ami.example.com
.. container:: scrollable
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|Tencent|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |Cloud |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+=======+
|attach_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|create_attach_volumes |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|del_tags |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|delvol_on_destroy | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|detach_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|disable_term_protect |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|enable_term_protect |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_tags |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|keepvol_on_destroy | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_keypairs | | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|rename |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|set_tags |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_delvol_on_destroy | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_term_protect | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|take_action | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
Functions
=========
These are features that are performed against a specific cloud provider, and
require the name of the provider to be passed in. For example:
.. code-block:: bash
# salt-cloud -f list_images my_digitalocean
.. container:: scrollable
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|Tencent|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |Cloud |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+=======+
|block_device_mappings |Yes | | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|create_keypair | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|create_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|delete_key | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|delete_keypair | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|delete_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_image | | |Yes | | |Yes | | |Yes | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_ip | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_key | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_keyid | | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_keypair | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_networkid | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_node | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_password | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_size | | |Yes | | |Yes | | | | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_spot_config | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_subnetid | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|iam_profile |Yes | | |Yes| | | | | | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|import_key | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|key_list | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|keyname |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_availability_zones| | | |Yes| | | | | | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_custom_images | | | | | | | | | | | |Yes | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_keys | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_vlans | | | | | | | | | | | |Yes |Yes | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|rackconnect | | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|reboot | | | |Yes| |Yes | | | | |[1] | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|reformat_node | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|securitygroup |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|securitygroupid | | | |Yes| | | | | | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_image | | | |Yes| | | | |Yes | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_key | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_keypair | | |Yes |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
[1] Yes, if salt-api is enabled.
.. _grains:
======
Grains
======
Salt comes with an interface to derive information about the underlying system.
This is called the grains interface, because it presents salt with grains of
information. Grains are collected for the operating system, domain name,
IP address, kernel, OS type, memory, and many other system properties.
The grains interface is made available to Salt modules and components so that
the right salt minion commands are automatically available on the right
systems.
Grain data is relatively static, though if system information changes
(for example, if network settings are changed), or if a new value is assigned
to a custom grain, grain data is refreshed.
.. note::
Grains resolve to lowercase letters. For example, ``FOO`` and ``foo``
target the same grain.
Listing Grains
==============
Available grains can be listed by using the ``grains.ls`` function:
.. code-block:: bash
salt '*' grains.ls
Grains data can be listed by using the ``grains.items`` function:
.. code-block:: bash
salt '*' grains.items
.. _static-custom-grains:
Using grains in a state
=======================
To use a grain in a state, you can access it via ``{{ grains['key'] }}``.
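For example, a minimal sketch (the ``os_family`` values and package names here
are only illustrative):

.. code-block:: jinja

    {% if grains['os_family'] == 'Debian' %}
    apache:
      pkg.installed:
        - name: apache2
    {% else %}
    apache:
      pkg.installed:
        - name: httpd
    {% endif %}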
Grains in the Minion Config
===========================
Grains can also be statically assigned within the minion configuration file.
Just add the option :conf_minion:`grains` and pass options to it:
.. code-block:: yaml

    grains:
      roles:
        - webserver
        - memcache
      deployment: datacenter4
      cabinet: 13
      cab_u: 14-15
Then status data specific to your servers can be retrieved via Salt, or used
inside of the State system for matching. It also makes it possible to target based on specific data about your deployment, as in the example above.
Grains in /etc/salt/grains
==========================
If you do not want to place your custom static grains in the minion config
file, you can also put them in ``/etc/salt/grains`` on the minion. They are configured in the
same way as in the above example, only without a top-level ``grains:`` key:
.. code-block:: yaml

    roles:
      - webserver
      - memcache
    deployment: datacenter4
    cabinet: 13
    cab_u: 14-15
.. note::
Grains in ``/etc/salt/grains`` are ignored if you specify the same grains in the minion config.
.. note::
Grains are static, and since they are not often changed, they will need a grains refresh when they are updated. You can do this by calling: ``salt minion saltutil.refresh_modules``
.. note::
You can equally configure static grains for Proxy Minions.
As multiple Proxy Minion processes can run on the same machine, you need
to index the files using the Minion ID, under ``/etc/salt/proxy.d/<minion ID>/grains``.
For example, the grains for the Proxy Minion ``router1`` can be defined
under ``/etc/salt/proxy.d/router1/grains``, while the grains for the
Proxy Minion ``switch7`` can be put in ``/etc/salt/proxy.d/switch7/grains``.
Matching Grains in the Top File
===============================
With correctly configured grains on the Minion, the :term:`top file <Top File>` used in
Pillar or during Highstate can be made very efficient. For example, consider
the following configuration:
.. code-block:: yaml

    'roles:webserver':
      - match: grain
      - state0

    'roles:memcache':
      - match: grain
      - state1
      - state2
For this example to work, you would need to have defined the grain
``roles`` for the minions you wish to match.
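With such a grain in place, the same match also works ad hoc from the CLI
using the ``-G`` option, for example:

.. code-block:: bash

    salt -G 'roles:webserver' test.ping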
.. _writing-grains:
Writing Grains
==============
.. include:: ../../_incl/grains_passwords.rst
The grains are derived by executing all of the "public" functions (i.e. those
which do not begin with an underscore) found in the modules located in
Salt's core grains code, followed by those in any custom grains modules. The
functions in a grains module must return a :ref:`Python dictionary
<python:typesmapping>`, where the dictionary keys are the names of grains, and
each key's value is the value for that grain.
Custom grains modules should be placed in a subdirectory named ``_grains``
located under the :conf_master:`file_roots` specified by the master config
file. The default path would be ``/srv/salt/_grains``. Custom grains modules
will be distributed to the minions when :mod:`state.highstate
<salt.modules.state.highstate>` is run, or by executing the
:mod:`saltutil.sync_grains <salt.modules.saltutil.sync_grains>` or
:mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` functions.
Grains modules are easy to write, and (as noted above) only need to return a
dictionary. For example:
.. code-block:: python

    def yourfunction():
        # initialize a grains dictionary
        grains = {}
        # Some code for logic that sets grains like
        grains["yourcustomgrain"] = True
        grains["anothergrain"] = "somevalue"
        return grains
The name of the function does not matter and will not factor into the grains
data at all; only the keys/values returned become part of the grains.
When to Use a Custom Grain
--------------------------
Before adding new grains, consider what the data is and remember that grains
should (for the most part) be static data.
If the data is something that is likely to change, consider using :ref:`Pillar
<pillar>` or an execution module instead. If it's a simple set of
key/value pairs, pillar is a good match. If compiling the information requires
that system commands be run, then putting this information in an execution
module is likely a better idea.
Good candidates for grains are data that is useful for targeting minions in the
:ref:`top file <states-top>` or the Salt CLI. The name and data structure of
the grain should be designed to support many platforms, operating systems or
applications. Also, keep in mind that Jinja templating in Salt supports
referencing pillar data as well as invoking functions from execution modules,
so there's no need to place information in grains to make it available to Jinja
templates. For example:
.. code-block:: text
...
...
{{ salt['module.function_name']('argument_1', 'argument_2') }}
{{ pillar['my_pillar_key'] }}
...
...
.. warning::
Custom grains will not be available in the top file until after the first
:ref:`highstate <running-highstate>`. To make custom grains available on a
minion's first highstate, it is recommended to use :ref:`this example
<minion-start-reactor>` to ensure that the custom grains are synced when
the minion starts.
Loading Custom Grains
---------------------
If you have multiple functions specifying grains that are called from a ``main``
function, be sure to prepend grain function names with an underscore. This prevents
Salt from including the loaded grains from the grain functions in the final
grain data structure. For example, consider this custom grain file:
.. code-block:: python

    #!/usr/bin/env python
    def _my_custom_grain():
        my_grain = {"foo": "bar", "hello": "world"}
        return my_grain


    def main():
        # initialize a grains dictionary
        grains = {}
        grains["my_grains"] = _my_custom_grain()
        return grains
The output of this example renders like so:
.. code-block:: console

    # salt-call --local grains.items
    local:
        ----------
        <Snipped for brevity>
        my_grains:
            ----------
            foo:
                bar
            hello:
                world
However, if you don't prepend the ``my_custom_grain`` function with an underscore,
the function will be rendered twice by Salt in the items output: once for the
``my_custom_grain`` call itself, and again when it is called in the ``main``
function:
.. code-block:: console

    # salt-call --local grains.items
    local:
        ----------
        <Snipped for brevity>
        foo:
            bar
        <Snipped for brevity>
        hello:
            world
        <Snipped for brevity>
        my_grains:
            ----------
            foo:
                bar
            hello:
                world
Precedence
==========
Core grains can be overridden by custom grains. As there are several ways of
defining custom grains, there is an order of precedence which should be kept in
mind when defining them. The order of evaluation is as follows:
1. Core grains.
2. Custom grains in ``/etc/salt/grains``.
3. Custom grains in ``/etc/salt/minion``.
4. Custom grain modules in ``_grains`` directory, synced to minions.
Each successive evaluation overrides the previous ones: grains defined in
``/etc/salt/grains`` override core grains of the same name, grains in
``/etc/salt/minion`` override both of those, and custom grain modules synced
to the ``_grains`` directory override *any* grains of the same name.
For custom grains, if the function takes an argument ``grains``, then the
previously rendered grains will be passed in. Because the rest of the grains
could be rendered in any order, the only grains that can be relied upon to be
passed in are ``core`` grains. This was added in the 2019.2.0 release.
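A minimal sketch of such a function (the grain name and values here are only
illustrative):

.. code-block:: python

    def package_manager(grains):
        # ``grains`` receives the previously rendered core grains
        if grains.get("os_family") == "Debian":
            return {"pkg_manager": "apt"}
        return {"pkg_manager": "unknown"}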
Examples of Grains
==================
The core module in the grains package is where the main grains are loaded by
the Salt minion and provides the principal example of how to write grains:
:blob:`salt/grains/core.py`
Syncing Grains
==============
Syncing grains can be done a number of ways. They are automatically synced when
:mod:`state.highstate <salt.modules.state.highstate>` is called, or (as noted
above) the grains can be manually synced and reloaded by calling the
:mod:`saltutil.sync_grains <salt.modules.saltutil.sync_grains>` or
:mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` functions.
.. note::
When the :conf_minion:`grains_cache` is set to False, the grains dictionary is built
and stored in memory on the minion. Every time the minion restarts or
``saltutil.refresh_grains`` is run, the grain dictionary is rebuilt from scratch.
.. _syndic:
===========
Salt Syndic
===========
The most basic or typical Salt topology consists of a single Master node
controlling a group of Minion nodes. An intermediate node type, called Syndic,
when used, offers greater structural flexibility and scalability in the
construction of Salt topologies than topologies constructed only out of Master
and Minion node types.
A Syndic node can be thought of as a special passthrough Minion node. A Syndic
node consists of a ``salt-syndic`` daemon and a ``salt-master`` daemon running
on the same system. The ``salt-master`` daemon running on the Syndic node
controls a group of lower level Minion nodes, and the ``salt-syndic`` daemon
connects to a higher level Master node, sometimes called a Master of Masters.
The ``salt-syndic`` daemon relays publications and events between the Master
node and the local ``salt-master`` daemon. This gives the Master node control
over the Minion nodes attached to the ``salt-master`` daemon running on the
Syndic node.
Configuring the Syndic
======================
To setup a Salt Syndic you need to tell the Syndic node and its Master node
about each other. If your Master node is located at ``10.10.0.1``, then your
configurations would be:
On the Syndic node:
.. code-block:: yaml
# /etc/salt/master
syndic_master: 10.10.0.1 # may be either an IP address or a hostname
.. code-block:: yaml
# /etc/salt/minion
# id is shared by the salt-syndic daemon and a possible salt-minion daemon
# on the Syndic node
id: my_syndic
On the Master node:
.. code-block:: yaml
# /etc/salt/master
order_masters: True
The :conf_master:`syndic_master` option tells the Syndic node where to find the
Master node in the same way that the :conf_minion:`master` option tells a
Minion node where to find a Master node.
The :conf_minion:`id` option is used by the ``salt-syndic`` daemon to identify
with the Master node and if unset will default to the hostname or IP address of
the Syndic just as with a Minion.
The :conf_master:`order_masters` option configures the Master node to send
extra information with its publications that is needed by Syndic nodes
connected directly to it.
.. warning::
The syndic process must be run as the same user as the syndic master.
.. note::
Each Syndic must provide its own ``file_roots`` directory. Files will not
be automatically transferred from the Master node.
Configuring the Syndic with Multimaster
=======================================
.. versionadded:: 2015.5.0
Syndic with Multimaster lets you connect a syndic to multiple masters to provide
an additional layer of redundancy in a syndic configuration.
Higher level masters should first be configured in a multimaster configuration.
See :ref:`Multimaster Tutorial <tutorial-multi-master>`.
On the syndic, the :conf_master:`syndic_master` option is populated with
a list of the higher level masters.
Since each syndic is connected to each master, jobs sent from any master are
forwarded to minions that are connected to each syndic. If the ``master_id`` value
is set in the master config on the higher level masters, job results are returned
to the master that originated the request in a best effort fashion. Events/jobs
without a ``master_id`` are returned to any available master.
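For example, with two higher level masters (the addresses are illustrative):

.. code-block:: yaml

    # /etc/salt/master on the Syndic node
    syndic_master:
      - 10.10.0.1
      - 10.10.0.2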
Running the Syndic
==================
The ``salt-syndic`` daemon is a separate process that needs to be started in
addition to the ``salt-master`` daemon running on the Syndic node. Starting
the ``salt-syndic`` daemon is the same as starting the other Salt daemons.
The Master node in many ways sees the Syndic as an ordinary Minion node. In
particular, the Master will need to accept the Syndic's Minion key as it would
for any other Minion.
On the Syndic node:
.. code-block:: bash
# salt-syndic
or
# service salt-syndic start
On the Master node:
.. code-block:: bash
# salt-key -a my_syndic
The Master node will now be able to control the Minion nodes connected to the
Syndic. Only the Syndic key will be listed in the Master node's key registry
but this also means that key activity between the Syndic's Minions and the
Syndic does not encumber the Master node. In this way, the Syndic's key on the
Master node can be thought of as a placeholder for the keys of all the Minion
and Syndic nodes beneath it, giving the Master node a clear, high level
structural view on the Salt cluster.
On the Master node:
.. code-block:: bash

    # salt-key -L
    Accepted Keys:
    my_syndic
    Denied Keys:
    Unaccepted Keys:
    Rejected Keys:

    # salt '*' test.version
    minion_1:
        2018.3.4
    minion_2:
        2018.3.4
    minion_4:
        2018.3.4
    minion_3:
        2018.3.4
Topology
========
A Master node (a node which is itself not a Syndic to another higher level
Master node) must run a ``salt-master`` daemon and optionally a ``salt-minion``
daemon.
A Syndic node must run ``salt-syndic`` and ``salt-master`` daemons and
optionally a ``salt-minion`` daemon.
A Minion node must run a ``salt-minion`` daemon.
When a ``salt-master`` daemon issues a command, it will be received by the
Syndic and Minion nodes directly connected to it. A Minion node will process
the command in the way it ordinarily would. On a Syndic node, the
``salt-syndic`` daemon will relay the command to the ``salt-master`` daemon
running on the Syndic node, which then propagates the command to the Minions
and Syndics connected to it.
When events and job return data are generated by ``salt-minion`` daemons, they
are aggregated by the ``salt-master`` daemon they are connected to, which then
relays the data back through its ``salt-syndic`` daemon until the data reaches
the Master or Syndic node that issued the command.
Syndic wait
===========
``syndic_wait`` is a master configuration file setting that specifies the number of
seconds the Salt client should wait for additional syndics to check in with their
lists of expected minions before giving up. This value defaults to ``5`` seconds.
The ``syndic_wait`` setting is necessary because the higher-level master does not
have a way of knowing which minions are below the syndics. The higher-level master
has its own list of expected minions and the masters below them have their own lists
as well, so the Salt client does not know how long to wait for all returns. The
``syndic_wait`` option allows time for all minions to return to the Salt client.
.. note::
To reduce the amount of time the CLI waits for Minions to respond, install
a Minion on the Syndic or tune the value of the ``syndic_wait``
configuration.
While it is possible to run a Syndic without a Minion installed on the same
system, it is recommended, for a faster CLI response time, to do so. Without a
Minion installed on the Syndic node, the timeout value of ``syndic_wait``
increases significantly - about three-fold. With a Minion installed on the
Syndic, the CLI timeout resides at the value defined in ``syndic_wait``.
.. note::
If you have a very large infrastructure or many layers of Syndics, you may
find that the CLI doesn't wait long enough for the Syndics to return their
events. If you think this is the case, you can set the
:conf_master:`syndic_wait` value in the Master configs on the Master or
Syndic nodes from which commands are executed. The default value is ``5``,
and should work for the majority of deployments.
In order for a Master or Syndic node to return information from Minions that
are below their Syndics, the CLI requires a short wait time in order to allow
the Syndics to gather responses from their Minions. This value is defined in
the :conf_master:`syndic_wait` config option and has a default of five seconds.
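For example, to give Syndics more time to gather returns (the value is
illustrative and should be tuned to your topology):

.. code-block:: yaml

    # /etc/salt/master on the node from which commands are executed
    syndic_wait: 10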
Syndic config options
=====================
These are the options that can be used to configure a Syndic node. Note that
other than ``id``, Syndic config options are placed in the Master config on the
Syndic node.
- :conf_minion:`id`: Syndic id (shared by the ``salt-syndic`` daemon with a
potential ``salt-minion`` daemon on the same system)
- :conf_master:`syndic_master`: Master node IP address or hostname
- :conf_master:`syndic_master_port`: Master node ret_port
- :conf_master:`syndic_log_file`: path to the logfile (absolute or not)
- :conf_master:`syndic_pidfile`: path to the pidfile (absolute or not)
- :conf_master:`syndic_wait`: time in seconds to wait on returns from this syndic
Minion Data Cache
=================
Beginning with Salt 2016.11.0, the :ref:`Pluggable Minion Data Cache <pluggable-data-cache>`
was introduced. The minion data cache contains the Salt Mine data, minion grains, and minion
pillar information cached on the Salt Master. By default, Salt uses the ``localfs`` cache
module, but other external data stores can be used instead.
Using pluggable minion data cache modules allows the data stored on a Salt Master
about Salt Minions to be replicated to other Salt Masters the Minion is connected to. Please see
the :ref:`Minion Data Cache <cache>` documentation for more information and configuration
examples.
.. _tutorial-macos-walk-through:
======================================================================
The macOS (Maverick) Developer Step By Step Guide To Salt Installation
======================================================================
This document provides a step-by-step guide to installing a Salt cluster
consisting of one master, and one minion running on a local VM hosted on macOS.
.. note::
This guide is aimed at developers who wish to run Salt in a virtual machine.
The official (Linux) walkthrough can be found
`here <https://docs.saltproject.io/topics/tutorials/walkthrough.html>`_.
The 5 Cent Salt Intro
=====================
Since you're here you've probably already heard about Salt, so you already
know Salt lets you configure and run commands on hordes of servers easily.
Here's a brief overview of a Salt cluster:
- Salt works by having a "master" server sending commands to one or multiple
"minion" servers. The master server is the "command center". It is
going to be the place where you store your configuration files, aka: "which
server is the db, which is the web server, and what libraries and software
they should have installed". The minions receive orders from the master.
Minions are the servers actually performing work for your business.
- Salt has two types of configuration files:
1. the "salt communication channels" or "meta" or "config" configuration
files (not official names): one for the master (usually /etc/salt/master,
**on the master server**), and one for minions (default is
/etc/salt/minion or /etc/salt/minion.conf, **on the minion servers**). Those
files are used to determine things like the Salt Master IP, port, Salt
folder locations, etc. If these are configured incorrectly, your minions
will probably be unable to receive orders from the master, or the master
will not know which software a given minion should install.
2. the "business" or "service" configuration files (once again, not an
official name): these are configuration files, ending with ".sls" extension,
that describe which software should run on which server, along with
particular configuration properties for the software that is being
installed. These files should be created in the /srv/salt folder by default,
but their location can be changed using ... the /etc/salt/master configuration file!
.. note::
This tutorial contains a third important configuration file, not to
be confused with the previous two: the virtual machine provisioning
configuration file. This in itself is not specifically tied to Salt, but
it also contains some Salt configuration. More on that in step 3. Also
note that all configuration files are YAML files. So indentation matters.
.. note::
Salt also works with "masterless" configuration where a minion is
autonomous (in which case salt can be seen as a local configuration tool),
or in "multiple master" configuration. See the documentation for more on
that.
Before Digging In, The Architecture Of The Salt Cluster
-------------------------------------------------------
Salt Master
***********
The "Salt master" server is going to be the Mac OS machine, directly. Commands
will be run from a terminal app, so Salt will need to be installed on the Mac.
This is going to be more convenient for toying around with configuration files.
Salt Minion
***********
We'll only have one "Salt minion" server. It is going to be running on a
Virtual Machine running on the Mac, using VirtualBox. It will run an Ubuntu
distribution.
Step 1 - Configuring The Salt Master On Your Mac
================================================
See the `Salt install guide <https://docs.saltproject.io/salt/install-guide/en/latest/>`_
for macOS installation instructions.
Because Salt has a lot of dependencies that are not built into macOS, we will use
Homebrew to install Salt. Homebrew is a package manager for Mac; it's great, use
it (for this tutorial at least!). Some people spend a lot of time installing
libs by hand to better understand dependencies, and then realize how useful a
package manager is once they're configuring a brand new machine and have to do
it all over again. It also lets you *uninstall* things easily.
.. note::
Brew is a Ruby program (Ruby is installed by default with your Mac). Brew
downloads, compiles, and links software. The linking phase is when compiled
software is deployed on your machine. It may conflict with manually
installed software, especially in the /usr/local directory. It's ok,
remove the manually installed version then refresh the link by typing
``brew link 'packageName'``. Brew has a ``brew doctor`` command that can
help you troubleshoot. It's a great command, use it often. Brew requires
xcode command line tools. When you run brew the first time it asks you to
install them if they're not already on your system. Brew installs
software in /usr/local/bin (system bins are in /usr/bin). In order to use
those bins you need your $PATH to search there first. Brew tells you if
your $PATH needs to be fixed.
.. tip::
Use the keyboard shortcut ``cmd + shift + period`` in the "open" macOS
dialog box to display hidden files and folders, such as .profile.
Install Homebrew
----------------
Install Homebrew here https://brew.sh/
Or just type
.. code-block:: bash
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
Now type the following commands in your terminal (you may want to type ``brew
doctor`` after each to make sure everything's fine):
.. code-block:: bash
brew install python
brew install swig
brew install zmq
.. note::
zmq is ZeroMQ. It's a fantastic library used for server to server network
communication and is at the core of Salt efficiency.
Install Salt
------------
You should now have everything ready to launch this command:
.. code-block:: bash
pip install salt
.. note::
There should be no need for ``sudo pip install salt``. Brew installed
Python for your user, so you should have all the access. In case you
would like to check, type ``which python`` to ensure that it's
/usr/local/bin/python, and ``which pip`` which should be
/usr/local/bin/pip.
Now type ``python`` in a terminal then, ``import salt``. There should be no
errors. Now exit the Python terminal using ``exit()``.
Create The Master Configuration
-------------------------------
If the default /etc/salt/master configuration file was not created,
copy-paste it from here:
https://docs.saltproject.io/en/latest/ref/configuration/examples.html#configuration-examples-master
.. note::
``/etc/salt/master`` is a file, not a folder.
Salt Master configuration changes: the Salt master needs a few customizations
to be able to run on macOS:
.. code-block:: bash
sudo launchctl limit maxfiles 4096 8192
In the /etc/salt/master file, change max_open_files to 8192 (or just add the
line ``max_open_files: 8192`` (no quotes) if it doesn't already exist).
You should now be able to launch the Salt master:
.. code-block:: bash
sudo salt-master --log-level=all
There should be no errors when running the above command.
.. note::
This command is supposed to be a daemon, but for toying around, we'll keep
it running on a terminal to monitor the activity.
Now that the master is set, let's configure a minion on a VM.
Step 2 - Configuring The Minion VM
==================================
The Salt minion is going to run on a Virtual Machine. There are a lot of
software options that let you run virtual machines on a Mac, but for this
tutorial we're going to use VirtualBox. In addition to VirtualBox, we will use
Vagrant, which allows you to create the base VM configuration.
Vagrant lets you build ready to use VM images, starting from an OS image and
customizing it using "provisioners". In our case, we'll use it to:
* Download the base Ubuntu image
* Install salt on that Ubuntu image (Salt is going to be the "provisioner"
for the VM).
* Launch the VM
* SSH into the VM to debug
* Stop the VM once you're done.
Install VirtualBox
------------------
Go get it here: https://www.virtualbox.org/wiki/Downloads (click on VirtualBox
for macOS hosts => x86/amd64)
Install Vagrant
---------------
Go get it here: https://www.vagrantup.com/downloads.html and choose the latest version
(1.3.5 at time of writing), then the .dmg file. Double-click to install it.
Make sure the ``vagrant`` command is found when run in the terminal. Type
``vagrant``. It should display a list of commands.
Create The Minion VM Folder
---------------------------
Create a folder in which you will store your minion's VM. In this tutorial,
it's going to be a minion folder in the $home directory.
.. code-block:: bash
cd $home
mkdir minion
Initialize Vagrant
------------------
From the minion folder, type
.. code-block:: bash
vagrant init
This command creates a default Vagrantfile configuration file. This
configuration file will be used to pass configuration parameters to the Salt
provisioner in Step 3.
Import Precise64 Ubuntu Box
---------------------------
.. code-block:: bash
vagrant box add precise64 http://files.vagrantup.com/precise64.box
.. note::
This box is added at the global Vagrant level. You only need to do it
once as each VM will use this same file.
Modify the Vagrantfile
----------------------
Modify ./minion/Vagrantfile to use the precise64 box. Change the ``config.vm.box``
line to:
.. code-block:: ruby

    config.vm.box = "precise64"
Uncomment the line creating a host-only IP. This is the ip of your minion
(you can change it to something else if that IP is already in use):
.. code-block:: ruby

    config.vm.network :private_network, ip: "192.168.33.10"
At this point you should have a VM that can run, although there won't be much
in it. Let's check that.
Checking The VM
---------------
From the $home/minion folder type:
.. code-block:: bash
vagrant up
A log showing the VM booting should be present. Once it's done you'll be back
to the terminal:
.. code-block:: bash
ping 192.168.33.10
The VM should respond to your ping request.
Now log into the VM in ssh using Vagrant again:
.. code-block:: bash
vagrant ssh
You should see the shell prompt change to something similar to
``vagrant@precise64:~$`` meaning you're inside the VM. From there, enter the
following:
.. code-block:: bash
ping 10.0.2.2
.. note::
That IP is the IP of your VM host (the macOS host). The number is a
VirtualBox default and is displayed in the log after the ``vagrant ssh``
command. We'll use that IP to tell the minion where the Salt master is.
Once you're done, end the ssh session by typing ``exit``.
It's now time to connect the VM to the Salt master.
Step 3 - Connecting Master and Minion
=====================================
Creating The Minion Configuration File
--------------------------------------
Create the ``/etc/salt/minion`` file. In that file, put the
following lines, giving the ID for this minion, and the IP of the master:
.. code-block:: yaml
master: 10.0.2.2
id: 'minion1'
file_client: remote
Minions authenticate with the master using keys. Keys are generated
automatically if you don't provide them, and you can accept them on the master later on. However,
this requires accepting the minion key every time the minion is destroyed or
created (which could be quite often). A better way is to create those keys in
advance, feed them to the minion, and authorize them once.
Preseed minion keys
-------------------
From the minion folder on your Mac run:
.. code-block:: bash
sudo salt-key --gen-keys=minion1
This should create two files: minion1.pem, and minion1.pub.
Since those files have been created using sudo, but will be used by vagrant,
you need to change ownership:
.. code-block:: bash
sudo chown youruser:yourgroup minion1.pem
sudo chown youruser:yourgroup minion1.pub
Then copy the .pub file into the list of accepted minions:
.. code-block:: bash
sudo cp minion1.pub /etc/salt/pki/master/minions/minion1
Modify Vagrantfile to Use Salt Provisioner
------------------------------------------
Let's now modify the Vagrantfile used to provision the Salt VM. Add the
following section in the Vagrantfile (note: it should be at the same
indentation level as the other properties):
.. code-block:: ruby

    # salt-vagrant config
    config.vm.provision :salt do |salt|
      salt.run_highstate = true
      salt.minion_config = "/etc/salt/minion"
      salt.minion_key = "./minion1.pem"
      salt.minion_pub = "./minion1.pub"
    end
Now destroy the vm and recreate it from the /minion folder:
.. code-block:: bash
vagrant destroy
vagrant up
If everything is fine you should see the following message:
.. code-block:: bash
"Bootstrapping Salt... (this may take a while)
Salt successfully configured and installed!"
Checking Master-Minion Communication
------------------------------------
To make sure the master and minion are talking to each other, enter the
following:
.. code-block:: bash
sudo salt '*' test.version
You should see your minion answering with its salt version. It's now time to do some
configuration.
Step 4 - Configure Services to Install On the Minion
====================================================
In this step we'll use the Salt master to instruct our minion to install
Nginx.
Checking the system's original state
------------------------------------
First, make sure that an HTTP server is not installed on our minion.
When opening a browser directed at ``http://192.168.33.10/``, you should get an
error saying the site cannot be reached.
Initialize the top.sls file
---------------------------
System configuration is done in ``/srv/salt/top.sls`` (and subfiles/folders),
and then applied by running the :py:func:`state.apply
<salt.modules.state.apply_>` function to have the Salt master order its minions
to update their instructions and run the associated commands.
First Create an empty file on your Salt master (macOS machine):
.. code-block:: bash
touch /srv/salt/top.sls
When the file is empty, or if no configuration is found for our minion,
an error is reported:
.. code-block:: bash
sudo salt 'minion1' state.apply
This should return an error stating: **No Top file or external nodes data
matches found**.
Create The Nginx Configuration
------------------------------
Now is finally the time to enter the real meat of our server's configuration.
For this tutorial our minion will be treated as a web server that needs to
have Nginx installed.
Insert the following lines into ``/srv/salt/top.sls`` (which should currently be
empty):
.. code-block:: yaml

    base:
      'minion1':
        - bin.nginx
Now create ``/srv/salt/bin/nginx.sls`` containing the following:
.. code-block:: yaml

    nginx:
      pkg.installed:
        - name: nginx
      service.running:
        - enable: True
        - reload: True
Check Minion State
------------------
Finally, run the :py:func:`state.apply <salt.modules.state.apply_>` function
again:
.. code-block:: bash
sudo salt 'minion1' state.apply
You should see a log showing that the Nginx package has been installed
and the service configured. To prove it, open your browser and navigate to
http://192.168.33.10/, you should see the standard Nginx welcome page.
Congratulations!
Where To Go From Here
=====================
A full description of configuration management within Salt (sls files among
other things) is available here:
https://docs.saltproject.io/en/latest/index.html#configuration-management
.. _tutorial-salt-at-scale:
===================
Using Salt at scale
===================
The focus of this tutorial will be building a Salt infrastructure for handling
large numbers of minions. This will include tuning, topology, and best practices.
For how to install the Salt Master, see the
`Salt install guide <https://docs.saltproject.io/salt/install-guide/en/latest/>`_.
.. note::
This tutorial is intended for large installations. These same settings
won't hurt smaller installations, but they may not be worth the added complexity.
When used with minions, the term 'many' refers to at least a thousand
and 'a few' always means 500.
For simplicity, this tutorial will default to the standard ports
used by Salt.
The Master
==========
The most common problems on the Salt Master are:
1. too many minions authing at once
2. too many minions re-authing at once
3. too many minions re-connecting at once
4. too many minions returning at once
5. too few resources (CPU/HDD)
The first three are all "thundering herd" problems. To mitigate these issues
we must configure the minions to back-off appropriately when the Master is
under heavy load.
The fourth is caused by masters with insufficient hardware resources in combination
with a possible bug in ZeroMQ. At least that's what it looks like to date
(`Issue 11865 <https://github.com/saltstack/salt/issues/11865>`_,
`Issue 5948 <https://github.com/saltstack/salt/issues/5948>`_,
`Mail thread <https://groups.google.com/forum/#!searchin/salt-users/lots$20of$20minions/salt-users/WxothArv2Do/t12MigMQDFAJ>`_)
To fully understand each problem, it is important to understand how Salt works.
Very briefly, the Salt Master offers two services to the minions.
- a job publisher on port 4505
- an open port 4506 to receive the minions returns
All minions are always connected to the publisher on port 4505 and only connect
to the open return port 4506 if necessary. On an idle Master, there will only
be connections on port 4505.
Too many minions authing
------------------------
When the Minion service is first started up, it will connect to its Master's publisher
on port 4505. If too many minions are started at once, this can cause a "thundering herd".
This can be avoided by not starting too many minions at once.
The connection itself usually isn't the culprit; the more likely cause of master-side
issues is the authentication that the Minion must do with the Master. If the Master
is too heavily loaded to handle the auth request it will time it out. The Minion
will then wait ``acceptance_wait_time`` to retry. If ``acceptance_wait_time_max`` is
set then the Minion will increase its wait time by ``acceptance_wait_time`` on each
subsequent retry until reaching ``acceptance_wait_time_max``.
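For example, a minimal back-off sketch (the values are illustrative):

.. code-block:: yaml

    # /etc/salt/minion
    acceptance_wait_time: 10
    acceptance_wait_time_max: 60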
Too many minions re-authing
---------------------------
This is most likely to happen in the testing phase of a Salt deployment, when
all Minion keys have already been accepted, but the framework is being tested
and parameters are frequently changed in the Salt Master's configuration
file(s).
The Salt Master generates a new AES key to encrypt its publications at certain
events such as a Master restart or the removal of a Minion key. If you are
encountering this problem of too many minions re-authing against the Master,
you will need to recalibrate your setup to reduce the rate of events like a
Master restart or Minion key removal (``salt-key -d``).
When the Master generates a new AES key, the minions aren't notified of this
but will discover it on the next pub job they receive. When the Minion
receives such a job it will then re-auth with the Master. Since Salt does
minion-side filtering, this means that all the minions will re-auth on the next
command published on the master, causing another "thundering herd". This can
be avoided by setting the
.. code-block:: yaml
random_reauth_delay: 60
in the minion's configuration file to a higher value, staggering the re-auth
attempts. Increasing this value will of course increase the time it takes until
all minions are reachable via Salt commands.
Too many minions re-connecting
------------------------------
By default the zmq socket will re-connect every 100ms which for some larger
installations may be too quick. This will control how quickly the TCP session is
re-established, but has no bearing on the auth load.
To tune the minion's socket reconnect attempts, there are a few values in
the sample configuration file (default values shown):
.. code-block:: yaml
recon_default: 1000
recon_max: 5000
recon_randomize: True
- recon_default: the default value the socket should use, i.e. 1000. This value is in
milliseconds. (1000ms = 1 second)
- recon_max: the max value that the socket should use as a delay before trying to reconnect.
This value is in milliseconds. (5000ms = 5 seconds)
- recon_randomize: enables randomization between recon_default and recon_max
To tune these values for an existing environment, a few decisions have to be made.
1. How long can one wait before the minions should be online and reachable via Salt?
2. How many reconnects can the Master handle without a SYN flood?
These questions cannot be answered generally; their answers depend on the
hardware and the administrator's requirements.
Here is an example scenario with the goal, to have all minions reconnect
within a 60 second time-frame on a Salt Master service restart.
.. code-block:: yaml
recon_default: 1000
recon_max: 59000
recon_randomize: True
Each Minion will have a randomized reconnect value between 'recon_default'
and 'recon_default + recon_max', which in this example means between 1000ms
and 60000ms (or between 1 and 60 seconds). The generated random-value will
be doubled after each attempt to reconnect (ZeroMQ default behavior).
Lets say the generated random value is 11 seconds (or 11000ms).
.. code-block:: console
reconnect 1: wait 11 seconds
reconnect 2: wait 22 seconds
reconnect 3: wait 33 seconds
reconnect 4: wait 44 seconds
reconnect 5: wait 55 seconds
reconnect 6: wait time is bigger than 60 seconds (recon_default + recon_max)
reconnect 7: wait 11 seconds
reconnect 8: wait 22 seconds
reconnect 9: wait 33 seconds
reconnect x: etc.
With a thousand minions this will mean
.. code-block:: text
1000/60 = ~16
roughly 16 connection attempts per second. These values should be altered to
match your environment. Keep in mind, though, that the number of minions may grow
over time and that more minions might raise the problem again.
Too many minions returning at once
----------------------------------
This can also happen during the testing phase, if all minions are addressed at
once with
.. code-block:: bash

    $ salt '*' disk.usage
it may cause thousands of minions to try to return their data to the Salt Master's
open port 4506 simultaneously, effectively causing a SYN flood if the Master can't
handle that many returns at once.
This can be easily avoided with Salt's batch mode:
.. code-block:: bash

    $ salt '*' disk.usage -b 50
This will only address 50 minions at once while looping through all addressed
minions.
Too few resources
=================
The master's resources always have to match the environment. There is no way
to give good advice without knowing the environment the Master is supposed to
run in. But here are some general tuning tips for different situations:
The Master is CPU bound
-----------------------
In installations with large or complex pillar files, it is possible
for the master to exhibit poor performance as a result of having to render
many pillar files at once. This exhibits itself in a number of ways, both
as high load on the master and as minions blocking while waiting for their
pillar to be delivered to them.
To reduce pillar rendering times, it is possible to cache pillars on the
master. To do this, see the set of master configuration options which
are prefixed with ``pillar_cache``.
If many pillars are encrypted using :mod:`gpg <salt.renderers.gpg>` renderer, it
is possible to cache GPG data. To do this, see the set of master configuration
options which are prefixed with ``gpg_cache``.
.. note::
Caching pillars or GPG data on the master may introduce security
considerations. Be certain to read caveats outlined in the master
configuration file to understand how pillar caching may affect a master's
ability to protect sensitive data!
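A minimal sketch of enabling both caches (the values are illustrative; read the
caveats mentioned above first):

.. code-block:: yaml

    # /etc/salt/master
    pillar_cache: True
    pillar_cache_ttl: 3600
    pillar_cache_backend: disk
    gpg_cache: True
    gpg_cache_ttl: 86400
    gpg_cache_backend: disk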
The Master is disk IO bound
---------------------------
By default, the Master saves every Minion's return for every job in its
job-cache. The cache can then be used later to look up results for previous
jobs. The default directory for this is:
.. code-block:: yaml
cachedir: /var/cache/salt
with the job data then stored in the ``proc`` directory beneath it.
Each job return for every Minion is saved in a single file. Over time this
directory can grow quite large, depending on the number of published jobs. The
amount of files and directories will scale with the number of jobs published and
the retention time defined by
.. code-block:: yaml
keep_jobs_seconds: 86400
.. code-block:: text
250 jobs/day * 2000 minions returns = 500,000 files a day
Use an External Job Cache
~~~~~~~~~~~~~~~~~~~~~~~~~~
An external job cache allows for job storage to be placed on an external
system, such as a database.
- ext_job_cache: this will have the minions store their return data directly
into a returner (not sent through the Master)
- master_job_cache (New in ``2014.7.0``): this will make the Master store the job
data using a returner (instead of the local job cache on disk). See the sketch below.
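A minimal sketch of pointing the master job cache at a returner (the ``redis``
returner here is only an illustration and needs its own connection settings in
the master config):

.. code-block:: yaml

    # /etc/salt/master
    master_job_cache: redis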
If a master has many accepted keys, it may take a long time to publish a job
because the master must first determine the matching minions and deliver
that information back to the waiting client before the job can be published.
To mitigate this, a key cache may be enabled. This will reduce the load
on the master to a single file open instead of thousands or tens of thousands.
This cache is updated by the maintenance process, however, which means that
minions with keys that are accepted may not be targeted by the master
for up to sixty seconds by default.
To enable the master key cache, set ``key_cache: 'sched'`` in the master
configuration file.
Disable The Job Cache
~~~~~~~~~~~~~~~~~~~~~
The job cache is a central component of the Salt Master and many aspects of
the Salt Master will not function correctly without a running job cache.
Disabling the job cache is **STRONGLY DISCOURAGED** and should not be done
unless the master is being used to execute routines that require no history
or reliable feedback!
The job cache can be disabled:
.. code-block:: yaml
job_cache: False
.. _pillar-walk-through:
==================
Pillar Walkthrough
==================
.. note::
This walkthrough assumes that the reader has already completed the initial
Salt :ref:`walkthrough <tutorial-salt-walk-through>`.
Pillars are tree-like structures of data defined on the Salt Master and passed
through to minions. They allow confidential, targeted data to be securely sent
only to the relevant minion.
.. note::
Grains and Pillar are sometimes confused, just remember that Grains
are data about a minion which is stored or generated from the minion.
This is why information like the OS and CPU type are found in Grains.
Pillar is information about a minion or many minions stored or generated
on the Salt Master.
Pillar data is useful for:
Highly Sensitive Data:
Information transferred via pillar is guaranteed to only be presented to
the minions that are targeted, making Pillar suitable
for managing security information, such as cryptographic keys and
passwords.
Minion Configuration:
Minion modules such as the execution modules, states, and returners can
often be configured via data stored in pillar.
Variables:
Variables which need to be assigned to specific minions or groups of
minions can be defined in pillar and then accessed inside sls formulas
and template files.
Arbitrary Data:
Pillar can contain any basic data structure in dictionary format,
so a key/value store can be defined making it easy to iterate over a group
of values in sls formulas.
Pillar is therefore one of the most important systems when using Salt. This
walkthrough is designed to get a simple Pillar up and running in a few minutes
and then to dive into the capabilities of Pillar and where the data is
available.
Setting Up Pillar
=================
The pillar is already running in Salt by default. To see the minion's
pillar data:
.. code-block:: bash
salt '*' pillar.items
.. note::
Prior to version 0.16.2, this function was named ``pillar.data``. The old
function name is still supported for backwards compatibility.
By default, the contents of the master configuration file are not loaded into
pillar for all minions. This default is stored in the ``pillar_opts`` setting,
which defaults to ``False``.
The contents of the master configuration file can be made available to minion
pillar files. This makes global configuration of services and systems very easy,
but note that this may not be desired or appropriate if sensitive data is stored
in the master's configuration file. To make the master configuration available
to minions as pillar, set ``pillar_opts: True`` in the master configuration
file, and then also set ``pillar_opts: True`` in the configuration file of the
appropriate minions.
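The setting itself is a single line in either file (shown here for the master;
the minion side is identical):
.. code-block:: yaml

    pillar_opts: True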
Similar to the state tree, the pillar is composed of sls files and has a top file.
The default location for the pillar is ``/srv/pillar``.
.. note::
The pillar location can be configured via the ``pillar_roots`` option inside
the master configuration file. It must not be in a subdirectory of the state
tree or file_roots. If the pillar is under file_roots, any pillar targeting
can be bypassed by minions.
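For reference, the stock setting corresponds to:
.. code-block:: yaml

    pillar_roots:
      base:
        - /srv/pillar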
To start setting up the pillar, the /srv/pillar directory needs to be present:
.. code-block:: bash
mkdir /srv/pillar
Now create a simple top file, following the same format as the top file used for
states:
``/srv/pillar/top.sls``:
.. code-block:: yaml
base:
'*':
- data
This top file associates the data.sls file to all minions. Now the
``/srv/pillar/data.sls`` file needs to be populated:
``/srv/pillar/data.sls``:
.. code-block:: yaml
info: some data
To ensure that the minions have the new pillar data, issue a command
to them asking that they fetch their pillars from the master:
.. code-block:: bash
salt '*' saltutil.refresh_pillar
Now that the minions have the new pillar, it can be retrieved:
.. code-block:: bash
salt '*' pillar.items
The key ``info`` should now appear in the returned pillar data.
More Complex Data
~~~~~~~~~~~~~~~~~
Unlike states, pillar files do not need to define :strong:`formulas`.
This example sets up user data with a UID:
``/srv/pillar/users/init.sls``:
.. code-block:: yaml
users:
thatch: 1000
shouse: 1001
utahdave: 1002
redbeard: 1003
.. note::
The same directory lookups that exist in states exist in pillar, so the
file ``users/init.sls`` can be referenced with ``users`` in the :term:`top
file <Top File>`.
The top file will need to be updated to include this sls file:
``/srv/pillar/top.sls``:
.. code-block:: yaml
base:
'*':
- data
- users
Now the data will be available to the minions. To use the pillar data in a
state, you can use Jinja:
``/srv/salt/users/init.sls``
.. code-block:: jinja
{% for user, uid in pillar.get('users', {}).items() %}
{{user}}:
user.present:
- uid: {{uid}}
{% endfor %}
This approach allows for users to be safely defined in pillar, with the user
data then applied in an sls file.
Parameterizing States With Pillar
=================================
Pillar data can be accessed in state files to customize behavior for each
minion. All pillar (and grain) data applicable to each minion is substituted
into the state files through templating before being run. Typical uses
include setting directories appropriate for the minion and skipping states
that don't apply.
A simple example is to set up a mapping of package names in pillar for
separate Linux distributions:
``/srv/pillar/pkg/init.sls``:
.. code-block:: jinja
pkgs:
{% if grains['os_family'] == 'RedHat' %}
apache: httpd
vim: vim-enhanced
{% elif grains['os_family'] == 'Debian' %}
apache: apache2
vim: vim
{% elif grains['os'] == 'Arch' %}
apache: apache
vim: vim
{% endif %}
The new ``pkg`` sls needs to be added to the top file:
``/srv/pillar/top.sls``:
.. code-block:: yaml
base:
'*':
- data
- users
- pkg
Now the minions will automatically map values based on their respective
operating systems inside of the pillar, so sls files can be safely parameterized:
``/srv/salt/apache/init.sls``:
.. code-block:: jinja
apache:
pkg.installed:
- name: {{ pillar['pkgs']['apache'] }}
Or, if no pillar value is available, a default can be set as well:
.. note::
The function ``pillar.get`` used in this example was added to Salt in
version 0.14.0
``/srv/salt/apache/init.sls``:
.. code-block:: jinja
apache:
pkg.installed:
- name: {{ salt['pillar.get']('pkgs:apache', 'httpd') }}
In the above example, if the pillar value ``pillar['pkgs']['apache']`` is not
set in the minion's pillar, then the default of ``httpd`` will be used.
.. note::
Under the hood, pillar is just a Python dict, so Python dict methods such
as ``get`` and ``items`` can be used.
Pillar Makes Simple States Grow Easily
======================================
One of the design goals of pillar is to make simple sls formulas easily grow
into more flexible formulas without refactoring or complicating the states.
A simple formula:
``/srv/salt/edit/vim.sls``:
.. code-block:: yaml
vim:
pkg.installed: []
/etc/vimrc:
file.managed:
- source: salt://edit/vimrc
- mode: 644
- user: root
- group: root
- require:
- pkg: vim
Can be easily transformed into a powerful, parameterized formula:
``/srv/salt/edit/vim.sls``:
.. code-block:: jinja
vim:
pkg.installed:
- name: {{ pillar['pkgs']['vim'] }}
/etc/vimrc:
file.managed:
- source: {{ pillar['vimrc'] }}
- mode: 644
- user: root
- group: root
- require:
- pkg: vim
Where the vimrc source location can now be changed via pillar:
``/srv/pillar/edit/vim.sls``:
.. code-block:: jinja
{% if grains['id'].startswith('dev') %}
vimrc: salt://edit/dev_vimrc
{% elif grains['id'].startswith('qa') %}
vimrc: salt://edit/qa_vimrc
{% else %}
vimrc: salt://edit/vimrc
{% endif %}
This ensures that the right vimrc is sent out to the correct minions.
The pillar top file must include a reference to the new pillar sls file:
``/srv/pillar/top.sls``:
.. code-block:: yaml
base:
'*':
- pkg
- edit.vim
Setting Pillar Data on the Command Line
=======================================
Pillar data can be set on the command line when running :py:func:`state.apply
<salt.modules.state.apply_>` like so:
.. code-block:: bash
salt '*' state.apply pillar='{"foo": "bar"}'
salt '*' state.apply my_sls_file pillar='{"hello": "world"}'
Nested pillar values can also be set via the command line:
.. code-block:: bash
salt '*' state.sls my_sls_file pillar='{"foo": {"bar": "baz"}}'
Lists can be passed via command line pillar data as follows:
.. code-block:: bash
salt '*' state.sls my_sls_file pillar='{"some_list": ["foo", "bar", "baz"]}'
.. note::
If a key is passed on the command line that already exists on the minion,
the key that is passed in will overwrite the entire value of that key,
rather than merging only the specified value set via the command line.
The example below will swap the value for vim with telnet in the previously
specified list, notice the nested pillar dict:
.. code-block:: bash
salt '*' state.apply edit.vim pillar='{"pkgs": {"vim": "telnet"}}'
This will attempt to install telnet on your minions; feel free to uninstall
the package afterwards or to replace the telnet value with anything else.
.. note::
Be aware that when sending sensitive data via pillar on the command-line
that the publication containing that data will be received by all minions
and will not be restricted to the targeted minions. This may represent
a security concern in some cases.
More On Pillar
==============
Pillar data is generated on the Salt master and securely distributed to
minions. Salt is not restricted to the pillar sls files when defining the
pillar but can retrieve data from external sources. This can be useful when
information about an infrastructure is stored in a separate location.
Reference information on pillar and the external pillar interface can be found
in the Salt documentation:
:ref:`Pillar <pillar>`
Minion Config in Pillar
=======================
Minion configuration options can be set in pillar. Any option that you want
to modify should be at the first level of the pillar, in the same way you set
the options in the config file. For example, to configure the MySQL root
password to be used by the MySQL Salt execution module:
.. code-block:: yaml
mysql.pass: hardtoguesspassword
This is very convenient when you need some dynamic configuration change that
you want to be applied on the fly. For example, there is a chicken-and-egg
problem if you do this:
.. code-block:: yaml
mysql-admin-passwd:
mysql_user.present:
- name: root
- password: somepasswd
mydb:
mysql_db.present
The second state will fail because you changed the root password and the
minion didn't notice it. Setting ``mysql.pass`` in the pillar will help to
sort out the issue; just remember to update it there first whenever the root
admin password changes.
This is very helpful for any module that needs credentials to apply state
changes: mysql, keystone, etc.
.. _starting-states:
=========================
How Do I Use Salt States?
=========================
Simplicity, Simplicity, Simplicity
Many of the most powerful and useful engineering solutions are founded on
simple principles. Salt States strive to do just that: K.I.S.S. (Keep It
Stupidly Simple)
The core of the Salt State system is the SLS, or **S**\ a\ **L**\ t
**S**\ tate file. The SLS is a representation of the state in which
a system should be, and is set up to contain this data in a simple format.
This is often called configuration management.
.. note::
This is just the beginning of using states, make sure to read up on pillar
:ref:`Pillar <pillar-walk-through>` next.
It is All Just Data
===================
Before delving into the particulars, it will help to understand that the SLS
file is just a data structure under the hood. While understanding that the SLS
is just a data structure isn't critical for understanding and making use of
Salt States, it should help bolster knowledge of where the real power is.
SLS files are therefore, in reality, just dictionaries, lists, strings, and
numbers. By using this approach Salt can be much more flexible. As one writes
more state files, it becomes clearer exactly what is being written. The result
is a system that is easy to understand, yet grows with the needs of the admin
or developer.
The Top File
============
The example SLS files in the below sections can be assigned to hosts using a
file called :strong:`top.sls`. This file is described in-depth :ref:`here
<states-top>`.
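As a quick preview, a minimal top file assigning the SLS files developed in
the sections below to all minions could look like this (a sketch; the entries
must match the names used in your own State Tree):
.. code-block:: yaml

    base:
      '*':
        - apache
        - ssh.server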
Default Data - YAML
===================
By default Salt represents the SLS data in what is one of the simplest
serialization formats available - `YAML`_.
A typical SLS file will often look like this in YAML:
.. note::
These demos use some generic service and package names, different
distributions often use different names for packages and services. For
instance `apache` should be replaced with `httpd` on a Red Hat system.
Salt uses the name of the init script, systemd unit, upstart job, etc.,
based on the underlying service manager for the platform. To
get a list of the available service names on a platform, execute the
``service.get_all`` salt function.
Information on how to make states work with multiple distributions
is later in the tutorial.
.. code-block:: yaml
apache:
pkg.installed: []
service.running:
- require:
- pkg: apache
This SLS data will ensure that the package named apache is installed, and
that the apache service is running. The components can be explained in a
simple way.
The first line is the ID for a set of data, and it is called the ID
Declaration. This ID sets the name of the thing that needs to be manipulated.
The second and third lines contain the state module function to be run, in the
format ``<state_module>.<function>``. The ``pkg.installed`` state module
function ensures that a software package is installed via the system's native
package manager. The ``service.running`` state module function ensures that a
given system daemon is running.
Finally, on line four, is the word ``require``. This is called a Requisite
Statement, and it makes sure that the Apache service is only started after
a successful installation of the apache package.
.. _`YAML`: https://yaml.org/spec/1.1/
Adding Configs and Users
========================
When setting up a service like an Apache web server, many more components may
need to be added. The Apache configuration file will most likely be managed,
and a user and group may need to be set up.
.. code-block:: yaml
apache:
pkg.installed: []
service.running:
- watch:
- pkg: apache
- file: /etc/httpd/conf/httpd.conf
- user: apache
user.present:
- uid: 87
- gid: 87
- home: /var/www/html
- shell: /bin/nologin
- require:
- group: apache
group.present:
- gid: 87
- require:
- pkg: apache
/etc/httpd/conf/httpd.conf:
file.managed:
- source: salt://apache/httpd.conf
- user: root
- group: root
- mode: 644
This SLS data greatly extends the first example, and includes a config file,
a user, a group, and a new requisite statement: ``watch``.
Adding more states is easy. Since the new user and group states are under
the Apache ID, the user and group will be the Apache user and group. The
``require`` statements will make sure that the user will only be made after
the group, and that the group will be made only after the Apache package is
installed.
Next, the ``require`` statement under service was changed to watch, and is
now watching 3 states instead of just one. The watch statement does the same
thing as require, making sure that the other states run before running the
state with a watch, but it adds an extra component. The ``watch`` statement
will run the state's watcher function for any changes to the watched states.
So if the package was updated, the config file changed, or the user
uid modified, then the service state's watcher will be run. The service
state's watcher just restarts the service, so in this case, a change in the
config file will also trigger a restart of the respective service.
Moving Beyond a Single SLS
==========================
When setting up Salt States in a scalable manner, more than one SLS will need
to be used. The above examples were in a single SLS file, but two or more
SLS files can be combined to build out a State Tree. The above example also
references a file with a strange source - ``salt://apache/httpd.conf``. That
file will need to be available as well.
The SLS files are laid out in a directory structure on the Salt master; an
SLS is just a file and files to download are just files.
The Apache example would be laid out in the root of the Salt file server like
this:
.. code-block:: text
apache/init.sls
apache/httpd.conf
So the httpd.conf is just a file in the apache directory, and is referenced
directly.
.. include:: ../../_incl/sls_filename_cant_contain_period.rst
But when using more than one single SLS file, more components can be added to
the toolkit. Consider this SSH example:
``ssh/init.sls:``
.. code-block:: yaml
openssh-client:
pkg.installed
/etc/ssh/ssh_config:
file.managed:
- user: root
- group: root
- mode: 644
- source: salt://ssh/ssh_config
- require:
- pkg: openssh-client
``ssh/server.sls:``
.. code-block:: yaml
include:
- ssh
openssh-server:
pkg.installed
sshd:
service.running:
- require:
- pkg: openssh-client
- pkg: openssh-server
- file: /etc/ssh/banner
- file: /etc/ssh/sshd_config
/etc/ssh/sshd_config:
file.managed:
- user: root
- group: root
- mode: 644
- source: salt://ssh/sshd_config
- require:
- pkg: openssh-server
/etc/ssh/banner:
file:
- managed
- user: root
- group: root
- mode: 644
- source: salt://ssh/banner
- require:
- pkg: openssh-server
.. note::
Notice that we use two similar ways of denoting that a file
is managed by Salt. In the `/etc/ssh/sshd_config` state section above,
we use the `file.managed` state declaration whereas with the
`/etc/ssh/banner` state section, we use the `file` state declaration
and add a `managed` attribute to that state declaration. Both ways
produce an identical result; the first way -- using `file.managed` --
is merely a shortcut.
Now our State Tree looks like this:
.. code-block:: text
apache/init.sls
apache/httpd.conf
ssh/init.sls
ssh/server.sls
ssh/banner
ssh/ssh_config
ssh/sshd_config
This example now introduces the ``include`` statement. The include statement
includes another SLS file so that components found in it can be required,
watched, or, as will soon be demonstrated, extended.
The include statement allows for states to be cross linked. When an SLS
has an include statement it is literally extended to include the contents of
the included SLS files.
Note that some of the SLS files are called init.sls, while others are not. More
info on what this means can be found in the :ref:`States Tutorial
<sls-file-namespace>`.
Extending Included SLS Data
===========================
Sometimes SLS data needs to be extended. Perhaps the apache service needs to
watch additional resources, or under certain circumstances a different file
needs to be placed.
In these examples, the first will add a custom banner to ssh and the second will
add more watchers to apache to include mod_python.
``ssh/custom-server.sls:``
.. code-block:: yaml
include:
- ssh.server
extend:
/etc/ssh/banner:
file:
- source: salt://ssh/custom-banner
``python/mod_python.sls:``
.. code-block:: yaml
include:
- apache
extend:
apache:
service:
- watch:
- pkg: mod_python
mod_python:
pkg.installed
The ``custom-server.sls`` file uses the extend statement to overwrite where the
banner is being downloaded from, thereby changing which file is used to
configure the banner.
In the new mod_python SLS the mod_python package is added, but more importantly
the apache service was extended to also watch the mod_python package.
.. include:: ../../_incl/extend_with_require_watch.rst
Understanding the Render System
===============================
Since SLS data is simply that (data), it does not need to be represented
with YAML. Salt defaults to YAML because it is very straightforward and easy
to learn and use. But the SLS files can be rendered from almost any imaginable
medium, so long as a renderer module is provided.
The default rendering system is the ``jinja|yaml`` renderer. The
``jinja|yaml`` renderer will first pass the template through the `Jinja2`_
templating system, and then through the YAML parser. The benefit here is that
full programming constructs are available when creating SLS files.
Other renderers available are ``yaml_mako`` and ``yaml_wempy`` which each use
the `Mako`_ or `Wempy`_ templating system respectively rather than the jinja
templating system, and more notably, the pure Python or ``py``, ``pydsl`` &
``pyobjects`` renderers.
The ``py`` renderer allows for SLS files to be written in pure Python,
allowing for the utmost level of flexibility and power when preparing SLS
data; while the :mod:`pydsl<salt.renderers.pydsl>` renderer
provides a flexible, domain-specific language for authoring SLS data in Python;
and the :mod:`pyobjects<salt.renderers.pyobjects>` renderer
gives you a `"Pythonic"`_ interface to building state data.
.. _`Jinja2`: https://jinja.palletsprojects.com/en/2.11.x/
.. _`Mako`: https://www.makotemplates.org/
.. _`Wempy`: https://fossil.secution.com/u/gcw/wempy/doc/tip/README.wiki
.. _`"Pythonic"`: https://legacy.python.org/dev/peps/pep-0008/
.. note::
The templating engines described above aren't just available in SLS files.
They can also be used in :mod:`file.managed <salt.states.file.managed>`
states, making file management much more dynamic and flexible. Some
examples for using templates in managed files can be found in the
documentation for the :mod:`file state <salt.states.file>`, as well as the
:ref:`MooseFS example<jinja-example-moosefs>` below.
Getting to Know the Default - jinja|yaml
----------------------------------------
The default renderer, ``jinja|yaml``, allows for use of the Jinja
templating system. A guide to the Jinja templating system can be found here:
https://jinja.palletsprojects.com/en/2.11.x/
When working with renderers, a few very useful bits of data are passed in. In
the case of templating engine based renderers, three critical components are
available: ``salt``, ``grains``, and ``pillar``. The ``salt`` object allows for
any Salt function to be called from within the template, ``grains`` allows for
the Grains to be accessed from within the template, and ``pillar`` exposes the
minion's pillar data. A few examples:
``apache/init.sls:``
.. code-block:: jinja
apache:
pkg.installed:
{% if grains['os'] == 'RedHat'%}
- name: httpd
{% endif %}
service.running:
{% if grains['os'] == 'RedHat'%}
- name: httpd
{% endif %}
- watch:
- pkg: apache
- file: /etc/httpd/conf/httpd.conf
- user: apache
user.present:
- uid: 87
- gid: 87
- home: /var/www/html
- shell: /bin/nologin
- require:
- group: apache
group.present:
- gid: 87
- require:
- pkg: apache
/etc/httpd/conf/httpd.conf:
file.managed:
- source: salt://apache/httpd.conf
- user: root
- group: root
- mode: 644
This example is simple. If the ``os`` grain states that the operating system is
Red Hat, then the name of the Apache package and service needs to be httpd.
.. _jinja-example-moosefs:
A more aggressive way to use Jinja can be found here, in a module to set up
a MooseFS distributed filesystem chunkserver:
``moosefs/chunk.sls:``
.. code-block:: jinja
include:
- moosefs
{% for mnt in salt['cmd.run']('ls /dev/data/moose*').split() %}
/mnt/moose{{ mnt[-1] }}:
mount.mounted:
- device: {{ mnt }}
- fstype: xfs
- mkmnt: True
file.directory:
- user: mfs
- group: mfs
- require:
- user: mfs
- group: mfs
{% endfor %}
/etc/mfshdd.cfg:
file.managed:
- source: salt://moosefs/mfshdd.cfg
- user: root
- group: root
- mode: 644
- template: jinja
- require:
- pkg: mfs-chunkserver
/etc/mfschunkserver.cfg:
file.managed:
- source: salt://moosefs/mfschunkserver.cfg
- user: root
- group: root
- mode: 644
- template: jinja
- require:
- pkg: mfs-chunkserver
mfs-chunkserver:
pkg.installed: []
mfschunkserver:
service.running:
- require:
{% for mnt in salt['cmd.run']('ls /dev/data/moose*').split() %}
- mount: /mnt/moose{{ mnt[-1] }}
- file: /mnt/moose{{ mnt[-1] }}
{% endfor %}
- file: /etc/mfschunkserver.cfg
- file: /etc/mfshdd.cfg
- file: /var/lib/mfs
This example shows much more of the available power of Jinja.
Multiple for loops are used to dynamically detect available hard drives
and set them up to be mounted, and the ``salt`` object is used multiple
times to call shell commands to gather data.
Introducing the Python, PyDSL, and the Pyobjects Renderers
----------------------------------------------------------
Sometimes the chosen default renderer might not have enough logical power to
accomplish the needed task. When this happens, the Python renderer can be
used. Normally a YAML renderer should be used for the majority of SLS files,
but an SLS file set to use another renderer can be easily added to the tree.
This example shows a very basic Python SLS file:
``python/django.sls:``
.. code-block:: python
#!py
def run():
"""
Install the django package
"""
return {"include": ["python"], "django": {"pkg": ["installed"]}}
This is a very simple example; the first line has an SLS shebang that
tells Salt to not use the default renderer, but to use the ``py`` renderer.
Then the run function is defined; the return value from the run function
must be a Salt-friendly data structure, better known as a Salt
:ref:`HighState data structure<states-highstate>`.
Alternatively, using the :mod:`pydsl<salt.renderers.pydsl>`
renderer, the above example can be written more succinctly as:
.. code-block:: python
#!pydsl
include("python", delayed=True)
state("django").pkg.installed()
The :mod:`pyobjects<salt.renderers.pyobjects>` renderer
provides an `"Pythonic"`_ object based approach for building the state data.
The above example could be written as:
.. code-block:: python
#!pyobjects
include("python")
Pkg.installed("django")
These Python examples would look like this if they were written in YAML:
.. code-block:: yaml
include:
- python
django:
pkg.installed
This example clearly illustrates two things: one, using the YAML renderer by
default is a wise decision, and two, unbridled power can be obtained where
needed by using a pure Python SLS.
Running and Debugging Salt States
---------------------------------
Once the rules in an SLS are ready, they should be tested to ensure they
work properly. To invoke these rules, simply execute
``salt '*' state.apply`` on the command line. If you get back only
hostnames followed by a ``:`` but no return data, chances are there is a problem with
one or more of the sls files. On the minion, use the ``salt-call`` command to
examine the output for errors:
.. code-block:: bash
salt-call state.apply -l debug
This should help troubleshoot the issue. The minion can also be started in the
foreground in debug mode by running ``salt-minion -l debug``.
Next Reading
============
With an understanding of states, the next recommendation is to become familiar
with Salt's pillar interface:
:ref:`Pillar Walkthrough <pillar-walk-through>`
.. _tutorial-multi-master:
=====================
Multi Master Tutorial
=====================
As of Salt 0.16.0, the ability to connect minions to multiple masters has been
made available. The multi-master system allows for redundancy of Salt
masters and facilitates multiple points of communication out to minions. When
using a multi-master setup, all masters are running hot, and any active master
can be used to send commands out to the minions.
.. note::
If you need failover capabilities with multiple masters, there is also a
MultiMaster-PKI setup available, that uses a different topology
`MultiMaster-PKI with Failover Tutorial <https://docs.saltproject.io/en/latest/topics/tutorials/multimaster_pki.html>`_
In 0.16.0, the masters do not share any information: keys need to be accepted
on both masters, and shared files need to be synchronized manually, or tools like
the git fileserver backend can be used to ensure that the :conf_master:`file_roots`
are kept consistent.
Beginning with Salt 2016.11.0, the :ref:`Pluggable Minion Data Cache <pluggable-data-cache>`
was introduced. The minion data cache contains the Salt Mine data, minion grains, and minion
pillar information cached on the Salt Master. By default, Salt uses the ``localfs`` cache
module, but other external data stores can be used instead.
Using a pluggable minion data cache module allows the data stored on a Salt Master about
Salt Minions to be replicated on the other Salt Masters the Minion is connected to. Please see
the :ref:`Minion Data Cache <cache>` documentation for more information and configuration
examples.
Summary of Steps
----------------
1. Create a redundant master server
2. Copy primary master key to redundant master
3. Start redundant master
4. Configure minions to connect to redundant master
5. Restart minions
6. Accept keys on redundant master
Prepping a Redundant Master
---------------------------
The first task is to prepare the redundant master. If the redundant master is
already running, stop it. There is only one requirement when preparing a
redundant master, which is that masters share the same private key. When the
first master was created, the master's identifying key pair was generated and
placed in the master's ``pki_dir``. The default location of the master's key
pair is ``/etc/salt/pki/master/``. Take the private key, ``master.pem``, and
copy it to the same location on the redundant master. Do the same for the
master's public key, ``master.pub``. Assuming that no minions have yet been
connected to the new redundant master, it is safe to delete any existing key
in this location and replace it.
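The copy itself is an ordinary file transfer. For example, from the primary
master (a sketch using ``scp`` and the default ``pki_dir``; any other transfer
mechanism works just as well):
.. code-block:: bash

    scp /etc/salt/pki/master/master.pem root@saltmaster2.example.com:/etc/salt/pki/master/
    scp /etc/salt/pki/master/master.pub root@saltmaster2.example.com:/etc/salt/pki/master/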
.. note::
There is no logical limit to the number of redundant masters that can be
used.
Once the new key is in place, the redundant master can be safely started.
Configure Minions
-----------------
Since minions need to be master-aware, the new master needs to be added to the
minion configurations. Simply update the minion configurations to list all
connected masters:
.. code-block:: yaml
master:
- saltmaster1.example.com
- saltmaster2.example.com
Now the minion can be safely restarted.
.. note::
If the ipc_mode for the minion is set to TCP (default in Windows), then
each minion in the multi-minion setup (one per master) needs its own
tcp_pub_port and tcp_pull_port.
If these settings are left as the default 4510/4511, each minion object
will receive a port 2 higher than the previous. Thus the first minion will
get 4510/4511, the second will get 4512/4513, and so on. If these port
decisions are unacceptable, you must configure tcp_pub_port and
tcp_pull_port with lists of ports for each master. The length of these
lists should match the number of masters, and there should not be overlap
in the lists.
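For example, a minion connecting to two masters might be configured like this
(a sketch following the note above; the port numbers are illustrative):
.. code-block:: yaml

    tcp_pub_port:
      - 4510
      - 4512
    tcp_pull_port:
      - 4511
      - 4513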
Now the minions will check into the original master and also check into the new
redundant master. Both masters are first-class and have rights to the minions.
.. note::
Minions can automatically detect failed masters and attempt to reconnect
to them quickly. To enable this functionality, set
`master_alive_interval` in the minion config and specify a number of
seconds to poll the masters for connection status.
If this option is not set, minions will still reconnect to failed masters
but the first command sent after a master comes back up may be lost while
the minion authenticates.
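In the minion config that looks like the following (an illustrative value;
tune it for your environment):
.. code-block:: yaml

    master_alive_interval: 30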
Sharing Files Between Masters
-----------------------------
Salt does not automatically share files between multiple masters. A number of
files should be shared or sharing of these files should be strongly considered.
Minion Keys
```````````
Minion keys can be accepted the normal way using :strong:`salt-key` on both
masters. Keys accepted, deleted, or rejected on one master will NOT be
automatically managed on redundant masters; this needs to be taken care of by
running salt-key on both masters or sharing the
``/etc/salt/pki/master/{minions,minions_pre,minions_rejected}`` directories
between masters.
.. note::
While sharing the :strong:`/etc/salt/pki/master` directory will work, it is
strongly discouraged, since allowing access to the :strong:`master.pem` key
outside of Salt creates a *SERIOUS* security risk.
File_Roots
``````````
The :conf_master:`file_roots` contents should be kept consistent between
masters. Otherwise state runs will not always be consistent on minions, since
instructions served by one master will not agree with those served by other masters.
The recommended way to sync these is to use a fileserver backend like gitfs or
to keep these files on shared storage.
.. important::
If using gitfs/git_pillar with the cachedir shared between masters using
`GlusterFS`_, nfs, or another network filesystem, and the masters are
running Salt 2015.5.9 or later, it is strongly recommended not to turn off
:conf_master:`gitfs_global_lock`/:conf_master:`git_pillar_global_lock` as
doing so will cause lock files to be removed if they were created by a
different master.
.. _GlusterFS: http://www.gluster.org/
Pillar_Roots
````````````
Pillar roots should be given the same considerations as
:conf_master:`file_roots`.
Master Configurations
`````````````````````
While reasons may exist to maintain separate master configurations, it is wise
to remember that each master maintains independent control over minions.
Therefore, access controls should be in sync between masters unless a valid
reason otherwise exists to keep them inconsistent.
These access control options include but are not limited to:
- external_auth
- publisher_acl
- peer
- peer_run
.. _tutorial-multi-master-pki:
=======================================
Multi-Master-PKI Tutorial With Failover
=======================================
This tutorial will explain how to run a salt environment where a single
minion can have multiple masters and fail over between them if its current
master fails.
The individual steps are
- setup the master(s) to sign its auth-replies
- setup minion(s) to verify master-public-keys
- enable multiple masters on minion(s)
- enable master-check on minion(s)
Please note that it is advised to have good knowledge of the salt
authentication and communication process to understand this tutorial.
All of the settings described here go on top of the default
authentication/communication process.
Motivation
==========
The default behaviour of a salt-minion is to connect to a master and accept
the master's public key. With each publication, the master sends its public key
for the minion to check and if this public key ever changes, the minion
complains and exits. Practically this means that there can only be a single
master at any given time.
Would it not be much nicer if the minion could have any number of masters
(1:n) and jump to the next master if its current master died because of a
network or hardware failure?
.. note::
There is also a MultiMaster-Tutorial with a different approach and topology
than this one, that might also suit your needs or might even be better suited
`Multi-Master Tutorial <https://docs.saltproject.io/en/latest/topics/tutorials/multimaster.html>`_
It is also desirable to add some sort of authenticity check to the very first
public key a minion receives from a master. Currently a minion takes the
first master's public key for granted.
The Goal
========
Setup the master to sign the public key it sends to the minions and enable the
minions to verify this signature for authenticity.
Prepping the master to sign its public key
==========================================
For signing to work, both master and minion must have the signing and/or
verification settings enabled. If the master signs the public key but the
minion does not verify it, the minion will complain and exit. The same
happens when the master does not sign but the minion tries to verify.
The easiest way to have the master sign its public key is to set
.. code-block:: yaml
master_sign_pubkey: True
After restarting the salt-master service, the master will automatically
generate a new key-pair
.. code-block:: yaml
master_sign.pem
master_sign.pub
A custom name can be set for the signing key-pair by setting
.. code-block:: yaml
master_sign_key_name: <name_without_suffix>
The master will then generate that key-pair upon restart and use it for
creating the public keys signature attached to the auth-reply.
The computation is done for every auth-request of a minion. If many minions
auth very often, it is advised to use the :conf_master:`master_pubkey_signature`
and :conf_master:`master_use_pubkey_signature` settings described below.
If multiple masters are in use and should sign their auth-replies, the signing
key-pair master_sign.* has to be copied to each master. Otherwise a minion
will fail to verify the master's public key when connecting to a different
master than it did initially. That is because the public key's signature was
created with a different signing key-pair.
Prepping the minion to verify received public keys
==================================================
The minion must have the public key (and only that one!) available to be
able to verify a signature it receives. That public key (defaults to
master_sign.pub) must be copied from the master to the minion's pki directory:
.. code-block:: bash
/etc/salt/pki/minion/master_sign.pub
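One way to get it there is a plain file copy from the master (a sketch using
``scp``; any file distribution mechanism works):
.. code-block:: bash

    scp /etc/salt/pki/master/master_sign.pub minion1.example.com:/etc/salt/pki/minion/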
.. important::
DO NOT COPY THE master_sign.pem FILE. IT MUST STAY ON THE MASTER AND
ONLY THERE!
When that is done, enable the signature checking in the minion's configuration
.. code-block:: yaml
verify_master_pubkey_sign: True
and restart the minion. For the first try, the minion should be run in manual
debug mode.
.. code-block:: bash
salt-minion -l debug
Upon connecting to the master, the following lines should appear on the output:
.. code-block:: text
[DEBUG ] Attempting to authenticate with the Salt Master at 172.16.0.10
[DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem
[DEBUG ] salt.crypt.verify_signature: Loading public key
[DEBUG ] salt.crypt.verify_signature: Verifying signature
[DEBUG ] Successfully verified signature of master public key with verification public key master_sign.pub
[INFO ] Received signed and verified master pubkey from master 172.16.0.10
[DEBUG ] Decrypting the current master AES key
If the signature verification fails, something went wrong and it will look
like this
.. code-block:: text
[DEBUG ] Attempting to authenticate with the Salt Master at 172.16.0.10
[DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem
[DEBUG ] salt.crypt.verify_signature: Loading public key
[DEBUG ] salt.crypt.verify_signature: Verifying signature
[DEBUG ] Failed to verify signature of public key
[CRITICAL] The Salt Master server's public key did not authenticate!
In a case like this, it should be checked that the verification pubkey
(master_sign.pub) on the minion is the same as the one on the master.
Once the verification is successful, the minion can be started in daemon mode
again.
For the paranoid among us, it's also possible to verify the publication whenever
it is received from the master. That is, for every single auth-attempt, which
can be quite frequent. For example, just starting the minion will force the
signature to be checked 6 times for various things like auth, mine,
:ref:`highstate <running-highstate>`, etc.
If that is desired, enable the setting
.. code-block:: yaml
always_verify_signature: True
Multiple Masters For A Minion
=============================
Configuring multiple masters on a minion is done by specifying two settings:
- a list of masters addresses
- what type of master is defined
.. code-block:: yaml
master:
- 172.16.0.10
- 172.16.0.11
- 172.16.0.12
.. code-block:: yaml
master_type: failover
This tells the minion that all the masters above are available for it to
connect to. When started with this configuration, it will try the masters
in the order they are defined. To randomize that order, set
.. code-block:: yaml
random_master: True
The master-list will then be shuffled before the first connection attempt.
The first master that accepts the minion is used by the minion. If the
master does not yet know the minion, that counts as accepted and the minion
stays on that master.
For the minion to be able to detect if it's still connected to its current
master, enable the check for it
.. code-block:: yaml
master_alive_interval: <seconds>
If the loss of the connection is detected, the minion will temporarily
remove the failed master from the list and try one of the other masters
defined (again shuffled if that is enabled).
Testing the setup
=================
At least two running masters are needed to test the failover setup.
Both masters should be running and the minion should be running on the command
line in debug mode
.. code-block:: bash
salt-minion -l debug
The minion will connect to the first master from its master list
.. code-block:: bash
[DEBUG ] Attempting to authenticate with the Salt Master at 172.16.0.10
[DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem
[DEBUG ] salt.crypt.verify_signature: Loading public key
[DEBUG ] salt.crypt.verify_signature: Verifying signature
[DEBUG ] Successfully verified signature of master public key with verification public key master_sign.pub
[INFO ] Received signed and verified master pubkey from master 172.16.0.10
[DEBUG ] Decrypting the current master AES key
To test connectivity, run ``test.version`` from the master the minion is
currently connected to.
If successful, that master should be turned off. A firewall rule denying the
minion's packets will also do the trick.
Depending on the configured :conf_minion:`master_alive_interval`, the minion
will notice the loss of the connection and log it to its logfile.
.. code-block:: bash
[INFO ] Connection to master 172.16.0.10 lost
[INFO ] Trying to tune in to next master from master-list
The minion will then remove the current master from the list and try connecting
to the next master
.. code-block:: bash
[INFO ] Removing possibly failed master 172.16.0.10 from list of masters
[WARNING ] Master ip address changed from 172.16.0.10 to 172.16.0.11
[DEBUG ] Attempting to authenticate with the Salt Master at 172.16.0.11
If everything is configured correctly, the new master's public key will be
verified successfully
.. code-block:: bash
[DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem
[DEBUG ] salt.crypt.verify_signature: Loading public key
[DEBUG ] salt.crypt.verify_signature: Verifying signature
[DEBUG ] Successfully verified signature of master public key with verification public key master_sign.pub
then the authentication with the new master succeeds
.. code-block:: bash
[INFO ] Received signed and verified master pubkey from master 172.16.0.11
[DEBUG ] Decrypting the current master AES key
[DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem
[INFO ] Authentication with master successful!
and the minion can be pinged again from its new master.
Performance Tuning
==================
With the setup described above, the master computes a signature for every
auth-request of a minion. With many minions and many auth-requests, that
can chew up quite a bit of CPU power.
To avoid that, the master can use a pre-created signature of its public-key.
The signature is saved as a base64 encoded string which the master reads
once when starting and attaches only that string to auth-replies.
Enabling this also gives paranoid users the possibility to have the signing
key-pair on a different system than the actual salt-master and to create the
public key's signature there, perhaps on a system with more restrictive
firewall rules, without internet access, fewer users, etc.
That signature can be created with
.. code-block:: bash
salt-key --gen-signature
This will create a default signature file in the master pki-directory
.. code-block:: bash
/etc/salt/pki/master/master_pubkey_signature
It is a simple text file with the binary signature converted to base64.
If no signing pair is present yet, this will auto-create the signing pair and
the signature file in one call
.. code-block:: bash
salt-key --gen-signature --auto-create
Telling the master to use the pre-created signature is done with
.. code-block:: yaml
master_use_pubkey_signature: True
That requires the file ``master_pubkey_signature`` to be present in the master's
pki directory with the correct signature.
If the signature file is named differently, its name can be set with
.. code-block:: yaml
master_pubkey_signature: <filename>
With many masters and many public keys (default and signing), it is advised to
use the salt-master's hostname for the signature file's name. Signatures can be
easily confused because they do not provide any information about the key the
signature was created from.
Verifying that everything works is done the same way as above.
How the signing and verification works
======================================
The default key-pair of the salt-master is
.. code-block:: yaml
/etc/salt/pki/master/master.pem
/etc/salt/pki/master/master.pub
To be able to create a signature of a message (in this case a public-key),
another key-pair has to be added to the setup. Its default name is:
.. code-block:: yaml
master_sign.pem
master_sign.pub
The combination of the master.* and master_sign.* key-pairs gives the
possibility of generating signatures. The signature of a given message
is unique and can be verified if the public key of the signing key-pair
is available to the recipient (the minion).
The signature of the master's public key in master.pub is computed with
.. code-block:: yaml
master_sign.pem
master.pub
M2Crypto.EVP.sign_update()
This results in a binary signature which is converted to base64 and attached
to the auth-reply sent to the minion.
With the signing pair's public key available to the minion, the attached
signature can be verified with
.. code-block:: yaml
master_sign.pub
master.pub
M2Crypto's EVP.verify_update().
When running multiple masters, either the signing key-pair has to be present
on all of them, or the master_pubkey_signature has to be pre-computed for
each master individually (because they all have different public-keys).
DO NOT PUT THE SAME master.pub ON ALL MASTERS FOR EASE OF USE.
.. _tutorial-states-part-2:
=========================================================
States tutorial, part 2 - More Complex States, Requisites
=========================================================
.. note::
This tutorial builds on topics covered in :ref:`part 1 <states-tutorial>`. It is
recommended that you begin there.
In the :ref:`last part <states-tutorial>` of the Salt States tutorial we covered the
basics of installing a package. We will now modify our ``webserver.sls`` file
to have requirements, and use even more Salt States.
Call multiple States
====================
You can specify multiple :ref:`state-declaration` under an
:ref:`id-declaration`. For example, a quick modification to our
``webserver.sls`` to also start Apache if it is not running:
.. code-block:: yaml
:linenos:
:emphasize-lines: 4,5
apache:
pkg.installed: []
service.running:
- require:
- pkg: apache
Try stopping Apache before running :py:func:`state.apply
<salt.modules.state.apply_>` once again and observe the output.
.. note::
For those running Red Hat OS derivatives (CentOS, Amazon Linux), you will want
to specify the service name to be httpd. More on the service state here: :mod:`service state
<salt.states.service>`. With the example above, just add ``- name: httpd``
above the require line and with the same spacing.
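Applied to the example above, that might look like the following (a sketch for
Red Hat family systems):
.. code-block:: yaml

    apache:
      # On Red Hat family systems the package may also need '- name: httpd'
      pkg.installed: []
      service.running:
        - name: httpd
        - require:
          - pkg: apache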
Require other states
====================
We now have a working installation of Apache so let's add an HTML file to
customize our website. It isn't exactly useful to have a website without a
webserver so we don't want Salt to install our HTML file until Apache is
installed and running. Include the following at the bottom of your
``webserver/init.sls`` file:
.. code-block:: yaml
:linenos:
:emphasize-lines: 7,11
apache:
pkg.installed: []
service.running:
- require:
- pkg: apache
/var/www/index.html: # ID declaration
file: # state declaration
- managed # function
- source: salt://webserver/index.html # function arg
- require: # requisite declaration
- pkg: apache # requisite reference
**Line 7** is the :ref:`id-declaration`. In this example it is the location where we
want to install our custom HTML file. (**Note:** the default location that
Apache serves may differ from the above on your OS or distro. ``/srv/www``
could also be a likely place to look.)
**Line 8** the :ref:`state-declaration`. This example uses the Salt :mod:`file
state <salt.states.file>`.
**Line 9** is the :ref:`function-declaration`. The :func:`managed function
<salt.states.file.managed>` will download a file from the master and install it
in the location specified.
**Line 10** is a :ref:`function-arg-declaration` which, in this example, passes
the ``source`` argument to the :func:`managed function
<salt.states.file.managed>`.
**Line 11** is a :ref:`requisite-declaration`.
**Line 12** is a :ref:`requisite-reference` which refers to a state and an ID.
In this example, it is referring to the ``ID declaration`` from our example in
:ref:`part 1 <states-tutorial>`. This declaration tells Salt not to install the HTML
file until Apache is installed.
Next, create the ``index.html`` file and save it in the ``webserver``
directory:
.. code-block:: html
<!DOCTYPE html>
<html>
<head><title>Salt rocks</title></head>
<body>
<h1>This file brought to you by Salt</h1>
</body>
</html>
Last, call :func:`state.apply <salt.modules.state.apply_>` again and the minion
will fetch and execute the :ref:`highstate <running-highstate>` as well as our
HTML file from the master using Salt's File Server:
.. code-block:: bash
salt '*' state.apply
Verify that Apache is now serving your custom HTML.
.. admonition:: ``require`` vs. ``watch``
There are two kinds of :ref:`requisite-declaration`: “require” and “watch”. Not
every state supports “watch”. The :mod:`service state
<salt.states.service>` does support “watch” and will restart a service
based on the watch condition.
For example, if you use Salt to install an Apache virtual host
configuration file and want to restart Apache whenever that file is changed
you could modify our Apache example from earlier as follows:
.. code-block:: yaml
:emphasize-lines: 1,2,3,10,11
/etc/httpd/extra/httpd-vhosts.conf:
file.managed:
- source: salt://webserver/httpd-vhosts.conf
apache:
pkg.installed: []
service.running:
- watch:
- file: /etc/httpd/extra/httpd-vhosts.conf
- require:
- pkg: apache
If the pkg and service names differ on your OS or distro of choice you can
specify each one separately using a :ref:`name-declaration` which is explained
in :ref:`Part 3 <tutorial-states-part-3>`.
Next steps
==========
In :ref:`part 3 <tutorial-states-part-3>` we will discuss how to use includes, extends, and
templating to make a more complete State Tree configuration.
.. _tutorial-states-part-4:
=======================
States tutorial, part 4
=======================
.. note::
This tutorial builds on topics covered in :ref:`part 1 <states-tutorial>`,
:ref:`part 2 <tutorial-states-part-2>`, and :ref:`part 3 <tutorial-states-part-3>`.
It is recommended that you begin there.
This part of the tutorial will show how to use salt's :conf_master:`file_roots`
to set up a workflow in which states can be "promoted" from dev, to QA, to
production.
Salt fileserver path inheritance
================================
Salt's fileserver allows for more than one root directory per environment, like
in the below example, which uses both a local directory and a secondary
location shared to the salt master via NFS:
.. code-block:: yaml
# In the master config file (/etc/salt/master)
file_roots:
base:
- /srv/salt
- /mnt/salt-nfs/base
Salt's fileserver collapses the list of root directories into a single virtual
environment containing all files from each root. If the same file exists at the
same relative path in more than one root, then the top-most match "wins". For
example, if ``/srv/salt/foo.txt`` and ``/mnt/salt-nfs/base/foo.txt`` both
exist, then ``salt://foo.txt`` will point to ``/srv/salt/foo.txt``.
.. note::
When using multiple fileserver backends, the order in which they are listed
in the :conf_master:`fileserver_backend` parameter also matters. If both
``roots`` and ``git`` backends contain a file with the same relative path,
and ``roots`` appears before ``git`` in the
:conf_master:`fileserver_backend` list, then the file in ``roots`` will
"win", and the file in gitfs will be ignored.
A more thorough explanation of how Salt's modular fileserver works can be
found :ref:`here <file-server-backends>`. We recommend reading this.
Environment configuration
=========================
Configure a multiple-environment setup like so:
.. code-block:: yaml
file_roots:
base:
- /srv/salt/prod
qa:
- /srv/salt/qa
- /srv/salt/prod
dev:
- /srv/salt/dev
- /srv/salt/qa
- /srv/salt/prod
Given the path inheritance described above, files within ``/srv/salt/prod``
would be available in all environments. Files within ``/srv/salt/qa`` would be
available in both ``qa``, and ``dev``. Finally, the files within
``/srv/salt/dev`` would only be available within the ``dev`` environment.
Based on the order in which the roots are defined, new files/states can be
placed within ``/srv/salt/dev``, and pushed out to the dev hosts for testing.
Those files/states can then be moved to the same relative path within
``/srv/salt/qa``, and they are now available only in the ``dev`` and ``qa``
environments, allowing them to be pushed to QA hosts and tested.
Finally, if moved to the same relative path within ``/srv/salt/prod``, the
files are now available in all three environments.
Requesting files from specific fileserver environments
======================================================
See :ref:`here <file-server-environments>` for documentation on how to request
files from specific environments.
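As a quick illustration, many file-fetching functions accept a ``saltenv``
argument to select the environment (a sketch; it assumes a ``foo.txt`` exists
in the ``dev`` environment's roots):
.. code-block:: bash

    salt '*' cp.get_file salt://foo.txt /tmp/foo.txt saltenv=dev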
Practical Example
=================
As an example, consider a simple website, installed to ``/var/www/foobarcom``.
Below is a top.sls that can be used to deploy the website:
``/srv/salt/prod/top.sls:``
.. code-block:: yaml
base:
'web*prod*':
- webserver.foobarcom
qa:
'web*qa*':
- webserver.foobarcom
dev:
'web*dev*':
- webserver.foobarcom
Using pillar, roles can be assigned to the hosts:
``/srv/pillar/top.sls:``
.. code-block:: yaml
base:
'web*prod*':
- webserver.prod
'web*qa*':
- webserver.qa
'web*dev*':
- webserver.dev
``/srv/pillar/webserver/prod.sls:``
.. code-block:: yaml
webserver_role: prod
``/srv/pillar/webserver/qa.sls:``
.. code-block:: yaml
webserver_role: qa
``/srv/pillar/webserver/dev.sls:``
.. code-block:: yaml
webserver_role: dev
And finally, the SLS to deploy the website:
``/srv/salt/prod/webserver/foobarcom.sls:``
.. code-block:: jinja
{% if pillar.get('webserver_role', '') %}
/var/www/foobarcom:
file.recurse:
- source: salt://webserver/src/foobarcom
- env: {{ pillar['webserver_role'] }}
- user: www
- group: www
- dir_mode: 755
- file_mode: 644
{% endif %}
Given the above SLS, the source for the website should initially be placed in
``/srv/salt/dev/webserver/src/foobarcom``.
First, let's deploy to dev. Given the configuration in the top file, this can
be done using :py:func:`state.apply <salt.modules.state.apply_>`:
.. code-block:: bash
salt --pillar 'webserver_role:dev' state.apply
However, in the event that it is not desirable to apply all states configured
in the top file (which could be likely in more complex setups), it is possible
to apply just the states for the ``foobarcom`` website, by invoking
:py:func:`state.apply <salt.modules.state.apply_>` with the desired SLS target
as an argument:
.. code-block:: bash
salt --pillar 'webserver_role:dev' state.apply webserver.foobarcom
Once the site has been tested in dev, then the files can be moved from
``/srv/salt/dev/webserver/src/foobarcom`` to
``/srv/salt/qa/webserver/src/foobarcom``, and deployed using the following:
.. code-block:: bash
salt --pillar 'webserver_role:qa' state.apply webserver.foobarcom
Finally, once the site has been tested in qa, then the files can be moved from
``/srv/salt/qa/webserver/src/foobarcom`` to
``/srv/salt/prod/webserver/src/foobarcom``, and deployed using the following:
.. code-block:: bash
salt --pillar 'webserver_role:prod' state.apply webserver.foobarcom
Thanks to Salt's fileserver inheritance, even though the files have been moved
to within ``/srv/salt/prod``, they are still available from the same
``salt://`` URI in both the qa and dev environments.
Continue Learning
=================
The best way to continue learning about Salt States is to read through the
:ref:`reference documentation <state-system-reference>` and to look through examples
of existing state trees. Many pre-configured state trees
can be found on GitHub in the `saltstack-formulas`_ collection of repositories.
.. _`saltstack-formulas`: https://github.com/saltstack-formulas
If you have any questions, suggestions, or just want to chat with other people
who are using Salt, we have a very active community and we'd love to hear from
you. One of the best places to talk to the community is on the
`Salt Project Slack workspace <https://saltstackcommunity.slack.com/>`_.
In addition, by continuing to the :ref:`Orchestrate Runner <orchestrate-runner>` docs,
you can learn about the powerful orchestration of which Salt is capable.
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/tutorials/states_pt4.rst
.. _tutorial-http:
HTTP Modules
============
This tutorial demonstrates using the various HTTP modules available in Salt.
These modules wrap the Python ``tornado``, ``urllib2``, and ``requests``
libraries, extending them in a manner that is more consistent with Salt
workflows.
The ``salt.utils.http`` Library
-------------------------------
This library forms the core of the HTTP modules. Since it is designed to be used
from the minion as an execution module, in addition to the master as a runner,
it was abstracted into this multi-use library. This library can also be imported
by 3rd-party programs wishing to take advantage of its extended functionality.
Core functionality of the execution, state, and runner modules is derived from
this library, so common usages between them are described here. Documentation
specific to each module is described below.
This library can be imported with:
.. code-block:: python
import salt.utils.http
Configuring Libraries
~~~~~~~~~~~~~~~~~~~~~
This library can make use of either ``tornado``, which is required by Salt,
``urllib2``, which ships with Python, or ``requests``, which can be installed
separately. By default, ``tornado`` will be used. In order to switch to
``urllib2``, set the following variable:
.. code-block:: yaml
backend: urllib2
In order to switch to ``requests``, set the following variable:
.. code-block:: yaml
backend: requests
This can be set in the master or minion configuration file, or passed as an
option directly to any of the ``http.query()`` functions.
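The backend can also be selected on a per-call basis, for example (a minimal
sketch using a placeholder URL):
.. code-block:: python
salt.utils.http.query("http://example.com", backend="requests")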
``salt.utils.http.query()``
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This function forms a basic query, but with some add-ons not present in the
``tornado``, ``urllib2``, and ``requests`` libraries. Not all functionality
currently available in these libraries has been added, but can be in future
iterations.
HTTPS Request Methods
`````````````````````
A basic query can be performed by calling this function with no more than a
single URL:
.. code-block:: python
salt.utils.http.query("http://example.com")
By default the query will be performed with a ``GET`` method. The method can
be overridden with the ``method`` argument:
.. code-block:: python
salt.utils.http.query("http://example.com/delete/url", "DELETE")
When using the ``POST`` method (and others, such as ``PUT``), extra data is usually
sent as well. This data can be sent directly (it will be URL-encoded when necessary),
or in whatever format is required by the remote server (XML, JSON, plain text, etc.).
.. code-block:: python
salt.utils.http.query(
"http://example.com/post/url", method="POST", data=json.dumps(mydict)
)
Data Formatting and Templating
``````````````````````````````
Bear in mind that the data must be sent pre-formatted; this function will not
format it for you. However, a templated file stored on the local system may be
passed through, along with variables to populate it with. To pass through only
the file (untemplated):
.. code-block:: python
salt.utils.http.query(
"http://example.com/post/url", method="POST", data_file="/srv/salt/somefile.xml"
)
To pass through a file that contains jinja + yaml templating (the default):
.. code-block:: python
salt.utils.http.query(
"http://example.com/post/url",
method="POST",
data_file="/srv/salt/somefile.jinja",
data_render=True,
template_dict={"key1": "value1", "key2": "value2"},
)
To pass through a file that contains mako templating:
.. code-block:: python
salt.utils.http.query(
"http://example.com/post/url",
method="POST",
data_file="/srv/salt/somefile.mako",
data_render=True,
data_renderer="mako",
template_dict={"key1": "value1", "key2": "value2"},
)
Because this function uses Salt's own rendering system, any Salt renderer can
be used. Because Salt's renderer requires ``__opts__`` to be set, an ``opts``
dictionary should be passed in. If it is not, then the default ``__opts__``
values for the node type (master or minion) will be used. Because this library
is intended primarily for use by minions, the default node type is ``minion``.
However, this can be changed to ``master`` if necessary.
.. code-block:: python
salt.utils.http.query(
"http://example.com/post/url",
method="POST",
data_file="/srv/salt/somefile.jinja",
data_render=True,
template_dict={"key1": "value1", "key2": "value2"},
opts=__opts__,
)
salt.utils.http.query(
"http://example.com/post/url",
method="POST",
data_file="/srv/salt/somefile.jinja",
data_render=True,
template_dict={"key1": "value1", "key2": "value2"},
node="master",
)
Headers
```````
Headers may also be passed through, either as a ``header_list``, a
``header_dict``, or as a ``header_file``. As with the ``data_file``, the
``header_file`` may also be templated. Take note that because HTTP headers are
normally syntactically-correct YAML, they will automatically be imported as
a Python dict.
.. code-block:: python
salt.utils.http.query(
"http://example.com/delete/url",
method="POST",
header_file="/srv/salt/headers.jinja",
header_render=True,
header_renderer="jinja",
template_dict={"key1": "value1", "key2": "value2"},
)
Because much of the data that would be templated between headers and data may be
the same, the ``template_dict`` is the same for both. Correcting possible
variable name collisions is up to the user.
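For example, a single ``template_dict`` can render both the header file and
the data file in one call (a sketch combining the examples above; the paths
are placeholders):
.. code-block:: python
salt.utils.http.query(
"http://example.com/post/url",
method="POST",
data_file="/srv/salt/somefile.jinja",
data_render=True,
header_file="/srv/salt/headers.jinja",
header_render=True,
template_dict={"key1": "value1", "key2": "value2"},
)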
Authentication
``````````````
The ``query()`` function supports basic HTTP authentication. A username and
password may be passed in as ``username`` and ``password``, respectively.
.. code-block:: python
salt.utils.http.query("http://example.com", username="larry", password="5700g3543v4r")
Cookies and Sessions
````````````````````
Cookies are also supported, using Python's built-in ``cookielib``. However, they
are turned off by default. To turn cookies on, set ``cookies`` to True.
.. code-block:: python
salt.utils.http.query("http://example.com", cookies=True)
By default cookies are stored in Salt's cache directory, normally
``/var/cache/salt``, as a file called ``cookies.txt``. However, this location
may be changed with the ``cookie_jar`` argument:
.. code-block:: python
salt.utils.http.query(
"http://example.com", cookies=True, cookie_jar="/path/to/cookie_jar.txt"
)
By default, the format of the cookie jar is LWP (aka, lib-www-perl). This
default was chosen because it is a human-readable text file. If desired, the
format of the cookie jar can be set to Mozilla:
.. code-block:: python
salt.utils.http.query(
"http://example.com",
cookies=True,
cookie_jar="/path/to/cookie_jar.txt",
cookie_format="mozilla",
)
Because Salt commands are normally one-off commands that are piped together,
this library cannot normally behave as a normal browser, with session cookies
that persist across multiple HTTP requests. However, the session can be
persisted in a separate cookie jar. The default filename for this file, inside
Salt's cache directory, is ``cookies.session.p``. This can also be changed.
.. code-block:: python
salt.utils.http.query(
"http://example.com", persist_session=True, session_cookie_jar="/path/to/jar.p"
)
The format of this file is msgpack, which is consistent with much of the rest
of Salt's internal structure. Historically, the extension for this file is
``.p``. There are no current plans to make this configurable.
Proxy
`````
If the ``tornado`` backend is used (``tornado`` is the default), proxy
information configured in ``proxy_host``, ``proxy_port``, ``proxy_username``,
``proxy_password`` and ``no_proxy`` from the ``__opts__`` dictionary will be used. Normally
these are set in the minion configuration file.
.. code-block:: yaml
proxy_host: proxy.my-domain
proxy_port: 31337
proxy_username: charon
proxy_password: obolus
no_proxy: ['127.0.0.1', 'localhost']
.. code-block:: python
salt.utils.http.query("http://example.com", opts=__opts__, backend="tornado")
Return Data
~~~~~~~~~~~
.. note:: Return data encoding
If ``decode`` is set to ``True``, ``query()`` will attempt to decode the
return data. ``decode_type`` defaults to ``auto``. Set it to a specific
encoding, ``xml``, for example, to override autodetection.
Because Salt's http library was designed to be used with REST interfaces,
``query()`` will attempt to decode the data received from the remote server
when ``decode`` is set to ``True``. First it will check the ``Content-type``
header to try and find references to XML. If it does not find any, it will look
for references to JSON. If it does not find any, it will fall back to plain
text, which will not be decoded.
JSON data is translated into a dict using Python's built-in ``json`` library.
XML is translated using ``salt.utils.xml_util``, which will use Python's
built-in XML libraries to attempt to convert the XML into a dict. In order to
force either JSON or XML decoding, the ``decode_type`` may be set:
.. code-block:: python
salt.utils.http.query("http://example.com", decode_type="xml")
Once translated, the return dict from ``query()`` will include a dict called
``dict``.
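For example, the decoded data can be accessed like this (a minimal sketch,
assuming the remote server returns JSON):
.. code-block:: python
result = salt.utils.http.query("http://example.com/api", decode=True)
decoded = result["dict"]  # the decoded JSON, as a Python dict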
If the data is not to be translated using one of these methods, decoding may be
turned off.
.. code-block:: python
salt.utils.http.query("http://example.com", decode=False)
If decoding is turned on, and references to JSON or XML cannot be found, then
this module will default to plain text, and return the undecoded data as
``text`` (even if text is set to ``False``; see below).
The ``query()`` function can return the HTTP status code, headers, and/or text
as required. However, each must individually be turned on.
.. code-block:: python
salt.utils.http.query("http://example.com", status=True, headers=True, text=True)
The return from these will be found in the return dict as ``status``,
``headers`` and ``text``, respectively.
Writing Return Data to Files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to write either the return data or headers to files, as soon as
the response is received from the server, by specifying file locations via the
``text_out`` or ``headers_out`` arguments. ``text`` and ``headers`` do not need
to be returned to the user in order to do this.
.. code-block:: python
salt.utils.http.query(
"http://example.com",
text=False,
headers=False,
text_out="/path/to/url_download.txt",
headers_out="/path/to/headers_download.txt",
)
SSL Verification
~~~~~~~~~~~~~~~~
By default, this function will verify SSL certificates. However, for testing or
debugging purposes, SSL verification can be turned off.
.. code-block:: python
salt.utils.http.query("https://example.com", verify_ssl=False)
CA Bundles
~~~~~~~~~~
The ``requests`` library has its own method of detecting which CA (certificate
authority) bundle file to use. Usually this is implemented by the packager for
the specific operating system distribution that you are using. However,
``urllib2`` requires a little more work under the hood. By default, Salt will
try to auto-detect the location of this file. However, if it is not in an
expected location, or a different path needs to be specified, it may be done so
using the ``ca_bundle`` variable.
.. code-block:: python
salt.utils.http.query("https://example.com", ca_bundle="/path/to/ca_bundle.pem")
Updating CA Bundles
```````````````````
The ``update_ca_bundle()`` function can be used to update the bundle file at a
specified location. If the target location is not specified, then it will
attempt to auto-detect the location of the bundle file. If the URL to download
the bundle from does not exist, a bundle will be downloaded from the cURL
website.
CAUTION: The ``target`` and the ``source`` should always be specified! Failure
to specify the ``target`` may result in the file being written to the wrong
location on the local system. Failure to specify the ``source`` may cause the
upstream URL to receive excess unnecessary traffic, and may cause a file to be
downloaded which is hazardous or does not meet the needs of the user.
.. code-block:: python
salt.utils.http.update_ca_bundle(
target="/path/to/ca-bundle.crt",
source="https://example.com/path/to/ca-bundle.crt",
opts=__opts__,
)
The ``opts`` parameter should also always be specified. If it is, then the
``target`` and the ``source`` may be specified in the relevant configuration
file (master or minion) as ``ca_bundle`` and ``ca_bundle_url``, respectively.
.. code-block:: yaml
ca_bundle: /path/to/ca-bundle.crt
ca_bundle_url: https://example.com/path/to/ca-bundle.crt
If Salt is unable to auto-detect the location of the CA bundle, it will raise
an error.
The ``update_ca_bundle()`` function can also be passed a string or a list of
strings which represent files on the local system, which should be appended (in
the specified order) to the end of the CA bundle file. This is useful in
environments where private certs need to be made available, and are not
otherwise reasonable to add to the bundle file.
.. code-block:: python
salt.utils.http.update_ca_bundle(
opts=__opts__,
merge_files=[
"/etc/ssl/private_cert_1.pem",
"/etc/ssl/private_cert_2.pem",
"/etc/ssl/private_cert_3.pem",
],
)
Test Mode
~~~~~~~~~
This function may be run in test mode. This mode will perform all work up until
the actual HTTP request. By default, instead of performing the request, an empty
dict will be returned. Using this function with ``TRACE`` logging turned on will
reveal the contents of the headers and POST data to be sent.
Rather than returning an empty dict, an alternate ``test_url`` may be passed in.
If this is detected, then test mode will replace the ``url`` with the
``test_url``, set ``test`` to ``True`` in the return data, and perform the rest
of the requested operations as usual. This allows a custom, non-destructive URL
to be used for testing when necessary.
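A minimal sketch of both behaviors (assuming the ``test`` and ``test_url``
keyword arguments described above; the URLs are placeholders):
.. code-block:: python
# Perform all work up to, but not including, the actual HTTP request
salt.utils.http.query("http://example.com", test=True)
# Send the request to a safe alternate URL instead
salt.utils.http.query("http://example.com", test=True, test_url="http://test.example.com")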
Execution Module
----------------
The ``http`` execution module is a very thin wrapper around the
``salt.utils.http`` library. The ``opts`` can be passed through as well, but if
they are not specified, the minion defaults will be used as necessary.
Because passing complete data structures from the command line can be tricky at
best and dangerous (in terms of execution injection attacks) at worst, the
``data_file`` and ``header_file`` are likely to see more use here.
All methods for the library are available in the execution module, as kwargs.
.. code-block:: bash
salt myminion http.query http://example.com/restapi method=POST \
username='larry' password='5700g3543v4r' headers=True text=True \
status=True decode_type=xml data_render=True \
header_file=/tmp/headers.txt data_file=/tmp/data.txt \
header_render=True cookies=True persist_session=True
Runner Module
-------------
Like the execution module, the ``http`` runner module is a very thin wrapper
around the ``salt.utils.http`` library. The only significant difference is that
because runners execute on the master instead of a minion, a target is not
required, and default opts will be derived from the master config, rather than
the minion config.
All methods for the library are available in the runner module, as kwargs.
.. code-block:: bash
salt-run http.query http://example.com/restapi method=POST \
username='larry' password='5700g3543v4r' headers=True text=True \
status=True decode_type=xml data_render=True \
header_file=/tmp/headers.txt data_file=/tmp/data.txt \
header_render=True cookies=True persist_session=True
State Module
------------
The state module is a wrapper around the runner module, which applies stateful
logic to a query. All kwargs as listed above are specified as usual in state
files, but two more kwargs are available to apply stateful logic. A required
parameter is ``match``, which specifies a pattern to look for in the return
text. By default, this will perform a string comparison, looking for the
value of ``match`` in the return text. In Python terms this looks like:
.. code-block:: python
def myfunc(match, html_text):
if match in html_text:
return True
If more complex pattern matching is required, a regular expression can be used
by specifying a ``match_type``. By default this is set to ``string``, but it
can be manually set to ``pcre`` instead. Please note that despite the name, this
will use Python's ``re.search()`` rather than ``re.match()``.
Therefore, the following states are valid:
.. code-block:: yaml
http://example.com/restapi:
http.query:
- match: 'SUCCESS'
- username: 'larry'
- password: '5700g3543v4r'
- data_render: True
- header_file: /tmp/headers.txt
- data_file: /tmp/data.txt
- header_render: True
- cookies: True
- persist_session: True
http://example.com/restapi:
http.query:
- match_type: pcre
- match: '(?i)succe[ss|ed]'
- username: 'larry'
- password: '5700g3543v4r'
- data_render: True
- header_file: /tmp/headers.txt
- data_file: /tmp/data.txt
- header_render: True
- cookies: True
- persist_session: True
In addition to, or instead of, a match pattern, the status code for a URL can be
checked. This is done using the ``status`` argument:
.. code-block:: yaml
http://example.com/:
http.query:
- status: 200
If both are specified, both will be checked, but if only one is ``True`` and the
other is ``False``, then ``False`` will be returned. In this case, the comments
in the return data will contain information for troubleshooting.
Because this is a monitoring state, it will return extra data to code that
expects it. This data will always include ``text`` and ``status``. Optionally,
``headers`` and ``dict`` may also be requested by setting the ``headers`` and
``decode`` arguments to True, respectively.
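For example, a monitoring state that checks the status code and also requests
headers and decoded data might look like the following (a sketch based on the
arguments described above):
.. code-block:: yaml
http://example.com/restapi:
http.query:
- status: 200
- headers: True
- decode: True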
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/tutorials/http.rst
.. _states-tutorial:
=====================================
States tutorial, part 1 - Basic Usage
=====================================
The purpose of this tutorial is to demonstrate how quickly you can configure a
system to be managed by Salt States. For detailed information about the state
system please refer to the full :ref:`states reference <state-system-reference>`.
This tutorial will walk you through using Salt to configure a minion to run the
Apache HTTP server and to ensure the server is running.
.. include:: /_incl/requisite_incl.rst
Setting up the Salt State Tree
==============================
States are stored in text files on the master and transferred to the minions on
demand via the master's File Server. The collection of state files make up the
``State Tree``.
To start using a central state system in Salt, the Salt File Server must first
be set up. Edit the master config file (:conf_master:`file_roots`) and
uncomment the following lines:
.. code-block:: yaml
file_roots:
base:
- /srv/salt
.. note::
If you are deploying on FreeBSD via ports, the ``file_roots`` path defaults
to ``/usr/local/etc/salt/states``.
Restart the Salt master in order to pick up this change:
.. code-block:: bash
pkill salt-master
salt-master -d
Preparing the Top File
======================
On the master, in the directory uncommented in the previous step,
(``/srv/salt`` by default), create a new file called
:conf_master:`top.sls <state_top>` and add the following:
.. code-block:: yaml
base:
'*':
- webserver
The :ref:`top file <states-top>` is separated into environments (discussed
later). The default environment is ``base``. Under the ``base`` environment a
collection of minion matches is defined; for now simply specify all hosts
(``*``).
.. _targeting-minions:
.. admonition:: Targeting minions
The expressions can use any of the targeting mechanisms used by Salt —
minions can be matched by glob, PCRE regular expression, or by :ref:`grains
<targeting-grains>`. For example:
.. code-block:: yaml
base:
'os:Fedora':
- match: grain
- webserver
Create an ``sls`` file
======================
In the same directory as the :ref:`top file <states-top>`, create a file
named ``webserver.sls``, containing the following:
.. code-block:: yaml
apache: # ID declaration
pkg: # state declaration
- installed # function declaration
The first line, called the :ref:`id-declaration`, is an arbitrary identifier.
In this case it defines the name of the package to be installed.
.. note::
The package name for the Apache httpd web server may differ depending on
OS or distro — for example, on Fedora it is ``httpd`` but on
Debian/Ubuntu it is ``apache2``.
The second line, called the :ref:`state-declaration`, defines which of the Salt
States we are using. In this example, we are using the :mod:`pkg state
<salt.states.pkg>` to ensure that a given package is installed.
The third line, called the :ref:`function-declaration`, defines which function
in the :mod:`pkg state <salt.states.pkg>` module to call.
.. admonition:: Renderers
States ``sls`` files can be written in many formats. Salt requires only
a simple data structure and is not concerned with how that data structure
is built. Templating languages and `DSLs`_ are a dime-a-dozen and everyone
has a favorite.
Building the expected data structure is the job of Salt :ref:`renderers`
and they are dead-simple to write.
In this tutorial we will be using YAML in Jinja2 templates, which is the
default format. The default can be changed by editing
:conf_master:`renderer` in the master configuration file.
.. _`DSLs`: https://en.wikipedia.org/wiki/Domain-specific_language
.. _running-highstate:
Install the package
===================
Next, let's run the state we created. Open a terminal on the master and run:
.. code-block:: bash
salt '*' state.apply
Our master is instructing all targeted minions to run :func:`state.apply
<salt.modules.state.apply_>`. When this function is executed without any SLS
targets, a minion will download the :ref:`top file <states-top>` and attempt to
match the expressions within it. When the minion does match an expression the
modules listed for it will be downloaded, compiled, and executed.
.. note::
This action is referred to as a "highstate", and can be run using the
:py:func:`state.highstate <salt.modules.state.highstate>` function.
However, to make the usage easier to understand ("highstate" is not
necessarily an intuitive name), a :py:func:`state.apply
<salt.modules.state.apply_>` function was added in version 2015.5.0, which
when invoked without any SLS names will trigger a highstate.
:py:func:`state.highstate <salt.modules.state.highstate>` still exists and
can be used, but the documentation (as can be seen above) has been updated
to reference :py:func:`state.apply <salt.modules.state.apply_>`, so keep
the following in mind as you read the documentation:
- :py:func:`state.apply <salt.modules.state.apply_>` invoked without any
SLS names will run :py:func:`state.highstate
<salt.modules.state.highstate>`
- :py:func:`state.apply <salt.modules.state.apply_>` invoked with SLS names
will run :py:func:`state.sls <salt.modules.state.sls>`
Once completed, the minion will report back with a summary of all actions taken
and all changes made.
.. warning::
If you have created :ref:`custom grain modules <writing-grains>`, they will
not be available in the top file until after the first :ref:`highstate
<running-highstate>`. To make custom grains available on a minion's first
:ref:`highstate <running-highstate>`, it is recommended to use :ref:`this
example <minion-start-reactor>` to ensure that the custom grains are synced
when the minion starts.
.. _sls-file-namespace:
.. admonition:: SLS File Namespace
Note that in the :ref:`example <targeting-minions>` above, the SLS file
``webserver.sls`` was referred to simply as ``webserver``. The namespace
for SLS files when referenced in :conf_master:`top.sls <state_top>` or an :ref:`include-declaration`
follows a few simple rules:
1. The ``.sls`` is discarded (i.e. ``webserver.sls`` becomes
``webserver``).
2. Subdirectories can be used for better organization.
a. Each subdirectory under the configured file_roots (default:
``/srv/salt/``) is represented with a dot (following the Python
import model) in Salt states and on the command line.
``webserver/dev.sls`` on the filesystem is referred to as
``webserver.dev`` in Salt
b. Because slashes are represented as dots, SLS files can not contain
dots in the name (other than the dot for the SLS suffix). The SLS
file ``webserver_1.0.sls`` can not be matched, and ``webserver_1.0``
would match the directory/file ``webserver_1/0.sls``
3. A file called ``init.sls`` in a subdirectory is referred to by the path
of the directory. So, ``webserver/init.sls`` is referred to as
``webserver``.
4. If both ``webserver.sls`` and ``webserver/init.sls`` happen to exist,
``webserver/init.sls`` will be ignored and ``webserver.sls`` will be the
file referred to as ``webserver``.
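Put together, these rules give a layout like the following (an illustrative
tree; the comments show how each file is referenced):
.. code-block:: text
/srv/salt/top.sls
/srv/salt/webserver.sls # referred to as "webserver"
/srv/salt/webserver/init.sls # also "webserver" (ignored here, per rule 4)
/srv/salt/webserver/dev.sls # referred to as "webserver.dev"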
.. admonition:: Troubleshooting Salt
If the expected output isn't seen, the following tips can help to
narrow down the problem.
Turn up logging
Salt can be quite chatty when you change the logging setting to
``debug``:
.. code-block:: bash
salt-minion -l debug
Run the minion in the foreground
By not starting the minion in daemon mode (:option:`-d <salt-minion -d>`)
one can view any output from the minion as it works:
.. code-block:: bash
salt-minion
Increase the default timeout value when running :command:`salt`. For
example, to change the default timeout to 60 seconds:
.. code-block:: bash
salt -t 60
For best results, combine all three:
.. code-block:: bash
salt-minion -l debug # On the minion
salt '*' state.apply -t 60 # On the master
Next steps
==========
This tutorial focused on getting a simple Salt States configuration working.
:ref:`Part 2 <tutorial-states-part-2>` will build on this example to cover more advanced
``sls`` syntax and will explore more of the states that ship with Salt.
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/tutorials/states_pt1.rst
.. _troubleshooting:
===============
Troubleshooting
===============
The intent of the troubleshooting section is to introduce solutions to a
number of common issues encountered by users and the tools that are available
to aid in developing States and Salt code.
Troubleshooting the Salt Master
===============================
If your Salt master is having issues such as minions not returning data, slow
execution times, or a variety of other issues, the following links contain
details on troubleshooting the most common issues encountered:
.. toctree::
:maxdepth: 2
master
Troubleshooting the Salt Minion
===============================
In the event that your Salt minion is having issues, a variety of solutions
and suggestions are available. Please refer to the following links for more information:
.. toctree::
:maxdepth: 2
minion
Running in the Foreground
=========================
A great deal of information is available via the debug logging system, if you
are having issues with minions connecting or not starting run the minion and/or
master in the foreground:
.. code-block:: bash
salt-master -l debug
salt-minion -l debug
Anyone wanting to run Salt daemons via a process supervisor such as `monit`_,
`runit`_, or `supervisord`_, should omit the ``-d`` argument to the daemons and
run them in the foreground.
.. _`monit`: https://mmonit.com/monit/
.. _`runit`: http://smarden.org/runit/
.. _`supervisord`: http://supervisord.org/
What Ports do the Master and Minion Need Open?
==============================================
No ports need to be opened up on each minion. For the master, TCP ports 4505
and 4506 need to be open. If you've put both your Salt master and minion in
debug mode and don't see an acknowledgment that your minion has connected,
it could very well be a firewall.
You can check port connectivity from the minion with the nc command:
.. code-block:: bash
nc -v -z salt.master.ip 4505
nc -v -z salt.master.ip 4506
There is also a :ref:`firewall configuration<firewall>`
document that might help as well.
If you've enabled the right TCP ports on your operating system or Linux
distribution's firewall and still aren't seeing connections, check that no
additional access control system such as `SELinux`_ or `AppArmor`_ is blocking
Salt.
.. _`SELinux`: https://en.wikipedia.org/wiki/Security-Enhanced_Linux
.. _`AppArmor`: https://gitlab.com/apparmor/apparmor/-/wikis/home
.. _using-salt-call:
Using salt-call
===============
The ``salt-call`` command was originally developed for aiding in the development
of new Salt modules. Since then, many applications have been developed for
running any Salt module locally on a minion. These range from the original
intent of salt-call, development assistance, to gathering more verbose output
from calls like :mod:`state.apply <salt.modules.state.apply_>`.
When initially creating your state tree, it is generally recommended to invoke
:mod:`state.apply <salt.modules.state.apply_>` directly from the minion with
``salt-call``, rather than remotely from the master. This displays far more
information about the execution than calling it remotely. For even more
verbosity, increase the loglevel using the ``-l`` argument:
.. code-block:: bash
salt-call -l debug state.apply
The main difference between using ``salt`` and using ``salt-call`` is that
``salt-call`` is run from the minion, and it only runs the selected function on
that minion. By contrast, ``salt`` is run from the master, and requires you to
specify the minions on which to run the command using salt's :ref:`targeting
system <targeting>`.
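For example, the same function invoked both ways (a sketch using the builtin
``test.ping``; ``web01`` is a placeholder minion ID):
.. code-block:: bash
salt-call test.ping # runs locally, on the minion itself
salt 'web01' test.ping # runs from the master, targeting a minion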
Too many open files
===================
The salt-master needs at least 2 sockets per host that connects to it: one for
the publisher and one for the response port. Thus, large installations may, upon
scaling up the number of minions accessing a given master, encounter:
.. code-block:: console
12:45:29,289 [salt.master ][INFO ] Starting Salt worker process 38
Too many open files
sock != -1 (tcp_listener.cpp:335)
The solution to this would be to check the number of files allowed to be
opened by the user running salt-master (root by default):
.. code-block:: bash
[root@salt-master ~]# ulimit -n
1024
And modify that value to be at least equal to the number of minions x 2.
This setting can be changed in limits.conf as the nofile value(s),
and activated upon a new login of the specified user.
So, an environment with 1800 minions would need 1800 x 2 = 3600 as a minimum.
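For example, for the 1800-minion environment above, the ``limits.conf``
entries might look like the following (a sketch, assuming salt-master runs as
root and rounding up from the 3600 minimum):
.. code-block:: text
root hard nofile 4096
root soft nofile 4096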
Salt Master Stops Responding
============================
There are known bugs with ZeroMQ versions less than 2.1.11 which can cause the
Salt master to not respond properly. If you're running a ZeroMQ version greater
than or equal to 2.1.9, you can work around the bug by setting the sysctls
``net.core.rmem_max`` and ``net.core.wmem_max`` to 16777216. Next, set the third
field in ``net.ipv4.tcp_rmem`` and ``net.ipv4.tcp_wmem`` to at least 16777216.
You can do it manually with something like:
.. code-block:: bash
# echo 16777216 > /proc/sys/net/core/rmem_max
# echo 16777216 > /proc/sys/net/core/wmem_max
# echo "4096 87380 16777216" > /proc/sys/net/ipv4/tcp_rmem
# echo "4096 87380 16777216" > /proc/sys/net/ipv4/tcp_wmem
Or with the following Salt state:
.. code-block:: yaml
:linenos:
net.core.rmem_max:
sysctl:
- present
- value: 16777216
net.core.wmem_max:
sysctl:
- present
- value: 16777216
net.ipv4.tcp_rmem:
sysctl:
- present
- value: 4096 87380 16777216
net.ipv4.tcp_wmem:
sysctl:
- present
- value: 4096 87380 16777216
Salt and SELinux
================
Currently there are no SELinux policies for Salt. For the most part Salt runs
without issue when SELinux is running in Enforcing mode. This is because when
the minion executes as a daemon the type context is changed to ``initrc_t``.
The problem with SELinux arises when using salt-call or running the minion in
the foreground, since the type context stays ``unconfined_t``.
This problem is generally manifest in the rpm install scripts when using the
pkg module. Until a full SELinux Policy is available for Salt the solution
to this issue is to set the execution context of ``salt-call`` and
``salt-minion`` to rpm_exec_t:
.. code-block:: bash
# CentOS 5 and RHEL 5:
chcon -t system_u:system_r:rpm_exec_t:s0 /usr/bin/salt-minion
chcon -t system_u:system_r:rpm_exec_t:s0 /usr/bin/salt-call
# CentOS 6 and RHEL 6:
chcon system_u:object_r:rpm_exec_t:s0 /usr/bin/salt-minion
chcon system_u:object_r:rpm_exec_t:s0 /usr/bin/salt-call
This works well, because the ``rpm_exec_t`` context has very broad control over
other types.
Red Hat Enterprise Linux 5
==========================
Salt requires Python 2.6 or 2.7. Red Hat Enterprise Linux 5 and its variants
come with Python 2.4 installed by default. When installing on RHEL 5 from the
`EPEL repository`_ this is handled for you. But, if you run Salt from git, be
advised that its dependencies need to be installed from EPEL and that Salt
needs to be run with the ``python26`` executable.
.. _`EPEL repository`: https://fedoraproject.org/wiki/EPEL
Common YAML Gotchas
===================
An extensive list of YAML idiosyncrasies has been compiled:
.. toctree::
:maxdepth: 2
yaml_idiosyncrasies
Live Python Debug Output
========================
If the minion or master seems to be unresponsive, a SIGUSR1 can be passed to
the processes to display where in the code they are running. If encountering a
situation like this, this debug information can be invaluable. First make
sure the master or minion is running in the foreground:
.. code-block:: bash
salt-master -l debug
salt-minion -l debug
Then pass the signal to the master or minion when it seems to be unresponsive:
.. code-block:: bash
killall -SIGUSR1 salt-master
killall -SIGUSR1 salt-minion
On BSD and macOS, in addition to the SIGUSR1 signal, the debug subroutine is
also set up for SIGINFO, which has the advantage of being sent via the Ctrl+T
shortcut.
When filing an issue or sending questions to the mailing list for a problem
with an unresponsive daemon this information can be invaluable.
Salt 0.16.x minions cannot communicate with a 0.17.x master
===========================================================
As of release 0.17.1 you can no longer run different versions of Salt on your
Master and Minion servers. This is due to a protocol change for security
purposes. The Salt team will continue to attempt to ensure versions are as
backwards compatible as possible.
Debugging the Master and Minion
===============================
A list of common :ref:`master<troubleshooting-salt-master>` and
:ref:`minion<troubleshooting-minion-salt-call>` troubleshooting steps provide a
starting point for resolving issues you may encounter.
/salt-3006.2.tar.gz/salt-3006.2/doc/topics/troubleshooting/index.rst
[](https://travis-ci.com/saltant-org/saltant-cli)
[](https://codecov.io/gh/saltant-org/saltant-cli)
[](https://pypi.org/project/saltant-cli/)
[](https://pypi.org/project/saltant-cli/)
[](https://github.com/ambv/black)
# saltant CLI
saltant-cli is a CLI for
[saltant](https://github.com/saltant-org/saltant) written on top of
[saltant-py](https://github.com/saltant-org/saltant-py). It lets you
interface with a saltant API conveniently from a terminal.
## Installation
Using pip,
```
pip install saltant-cli
```
or, from source, after cloning this repository, run
```
python setup.py install
```
where `python` is version 2.7 or 3.5+.
However you choose to install saltant-cli, make sure that the binary
resulting from the above commands is somewhere on your `$PATH`. On some
systems, this may involve running the above commands as root.
### Running from source
Alternatively, instead of installing saltant-cli you can run it directly
from source using the script [`run_saltant_cli.py`](run_saltant_cli.py).
### Setting up a configuration file
In order to run saltant-cli, it needs to know where your saltant server
is and how to authenticate your user. To get this information,
saltant-cli looks for a config file located at
`$XDG_CONFIG_HOME/saltant-cli/config.yaml`; if `$XDG_CONFIG_HOME` isn't
defined, `$HOME/.config` is used instead. Alternatively, you can use a
`config.yaml` file at the root of the project's repository, which is
useful when running from source.
The easiest way to set up a config file is to run
```
saltant-cli --setup
```
which interactively constructs and writes a config file to
`$XDG_CONFIG_HOME/saltant-cli/config.yaml`.
Alternatively, you can copy the example config file,
[`config.yaml.example`](config.yaml.example), to where it needs to go,
and fill in the file with your favourite text editor:
```
mkdir -p $XDG_CONFIG_HOME/saltant-cli
cp config.yaml.example $XDG_CONFIG_HOME/saltant-cli/config.yaml
```
There may be times where it is advantageous to juggle multiple config
files; to do so, you can specify the `--config-path` option, like so:
```
saltant-cli --config-path /path/to/config.yaml mycommandhere
```
### Shell command completion
Assuming you installed normally, i.e., you aren't running from source,
saltant-cli supports command completion for
[Bash](https://www.gnu.org/software/bash/), [Zsh](https://www.zsh.org/),
[fish](https://fishshell.com/), and
[PowerShell](https://docs.microsoft.com/en-us/powershell/scripting/powershell-scripting?view=powershell-6).
To install any of these, run
```
saltant-cli completion install my-shell-type
```
where `my-shell-type` is either `bash`, `zsh`, `fish`, or `powershell`
(or blank if you want to use the current shell type).
## Usage
Here you're going to find `--help` your best friend. Run this at any
stage of the command tree to learn more about what to do!
### Command tree
Here's a sketch of what you can do. Again, supply `--help` at any point
to figure out how to use a given command.
```
saltant-cli
├── completion
│ └── install
├── container-task-instances
│ ├── clone
│ ├── create
│ ├── get
│ ├── list
│ ├── terminate
│ └── wait
├── container-task-types
│ ├── create
│ ├── get
│ ├── list
│ └── put
├── executable-task-instances
│ ├── clone
│ ├── create
│ ├── get
│ ├── list
│ ├── terminate
│ └── wait
├── executable-task-types
│ ├── create
│ ├── get
│ ├── list
│ └── put
├── task-queues
│ ├── create
│ ├── get
│ ├── list
│ └── put
├── task-whitelists
│ ├── create
│ ├── get
│ ├── list
│ └── put
└── users
├── get
└── list
```
### Examples
Let's go through a few examples. First, let's list some container task
types using some API filters:
```
saltant-cli container-task-types list --filters '{"user_username_in": ["matt", "daniel"]}'
```
Great! This will show us the container task types created by Matt and
Daniel! Secondly, let's create a task queue:
```
saltant-cli task-queues create --name "amazing-task-queue" --description "Seriously best task queue ever."
```
If we get confused about how to use this command, all we need to do is
drop in `--help`. Third, let's do just that:
```
saltant-cli task-queues create --help
```
which will give us
```
$ saltant-cli task-queues create --help
Usage: saltant-cli task-queues create [OPTIONS]
Create a task queue.
Options:
--name TEXT The name of the task queue. [required]
--description TEXT A description of the task queue.
--private BOOLEAN Whether the task queue is exclusive to the creator.
[default: False]
--active BOOLEAN Whether the task queue is active. [default: True]
--help Show this message and exit.
```
## See also
[saltant-py](https://github.com/saltant-org/saltant-py/), a saltant SDK
for Python.
/saltant-cli-0.3.1.tar.gz/saltant-cli-0.3.1/README.md
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
from .resource import (
generic_create_command,
generic_get_command,
generic_list_command,
generic_put_command,
)
from .utils import list_options, PythonLiteralOption
TASK_WHITELIST_GET_ATTRS = (
"id",
"user",
"name",
"description",
"whitelisted_container_task_types",
"whitelisted_executable_task_types",
)
TASK_WHITELIST_LIST_ATTRS = ("id", "user", "name", "description")
@click.group()
def task_whitelists():
"""Command group for task whitelists."""
pass
@task_whitelists.command(name="get")
@click.argument("id", nargs=1, type=click.INT)
@click.pass_context
def get_task_whitelist(ctx, id):
"""Get task whitelist based on ID."""
generic_get_command("task_whitelists", TASK_WHITELIST_GET_ATTRS, ctx, id)
@task_whitelists.command(name="list")
@list_options
@click.pass_context
def list_task_whitelists(ctx, filters, filters_file):
"""List task whitelists matching filter parameters."""
generic_list_command(
"task_whitelists",
TASK_WHITELIST_LIST_ATTRS,
ctx,
filters,
filters_file,
)
@task_whitelists.command(name="create")
@click.option("--name", help="The name of the task whitelist.", required=True)
@click.option(
"--description", help="A description of the task whitelist.", default=""
)
@click.option(
"--whitelisted-container-task-types",
help="IDs of the whitelists container task types.",
cls=PythonLiteralOption,
default=[],
show_default=True,
)
@click.option(
"--whitelisted-executable-task-types",
help="IDs of the whitelists executable task types.",
cls=PythonLiteralOption,
default=[],
show_default=True,
)
@click.pass_context
def create_task_whitelist(ctx, **kwargs):
"""Create a task whitelist."""
generic_create_command(
"task_whitelists", TASK_WHITELIST_GET_ATTRS, ctx, **kwargs
)
@task_whitelists.command(name="put")
@click.argument("id", nargs=1, type=click.INT)
@click.option("--name", required=True, help="The name of the task whitelist.")
@click.option(
"--description", required=True, help="A description of the task whitelist."
)
@click.option(
"--whitelisted-container-task-types",
help="IDs of the whitelists container task types.",
cls=PythonLiteralOption,
required=True,
show_default=True,
)
@click.option(
"--whitelisted-executable-task-types",
help="IDs of the whitelists executable task types.",
cls=PythonLiteralOption,
required=True,
show_default=True,
)
@click.pass_context
def put_task_whitelist(ctx, id, **kwargs):
"""Update a task whitelist, overwritting all its attributes."""
generic_put_command(
"task_whitelists", TASK_WHITELIST_GET_ATTRS, ctx, id, **kwargs
)
/saltant-cli-0.3.1.tar.gz/saltant-cli-0.3.1/saltant_cli/subcommands/task_whitelists.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
import click_spinner
from saltant.exceptions import BadHttpRequestError
from .utils import combine_filter_json, generate_table, generate_list_display
def generic_get_command(manager_name, attrs, ctx, id):
"""Performs a generic get command.
Args:
manager_name: A string containing the name of the
saltant.client.Client's manager to use. For example,
"task_queues".
attrs: An iterable containing the attributes of the object to
use when displaying it.
ctx: A click.core.Context object containing information about
the Click session.
id: A string or int (depending on the object type) containing
the primary identifier of the object to get.
"""
# Get the client from the context
client = ctx.obj["client"]
# Query for the object
try:
manager = getattr(client, manager_name)
object = manager.get(id)
# Output a list display of the object
output = generate_list_display(object, attrs)
except BadHttpRequestError:
# Bad request
output = "not found"
click.echo(output)
def generic_put_command(manager_name, attrs, ctx, id, **kwargs):
"""Performs a generic put command.
Args:
manager_name: A string containing the name of the
saltant.client.Client's manager to use. For example,
"task_queues".
attrs: An iterable containing the attributes of the object to
use when displaying it.
ctx: A click.core.Context object containing information about
the Click session.
id: A string or int (depending on the object type) containing
the primary identifier of the object to update.
**kwargs: A dictionary of arbitrary keyword arguments which
should match attributes used to update the object.
"""
# Get the client from the context
client = ctx.obj["client"]
# Create the object
manager = getattr(client, manager_name)
object = manager.put(id, **kwargs)
# Output a list display of the object created
output = generate_list_display(object, attrs)
click.echo(output)
def generic_create_command(manager_name, attrs, ctx, **kwargs):
"""Performs a generic create command.
Args:
manager_name: A string containing the name of the
saltant.client.Client's manager to use. For example,
"task_queues".
attrs: An iterable containing the attributes of the object to
use when displaying it.
ctx: A click.core.Context object containing information about
the Click session.
**kwargs: A dictionary of arbitrary keyword arguments which
should match attributes used to create the object.
"""
# Get the client from the context
client = ctx.obj["client"]
# Create the object
manager = getattr(client, manager_name)
object = manager.create(**kwargs)
# Output a list display of the object created
output = generate_list_display(object, attrs)
click.echo(output)
def generic_list_command(manager_name, attrs, ctx, filters, filters_file):
"""Performs a generic list command.
Args:
manager_name: A string containing the name of the
saltant.client.Client's manager to use. For example,
"task_queues".
attrs: An iterable containing the attributes of the object to
use when displaying the list.
ctx: A click.core.Context object containing information about
the Click session.
filters: A JSON-encoded string containing filter information.
filters_file: A string containing a path to a JSON-encoded file
specifying filter information.
"""
# Get the client from the context
client = ctx.obj["client"]
# Build up JSON filters to use
combined_filters = combine_filter_json(filters, filters_file)
# Query for objects
manager = getattr(client, manager_name)
object_list = manager.list(combined_filters)
# Output a pretty table
output = generate_table(object_list, attrs)
click.echo_via_pager(output)
def generic_clone_command(manager_name, attrs, ctx, uuid):
"""Performs a generic clone command for task instances.
Args:
manager_name: A string containing the name of the
saltant.client.Client's manager to use. For example,
"executable_task_instances".
attrs: An iterable containing the attributes of the object to
use when displaying it.
ctx: A click.core.Context object containing information about
the Click session.
uuid: A string containing the uuid of the task instance to
clone.
"""
# Get the client from the context
client = ctx.obj["client"]
# Clone the task instance
try:
manager = getattr(client, manager_name)
object = manager.clone(uuid)
# Output a list display of the task instance
output = generate_list_display(object, attrs)
except BadHttpRequestError:
# Bad request
output = "task instance %s not found" % uuid
click.echo(output)
def generic_terminate_command(manager_name, attrs, ctx, uuid):
"""Performs a generic terminate command for task instances.
Args:
manager_name: A string containing the name of the
saltant.client.Client's manager to use. For example,
"executable_task_instances".
attrs: An iterable containing the attributes of the object to
use when displaying it.
ctx: A click.core.Context object containing information about
the Click session.
uuid: A string containing the uuid of the task instance to
terminate.
"""
# Get the client from the context
client = ctx.obj["client"]
# Terminate the task instance
try:
manager = getattr(client, manager_name)
object = manager.terminate(uuid)
# Output a list display of the task instance
output = generate_list_display(object, attrs)
except BadHttpRequestError:
# Bad request
output = "task instance %s not found" % uuid
click.echo(output)
def generic_wait_command(manager_name, attrs, ctx, uuid, refresh_period):
"""Performs a generic wait command for task instances.
Args:
manager_name: A string containing the name of the
saltant.client.Client's manager to use. For example,
"executable_task_instances".
attrs: An iterable containing the attributes of the object to
use when displaying it.
ctx: A click.core.Context object containing information about
the Click session.
uuid: A string containing the uuid of the task instance to
wait for.
refresh_period: A float specifying how many seconds to wait in
between checking the task's status.
"""
# Get the client from the context
client = ctx.obj["client"]
# Wait for the task instance
try:
manager = getattr(client, manager_name)
# Wait for the task instance to finish
with click_spinner.spinner():
object = manager.wait_until_finished(uuid, refresh_period)
# Output a list display of the task instance
output = generate_list_display(object, attrs)
except BadHttpRequestError:
# Bad request
output = "task instance %s not found" % uuid
click.echo(output)
/saltant-cli-0.3.1.tar.gz/saltant-cli-0.3.1/saltant_cli/subcommands/resource.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import json
import click
from tabulate import tabulate
class PythonLiteralOption(click.Option):
"""Thanks to Stephen Rauch on stack overflow.
See https://stackoverflow.com/a/47730333
"""
def type_cast_value(self, ctx, value):
try:
return ast.literal_eval(value)
except (ValueError, SyntaxError):
raise click.BadParameter(value)
def list_options(func):
"""Adds in --filters and --filters-file options for a command.
Args:
func: The function to be enclosed.
Returns:
The enclosed function.
"""
filters_option = click.option(
"--filters",
help="Filter keys and values encoded in JSON.",
default=None,
)
filters_file_option = click.option(
"--filters-file",
help="Filter keys and values encoded in a JSON file.",
default=None,
type=click.Path(),
)
return filters_option(filters_file_option(func))
def combine_filter_json(filters, filters_file):
"""Combines filter JSON sources for a list command.
Args:
filters: A JSON-encoded string containing filter information.
filters_file: A string containing a path to a JSON-encoded file
specifying filter information.
Returns:
A dictionary to be encoded into JSON containing the filters
combined from the above sources.
"""
combined_filters = {}
if filters is not None:
combined_filters.update(json.loads(filters))
if filters_file is not None:
with open(filters_file) as f:
combined_filters.update(json.load(f))
return combined_filters
def generate_table(objects, attrs):
"""Generate a table for object(s) based on some attributes.
Args:
objects: An iterable of objects which have specific attributes.
attrs: An iterable of strings containing attributes to
get from the above objects.
Returns:
A string containing the tabulated objects with respect to the
passed in attributes.
"""
return tabulate(
[[getattr(object, attr) for attr in attrs] for object in objects],
headers=attrs,
)
def generate_list_display(object, attrs):
"""Generate a display string for an object based on some attributes.
Args:
object: An object which has specific attributes.
attrs: An iterable of strings containing attributes to get from
the above object.
Returns:
A string containing a list display of the object with respect to
the passed in attributes.
"""
return "\n".join(
click.style(attr, bold=True) + ": %s" % getattr(object, attr)
for attr in attrs
)
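# Illustrative usage of the helpers above (a sketch; the values are
# hypothetical and not part of the original module):
#
# combined = combine_filter_json('{"name": "my-queue"}', None)
# # combined == {"name": "my-queue"}
#
# print(generate_table(objects, ("id", "name")))
# # prints a table with "id" and "name" columns, one row per object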
/saltant-cli-0.3.1.tar.gz/saltant-cli-0.3.1/saltant_cli/subcommands/utils.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .base_task_type import BaseTaskType, BaseTaskTypeManager
class ContainerTaskType(BaseTaskType):
"""Model for container task types.
Attributes:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime when
the task type was created.
command_to_run (str): The command to run inside the container to
execute the task.
environment_variables (list): The environment variables required
on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for the
task's required arguments.
logs_path (str): The path of the logs directory inside the
container.
results_path (str): The path of the results directory inside the
container.
container_image (str): The container name and tag. For example,
ubuntu:14.04 for Docker; and docker://ubuntu:14.04 or
shub://vsoch/hello-world for Singularity.
container_type (str): The type of the container.
manager (:class:`saltant.models.container_task_type.ContainerTaskTypeManager`):
The task type manager which spawned this task type.
"""
def __init__(
self,
id,
name,
description,
user,
datetime_created,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
logs_path,
results_path,
container_image,
container_type,
manager,
):
"""Initialize a container task type.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime
when the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
the task's required arguments.
logs_path (str): The path of the logs directory inside the
container.
results_path (str): The path of the results directory inside
the container.
container_image (str): The container name and tag. For
example, ubuntu:14.04 for Docker; and docker://ubuntu:14.04
or shub://vsoch/hello-world for Singularity.
container_type (str): The type of the container.
manager (:class:`saltant.models.container_task_type.ContainerTaskTypeManager`):
The task type manager which spawned this task type.
"""
# Call the parent constructor
super(ContainerTaskType, self).__init__(
id=id,
name=name,
description=description,
user=user,
datetime_created=datetime_created,
command_to_run=command_to_run,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=required_arguments_default_values,
manager=manager,
)
# Add in the attributes unique to container task types
self.logs_path = logs_path
self.results_path = results_path
self.container_image = container_image
self.container_type = container_type
def put(self):
"""Updates this task type on the saltant server.
Returns:
:class:`saltant.models.container_task_type.ContainerTaskType`:
A task type model instance representing the task type
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
command_to_run=self.command_to_run,
environment_variables=self.environment_variables,
required_arguments=self.required_arguments,
required_arguments_default_values=(
self.required_arguments_default_values
),
logs_path=self.logs_path,
results_path=self.results_path,
container_image=self.container_image,
container_type=self.container_type,
)
class ContainerTaskTypeManager(BaseTaskTypeManager):
"""Manager for container task types.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list task types.
detail_url (str): The URL format to get specific task types.
model (:class:`saltant.models.container_task_type.ContainerTaskType`):
The model of the task instance being used.
"""
list_url = "containertasktypes/"
detail_url = "containertasktypes/{id}/"
model = ContainerTaskType
def create(
self,
name,
command_to_run,
container_image,
container_type,
description="",
logs_path="",
results_path="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
extra_data_to_post=None,
):
"""Create a container task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
container_image (str): The container name and tag. For
example, ubuntu:14.04 for Docker; and docker://ubuntu:14.04
or shub://vsoch/hello-world for Singularity.
container_type (str): The type of the container.
description (str, optional): The description of the task type.
logs_path (str, optional): The path of the logs directory
inside the container.
results_path (str, optional): The path of the results
directory inside the container.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the task's required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.container_task_type.ContainerTaskType`:
A container task type model instance representing the
task type just created.
"""
# Add in extra data specific to container task types
if extra_data_to_post is None:
extra_data_to_post = {}
extra_data_to_post.update(
{
"container_image": container_image,
"container_type": container_type,
"logs_path": logs_path,
"results_path": results_path,
}
)
# Call the parent create function
return super(ContainerTaskTypeManager, self).create(
name=name,
command_to_run=command_to_run,
description=description,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=required_arguments_default_values,
extra_data_to_post=extra_data_to_post,
)
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
logs_path,
results_path,
container_image,
container_type,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
            required_arguments (list): The argument names for the task type.
            required_arguments_default_values (dict): Default values for
                the task's required arguments.
            logs_path (str): The path of the logs directory inside the
                container.
            results_path (str): The path of the results directory inside
                the container.
            container_image (str): The container name and tag. For
                example, ubuntu:14.04 for Docker; and docker://ubuntu:14.04
                or shub://vsoch/hello-world for Singularity.
            container_type (str): The type of the container.
            extra_data_to_put (dict, optional): Extra key-value pairs to
                add to the request data. This is useful for subclasses
                which require extra parameters.
        Returns:
            :class:`saltant.models.container_task_type.ContainerTaskType`:
                A container task type model instance representing the
                task type just updated.
        """
# Add in extra data specific to container task types
if extra_data_to_put is None:
extra_data_to_put = {}
extra_data_to_put.update(
{
"logs_path": logs_path,
"results_path": results_path,
"container_image": container_image,
"container_type": container_type,
}
)
        # Call the parent put function
return super(ContainerTaskTypeManager, self).put(
id=id,
name=name,
description=description,
command_to_run=command_to_run,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=(
required_arguments_default_values
),
extra_data_to_put=extra_data_to_put,
)
|
/saltant-py-0.4.0.tar.gz/saltant-py-0.4.0/saltant/models/container_task_type.py
| 0.7874 | 0.198278 |
container_task_type.py
|
pypi
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from saltant.exceptions import BadHttpRequestError
from saltant.constants import HTTP_200_OK
class Model(object):
"""Base class for representing a model.
Attributes:
        manager (:class:`saltant.models.resource.ModelManager`):
The manager which spawned this model instance.
"""
def __init__(self, manager):
"""Initialize the model.
Args:
            manager (:class:`saltant.models.resource.ModelManager`):
The manager which spawned this model instance.
"""
self.manager = manager
class ModelManager(object):
"""Base class for a model manager.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list models.
detail_url (str): The URL format to get specific models.
model (:class:`saltant.models.resource.Model`): The model
being used.
"""
list_url = "NotImplemented"
detail_url = "NotImplemented"
model = Model
def __init__(self, _client):
"""Save the client so we can make API calls in the manager.
Args:
_client (:class:`saltant.client.Client`): An
authenticated saltant client.
"""
self._client = _client
def list(self, filters=None):
"""List model instances.
        Currently this gets *everything* by requesting a single huge
        page from the API (unless explicit pagination filters are
        supplied). This may be unsuitable for production environments
        with huge databases, so finer-grained page support should
        likely be added at some point.
Args:
filters (dict, optional): API query filters to apply to the
request. For example:
.. code-block:: python
{'name__startswith': 'azure',
'user__in': [1, 2, 3, 4],}
See saltant's API reference at
https://saltant-org.github.io/saltant/ for each model's
available filters.
Returns:
list:
A list of :class:`saltant.models.resource.Model`
subclass instances (for example, container task type
model instances).
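        Example:
            A minimal sketch, assuming ``manager`` is a subclass
            instance wired to an authenticated client (the filter
            values are hypothetical):
            >>> instances = manager.list(
            ...     filters={"name__startswith": "azure"}
            ... )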
"""
# Add in the page and page_size parameters to the filter, such
# that our request gets *all* objects in the list. However,
# don't do this if the user has explicitly included these
# parameters in the filter.
if not filters:
filters = {}
if "page" not in filters:
filters["page"] = 1
if "page_size" not in filters:
# The below "magic number" is 2^63 - 1, which is the largest
# number you can hold in a 64 bit integer. The main point
# here is that we want to get everything in one page (unless
# otherwise specified, of course).
filters["page_size"] = 9223372036854775807
# Form the request URL - first add in the query filters
query_filter_sub_url = ""
for idx, filter_param in enumerate(filters):
# Prepend '?' or '&'
if idx == 0:
query_filter_sub_url += "?"
else:
query_filter_sub_url += "&"
# Add in the query filter
query_filter_sub_url += "{param}={val}".format(
param=filter_param, val=filters[filter_param]
)
# Stitch together all sub-urls
request_url = (
self._client.base_api_url + self.list_url + query_filter_sub_url
)
# Make the request
response = self._client.session.get(request_url)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a list of model instances
return self.response_data_to_model_instances_list(response.json())
def get(self, id):
"""Get the model instance with a given id.
Args:
id (int or str): The primary identifier (e.g., pk or UUID)
for the task instance to get.
Returns:
:class:`saltant.models.resource.Model`:
A :class:`saltant.models.resource.Model` subclass
instance representing the resource requested.
"""
# Get the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
response = self._client.session.get(request_url)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance
return self.response_data_to_model_instance(response.json())
def response_data_to_model_instance(self, response_data):
"""Convert get response data to a model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.resource.Model`:
A :class:`saltant.models.resource.Model` subclass
instance representing the resource given in the
request's response data.
"""
# Add in this manager to the data
response_data["manager"] = self
# Instantiate a model
return self.model(**response_data)
def response_data_to_model_instances_list(self, response_data):
"""Convert list response data to a list of models.
Args:
response_data (dict): The data from the request's response.
Returns:
list:
A list of :class:`saltant.models.resource.Model`
subclass instances.
"""
return [
self.response_data_to_model_instance(subdata)
for subdata in response_data["results"]
]
@staticmethod
def validate_request_success(
response_text, request_url, status_code, expected_status_code
):
"""Validates that a request was successful.
Args:
response_text (str): The response body of the request.
request_url (str): The URL the request was made at.
status_code (int): The status code of the response.
expected_status_code (int): The expected status code of the
response.
Raises:
:class:`saltant.exceptions.BadHttpRequestError`: The HTTP
request failed.
"""
        # Compare explicitly rather than using assert, which would be
        # stripped when Python runs with optimizations enabled
        if status_code != expected_status_code:
            msg = (
                "Request to {url} failed with status {status_code}:\n"
                "The response from the request was as follows:\n\n"
                "{content}"
            ).format(
                url=request_url, status_code=status_code, content=response_text
            )
            raise BadHttpRequestError(msg)
|
/saltant-py-0.4.0.tar.gz/saltant-py-0.4.0/saltant/models/resource.py
| 0.925171 | 0.219819 |
resource.py
|
pypi
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import dateutil.parser
from saltant.constants import HTTP_200_OK, HTTP_201_CREATED
from .resource import Model, ModelManager
class BaseTaskType(Model):
"""Base model for a task type.
Attributes:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime when
the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables required
on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for the
task's required arguments.
manager (:class:`saltant.models.base_task_type.BaseTaskTypeManager`):
The task type manager which spawned this task type.
"""
def __init__(
self,
id,
name,
description,
user,
datetime_created,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
manager,
):
"""Initialize a task type.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime
when the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
                the task's required arguments.
manager (:class:`saltant.models.base_task_type.BaseTaskTypeManager`):
The task type manager which spawned this task type.
"""
# Call parent constructor
super(BaseTaskType, self).__init__(manager)
self.id = id
self.name = name
self.description = description
self.user = user
self.datetime_created = datetime_created
self.command_to_run = command_to_run
self.environment_variables = environment_variables
self.required_arguments = required_arguments
self.required_arguments_default_values = (
required_arguments_default_values
)
def __str__(self):
"""String representation of the task type."""
return "%s (%s)" % (self.name, self.user)
def sync(self):
"""Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
This task type instance after syncing.
"""
        # Rebinding ``self`` would not update this object in place, so
        # copy the fresh attributes onto it to match the docstring
        latest = self.manager.get(id=self.id)
        self.__dict__.update(latest.__dict__)
        return self
def put(self):
"""Updates this task type on the saltant server.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
command_to_run=self.command_to_run,
environment_variables=self.environment_variables,
required_arguments=self.required_arguments,
required_arguments_default_values=(
self.required_arguments_default_values
),
)
class BaseTaskTypeManager(ModelManager):
"""Base manager for task types.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list task types.
detail_url (str): The URL format to get specific task types.
model (:class:`saltant.models.resource.Model`): The model of the
task type being used.
"""
model = BaseTaskType
def get(self, id=None, name=None):
"""Get a task type.
Either the id xor the name of the task type must be specified.
Args:
id (int, optional): The id of the task type to get.
name (str, optional): The name of the task type to get.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
requested.
Raises:
ValueError: Neither id nor name were set *or* both id and
name were set.
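        Example:
            A minimal sketch, assuming ``manager`` is an instance of
            this class wired to an authenticated client (the values
            are hypothetical):
            >>> by_id = manager.get(id=42)
            >>> by_name = manager.get(name="my-task-type")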
"""
# Validate arguments - use an xor
if not ((id is None) ^ (name is None)):
raise ValueError("Either id or name must be set (but not both!)")
# If it's just ID provided, call the parent function
if id is not None:
return super(BaseTaskTypeManager, self).get(id=id)
# Try getting the task type by name
return self.list(filters={"name": name})[0]
def create(
self,
name,
command_to_run,
description="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
extra_data_to_post=None,
):
"""Create a task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
                values for the task's required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
            :class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
just created.
"""
# Set None for optional list and dicts to proper datatypes
if environment_variables is None:
environment_variables = []
if required_arguments is None:
required_arguments = []
if required_arguments_default_values is None:
required_arguments_default_values = {}
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_post is not None:
data_to_post.update(extra_data_to_post)
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task type
return self.response_data_to_model_instance(response.json())
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
                the task's required arguments.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A :class:`saltant.models.base_task_type.BaseTaskType`
subclass instance representing the task type just
updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_put is not None:
data_to_put.update(extra_data_to_put)
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def response_data_to_model_instance(self, response_data):
"""Convert response data to a task type model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A model instance representing the task type from the
                response data.
"""
# Coerce datetime strings into datetime objects
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
# Instantiate a model for the task instance
return super(
BaseTaskTypeManager, self
).response_data_to_model_instance(response_data)
|
/saltant-py-0.4.0.tar.gz/saltant-py-0.4.0/saltant/models/base_task_type.py
| 0.898252 | 0.241266 |
base_task_type.py
|
pypi
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .base_task_type import BaseTaskType, BaseTaskTypeManager
class ExecutableTaskType(BaseTaskType):
"""Model for executable task types.
Attributes:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime when
the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables required
on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for the
task's required arguments.
json_file_option (str): The name of a command line option, e.g.,
--json-file, which accepts a JSON-encoded file for the
command to run.
manager (:class:`saltant.models.executable_task_type.ExecutableTaskTypeManager`):
The task type manager which spawned this task type instance.
"""
def __init__(
self,
id,
name,
description,
user,
datetime_created,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
json_file_option,
manager,
):
"""Initialize a container task type.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
user (str): The user associated with the task type.
datetime_created (:class:`datetime.datetime`): The datetime
when the task type was created.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
                the task's required arguments.
json_file_option (str): The name of a command line option,
e.g., --json-file, which accepts a JSON-encoded file for
the command to run.
            manager (:class:`saltant.models.executable_task_type.ExecutableTaskTypeManager`):
The task type manager which spawned this task type.
"""
# Call the parent constructor
super(ExecutableTaskType, self).__init__(
id=id,
name=name,
description=description,
user=user,
datetime_created=datetime_created,
command_to_run=command_to_run,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=required_arguments_default_values,
manager=manager,
)
# Add in the attributes unique to executable task types
self.json_file_option = json_file_option
def put(self):
"""Updates this task type on the saltant server.
Returns:
            :class:`saltant.models.executable_task_type.ExecutableTaskType`:
An executable task type model instance representing the task type
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
command_to_run=self.command_to_run,
environment_variables=self.environment_variables,
required_arguments=self.required_arguments,
required_arguments_default_values=(
self.required_arguments_default_values
),
json_file_option=self.json_file_option,
)
class ExecutableTaskTypeManager(BaseTaskTypeManager):
"""Manager for executable task types.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list task types.
detail_url (str): The URL format to get specific task types.
model (:class:`saltant.models.executable_task_type.ExecutableTaskType`):
The model of the task instance being used.
"""
list_url = "executabletasktypes/"
detail_url = "executabletasktypes/{id}/"
model = ExecutableTaskType
def create(
self,
name,
command_to_run,
description="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
json_file_option=None,
extra_data_to_post=None,
):
"""Create a container task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the task's required arguments.
json_file_option (str, optional): The name of a command line
option, e.g., --json-file, which accepts a JSON-encoded
file for the command to run.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
            :class:`saltant.models.executable_task_type.ExecutableTaskType`:
An executable task type model instance representing the
task type just created.
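        Example:
            A minimal sketch, assuming ``manager`` is an instance of
            this class wired to an authenticated client (all values
            below are hypothetical):
            >>> task_type = manager.create(
            ...     name="my-executable-task",
            ...     command_to_run="python /scripts/run.py",
            ...     json_file_option="--json-file",
            ... )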
"""
        # Add in extra data specific to executable task types
if extra_data_to_post is None:
extra_data_to_post = {}
extra_data_to_post.update({"json_file_option": json_file_option})
# Call the parent create function
return super(ExecutableTaskTypeManager, self).create(
name=name,
command_to_run=command_to_run,
description=description,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=required_arguments_default_values,
extra_data_to_post=extra_data_to_post,
)
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
json_file_option,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
                the task's required arguments.
json_file_option (str): The name of a command line option,
e.g., --json-file, which accepts a JSON-encoded file for
the command to run.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
                which require extra parameters.
        Returns:
            :class:`saltant.models.executable_task_type.ExecutableTaskType`:
                An executable task type model instance representing the
                task type just updated.
        """
        # Add in extra data specific to executable task types
if extra_data_to_put is None:
extra_data_to_put = {}
extra_data_to_put.update({"json_file_option": json_file_option})
        # Call the parent put function
return super(ExecutableTaskTypeManager, self).put(
id=id,
name=name,
description=description,
command_to_run=command_to_run,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=(
required_arguments_default_values
),
extra_data_to_put=extra_data_to_put,
)
|
/saltant-py-0.4.0.tar.gz/saltant-py-0.4.0/saltant/models/executable_task_type.py
| 0.864454 | 0.236946 |
executable_task_type.py
|
pypi
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from saltant.constants import HTTP_200_OK, HTTP_201_CREATED
from .resource import Model, ModelManager
class TaskQueue(Model):
"""Base model for a task queue.
Attributes:
id (int): The ID of the task queue.
user (str): The user who created the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
        private (bool): A Boolean signalling whether the queue can only
be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether the
queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying whether
the queue runs container tasks that run in Docker
containers.
runs_singularity_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Singularity containers.
        active (bool): A Boolean signalling whether the queue is active.
whitelists (list): A list of task whitelist IDs.
manager (:class:`saltant.models.task_queue.TaskQueueManager`):
The task queue manager which spawned this task queue.
"""
def __init__(
self,
id,
user,
name,
description,
private,
runs_executable_tasks,
runs_docker_container_tasks,
runs_singularity_container_tasks,
active,
whitelists,
manager,
):
"""Initialize a task queue.
Args:
id (int): The ID of the task queue.
user (str): The user who created the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
            private (bool): A Boolean signalling whether the queue can
only be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether
the queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Docker containers.
runs_singularity_container_tasks (bool): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool): A Boolean signalling whether the queue is
active.
whitelists (list): A list of task whitelist IDs.
manager (:class:`saltant.models.task_queue.TaskQueueManager`):
The task queue manager which spawned this task instance.
"""
# Call the parent constructor
super(TaskQueue, self).__init__(manager)
# Add in task queue stuff
self.id = id
self.user = user
self.name = name
self.description = description
self.private = private
self.runs_executable_tasks = runs_executable_tasks
self.runs_docker_container_tasks = runs_docker_container_tasks
self.runs_singularity_container_tasks = (
runs_singularity_container_tasks
)
self.active = active
self.whitelists = whitelists
def __str__(self):
"""String representation of the task queue."""
return self.name
def sync(self):
"""Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
This task queue instance after syncing.
"""
        # Rebinding ``self`` would not update this object in place, so
        # copy the fresh attributes onto it to match the docstring
        latest = self.manager.get(id=self.id)
        self.__dict__.update(latest.__dict__)
        return self
def patch(self):
"""Updates this task queue on the saltant server.
This is an alias for the model's put method. (Both are identical
operations on the model level.)
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
return self.put()
def put(self):
"""Updates this task queue on the saltant server.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
private=self.private,
runs_executable_tasks=self.runs_executable_tasks,
runs_docker_container_tasks=self.runs_docker_container_tasks,
runs_singularity_container_tasks=self.runs_singularity_container_tasks,
active=self.active,
whitelists=self.whitelists,
)
class TaskQueueManager(ModelManager):
"""Manager for task queues.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list task queues.
detail_url (str): The URL format to get specific task queues.
model (:class:`saltant.models.task_queue.TaskQueue`): The model
of the task queue being used.
"""
list_url = "taskqueues/"
detail_url = "taskqueues/{id}/"
model = TaskQueue
def get(self, id=None, name=None):
"""Get a task queue.
        Either the id xor the name of the task queue must be specified.
        Args:
            id (int, optional): The id of the task queue to get.
            name (str, optional): The name of the task queue to get.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
requested.
Raises:
ValueError: Neither id nor name were set *or* both id and
name were set.
"""
# Validate arguments - use an xor
        if not ((id is None) ^ (name is None)):
raise ValueError("Either id or name must be set (but not both!)")
# If it's just ID provided, call the parent function
if id is not None:
return super(TaskQueueManager, self).get(id=id)
# Try getting the task queue by name
return self.list(filters={"name": name})[0]
def create(
self,
name,
description="",
private=False,
runs_executable_tasks=True,
runs_docker_container_tasks=True,
runs_singularity_container_tasks=True,
active=True,
whitelists=None,
):
"""Create a task queue.
Args:
name (str): The name of the task queue.
description (str, optional): A description of the task queue.
private (bool, optional): A boolean specifying whether the
queue is exclusive to its creator. Defaults to False.
runs_executable_tasks (bool, optional): A Boolean specifying
whether the queue runs executable tasks. Defaults to
True.
runs_docker_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Docker containers. Defaults to True.
runs_singularity_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers. Defaults to True.
active (bool, optional): A boolean specifying whether the
                queue is active. Defaults to True.
whitelists (list, optional): A list of task whitelist IDs.
Defaults to None (which gets translated to []).
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just created.
"""
# Translate whitelists None to [] if necessary
if whitelists is None:
whitelists = []
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"private": private,
"runs_executable_tasks": runs_executable_tasks,
"runs_docker_container_tasks": runs_docker_container_tasks,
"runs_singularity_container_tasks": runs_singularity_container_tasks,
"active": active,
"whitelists": whitelists,
}
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def patch(
self,
id,
name=None,
description=None,
private=None,
runs_executable_tasks=None,
runs_docker_container_tasks=None,
runs_singularity_container_tasks=None,
active=None,
whitelists=None,
):
"""Partially updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str, optional): The name of the task queue.
description (str, optional): The description of the task
queue.
            private (bool, optional): A Boolean signalling whether the
queue can only be used by its associated user.
runs_executable_tasks (bool, optional): A Boolean specifying
whether the queue runs executable tasks.
runs_docker_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Docker containers.
runs_singularity_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool, optional): A Boolean signalling whether the
queue is active.
whitelists (list, optional): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
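        Example:
            A minimal sketch, assuming ``manager`` is an instance of
            this class wired to an authenticated client (the ID is
            hypothetical):
            >>> queue = manager.patch(id=1, active=False)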
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_patch = {}
if name is not None:
data_to_patch["name"] = name
if description is not None:
data_to_patch["description"] = description
if private is not None:
data_to_patch["private"] = private
if runs_executable_tasks is not None:
data_to_patch["runs_executable_tasks"] = runs_executable_tasks
if runs_docker_container_tasks is not None:
data_to_patch[
"runs_docker_container_tasks"
] = runs_docker_container_tasks
if runs_singularity_container_tasks is not None:
data_to_patch[
"runs_singularity_container_tasks"
] = runs_singularity_container_tasks
if active is not None:
data_to_patch["active"] = active
if whitelists is not None:
data_to_patch["whitelists"] = whitelists
response = self._client.session.patch(request_url, data=data_to_patch)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def put(
self,
id,
name,
description,
private,
runs_executable_tasks,
runs_docker_container_tasks,
runs_singularity_container_tasks,
active,
whitelists,
):
"""Updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
            private (bool): A Boolean signalling whether the queue can
only be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether
the queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Docker containers.
runs_singularity_container_tasks (bool): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool): A Boolean signalling whether the queue is
active.
whitelists (list): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"private": private,
"runs_executable_tasks": runs_executable_tasks,
"runs_docker_container_tasks": runs_docker_container_tasks,
"runs_singularity_container_tasks": runs_singularity_container_tasks,
"active": active,
"whitelists": whitelists,
}
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
/saltant-py-0.4.0.tar.gz/saltant-py-0.4.0/saltant/models/task_queue.py
| 0.926578 | 0.190536 |
task_queue.py
|
pypi
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from saltant.constants import HTTP_200_OK, HTTP_201_CREATED
from .resource import Model, ModelManager
class TaskWhitelist(Model):
"""Base model for a task whitelist.
Attributes:
id (int): The ID of the task whitelist.
user (str): The user who created the task whitelist.
name (str): The name of the task whitelist.
description (str): The description of the task whitelist.
whitelisted_container_task_types (list): A list of whitelisted
container task type IDs.
whitelisted_executable_task_types (list): A list of whitelisted
executable task type IDs.
manager (:class:`saltant.models.task_whitelist.TaskWhitelistManager`):
The task whitelist manager which spawned this task whitelist.
"""
def __init__(
self,
id,
user,
name,
description,
whitelisted_container_task_types,
whitelisted_executable_task_types,
manager,
):
"""Initialize a task whitelist.
Args:
id (int): The ID of the task whitelist.
user (str): The user who created the task whitelist.
name (str): The name of the task whitelist.
description (str): The description of the task whitelist.
whitelisted_container_task_types (list): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list): A list of
whitelisted executable task type IDs.
manager (:class:`saltant.models.task_whitelist.TaskWhitelistManager`):
The task whitelist manager which spawned this task instance.
"""
# Call the parent constructor
super(TaskWhitelist, self).__init__(manager)
# Add in task whitelist stuff
self.id = id
self.user = user
self.name = name
self.description = description
self.whitelisted_container_task_types = (
whitelisted_container_task_types
)
self.whitelisted_executable_task_types = (
whitelisted_executable_task_types
)
def __str__(self):
"""String representation of the task whitelist."""
return self.name
def sync(self):
"""Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
This task whitelist instance after syncing.
"""
        # Rebinding ``self`` would not update this object in place, so
        # copy the fresh attributes onto it to match the docstring
        latest = self.manager.get(id=self.id)
        self.__dict__.update(latest.__dict__)
        return self
def patch(self):
"""Updates this task whitelist on the saltant server.
This is an alias for the model's put method. (Both are identical
operations on the model level.)
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
"""
return self.put()
def put(self):
"""Updates this task whitelist on the saltant server.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
whitelisted_container_task_types=(
self.whitelisted_container_task_types
),
whitelisted_executable_task_types=(
self.whitelisted_executable_task_types
),
)
class TaskWhitelistManager(ModelManager):
"""Manager for task whitelists.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list task whitelists.
detail_url (str): The URL format to get specific task
whitelists.
model (:class:`saltant.models.task_whitelist.TaskWhitelist`):
The model of the task whitelist being used.
"""
list_url = "taskwhitelists/"
detail_url = "taskwhitelists/{id}/"
model = TaskWhitelist
def get(self, id=None, name=None):
"""Get a task whitelist.
        Either the id xor the name of the task whitelist must be specified.
        Args:
            id (int, optional): The id of the task whitelist to get.
            name (str, optional): The name of the task whitelist to get.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task whitelist
requested.
Raises:
ValueError: Neither id nor name were set *or* both id and
name were set.
"""
# Validate arguments - use an xor
        if not ((id is None) ^ (name is None)):
raise ValueError("Either id or name must be set (but not both!)")
# If it's just ID provided, call the parent function
if id is not None:
return super(TaskWhitelistManager, self).get(id=id)
# Try getting the task whitelist by name
return self.list(filters={"name": name})[0]
def create(
self,
name,
description="",
whitelisted_container_task_types=None,
whitelisted_executable_task_types=None,
):
"""Create a task whitelist.
Args:
name (str): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just created.
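        Example:
            A minimal sketch, assuming ``manager`` is an instance of
            this class wired to an authenticated client (the task
            type IDs are hypothetical):
            >>> whitelist = manager.create(
            ...     name="trusted-tasks",
            ...     whitelisted_container_task_types=[1, 2],
            ... )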
"""
# Translate whitelists None to [] if necessary
if whitelisted_container_task_types is None:
whitelisted_container_task_types = []
if whitelisted_executable_task_types is None:
whitelisted_executable_task_types = []
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"whitelisted_container_task_types": whitelisted_container_task_types,
"whitelisted_executable_task_types": whitelisted_executable_task_types,
}
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def patch(
self,
id,
name=None,
description=None,
whitelisted_container_task_types=None,
whitelisted_executable_task_types=None,
):
"""Partially updates a task whitelist on the saltant server.
Args:
id (int): The ID of the task whitelist.
name (str, optional): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_patch = {}
if name is not None:
data_to_patch["name"] = name
if description is not None:
data_to_patch["description"] = description
if whitelisted_container_task_types is not None:
data_to_patch[
"whitelisted_container_task_types"
] = whitelisted_container_task_types
if whitelisted_executable_task_types is not None:
data_to_patch[
"whitelisted_executable_task_types"
] = whitelisted_executable_task_types
response = self._client.session.patch(request_url, data=data_to_patch)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def put(
self,
id,
name,
description,
whitelisted_container_task_types,
whitelisted_executable_task_types,
):
"""Updates a task whitelist on the saltant server.
Args:
id (int): The ID of the task whitelist.
name (str): The name of the task whitelist.
description (str): The description of the task whitelist.
whitelisted_container_task_types (list): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list): A list of
whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"whitelisted_container_task_types": whitelisted_container_task_types,
"whitelisted_executable_task_types": whitelisted_executable_task_types,
}
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
/saltant-py-0.4.0.tar.gz/saltant-py-0.4.0/saltant/models/task_whitelist.py
| 0.871146 | 0.156234 |
task_whitelist.py
|
pypi
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import time
import dateutil.parser
from saltant.constants import (
DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD,
HTTP_201_CREATED,
HTTP_202_ACCEPTED,
TASK_INSTANCE_FINISH_STATUSES,
)
from .resource import Model, ModelManager
class BaseTaskInstance(Model):
"""Base model for a task instance.
Attributes:
uuid (str): The UUID of the task instance.
name (str): The name of the task instance.
state (str): The state of the task instance.
user (str): The username of the user who started the task.
task_queue (int): The ID of the task queue the instance is
running on.
task_type (int): The ID of the task type for the instance.
datetime_created (:class:`datetime.datetime`): The datetime when
the task instance was created.
datetime_finished (:class:`datetime.datetime`): The datetime
when the task instance finished.
arguments (dict): The arguments the task instance was run with.
manager (:class:`saltant.models.base_task_instance.BaseTaskInstanceManager`):
The task instance manager which spawned this task instance.
This is used to conveniently add clone, terminate, and
wait_until_finished methods to the task instance model
itself (such convenience!).
"""
def __init__(
self,
uuid,
name,
state,
user,
task_queue,
task_type,
datetime_created,
datetime_finished,
arguments,
manager,
):
"""Initialize a task instance.
Args:
uuid (str): The UUID of the task instance.
name (str): The name of the task instance.
state (str): The state of the task instance.
user (str): The username of the user who started the task.
task_queue (int): The ID of the task queue the instance is
running on.
task_type (int): The ID of the task type for the instance.
datetime_created (:class:`datetime.datetime`): The datetime
when the task instance was created.
datetime_finished (:class:`datetime.datetime`): The datetime
when the task instance finished.
arguments (dict): The arguments the task instance was run
with.
manager (:class:`saltant.models.base_task_instance.BaseTaskInstanceManager`):
The task instance manager which spawned this task
instance.
"""
# Call the parent constructor
super(BaseTaskInstance, self).__init__(manager)
# Add in task instance stuff
self.name = name
self.uuid = uuid
self.state = state
self.user = user
self.task_queue = task_queue
self.task_type = task_type
self.datetime_created = datetime_created
self.datetime_finished = datetime_finished
self.arguments = arguments
def __str__(self):
"""String representation of the task instance."""
return self.uuid
def sync(self):
"""Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
This task instance ... instance after syncing.
"""
        # Rebinding ``self`` would not update this object in place, so
        # copy the fresh attributes onto it to match the docstring
        latest = self.manager.get(uuid=self.uuid)
        self.__dict__.update(latest.__dict__)
        return self
def clone(self):
"""Clone this task instance.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance created due to the clone.
"""
return self.manager.clone(self.uuid)
def terminate(self):
"""Terminate this task instance.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
This task instance model after it was told to terminate.
"""
return self.manager.terminate(self.uuid)
def wait_until_finished(
self, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD
):
"""Wait until a task instance with the given UUID is finished.
Args:
refresh_period (int, optional): How many seconds to wait
before checking the task's status. Defaults to 5
seconds.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
This task instance model after it finished.
"""
return self.manager.wait_until_finished(
uuid=self.uuid, refresh_period=refresh_period
)
class BaseTaskInstanceManager(ModelManager):
"""Base manager for task instances.
Attributes:
_client (:class:`saltant.client.Client`): An authenticated
saltant client.
list_url (str): The URL to list task instances.
detail_url (str): The URL format to get specific task instances.
clone_url (str): The URL format to clone a task instance.
terminate_url (str): The URL format to terminate a task
instance.
model (:class:`saltant.models.resource.Model`): The model of the
task instance being used.
"""
clone_url = "NotImplemented"
terminate_url = "NotImplemented"
model = BaseTaskInstance
def get(self, uuid):
"""Get the task instance with given UUID.
Args:
uuid (str): The UUID of the task instance to get.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance requested.
"""
        # Basically identical to the parent get method, except rename id
        # to uuid
return super(BaseTaskInstanceManager, self).get(id=uuid)
def create(self, task_type_id, task_queue_id, arguments=None, name=""):
"""Create a task instance.
Args:
task_type_id (int): The ID of the task type to base the task
instance on.
task_queue_id (int): The ID of the task queue to run the job
on.
arguments (dict, optional): The arguments to give the task
type.
name (str, optional): A non-unique name to give the task
instance.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance just created.
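        Example:
            A minimal sketch, assuming ``manager`` is a subclass
            instance wired to an authenticated client (the IDs and
            arguments are hypothetical):
            >>> instance = manager.create(
            ...     task_type_id=1,
            ...     task_queue_id=2,
            ...     arguments={"input_path": "/data/in.txt"},
            ... )
            >>> instance = instance.wait_until_finished()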
"""
# Make arguments an empty dictionary if None
if arguments is None:
arguments = {}
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"arguments": json.dumps(arguments),
"task_type": task_type_id,
"task_queue": task_queue_id,
}
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def clone(self, uuid):
"""Clone the task instance with given UUID.
Args:
uuid (str): The UUID of the task instance to clone.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance created due to the clone.
"""
# Clone the object
request_url = self._client.base_api_url + self.clone_url.format(
id=uuid
)
response = self._client.session.post(request_url)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance
return self.response_data_to_model_instance(response.json())
def clone_many(self, uuids):
"""Clone the task instances with given UUIDs.
Args:
uuids (list): A list of strings containing the UUIDs of the
task instances to clone.
Returns:
list:
A list of
:class:`saltant.models.base_task_instance.BaseTaskInstance`
subclass instances representing the task instances
created due to the clone.
"""
return [self.clone(uuid) for uuid in uuids]
def terminate(self, uuid):
"""Terminate the task instance with given UUID.
Args:
uuid (str): The UUID of the task instance to terminate.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance that was told to terminate.
"""
        # Terminate the object
request_url = self._client.base_api_url + self.terminate_url.format(
id=uuid
)
response = self._client.session.post(request_url)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_202_ACCEPTED,
)
# Return a model instance
return self.response_data_to_model_instance(response.json())
def terminate_many(self, uuids):
"""Terminate the task instances with given UUIDs.
Args:
uuids (list): A list of strings containing the UUIDs of the
task instances to terminate.
Returns:
list:
A list of
:class:`saltant.models.base_task_instance.BaseTaskInstance`
instances representing the task instances told to
terminate.
"""
return [self.terminate(uuid) for uuid in uuids]
def wait_until_finished(
self, uuid, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD
):
"""Wait until a task instance with the given UUID is finished.
Args:
uuid (str): The UUID of the task instance to wait for.
refresh_period (float, optional): How many seconds to wait
in between checking the task's status. Defaults to 5
seconds.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance which we waited for.
"""
# Wait for the task to finish
task_instance = self.get(uuid)
while task_instance.state not in TASK_INSTANCE_FINISH_STATUSES:
# Wait a bit
time.sleep(refresh_period)
# Query again
task_instance = self.get(uuid)
return task_instance
def response_data_to_model_instance(self, response_data):
"""Convert response data to a task instance model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
                instance from the response data.
"""
# Coerce datetime strings into datetime objects
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
if response_data["datetime_finished"]:
response_data["datetime_finished"] = dateutil.parser.parse(
response_data["datetime_finished"]
)
# Instantiate a model for the task instance
return super(
BaseTaskInstanceManager, self
).response_data_to_model_instance(response_data)
|
/saltant-py-0.4.0.tar.gz/saltant-py-0.4.0/saltant/models/base_task_instance.py
| 0.877791 | 0.166066 |
base_task_instance.py
|
pypi
|

[](https://circleci.com/gh/lincolnloop/saltdash/tree/master)
[](https://pypi.org/project/saltdash/)

# Salt Dash
Read-only web interface to read from Salt's [external job cache](https://docs.saltstack.com/en/latest/topics/jobs/external_cache.html) using the [`pgjsonb`](https://docs.saltstack.com/en/latest/ref/returners/all/salt.returners.pgjsonb.html) returner.

## Development
### Pre-requisites
* Node.js for building the front-end.
* [Pipenv](https://docs.pipenv.org/) for the back-end.
* A PostgreSQL database
### Installation
```bash
git clone [email protected]:lincolnloop/saltdash.git
cd saltdash
make all # download dependencies and build the world
$EDITOR saltdash.yml # change settings as needed
pipenv shell # activate the Python virtual environment
saltdash migrate # setup the database
saltdash runserver # run a development server
```
### Client-side
Uses [parcel](https://parceljs.org/). To start a development environment with live reloading, run:
```bash
cd client
yarn run watch
```
## Running in Production
```bash
pip install saltdash
```
`saltdash runserver` is not suitable for production. A production-level
webserver is included and can be started with `saltdash serve`. If Docker is
more your speed, there's a `Dockerfile` as well.
⚠️ The built-in webserver does not handle HTTPS. The default settings assume the
app is deployed behind a proxy which is terminating HTTPS connections and
properly handling headers. If this is not the case, [you should read this](https://docs.djangoproject.com/en/2.2/ref/settings/#secure-proxy-ssl-header) and take appropriate actions.
### Configuration
Configuration can be done via environment variables, a file, or a combination
of both thanks to [`Goodconf`](https://pypi.org/project/goodconf/). By default
it will look for a YAML file named `saltdash.yml` in `/etc/saltdash/` or the current
directory. You can also specify a configuration file with the `-C` or `--config`
flags. `saltdash-generate-config` can be used to generate a sample config file
containing the following variables:
* **DEBUG**
Enable debugging.
type: `bool`
* **SECRET_KEY** _REQUIRED_
A long random string you keep secret. See https://docs.djangoproject.com/en/2.2/ref/settings/#secret-key
type: `str`
* **DATABASE_URL**
type: `str`
default: `postgres://localhost:5432/salt`
* **ALLOWED_HOSTS**
Hosts allowed to serve the site. See https://docs.djangoproject.com/en/2.2/ref/settings/#allowed-hosts
type: `list`
default: `['*']`
* **HIDE_OUTPUT**
List of modules to hide the output from in the web interface.
type: `list`
default: `['pillar.*']`
* **GITHUB_TEAM_ID**
type: `str`
* **GITHUB_CLIENT_ID**
type: `str`
* **GITHUB_CLIENT_SECRET**
type: `str`
* **SENTRY_DSN**
type: `str`
* **LISTEN**
Socket for webserver to listen on.
type: `str`
default: `127.0.0.1:8077`
GitHub Team authentication is enabled by setting the relevant `GITHUB_*` variables.
You'll need to set up an OAuth App at `https://github.com/organizations/<org>/settings/applications` with a callback URL in the form: `https://your-site.example.com/auth/complete/github-team/`
To retrieve your team IDs:
1. Create [a token at GitHub](https://github.com/settings/tokens)
2. `curl -H "Authorization: token <token>" https://api.github.com/orgs/<org>/teams`
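Putting it together, a minimal `saltdash.yml` might look like this (every value below is a placeholder; generate your own secret key and fill in your own IDs):
```yaml
SECRET_KEY: "change-me-to-a-long-random-string"
DATABASE_URL: "postgres://saltdash:[email protected]:5432/salt"
ALLOWED_HOSTS:
  - saltdash.example.com
HIDE_OUTPUT:
  - pillar.*
GITHUB_TEAM_ID: "1234567"
GITHUB_CLIENT_ID: "your-oauth-client-id"
GITHUB_CLIENT_SECRET: "your-oauth-client-secret"
LISTEN: "127.0.0.1:8077"
```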
## Setting up Salt
Once you've set up a PostgreSQL database using `saltdash migrate`, connect Salt's external job cache to the database by adding the following lines to `/etc/salt/master.d/job_cache.conf`:
```ini
# Replace items in brackets with actual values
master_job_cache: pgjsonb
returner.pgjsonb.host: [db-host]
returner.pgjsonb.pass: [db-password]
returner.pgjsonb.db: [db-database-name]
returner.pgjsonb.port: [db-port]
returner.pgjsonb.user: [db-user]
```
Restart your `salt-master` and all future jobs should get stored in the database.
If you have *lots* of jobs, you'll probably want to purge the cache periodically. A helper command is provided to do just that:
```bash
saltdash purge_job_cache [days_older_than_to_purge]
```
If you want to automate this, use the `--no-input` flag to bypass the confirmation prompt.
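For example, a nightly crontab entry might look like this (the retention period and install path are illustrative; adjust both for your system):
```bash
# Purge job cache entries older than 90 days, every night at 03:00
0 3 * * * /usr/local/bin/saltdash purge_job_cache 90 --no-input
```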
## Protecting Secrets
It is very easy to accidentally expose secrets in Salt via the logs and/or
console output. The same applies for Saltdash. Since secrets are often stored
in encrypted pillar data, by default the output from any `pillar.*` calls is
hidden via the `HIDE_OUTPUT` setting. If you have additional modules you know
expose secret data, they should be added to the list.
There are many other ways secrets can leak, however. A few general tips, which
are good practice whether you use Saltdash or not (a short sketch follows the list):
* Set `show_changes: false` on any `file.*` actions which deal with sensitive data.
* Set `hide_output: true` on any `cmd.*` state which may output sensitive data.
* When working with files, use templates or `contents_pillar` when appropriate.
* Avoid passing secrets as arguments to modules or states. Typically Salt can
read them from a pillar or config instead.
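A minimal sketch of the first two tips as Salt states (state IDs, paths, and pillar keys are placeholders):
```yaml
deploy_secret_config:
  file.managed:
    - name: /etc/app/secrets.conf
    - contents_pillar: app:secrets_conf
    - show_changes: false   # keep diffs of sensitive contents out of logs

rotate_api_key:
  cmd.run:
    - name: /usr/local/bin/rotate-key
    - hide_output: true     # suppress command output in returns
```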
## Attributions
Icon by [BornSymbols](https://thenounproject.com/term/salt/705369), used under the `CC BY` license.
|
/saltdash-0.9.9.tar.gz/saltdash-0.9.9/README.md
| 0.412648 | 0.923212 |
README.md
|
pypi
|
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in the Salt
Extension Modules for Azure Resource Manager project and our community a
harassment-free experience for everyone, regardless of age, body size, visible
or invisible disability, ethnicity, sex characteristics, gender identity and
expression, level of experience, education, socio-economic status, nationality,
personal appearance, race, religion, or sexual identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at [email protected].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
|
/saltext.azurerm-4.0.1.tar.gz/saltext.azurerm-4.0.1/CODE-OF-CONDUCT.md
| 0.62223 | 0.653673 |
CODE-OF-CONDUCT.md
|
pypi
|
import logging
import saltext.azurerm.utils.azurerm
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.compute.models # pylint: disable=unused-import
from azure.core.exceptions import HttpResponseError
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
def get(location, publisher, offer, sku, version, **kwargs):
"""
.. versionadded:: 2.1.0
Gets a virtual machine image.
:param location: The name of a supported Azure region.
:param publisher: A valid image publisher.
:param offer: A valid image publisher offer.
:param sku: A valid image SKU.
:param version: A valid image SKU version.
CLI Example:
.. code-block:: bash
salt-call azurerm_compute_virtual_machine_image.get "eastus" test_publisher test_offer test_sku test_version
"""
result = {}
compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
try:
image = compconn.virtual_machine_images.get(
location=location,
publisher_name=publisher,
offer=offer,
skus=sku,
version=version,
)
result = image.as_dict()
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def list_(location, publisher, offer, sku, **kwargs):
"""
.. versionadded:: 2.1.0
Gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU.
:param location: The name of a supported Azure region.
:param publisher: A valid image publisher.
:param offer: A valid image publisher offer.
:param sku: A valid image SKU.
CLI Example:
.. code-block:: bash
salt-call azurerm_compute_virtual_machine_image.list "eastus" test_publisher test_offer test_sku
"""
result = {}
compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
try:
images = compconn.virtual_machine_images.list(
location=location,
skus=sku,
publisher_name=publisher,
offer=offer,
**kwargs,
)
for image in images:
img = image.as_dict()
result[img["name"]] = img
except (HttpResponseError, AttributeError) as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def list_offers(location, publisher, **kwargs):
"""
.. versionadded:: 2.1.0
Gets a list of virtual machine image offers for the specified location and publisher.
:param location: The name of a supported Azure region.
:param publisher: A valid image publisher.
CLI Example:
.. code-block:: bash
salt-call azurerm_compute_virtual_machine_image.list_offers "eastus" test_publisher
"""
result = {}
compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
try:
images = compconn.virtual_machine_images.list_offers(
location=location, publisher_name=publisher, **kwargs
)
for image in images:
img = image.as_dict()
result[img["name"]] = img
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def list_publishers(location, **kwargs):
"""
.. versionadded:: 2.1.0
Gets a list of virtual machine image publishers for the specified Azure location.
:param location: The name of a supported Azure region.
CLI Example:
.. code-block:: bash
salt-call azurerm_compute_virtual_machine_image.list_publishers "eastus"
"""
result = {}
compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
try:
images = compconn.virtual_machine_images.list_publishers(location=location, **kwargs)
for image in images:
img = image.as_dict()
result[img["name"]] = img
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def list_skus(location, publisher, offer, **kwargs):
"""
.. versionadded:: 2.1.0
Gets a list of virtual machine image SKUs for the specified location, publisher, and offer.
:param location: The name of a supported Azure region.
:param publisher: A valid image publisher.
:param offer: A valid image publisher offer.
CLI Example:
.. code-block:: bash
salt-call azurerm_compute_virtual_machine_image.list_skus "eastus" test_publisher test_offer
"""
result = {}
compconn = saltext.azurerm.utils.azurerm.get_client("compute", **kwargs)
try:
images = compconn.virtual_machine_images.list_skus(
location=location, publisher_name=publisher, offer=offer, **kwargs
)
for image in images:
img = image.as_dict()
result[img["name"]] = img
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
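# Illustrative discovery flow using the functions above (publisher, offer,
# and SKU names are placeholders; substitute real marketplace values):
#
#   salt-call azurerm_compute_virtual_machine_image.list_publishers "eastus"
#   salt-call azurerm_compute_virtual_machine_image.list_offers "eastus" <publisher>
#   salt-call azurerm_compute_virtual_machine_image.list_skus "eastus" <publisher> <offer>
#   salt-call azurerm_compute_virtual_machine_image.list "eastus" <publisher> <offer> <sku>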
|
/saltext.azurerm-4.0.1.tar.gz/saltext.azurerm-4.0.1/src/saltext/azurerm/modules/azurerm_compute_virtual_machine_image.py
| 0.795181 | 0.25745 |
azurerm_compute_virtual_machine_image.py
|
pypi
|
import logging
import saltext.azurerm.utils.azurerm
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.keyvault.models # pylint: disable=unused-import
from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
def check_name_availability(name, **kwargs):
"""
.. versionadded:: 2.1.0
Checks that the vault name is valid and is not already in use.
:param name: The vault name.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.check_name_availability test_name
"""
result = {}
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
try:
avail = vconn.vaults.check_name_availability(
name=name,
)
result = avail.as_dict()
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def create_or_update(
name,
resource_group,
location,
tenant_id,
sku,
access_policies=None,
vault_uri=None,
create_mode=None,
enabled_for_deployment=None,
enabled_for_disk_encryption=None,
enabled_for_template_deployment=None,
enable_soft_delete=None,
soft_delete_retention=None,
enable_purge_protection=None,
enable_rbac_authorization=None,
network_acls=None,
tags=None,
**kwargs,
):
"""
.. versionadded:: 2.1.0
Create or update a key vault in the specified subscription.
:param name: The vault name.
:param resource_group: The name of the resource group to which the vault belongs.
:param location: The supported Azure location where the key vault should be created.
:param tenant_id: The Azure Active Directory tenant ID that should be used for authenticating requests to
the key vault.
:param sku: The SKU name to specify whether the key vault is a standard vault or a premium vault. Possible
values include: 'standard' and 'premium'.
:param access_policies: A list of 0 to 16 dictionaries that represent AccessPolicyEntry objects. The
AccessPolicyEntry objects represent identities that have access to the key vault. All identities in the
list must use the same tenant ID as the key vault's tenant ID. When createMode is set to "recover", access
policies are not required. Otherwise, access policies are required. Valid parameters are:
- ``tenant_id``: (Required) The Azure Active Directory tenant ID that should be used for authenticating
requests to the key vault.
- ``object_id``: (Required) The object ID of a user, service principal, or security group in the Azure Active
Directory tenant for the vault. The object ID must be unique for the list of access policies.
- ``application_id``: (Optional) Application ID of the client making request on behalf of a principal.
- ``permissions``: (Required) A dictionary representing permissions the identity has for keys, secrets, and
certificates. Valid parameters include:
- ``keys``: A list that represents permissions to keys. Possible values include: 'backup', 'create',
'decrypt', 'delete', 'encrypt', 'get', 'import_enum', 'list', 'purge', 'recover', 'restore', 'sign',
'unwrap_key', 'update', 'verify', and 'wrap_key'.
- ``secrets``: A list that represents permissions to secrets. Possible values include: 'backup', 'delete',
'get', 'list', 'purge', 'recover', 'restore', and 'set'.
- ``certificates``: A list that represents permissions to certificates. Possible values include: 'create',
'delete', 'deleteissuers', 'get', 'getissuers', 'import_enum', 'list', 'listissuers', 'managecontacts',
'manageissuers', 'purge', 'recover', 'setissuers', and 'update'.
- ``storage``: A list that represents permissions to storage accounts. Possible values include: 'backup',
'delete', 'deletesas', 'get', 'getsas', 'list', 'listsas', 'purge', 'recover', 'regeneratekey',
'restore', 'set', 'setsas', and 'update'.
:param vault_uri: The URI of the vault for performing operations on keys and secrets.
:param create_mode: The vault's create mode to indicate whether the vault needs to be recovered or not.
Possible values include: 'recover' and 'default'.
:param enabled_for_deployment: A boolean value specifying whether Azure Virtual Machines are permitted to
retrieve certificates stored as secrets from the key vault.
:param enabled_for_disk_encryption: A boolean value specifying whether Azure Disk Encryption is permitted
to retrieve secrets from the vault and unwrap keys.
:param enabled_for_template_deployment: A boolean value specifying whether Azure Resource Manager is
permitted to retrieve secrets from the key vault.
:param enable_soft_delete: A boolean value that specifies whether the 'soft delete' functionality is
enabled for this key vault. If it's not set to any value (True or False) when creating a new key vault, it will
be set to True by default. Once set to True, it cannot be reverted to False.
:param soft_delete_retention: The soft delete data retention period in days. It accepts values between
7 and 90, inclusive. Default value is 90.
:param enable_purge_protection: A boolean value specifying whether protection against purge is enabled for this
vault. Setting this property to True activates protection against purge for this vault and its content - only
the Key Vault service may initiate a hard, irrecoverable deletion. Enabling this functionality is irreversible,
that is, the property does not accept False as its value. This is only effective if soft delete has been
enabled via the ``enable_soft_delete`` parameter.
:param enable_rbac_authorization: A boolean value that controls how data actions are authorized. When set to True,
the key vault will use Role Based Access Control (RBAC) for authorization of data actions, and the access
policies specified in vault properties will be ignored (warning: this is a preview feature). When set to
False, the key vault will use the access policies specified in vault properties, and any policy stored on Azure
Resource Manager will be ignored. Note that management actions are always authorized with RBAC. Defaults
to False.
:param network_acls: A dictionary representing a NetworkRuleSet. Rules governing the accessibility of
the key vault from specific network locations.
:param tags: The tags that will be assigned to the key vault.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.create_or_update tst_name tst_rg tst_location tst_tenant tst_sku tst_policies
"""
result = {}
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
sku = {"name": sku}
if not access_policies:
access_policies = []
# Create the VaultProperties object
try:
propsmodel = saltext.azurerm.utils.azurerm.create_object_model(
"keyvault",
"VaultProperties",
tenant_id=tenant_id,
sku=sku,
access_policies=access_policies,
vault_uri=vault_uri,
create_mode=create_mode,
enable_soft_delete=enable_soft_delete,
enable_purge_protection=enable_purge_protection,
enabled_for_deployment=enabled_for_deployment,
enabled_for_disk_encryption=enabled_for_disk_encryption,
enabled_for_template_deployment=enabled_for_template_deployment,
soft_delete_retention_in_days=soft_delete_retention,
enable_rbac_authorization=enable_rbac_authorization,
network_acls=network_acls,
**kwargs,
)
except TypeError as exc:
result = {"error": "The object model could not be built. ({})".format(str(exc))}
return result
# Create the VaultCreateOrUpdateParameters object
try:
paramsmodel = saltext.azurerm.utils.azurerm.create_object_model(
"keyvault",
"VaultCreateOrUpdateParameters",
location=location,
properties=propsmodel,
tags=tags,
)
except TypeError as exc:
result = {"error": "The object model could not be built. ({})".format(str(exc))}
return result
try:
vault = vconn.vaults.begin_create_or_update(
vault_name=name, resource_group_name=resource_group, parameters=paramsmodel
)
vault.wait()
result = vault.result().as_dict()
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
result = {"error": str(exc)}
return result
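# Illustrative shape of the ``access_policies`` argument documented above
# (all IDs below are placeholders):
#
#   access_policies = [
#       {
#           "tenant_id": "00000000-0000-0000-0000-000000000000",
#           "object_id": "11111111-1111-1111-1111-111111111111",
#           "permissions": {
#               "keys": ["get", "list", "create"],
#               "secrets": ["get", "list", "set"],
#               "certificates": ["get", "list"],
#           },
#       },
#   ]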
def delete(name, resource_group, **kwargs):
"""
.. versionadded:: 2.1.0
Deletes the specified Azure key vault.
:param name: The vault name.
:param resource_group: The name of the resource group to which the vault belongs.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.delete test_name test_rg
"""
result = False
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
try:
vconn.vaults.delete(vault_name=name, resource_group_name=resource_group)
result = True
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
return result
def get(name, resource_group, **kwargs):
"""
.. versionadded:: 2.1.0
Gets the specified Azure key vault.
:param name: The vault name.
:param resource_group: The name of the resource group to which the vault belongs.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.get test_name test_rg
"""
result = {}
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
try:
vault = vconn.vaults.get(vault_name=name, resource_group_name=resource_group)
result = vault.as_dict()
except (HttpResponseError, ResourceNotFoundError) as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def get_deleted(name, location, **kwargs):
"""
.. versionadded:: 2.1.0
Gets the deleted Azure key vault.
:param name: The vault name.
:param location: The location of the deleted vault.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.get_deleted test_name test_location
"""
result = {}
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
try:
vault = vconn.vaults.get_deleted(vault_name=name, location=location)
result = vault.as_dict()
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def list_(resource_group=None, top=None, **kwargs):
"""
.. versionadded:: 2.1.0
Gets information about the vaults associated with the subscription.
:param resource_group: The name of the resource group to limit the results.
:param top: Maximum number of results to return.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.list
"""
result = {}
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
try:
if resource_group:
vaults = saltext.azurerm.utils.azurerm.paged_object_to_list(
vconn.vaults.list_by_resource_group(resource_group_name=resource_group, top=top)
)
else:
vaults = saltext.azurerm.utils.azurerm.paged_object_to_list(vconn.vaults.list(top=top))
for vault in vaults:
result[vault["name"]] = vault
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def list_by_subscription(top=None, **kwargs):
"""
.. versionadded:: 2.1.0
The List operation gets information about the vaults associated with the subscription.
:param top: Maximum number of results to return.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.list_by_subscription
"""
result = {}
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
try:
vaults = saltext.azurerm.utils.azurerm.paged_object_to_list(
vconn.vaults.list_by_subscription(top=top)
)
for vault in vaults:
result[vault["name"]] = vault
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def list_deleted(**kwargs):
"""
.. versionadded:: 2.1.0
Gets information about the deleted vaults in a subscription.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.list_deleted
"""
result = {}
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
try:
vaults = saltext.azurerm.utils.azurerm.paged_object_to_list(vconn.vaults.list_deleted())
for vault in vaults:
result[vault["name"]] = vault
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
result = {"error": str(exc)}
return result
def purge_deleted(name, location, **kwargs):
"""
.. versionadded:: 2.1.0
Permanently deletes (purges) the specified Azure key vault.
:param name: The name of the soft-deleted vault.
:param location: The location of the soft-deleted vault.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.purge_deleted test_name test_location
"""
result = False
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
try:
vault = vconn.vaults.begin_purge_deleted(vault_name=name, location=location)
vault.wait()
result = True
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
return result
def update_access_policy(name, resource_group, operation_kind, access_policies, **kwargs):
"""
.. versionadded:: 2.1.0
Update access policies in a key vault in the specified subscription.
:param name: The name of the vault.
:param resource_group: The name of the resource group to which the vault belongs.
:param operation_kind: Name of the operation. Possible values include: 'add', 'replace', and 'remove'.
:param access_policies: A list of 0 to 16 dictionaries that represent AccessPolicyEntry objects. The
AccessPolicyEntry objects represent identities that have access to the key vault. All identities in the
list must use the same tenant ID as the key vault's tenant ID. When createMode is set to "recover", access
policies are not required. Otherwise, access policies are required. Valid parameters are:
- ``tenant_id``: (Required) The Azure Active Directory tenant ID that should be used for authenticating
requests to the key vault.
- ``object_id``: (Required) The object ID of a user, service principal, or security group in the Azure Active
Directory tenant for the vault. The object ID must be unique for the list of access policies.
- ``application_id``: (Optional) Application ID of the client making request on behalf of a principal.
- ``permissions``: (Required) A dictionary representing permissions the identity has for keys, secrets, and
certificates. Valid parameters include:
- ``keys``: A list that represents permissions to keys. Possible values include: 'backup', 'create',
'decrypt', 'delete', 'encrypt', 'get', 'import_enum', 'list', 'purge', 'recover', 'restore', 'sign',
'unwrap_key', 'update', 'verify', and 'wrap_key'.
- ``secrets``: A list that represents permissions to secrets. Possible values include: 'backup', 'delete',
'get', 'list', 'purge', 'recover', 'restore', and 'set'.
- ``certificates``: A list that represents permissions to certificates. Possible values include: 'create',
'delete', 'deleteissuers', 'get', 'getissuers', 'import_enum', 'list', 'listissuers', 'managecontacts',
'manageissuers', 'purge', 'recover', 'setissuers', and 'update'.
- ``storage``: A list that represents permissions to storage accounts. Possible values include: 'backup',
'delete', 'deletesas', 'get', 'getsas', 'list', 'listsas', 'purge', 'recover', 'regeneratekey',
'restore', 'set', 'setsas', and 'update'.
CLI Example:
.. code-block:: bash
salt-call azurerm_keyvault_vault.update_access_policy test_name test_rg test_kind test_policies
"""
result = {}
vconn = saltext.azurerm.utils.azurerm.get_client("keyvault", **kwargs)
# Create the VaultAccessPolicyProperties object
try:
propsmodel = saltext.azurerm.utils.azurerm.create_object_model(
"keyvault",
"VaultAccessPolicyProperties",
access_policies=access_policies,
**kwargs,
)
except TypeError as exc:
result = {"error": "The object model could not be built. ({})".format(str(exc))}
return result
try:
vault = vconn.vaults.update_access_policy(
vault_name=name,
resource_group_name=resource_group,
operation_kind=operation_kind,
properties=propsmodel,
)
result = vault.as_dict()
except HttpResponseError as exc:
saltext.azurerm.utils.azurerm.log_cloud_error("keyvault", str(exc), **kwargs)
result = {"error": str(exc)}
return result
|
/saltext.azurerm-4.0.1.tar.gz/saltext.azurerm-4.0.1/src/saltext/azurerm/modules/azurerm_keyvault_vault.py
| 0.759582 | 0.166472 |
azurerm_keyvault_vault.py
|
pypi
|