desc | decl | bodies
---|---|---|
'Ensure that nested values can be selectively overridden in a map file'
| def test_cloud_map_merge_conf(self):
| with patch('salt.config.check_driver_dependencies', MagicMock(return_value=True)):
with patch('salt.cloud.Map.read', MagicMock(return_value=EXAMPLE_MAP)):
self.maxDiff = None
opts = {'extension_modules': '/var/cache/salt/master/extmods', 'providers': EXAMPLE_PROVIDERS, 'profiles': EXAMPLE_PROFILES}
cloud_map = salt.cloud.Map(opts)
merged_profile = {'create': {'db1': {'cluster': 'nycvirt', 'cpus': 4, 'datastore': 'datastore1', 'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1', 'size': 40}}, 'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb', 'name': 'vlan50', 'switch_type': 'standard'}}, 'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}}, 'driver': 'vmware', 'extra_config': {'mem.hotadd': 'yes'}, 'folder': 'coreinfra', 'image': 'rhel6_64Guest', 'memory': '16GB', 'name': 'db1', 'num_cpus': 2, 'password': '123456', 'power_on': True, 'profile': 'nyc-vm', 'provider': 'nyc_vcenter:vmware', 'resourcepool': 'Resources', 'url': 'vca1.saltstack.com', 'user': 'root'}}}
self.assertEqual(cloud_map.map_data(), merged_profile)
|
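The EXAMPLE_MAP fixture patched into salt.cloud.Map.read above is not shown in this extract. A hypothetical sketch of the kind of map data the test depends on (the real fixture is defined alongside EXAMPLE_PROVIDERS and EXAMPLE_PROFILES in the test module) could look like the following, with db1 selectively overriding a couple of nested profile keys:

EXAMPLE_MAP = {
    'nyc-vm': {
        'db1': {
            # Illustrative overrides only; the real fixture carries more keys.
            'cpus': 4,
            'memory': '16GB',
        },
    },
}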
'Tests passing in master_config_path and master_config kwargs.'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_cloud_config_double_master_path(self):
| with patch('salt.config.load_config', MagicMock(return_value={})):
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH, master_config_path='foo', master_config='bar')
|
'Tests passing in providers_config_path and providers_config kwargs.'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_cloud_config_double_providers_path(self):
| with patch('salt.config.load_config', MagicMock(return_value={})):
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH, providers_config_path='foo', providers_config='bar')
|
'Tests passing in profiles_config_path and profiles_config kwargs.'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_cloud_config_double_profiles_path(self):
| with patch('salt.config.load_config', MagicMock(return_value={})):
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH, profiles_config_path='foo', profiles_config='bar')
|
'Tests mixing old cloud providers with pre-configured provider configurations
using the providers_config kwarg'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_cloud_config_providers_in_opts(self):
| with patch('salt.config.load_config', MagicMock(return_value={})):
with patch('salt.config.apply_cloud_config', MagicMock(return_value={'providers': 'foo'})):
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH, providers_config='bar')
|
'Tests mixing old cloud providers with pre-configured provider configurations
using the providers_config_path kwarg'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_cloud_config_providers_in_opts_path(self):
| with patch('salt.config.load_config', MagicMock(return_value={})):
with patch('salt.config.apply_cloud_config', MagicMock(return_value={'providers': 'foo'})):
with patch('os.path.isfile', MagicMock(return_value=True)):
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH, providers_config_path='bar')
|
'Tests the contents of the \'deploy_scripts_search_path\' tuple to ensure that
the correct deploy search paths are present.
There should be two search paths reported in the tuple: ``/etc/salt/cloud.deploy.d``
and ``<path-to-salt-install>/salt/cloud/deploy``. The first element is usually
``/etc/salt/cloud.deploy.d``, but sometimes it can be something like
``/etc/local/salt/cloud.deploy.d``, so we\'ll only test against the last part of
the path.'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_cloud_config_deploy_scripts_search_path(self):
| with patch('os.path.isdir', MagicMock(return_value=True)):
search_paths = sconfig.cloud_config('/etc/salt/cloud').get('deploy_scripts_search_path')
etc_deploy_path = '/salt/cloud.deploy.d'
deploy_path = '/salt/cloud/deploy'
if salt.utils.platform.is_windows():
etc_deploy_path = '/salt\\cloud.deploy.d'
deploy_path = '\\salt\\cloud\\deploy'
self.assertTrue(search_paths[0].endswith(etc_deploy_path))
self.assertTrue(search_paths[1].endswith(deploy_path))
|
'Tests when the provider is not contained in a list of details'
| def test_apply_cloud_config_no_provider_detail_list(self):
| overrides = {'providers': {'foo': [{'bar': 'baz'}]}}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_config, overrides, defaults=DEFAULT)
|
'Tests when the provider is not contained in the details dictionary'
| def test_apply_cloud_config_no_provider_detail_dict(self):
| overrides = {'providers': {'foo': {'bar': 'baz'}}}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_config, overrides, defaults=DEFAULT)
|
'Tests success when valid data is passed into the function as a list'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_apply_cloud_config_success_list(self):
| with patch('salt.config.old_to_new', MagicMock(return_value={'default_include': 'path/to/some/cloud/conf/file', 'providers': {'foo': {'bar': {'driver': 'foo:bar'}}}})):
overrides = {'providers': {'foo': [{'driver': 'bar'}]}}
ret = {'default_include': 'path/to/some/cloud/conf/file', 'providers': {'foo': {'bar': {'driver': 'foo:bar'}}}}
self.assertEqual(sconfig.apply_cloud_config(overrides, defaults=DEFAULT), ret)
|
'Tests success when valid data is passed into the function as a dictionary'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_apply_cloud_config_success_dict(self):
| with patch('salt.config.old_to_new', MagicMock(return_value={'default_include': 'path/to/some/cloud/conf/file', 'providers': {'foo': {'bar': {'driver': 'foo:bar'}}}})):
overrides = {'providers': {'foo': {'driver': 'bar'}}}
ret = {'default_include': 'path/to/some/cloud/conf/file', 'providers': {'foo': {'bar': {'driver': 'foo:bar'}}}}
self.assertEqual(sconfig.apply_cloud_config(overrides, defaults=DEFAULT), ret)
|
'Tests passing in a bad profile format in overrides'
| def test_apply_vm_profiles_config_bad_profile_format(self):
| overrides = {'foo': 'bar', 'conf_file': PATH}
self.assertRaises(SaltCloudConfigError, sconfig.apply_vm_profiles_config, PATH, overrides, defaults=DEFAULT)
|
'Tests passing in valid provider and profile config files successfully'
| def test_apply_vm_profiles_config_success(self):
| providers = {'test-provider': {'digital_ocean': {'driver': 'digital_ocean', 'profiles': {}}}}
overrides = {'test-profile': {'provider': 'test-provider', 'image': 'Ubuntu 12.10 x64', 'size': '512MB'}, 'conf_file': PATH}
ret = {'test-profile': {'profile': 'test-profile', 'provider': 'test-provider:digital_ocean', 'image': 'Ubuntu 12.10 x64', 'size': '512MB'}}
self.assertEqual(sconfig.apply_vm_profiles_config(providers, overrides, defaults=DEFAULT), ret)
|
'Tests profile extends functionality with valid provider and profile configs'
| def test_apply_vm_profiles_config_extend_success(self):
| providers = {'test-config': {'ec2': {'profiles': {}, 'driver': 'ec2'}}}
overrides = {'Amazon': {'image': 'test-image-1', 'extends': 'dev-instances'}, 'Fedora': {'image': 'test-image-2', 'extends': 'dev-instances'}, 'conf_file': PATH, 'dev-instances': {'ssh_username': 'test_user', 'provider': 'test-config'}}
ret = {'Amazon': {'profile': 'Amazon', 'ssh_username': 'test_user', 'image': 'test-image-1', 'provider': 'test-config:ec2'}, 'Fedora': {'profile': 'Fedora', 'ssh_username': 'test_user', 'image': 'test-image-2', 'provider': 'test-config:ec2'}, 'dev-instances': {'profile': 'dev-instances', 'ssh_username': 'test_user', 'provider': 'test-config:ec2'}}
self.assertEqual(sconfig.apply_vm_profiles_config(providers, overrides, defaults=DEFAULT), ret)
|
'Tests profile extends and recursively merges data elements'
| def test_apply_vm_profiles_config_extend_override_success(self):
| self.maxDiff = None
providers = {'test-config': {'ec2': {'profiles': {}, 'driver': 'ec2'}}}
overrides = {'Fedora': {'image': 'test-image-2', 'extends': 'dev-instances', 'minion': {'grains': {'stage': 'experimental'}}}, 'conf_file': PATH, 'dev-instances': {'ssh_username': 'test_user', 'provider': 'test-config', 'minion': {'grains': {'role': 'webserver'}}}}
ret = {'Fedora': {'profile': 'Fedora', 'ssh_username': 'test_user', 'image': 'test-image-2', 'minion': {'grains': {'role': 'webserver', 'stage': 'experimental'}}, 'provider': 'test-config:ec2'}, 'dev-instances': {'profile': 'dev-instances', 'ssh_username': 'test_user', 'minion': {'grains': {'role': 'webserver'}}, 'provider': 'test-config:ec2'}}
self.assertEqual(sconfig.apply_vm_profiles_config(providers, overrides, defaults=DEFAULT), ret)
|
'Tests when two providers are given with the same provider name'
| def test_apply_cloud_providers_config_same_providers(self):
| overrides = {'my-dev-envs': [{'id': 'ABCDEFGHIJKLMNOP', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2'}, {'apikey': 'abcdefghijklmnopqrstuvwxyz', 'password': 'supersecret', 'driver': 'ec2'}], 'conf_file': PATH}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_providers_config, overrides, DEFAULT)
|
'Tests the successful extension of a cloud provider'
| def test_apply_cloud_providers_config_extend(self):
| overrides = {'my-production-envs': [{'extends': 'my-dev-envs:ec2', 'location': 'us-east-1', 'user': '[email protected]'}], 'my-dev-envs': [{'id': 'ABCDEFGHIJKLMNOP', 'user': '[email protected]', 'location': 'ap-southeast-1', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2'}, {'apikey': 'abcdefghijklmnopqrstuvwxyz', 'password': 'supersecret', 'driver': 'linode'}], 'conf_file': PATH}
ret = {'my-production-envs': {'ec2': {'profiles': {}, 'location': 'us-east-1', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2', 'id': 'ABCDEFGHIJKLMNOP', 'user': '[email protected]'}}, 'my-dev-envs': {'linode': {'apikey': 'abcdefghijklmnopqrstuvwxyz', 'password': 'supersecret', 'profiles': {}, 'driver': 'linode'}, 'ec2': {'profiles': {}, 'location': 'ap-southeast-1', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2', 'id': 'ABCDEFGHIJKLMNOP', 'user': '[email protected]'}}}
self.assertEqual(ret, sconfig.apply_cloud_providers_config(overrides, defaults=DEFAULT))
|
'Tests the successful extension of two cloud providers'
| def test_apply_cloud_providers_config_extend_multiple(self):
| overrides = {'my-production-envs': [{'extends': 'my-dev-envs:ec2', 'location': 'us-east-1', 'user': '[email protected]'}, {'password': 'new-password', 'extends': 'my-dev-envs:linode', 'location': 'Salt Lake City'}], 'my-dev-envs': [{'id': 'ABCDEFGHIJKLMNOP', 'user': '[email protected]', 'location': 'ap-southeast-1', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2'}, {'apikey': 'abcdefghijklmnopqrstuvwxyz', 'password': 'supersecret', 'driver': 'linode'}], 'conf_file': PATH}
ret = {'my-production-envs': {'linode': {'apikey': 'abcdefghijklmnopqrstuvwxyz', 'profiles': {}, 'location': 'Salt Lake City', 'driver': 'linode', 'password': 'new-password'}, 'ec2': {'user': '[email protected]', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2', 'id': 'ABCDEFGHIJKLMNOP', 'profiles': {}, 'location': 'us-east-1'}}, 'my-dev-envs': {'linode': {'apikey': 'abcdefghijklmnopqrstuvwxyz', 'password': 'supersecret', 'profiles': {}, 'driver': 'linode'}, 'ec2': {'profiles': {}, 'user': '[email protected]', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2', 'id': 'ABCDEFGHIJKLMNOP', 'location': 'ap-southeast-1'}}}
self.assertEqual(ret, sconfig.apply_cloud_providers_config(overrides, defaults=DEFAULT))
|
'Tests when the extension contains an alias not found in providers list'
| def test_apply_cloud_providers_config_extends_bad_alias(self):
| overrides = {'my-production-envs': [{'extends': 'test-alias:ec2', 'location': 'us-east-1', 'user': '[email protected]'}], 'my-dev-envs': [{'id': 'ABCDEFGHIJKLMNOP', 'user': '[email protected]', 'location': 'ap-southeast-1', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2'}], 'conf_file': PATH}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_providers_config, overrides, DEFAULT)
|
'Tests when the extension contains a provider not found in providers list'
| def test_apply_cloud_providers_config_extends_bad_provider(self):
| overrides = {'my-production-envs': [{'extends': 'my-dev-envs:linode', 'location': 'us-east-1', 'user': '[email protected]'}], 'my-dev-envs': [{'id': 'ABCDEFGHIJKLMNOP', 'user': '[email protected]', 'location': 'ap-southeast-1', 'key': 'supersecretkeysupersecretkey', 'driver': 'ec2'}], 'conf_file': PATH}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_providers_config, overrides, DEFAULT)
|
'Tests when no provider is supplied in the extends statement'
| def test_apply_cloud_providers_config_extends_no_provider(self):
| overrides = {'my-production-envs': [{'extends': 'my-dev-envs', 'location': 'us-east-1', 'user': '[email protected]'}], 'my-dev-envs': [{'id': 'ABCDEFGHIJKLMNOP', 'user': '[email protected]', 'location': 'ap-southeast-1', 'key': 'supersecretkeysupersecretkey', 'driver': 'linode'}], 'conf_file': PATH}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_providers_config, overrides, DEFAULT)
|
'Tests when extends is not in the list of providers'
| def test_apply_cloud_providers_extends_not_in_providers(self):
| overrides = {'my-production-envs': [{'extends': 'my-dev-envs ec2', 'location': 'us-east-1', 'user': '[email protected]'}], 'my-dev-envs': [{'id': 'ABCDEFGHIJKLMNOP', 'user': '[email protected]', 'location': 'ap-southeast-1', 'key': 'supersecretkeysupersecretkey', 'driver': 'linode'}], 'conf_file': PATH}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_providers_config, overrides, DEFAULT)
|
'Tests when provider alias is not in opts'
| def test_is_provider_configured_no_alias(self):
| opts = {'providers': 'test'}
provider = 'foo:bar'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
|
'Tests when provider driver is not in opts'
| def test_is_provider_configured_no_driver(self):
| opts = {'providers': {'foo': 'baz'}}
provider = 'foo:bar'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
|
'Tests when a required configuration key is not set'
| def test_is_provider_configured_key_is_none(self):
| opts = {'providers': {'foo': {'bar': {'api_key': None}}}}
provider = 'foo:bar'
self.assertFalse(sconfig.is_provider_configured(opts, provider, required_keys=('api_key',)))
|
'Tests successful cloud provider configuration'
| def test_is_provider_configured_success(self):
| opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'foo:bar'
ret = {'api_key': 'baz'}
self.assertEqual(sconfig.is_provider_configured(opts, provider, required_keys=('api_key',)), ret)
|
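For context, a hedged usage sketch (all names here are illustrative, not from the source) of how a salt-cloud driver module typically gates its __virtual__ on is_provider_configured, which is the behaviour the tests above and below exercise:

# Illustrative only: 'my-provider:mydriver' and 'api_key' are placeholder names,
# and __opts__ is the dunder the salt loader injects into driver modules.
import salt.config as config

def __virtual__():
    if config.is_provider_configured(__opts__, 'my-provider:mydriver',
                                     required_keys=('api_key',)):
        return 'mydriver'
    return False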
'Tests when the driver is not the same as the provider when
searching through multiple providers'
| def test_is_provider_configured_multiple_driver_not_provider(self):
| opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'foo'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
|
'Tests when a required configuration key is not set when
searching through multiple providers'
| def test_is_provider_configured_multiple_key_is_none(self):
| opts = {'providers': {'foo': {'bar': {'api_key': None}}}}
provider = 'bar'
self.assertFalse(sconfig.is_provider_configured(opts, provider, required_keys=('api_key',)))
|
'Tests successful cloud provider configuration when searching
through multiple providers'
| def test_is_provider_configured_multiple_success(self):
| opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'bar'
ret = {'api_key': 'baz'}
self.assertEqual(sconfig.is_provider_configured(opts, provider, required_keys=('api_key',)), ret)
|
'Tests that cloud.{providers,profiles}.d directories are loaded, even if not
directly passed in through path'
| def test_includes_load(self):
| config = sconfig.cloud_config(self.get_config_file_path('cloud'))
self.assertIn('ec2-config', config['providers'])
self.assertIn('ec2-test', config['profiles'])
|
'Tests that include_config function returns valid configuration'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_include_config_without_errors(self):
| include_file = 'minion.d/my.conf'
config_path = '/etc/salt/minion'
config_opts = {'id': 'myminion.example.com'}
with patch('glob.glob', MagicMock(return_value=include_file)):
with patch('salt.config._read_conf_file', MagicMock(return_value=config_opts)):
configuration = sconfig.include_config(include_file, config_path, verbose=False)
self.assertEqual(config_opts, configuration)
|
'Tests that include_config function returns valid configuration even on errors'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_include_config_with_errors(self):
| include_file = 'minion.d/my.conf'
config_path = '/etc/salt/minion'
config_opts = {}
with patch('glob.glob', MagicMock(return_value=include_file)):
with patch('salt.config._read_conf_file', _salt_configuration_error):
configuration = sconfig.include_config(include_file, config_path, verbose=False)
self.assertEqual(config_opts, configuration)
|
'Tests that include_config exits on errors'
| @skipIf(NO_MOCK, NO_MOCK_REASON)
def test_include_config_with_errors_exit(self):
| include_file = 'minion.d/my.conf'
config_path = '/etc/salt/minion'
with patch('glob.glob', MagicMock(return_value=include_file)):
with patch('salt.config._read_conf_file', _salt_configuration_error):
with self.assertRaises(SystemExit):
sconfig.include_config(include_file, config_path, verbose=False, exit_on_config_errors=True)
|
'Tests the opts value of the \'log_file\' after running through the
various default dict updates. \'log_file\' should be updated to match
the DEFAULT_API_OPTS \'api_logfile\' value.'
| def test_api_config_log_file_values(self):
| with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)):
expected = '/var/log/salt/api'
if salt.utils.platform.is_windows():
expected = 'c:\\salt\\var\\log\\salt\\api'
ret = salt.config.api_config('/some/fake/path')
self.assertEqual(ret['log_file'], expected)
|
'Tests the opts value of the \'pidfile\' after running through the
various default dict updates. \'pidfile\' should be updated to match
the DEFAULT_API_OPTS \'api_pidfile\' value.'
| def test_api_config_pidfile_values(self):
| with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)):
expected = '/var/run/salt-api.pid'
if salt.utils.platform.is_windows():
expected = 'c:\\salt\\var\\run\\salt-api.pid'
ret = salt.config.api_config('/some/fake/path')
self.assertEqual(ret['pidfile'], expected)
|
'Tests the opts value of the api config values after running through the
various default dict updates that should be overridden by settings in
the user\'s master config file.'
| @destructiveTest
def test_master_config_file_overrides_defaults(self):
| foo_dir = '/foo/bar/baz'
hello_dir = '/hello/world'
if salt.utils.platform.is_windows():
foo_dir = 'c:\\foo\\bar\\baz'
hello_dir = 'c:\\hello\\world'
mock_master_config = {'api_pidfile': foo_dir, 'api_logfile': hello_dir, 'rest_timeout': 5}
mock_master_config.update(MOCK_MASTER_DEFAULT_OPTS.copy())
with patch('salt.config.client_config', MagicMock(return_value=mock_master_config)):
ret = salt.config.api_config('/some/fake/path')
self.assertEqual(ret['rest_timeout'], 5)
self.assertEqual(ret['api_pidfile'], foo_dir)
self.assertEqual(ret['pidfile'], foo_dir)
self.assertEqual(ret['api_logfile'], hello_dir)
self.assertEqual(ret['log_file'], hello_dir)
|
'Tests the opts value of the api_logfile, log_file, api_pidfile, and pidfile
when a custom root directory is used. This ensures that each of these
values is present in the list of opts keys that should have the root_dir
prepended when the api_config function returns the opts dictionary.'
| @destructiveTest
def test_api_config_prepend_root_dirs_return(self):
| mock_log = '/mock/root/var/log/salt/api'
mock_pid = '/mock/root/var/run/salt-api.pid'
mock_master_config = MOCK_MASTER_DEFAULT_OPTS.copy()
mock_master_config['root_dir'] = '/mock/root/'
if salt.utils.platform.is_windows():
mock_log = 'c:\\mock\\root\\var\\log\\salt\\api'
mock_pid = 'c:\\mock\\root\\var\\run\\salt-api.pid'
mock_master_config['root_dir'] = 'c:\\mock\\root'
with patch('salt.config.client_config', MagicMock(return_value=mock_master_config)):
ret = salt.config.api_config('/some/fake/path')
self.assertEqual(ret['api_logfile'], mock_log)
self.assertEqual(ret['log_file'], mock_log)
self.assertEqual(ret['api_pidfile'], mock_pid)
self.assertEqual(ret['pidfile'], mock_pid)
|
'This tests whether or not a larger hash causes the sock path to exceed
the system\'s max sock path length. See the below link for more
information.
https://github.com/saltstack/salt/issues/12172#issuecomment-43903643'
| @skip_if_not_root
def test_sock_path_len(self):
| opts = {'id': 'salt-testing', 'hash_type': 'sha512', 'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'), 'extension_modules': ''}
with patch.dict(__opts__, opts):
try:
event_publisher = event.AsyncEventPublisher(__opts__)
result = True
except SaltSystemExit:
result = False
self.assertTrue(result)
|
'Tests that the _handle_decoded_payload function returns when a jid is given that is already present
in the jid_queue.
Note: This test doesn\'t contain all of the patch decorators above the function like the other tests
for _handle_decoded_payload below. This is essential to this test as the call to the function must
return None BEFORE any of the processes are spun up because we should be avoiding firing duplicate
jobs.'
| def test_handle_decoded_payload_jid_match_in_jid_queue(self):
| mock_opts = salt.config.DEFAULT_MINION_OPTS
mock_data = {'fun': 'foo.bar', 'jid': 123}
mock_jid_queue = [123]
try:
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop())
ret = minion._handle_decoded_payload(mock_data)
self.assertEqual(minion.jid_queue, mock_jid_queue)
self.assertIsNone(ret)
finally:
minion.destroy()
|
'Tests that the _handle_decoded_payload function adds a jid to the minion\'s jid_queue when the new
jid isn\'t already present in the jid_queue.'
| def test_handle_decoded_payload_jid_queue_addition(self):
| with patch('salt.minion.Minion.ctx', MagicMock(return_value={})):
with patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)):
with patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
mock_jid = 11111
mock_opts = salt.config.DEFAULT_MINION_OPTS
mock_data = {'fun': 'foo.bar', 'jid': mock_jid}
mock_jid_queue = [123, 456]
try:
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop())
self.assertEqual(minion.jid_queue, mock_jid_queue)
minion._handle_decoded_payload(mock_data)
mock_jid_queue.append(mock_jid)
self.assertEqual(minion.jid_queue, mock_jid_queue)
finally:
minion.destroy()
|
'Tests that the _handle_decoded_payload function removes a jid from the minion\'s jid_queue when the
minion\'s jid_queue high water mark (minion_jid_queue_hwm) is hit.'
| def test_handle_decoded_payload_jid_queue_reduced_minion_jid_queue_hwm(self):
| with patch('salt.minion.Minion.ctx', MagicMock(return_value={})):
with patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)):
with patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
mock_opts = salt.config.DEFAULT_MINION_OPTS
mock_opts['minion_jid_queue_hwm'] = 2
mock_data = {'fun': 'foo.bar', 'jid': 789}
mock_jid_queue = [123, 456]
try:
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop())
self.assertEqual(minion.jid_queue, mock_jid_queue)
minion._handle_decoded_payload(mock_data)
self.assertEqual(len(minion.jid_queue), 2)
self.assertEqual(minion.jid_queue, [456, 789])
finally:
minion.destroy()
|
'Class method to set up the zmq echo socket'
| @classmethod
def setUpClass(cls):
| def echo_server():
'''
A server that echoes the message sent to it over zmq

Optional "sleep" can be sent to delay response
'''
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind('tcp://*:{0}'.format(SREQTestCase.port))
payload = salt.payload.Serial('msgpack')
while SREQTestCase.thread_running.is_set():
try:
message = socket.recv(zmq.NOBLOCK)
msg_deserialized = payload.loads(message)
log.info('Echo server received message: {0}'.format(msg_deserialized))
if (isinstance(msg_deserialized['load'], dict) and msg_deserialized['load'].get('sleep')):
log.info('Test echo server sleeping for {0} seconds'.format(msg_deserialized['load']['sleep']))
time.sleep(msg_deserialized['load']['sleep'])
socket.send(message)
except zmq.ZMQError as exc:
if (exc.errno == errno.EAGAIN):
continue
raise
SREQTestCase.thread_running = threading.Event()
SREQTestCase.thread_running.set()
SREQTestCase.echo_server = threading.Thread(target=echo_server)
SREQTestCase.echo_server.start()
|
'Remove echo server'
| @classmethod
def tearDownClass(cls):
| SREQTestCase.thread_running.clear()
SREQTestCase.echo_server.join()
|
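The get_sreq helper called by the tests below is not part of this extract. A minimal sketch, assuming it simply builds a salt.payload.SREQ pointed at the echo server started in setUpClass:

def get_sreq(self):
    # Assumed helper: connect a SREQ channel to the local echo server.
    return salt.payload.SREQ('tcp://127.0.0.1:{0}'.format(SREQTestCase.port))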
'Test creation, send/recv'
| def test_send_auto(self):
| sreq = self.get_sreq()
assert (sreq.send_auto({}) == {'enc': 'clear', 'load': {}})
assert (sreq.send_auto({'load': 'foo'}) == {'load': 'foo', 'enc': 'clear'})
|
'Test SREQ Timeouts'
| @skipIf(True, 'Disabled until we can figure out how to make this more reliable.')
def test_timeout(self):
| sreq = self.get_sreq()
start = time.time()
log.info('Sending tries=0, timeout=0')
try:
sreq.send('clear', 'foo', tries=0, timeout=0)
except salt.exceptions.SaltReqTimeoutError:
pass
assert ((time.time() - start) < 1)
log.info('Sending tries=1, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=1, timeout=1)
assert ((time.time() - start) >= 1)
log.info('Sending tries=2, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=2, timeout=1)
assert ((time.time() - start) >= 2)
log.info('Sending regular send')
assert (sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'})
|
'Test the __del__ capabilities'
| def test_destroy(self):
| sreq = self.get_sreq()
sreq.destroy()
|
'Ensure we aren\'t persisting context dunders between tests'
| def setUp(self):
| docker_mod.__context__.pop('docker.client', None)
|
'Check that when docker.ps is called with host set to ``True``, the
result of the ``network.interfaces`` command is included in the returned result.'
| def test_ps_with_host_true(self):
| client = Mock()
client.containers = MagicMock(return_value=[])
get_client_mock = MagicMock(return_value=client)
network_interfaces = Mock(return_value={'mocked': None})
with patch.dict(docker_mod.__salt__, {'network.interfaces': network_interfaces}):
with patch.object(docker_mod, '_get_client', get_client_mock):
ret = docker_mod.ps_(host=True)
self.assertEqual(ret, {'host': {'interfaces': {'mocked': None}}})
|
'Check that docker.ps accepts the filters parameter.'
| def test_ps_with_filters(self):
| client = Mock()
client.containers = MagicMock(return_value=[])
get_client_mock = MagicMock(return_value=client)
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.ps_(filters={'label': 'KEY'})
client.containers.assert_called_once_with(all=True, filters={'label': 'KEY'})
|
'Every command that might modify docker container state
should trigger an update via ``mine.send``.'
| def test_check_mine_cache_is_refreshed_on_container_change_event(self):
| with patch.object(docker_mod, '_get_exec_driver'):
client_args_mock = MagicMock(return_value={'create_container': ['image', 'command', 'hostname', 'user', 'detach', 'stdin_open', 'tty', 'ports', 'environment', 'volumes', 'network_disabled', 'name', 'entrypoint', 'working_dir', 'domainname', 'cpuset', 'host_config', 'mac_address', 'labels', 'volume_driver', 'stop_signal', 'networking_config', 'healthcheck', 'stop_timeout'], 'host_config': ['binds', 'port_bindings', 'lxc_conf', 'publish_all_ports', 'links', 'privileged', 'dns', 'dns_search', 'volumes_from', 'network_mode', 'restart_policy', 'cap_add', 'cap_drop', 'devices', 'extra_hosts', 'read_only', 'pid_mode', 'ipc_mode', 'security_opt', 'ulimits', 'log_config', 'mem_limit', 'memswap_limit', 'mem_reservation', 'kernel_memory', 'mem_swappiness', 'cgroup_parent', 'group_add', 'cpu_quota', 'cpu_period', 'blkio_weight', 'blkio_weight_device', 'device_read_bps', 'device_write_bps', 'device_read_iops', 'device_write_iops', 'oom_kill_disable', 'shm_size', 'sysctls', 'tmpfs', 'oom_score_adj', 'dns_opt', 'cpu_shares', 'cpuset_cpus', 'userns_mode', 'pids_limit', 'isolation', 'auto_remove', 'storage_opt'], 'networking_config': ['aliases', 'links', 'ipv4_address', 'ipv6_address', 'link_local_ips']})
for (command_name, args) in (('create', ()), ('rm_', ()), ('kill', ()), ('pause', ()), ('signal_', ('KILL',)), ('start', ()), ('stop', ()), ('unpause', ()), ('_run', ('command',)), ('_script', ('command',))):
mine_send = Mock()
command = getattr(docker_mod, command_name)
client = MagicMock()
client.api_version = '1.12'
with patch.dict(docker_mod.__salt__, {'mine.send': mine_send, 'container_resource.run': MagicMock(), 'cp.cache_file': MagicMock(return_value=False), 'docker.get_client_args': client_args_mock}):
with patch.object(docker_mod, '_get_client', client):
command('container', *args)
mine_send.assert_called_with('docker.ps', verbose=True, all=True, host=True)
|
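The network and volume tests below are guarded by a docker-py version check. The real _docker_py_version helper is defined in the test module's header, which this extract omits; a hedged sketch of such a guard might be:

def _docker_py_version():
    # Return the installed docker-py version as a tuple, or (0,) when the
    # library is missing, so the skipIf guards can compare against (1, 5, 0).
    try:
        import docker
        return tuple(int(part) for part in docker.__version__.split('.')[:3])
    except (ImportError, ValueError):
        return (0,)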
'test list networks.'
| @skipIf((_docker_py_version() < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_list_networks(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
host_config = {}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.networks(names=['foo'], ids=['01234'])
client.networks.assert_called_once_with(names=['foo'], ids=['01234'])
|
'test create network.'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_create_network(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
host_config = {}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.create_network('foo', driver='bridge', driver_opts={})
client.create_network.assert_called_once_with('foo', driver='bridge', options={}, check_duplicate=True)
|
'test remove network.'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_remove_network(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
host_config = {}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.remove_network('foo')
client.remove_network.assert_called_once_with('foo')
|
'test inspect network.'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_inspect_network(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
host_config = {}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.inspect_network('foo')
client.inspect_network.assert_called_once_with('foo')
|
'test connect_container_to_network'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_connect_container_to_network(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
host_config = {}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
context = {'docker.exec_driver': 'docker-exec'}
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.dict(docker_mod.__context__, context):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.connect_container_to_network('container', 'foo')
client.connect_container_to_network.assert_called_once_with('container', 'foo', None)
|
'test disconnect_container_from_network'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_disconnect_container_from_network(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
host_config = {}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.disconnect_container_from_network('container', 'foo')
client.disconnect_container_from_network.assert_called_once_with('container', 'foo')
|
'test list volumes.'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_list_volumes(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.volumes(filters={'dangling': [True]})
client.volumes.assert_called_once_with(filters={'dangling': [True]})
|
'test create volume.'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_create_volume(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.create_volume('foo', driver='bridge', driver_opts={})
client.create_volume.assert_called_once_with('foo', driver='bridge', driver_opts={})
|
'test remove volume.'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_remove_volume(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.remove_volume('foo')
client.remove_volume.assert_called_once_with('foo')
|
'test inspect volume.'
| @skipIf((docker_version < (1, 5, 0)), 'docker module must be installed to run this test or is too old. >=1.5.0')
def test_inspect_volume(self, *args):
| __salt__ = {'config.get': Mock(), 'mine.send': Mock()}
client = Mock()
client.api_version = '1.21'
get_client_mock = MagicMock(return_value=client)
with patch.dict(docker_mod.__dict__, {'__salt__': __salt__}):
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod.inspect_volume('foo')
client.inspect_volume.assert_called_once_with('foo')
|
'test build sls image.'
| def test_sls_build(self, *args):
| docker_start_mock = MagicMock(return_value={})
docker_create_mock = MagicMock(return_value={'Id': 'ID', 'Name': 'NAME'})
docker_stop_mock = MagicMock(return_value={'state': {'old': 'running', 'new': 'stopped'}, 'result': True})
docker_rm_mock = MagicMock(return_value={})
docker_commit_mock = MagicMock(return_value={'Id': 'ID2', 'Image': 'foo', 'Time_Elapsed': 42})
docker_sls_mock = MagicMock(return_value={'file_|-/etc/test.sh_|-/etc/test.sh_|-managed': {'comment': 'File /etc/test.sh is in the correct state', 'name': '/etc/test.sh', 'start_time': '07:04:26.834792', 'result': True, 'duration': 13.492, '__run_num__': 0, 'changes': {}}, 'test_|-always-passes_|-foo_|-succeed_without_changes': {'comment': 'Success!', 'name': 'foo', 'start_time': '07:04:26.848915', 'result': True, 'duration': 0.363, '__run_num__': 1, 'changes': {}}})
ret = None
with patch.object(docker_mod, 'start', docker_start_mock):
with patch.object(docker_mod, 'create', docker_create_mock):
with patch.object(docker_mod, 'stop', docker_stop_mock):
with patch.object(docker_mod, 'commit', docker_commit_mock):
with patch.object(docker_mod, 'sls', docker_sls_mock):
with patch.object(docker_mod, 'rm_', docker_rm_mock):
ret = docker_mod.sls_build('foo', mods='foo')
docker_create_mock.assert_called_once_with(cmd='sleep infinity', image='opensuse/python', interactive=True, tty=True)
docker_start_mock.assert_called_once_with('ID')
docker_sls_mock.assert_called_once_with('ID', 'foo', 'base')
docker_stop_mock.assert_called_once_with('ID')
docker_rm_mock.assert_called_once_with('ID')
docker_commit_mock.assert_called_once_with('ID', 'foo')
self.assertEqual({'Id': 'ID2', 'Image': 'foo', 'Time_Elapsed': 42}, ret)
|
'test build sls image in dryrun mode.'
| def test_sls_build_dryrun(self, *args):
| docker_start_mock = MagicMock(return_value={})
docker_create_mock = MagicMock(return_value={'Id': 'ID', 'Name': 'NAME'})
docker_stop_mock = MagicMock(return_value={'state': {'old': 'running', 'new': 'stopped'}, 'result': True})
docker_rm_mock = MagicMock(return_value={})
docker_sls_mock = MagicMock(return_value={'file_|-/etc/test.sh_|-/etc/test.sh_|-managed': {'comment': 'File /etc/test.sh is in the correct state', 'name': '/etc/test.sh', 'start_time': '07:04:26.834792', 'result': True, 'duration': 13.492, '__run_num__': 0, 'changes': {}}, 'test_|-always-passes_|-foo_|-succeed_without_changes': {'comment': 'Success!', 'name': 'foo', 'start_time': '07:04:26.848915', 'result': True, 'duration': 0.363, '__run_num__': 1, 'changes': {}}})
ret = None
with patch.object(docker_mod, 'start', docker_start_mock):
with patch.object(docker_mod, 'create', docker_create_mock):
with patch.object(docker_mod, 'stop', docker_stop_mock):
with patch.object(docker_mod, 'rm_', docker_rm_mock):
with patch.object(docker_mod, 'sls', docker_sls_mock):
ret = docker_mod.sls_build('foo', mods='foo', dryrun=True)
docker_create_mock.assert_called_once_with(cmd='sleep infinity', image='opensuse/python', interactive=True, tty=True)
docker_start_mock.assert_called_once_with('ID')
docker_sls_mock.assert_called_once_with('ID', 'foo', 'base')
docker_stop_mock.assert_called_once_with('ID')
docker_rm_mock.assert_called_once_with('ID')
self.assertEqual({'file_|-/etc/test.sh_|-/etc/test.sh_|-managed': {'comment': 'File /etc/test.sh is in the correct state', 'name': '/etc/test.sh', 'start_time': '07:04:26.834792', 'result': True, 'duration': 13.492, '__run_num__': 0, 'changes': {}}, 'test_|-always-passes_|-foo_|-succeed_without_changes': {'comment': 'Success!', 'name': 'foo', 'start_time': '07:04:26.848915', 'result': True, 'duration': 0.363, '__run_num__': 1, 'changes': {}}}, ret)
|
'test module calling inside containers'
| def test_call_success(self):
| ret = None
docker_run_all_mock = MagicMock(return_value={'retcode': 0, 'stdout': '{"retcode": 0, "comment": "container cmd"}', 'stderr': 'err'})
docker_copy_to_mock = MagicMock(return_value={'retcode': 0})
docker_config_mock = MagicMock(return_value='')
client = Mock()
client.put_archive = Mock()
get_client_mock = MagicMock(return_value=client)
context = {'docker.exec_driver': 'docker-exec'}
salt_dunder = {'config.option': docker_config_mock}
with patch.object(docker_mod, 'run_all', docker_run_all_mock):
with patch.object(docker_mod, 'copy_to', docker_copy_to_mock):
with patch.object(docker_mod, '_get_client', get_client_mock):
with patch.dict(docker_mod.__opts__, {'cachedir': '/tmp'}):
with patch.dict(docker_mod.__salt__, salt_dunder):
with patch.dict(docker_mod.__context__, context):
for i in range(2):
ret = docker_mod.call('ID', 'test.arg', 1, 2, arg1='val1')
self.maxDiff = None
self.assertIn('mkdir', docker_run_all_mock.mock_calls[0][1][1])
self.assertIn('mkdir', docker_run_all_mock.mock_calls[4][1][1])
self.assertNotEqual(docker_run_all_mock.mock_calls[0][1][1], docker_run_all_mock.mock_calls[4][1][1])
self.assertIn('salt-call', docker_run_all_mock.mock_calls[2][1][1])
self.assertIn('salt-call', docker_run_all_mock.mock_calls[6][1][1])
self.assertNotEqual(docker_run_all_mock.mock_calls[2][1][1], docker_run_all_mock.mock_calls[6][1][1])
self.assertIn('tarfile', docker_run_all_mock.mock_calls[1][1][1])
self.assertIn('tarfile', docker_run_all_mock.mock_calls[5][1][1])
self.assertNotEqual(docker_run_all_mock.mock_calls[1][1][1], docker_run_all_mock.mock_calls[5][1][1])
self.assertIn('rm -rf', docker_run_all_mock.mock_calls[3][1][1])
self.assertIn('rm -rf', docker_run_all_mock.mock_calls[7][1][1])
self.assertNotEqual(docker_run_all_mock.mock_calls[3][1][1], docker_run_all_mock.mock_calls[7][1][1])
self.assertEqual({'retcode': 0, 'comment': 'container cmd'}, ret)
|
'docker 1.12 also reports images without tags, using `null` for their `RepoTags`.'
| def test_images_with_empty_tags(self):
| client = Mock()
client.api_version = '1.24'
client.images = Mock(return_value=[{'Id': 'sha256:abcde', 'RepoTags': None}, {'Id': 'sha256:abcdef'}, {'Id': 'sha256:abcdefg', 'RepoTags': ['image:latest']}])
get_client_mock = MagicMock(return_value=client)
with patch.object(docker_mod, '_get_client', get_client_mock):
docker_mod._clear_context()
result = docker_mod.images()
self.assertEqual(result, {'sha256:abcdefg': {'RepoTags': ['image:latest']}})
|
'Test for the parser of rc-update show results'
| def test_service_list_parser(self):
| mock = MagicMock(return_value='')
with patch.dict(gentoo_service.__salt__, {'cmd.run': mock}):
self.assertFalse(gentoo_service.get_enabled())
mock.assert_called_once_with('rc-update -v show')
|
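The gentoo_service tests below build their mocked cmd.run output through a __services helper that this extract does not include. A minimal sketch, assuming it renders {service: [runlevels]} into text shaped like 'rc-update -v show' output:

def __services(self, services):
    # Assumed helper: one ' name | level1 level2' line per service, which is
    # the shape gentoo_service.get_enabled()/get_disabled() parse.
    return '\n'.join(
        ' {0} | {1}'.format(name, ' '.join(levels))
        for name, levels in services.items()
    )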
'Test for Return a list of services that are enabled on boot'
| def test_get_enabled_single_runlevel(self):
| service_name = 'name'
runlevels = ['default']
mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': mock}):
enabled_services = gentoo_service.get_enabled()
self.assertTrue((service_name in enabled_services))
self.assertEqual(enabled_services[service_name], runlevels)
|
'Test for Return a list of services that are enabled on boot'
| def test_get_enabled_filters_out_disabled_services(self):
| service_name = 'name'
runlevels = ['default']
disabled_service = 'disabled'
service_list = self.__services({service_name: runlevels, disabled_service: []})
mock = MagicMock(return_value=service_list)
with patch.dict(gentoo_service.__salt__, {'cmd.run': mock}):
enabled_services = gentoo_service.get_enabled()
self.assertEqual(len(enabled_services), 1)
self.assertTrue((service_name in enabled_services))
self.assertEqual(enabled_services[service_name], runlevels)
|
'Test for Return a list of services that are enabled on boot at more than one runlevel'
| def test_get_enabled_with_multiple_runlevels(self):
| service_name = 'name'
runlevels = ['non-default', 'default']
mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': mock}):
enabled_services = gentoo_service.get_enabled()
self.assertTrue((service_name in enabled_services))
self.assertEqual(enabled_services[service_name][0], runlevels[1])
self.assertEqual(enabled_services[service_name][1], runlevels[0])
|
'Test for Return a list of services that are installed but disabled'
| def test_get_disabled(self):
| disabled_service = 'disabled'
enabled_service = 'enabled'
service_list = self.__services({disabled_service: [], enabled_service: ['default']})
mock = MagicMock(return_value=service_list)
with patch.dict(gentoo_service.__salt__, {'cmd.run': mock}):
disabled_services = gentoo_service.get_disabled()
self.assertTrue(len(disabled_services), 1)
self.assertTrue((disabled_service in disabled_services))
|
'Test for Returns ``True`` if the specified service is
available, otherwise returns
``False``.'
| def test_available(self):
| disabled_service = 'disabled'
enabled_service = 'enabled'
multilevel_service = 'multilevel'
missing_service = 'missing'
shutdown_service = 'shutdown'
service_list = self.__services({disabled_service: [], enabled_service: ['default'], multilevel_service: ['default', 'shutdown'], shutdown_service: ['shutdown']})
mock = MagicMock(return_value=service_list)
with patch.dict(gentoo_service.__salt__, {'cmd.run': mock}):
self.assertTrue(gentoo_service.available(enabled_service))
self.assertTrue(gentoo_service.available(multilevel_service))
self.assertTrue(gentoo_service.available(disabled_service))
self.assertTrue(gentoo_service.available(shutdown_service))
self.assertFalse(gentoo_service.available(missing_service))
|
'Test for The inverse of service.available.'
| def test_missing(self):
| disabled_service = 'disabled'
enabled_service = 'enabled'
service_list = self.__services({disabled_service: [], enabled_service: ['default']})
mock = MagicMock(return_value=service_list)
with patch.dict(gentoo_service.__salt__, {'cmd.run': mock}):
self.assertFalse(gentoo_service.missing(enabled_service))
self.assertFalse(gentoo_service.missing(disabled_service))
self.assertTrue(gentoo_service.missing('missing'))
|
'Test for Return all available boot services'
| def test_getall(self):
| disabled_service = 'disabled'
enabled_service = 'enabled'
service_list = self.__services({disabled_service: [], enabled_service: ['default']})
mock = MagicMock(return_value=service_list)
with patch.dict(gentoo_service.__salt__, {'cmd.run': mock}):
all_services = gentoo_service.get_all()
self.assertEqual(len(all_services), 2)
self.assertTrue((disabled_service in all_services))
self.assertTrue((enabled_service in all_services))
|
'Test for Start the specified service'
| def test_start(self):
| mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.start('name'))
mock.assert_called_once_with('/etc/init.d/name start', python_shell=False)
|
'Test for Stop the specified service'
| def test_stop(self):
| mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.stop('name'))
mock.assert_called_once_with('/etc/init.d/name stop', python_shell=False)
|
'Test for Restart the named service'
| def test_restart(self):
| mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.restart('name'))
mock.assert_called_once_with('/etc/init.d/name restart', python_shell=False)
|
'Test for Reload the named service'
| def test_reload_(self):
| mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.reload_('name'))
mock.assert_called_once_with('/etc/init.d/name reload', python_shell=False)
|
'Test for Zap (reset the status of) the named service'
| def test_zap(self):
| mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.zap('name'))
mock.assert_called_once_with('/etc/init.d/name zap', python_shell=False)
|
'Test for Return the status for a service'
| def test_status(self):
| mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'status.pid': mock}):
self.assertTrue(gentoo_service.status('name', 1))
mock = MagicMock(return_value=0)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertTrue(gentoo_service.status('name'))
mock.assert_called_once_with('/etc/init.d/name status', python_shell=False)
mock = MagicMock(return_value=1)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.status('name'))
mock.assert_called_once_with('/etc/init.d/name status', python_shell=False)
mock = MagicMock(return_value=3)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.status('name'))
mock.assert_called_once_with('/etc/init.d/name status', python_shell=False)
mock = MagicMock(return_value=32)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.status('name'))
mock.assert_called_once_with('/etc/init.d/name status', python_shell=False)
|
'Test for Enable the named service to start at boot'
| def test_enable(self):
| rc_update_mock = MagicMock(return_value=0)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name'))
rc_update_mock.assert_called_once_with('rc-update add name', python_shell=False)
rc_update_mock.reset_mock()
service_name = 'name'
runlevels = ['l1']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels='l2'))
rc_update_mock.assert_has_calls([call('rc-update delete name l1', python_shell=False), call('rc-update add name l2', python_shell=False)])
rc_update_mock.reset_mock()
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels='l1'))
self.assertTrue((rc_update_mock.call_count == 0))
rc_update_mock.reset_mock()
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l1']))
self.assertTrue((rc_update_mock.call_count == 0))
rc_update_mock.reset_mock()
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l2', 'l1']))
rc_update_mock.assert_called_once_with('rc-update add name l2', python_shell=False)
rc_update_mock.reset_mock()
runlevels = ['l1', 'l2']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l2']))
rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
rc_update_mock.reset_mock()
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l1', 'l3']))
rc_update_mock.assert_has_calls([call('rc-update delete name l2', python_shell=False), call('rc-update add name l3', python_shell=False)])
rc_update_mock.reset_mock()
runlevels = ['l1', 'l3', 'l5']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l2', 'l4', 'l5']))
rc_update_mock.assert_has_calls([call('rc-update delete name l1 l3', python_shell=False), call('rc-update add name l2 l4', python_shell=False)])
rc_update_mock.reset_mock()
rc_update_mock = MagicMock(return_value=1)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.enable('name'))
rc_update_mock.assert_called_once_with('rc-update add name', python_shell=False)
rc_update_mock.reset_mock()
runlevels = ['l1']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.enable('name', runlevels='l2'))
rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
rc_update_mock.reset_mock()
rc_update_mock = MagicMock()
rc_update_mock.side_effect = [0, 1]
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.enable('name', runlevels='l2'))
rc_update_mock.assert_has_calls([call('rc-update delete name l1', python_shell=False), call('rc-update add name l2', python_shell=False)])
rc_update_mock.reset_mock()
|
'Test for Disable the named service from starting at boot'
| def test_disable(self):
| rc_update_mock = MagicMock(return_value=0)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name'))
rc_update_mock.assert_called_once_with('rc-update delete name', python_shell=False)
rc_update_mock.reset_mock()
service_name = 'name'
runlevels = ['l1']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels='l1'))
rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
rc_update_mock.reset_mock()
runlevels = ['l1']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels=['l1']))
rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
rc_update_mock.reset_mock()
runlevels = ['l1', 'l2']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels=['l1']))
rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
rc_update_mock.reset_mock()
runlevels = ['l2']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels=['l1']))
self.assertTrue((rc_update_mock.call_count == 0))
rc_update_mock.reset_mock()
runlevels = ['l1', 'l2', 'l3']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels=['l1', 'l3']))
rc_update_mock.assert_called_once_with('rc-update delete name l1 l3', python_shell=False)
rc_update_mock.reset_mock()
rc_update_mock = MagicMock(return_value=1)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.disable('name'))
rc_update_mock.assert_called_once_with('rc-update delete name', python_shell=False)
rc_update_mock.reset_mock()
runlevels = ['l1']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.disable('name', runlevels='l1'))
rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
rc_update_mock.reset_mock()
runlevels = ['l1', 'l2', 'l3']
level_list_mock = MagicMock(return_value=self.__services({service_name: runlevels}))
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.disable('name', runlevels=['l1', 'l3']))
rc_update_mock.assert_called_once_with('rc-update delete name l1 l3', python_shell=False)
rc_update_mock.reset_mock()
|
'Test for Return True if the named service is enabled, false otherwise'
| def test_enabled(self):
| mock = MagicMock(return_value={'name': ['default']})
with patch.object(gentoo_service, 'get_enabled', mock):
self.assertTrue(gentoo_service.enabled('name'))
self.assertTrue(gentoo_service.enabled('name', runlevels='default'))
self.assertFalse(gentoo_service.enabled('name', runlevels='boot'))
mock = MagicMock(return_value={'name': ['boot', 'default']})
with patch.object(gentoo_service, 'get_enabled', mock):
self.assertTrue(gentoo_service.enabled('name'))
self.assertTrue(gentoo_service.enabled('name', runlevels='default'))
self.assertTrue(gentoo_service.enabled('name', runlevels=['boot', 'default']))
self.assertFalse(gentoo_service.enabled('name', runlevels='some-other-level'))
self.assertFalse(gentoo_service.enabled('name', runlevels=['boot', 'some-other-level']))
|
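A small sketch of the runlevel check the enabled() assertions above imply: the service only counts as enabled when every requested runlevel is among the runlevels it is actually added to. The helper below is an illustration inferred from the expected True/False values, not the gentoo_service implementation.

def _enabled_in(requested, actual):
    # Accept either a single runlevel string or a list of runlevels.
    if isinstance(requested, str):
        requested = [requested]
    return set(requested).issubset(actual)

assert _enabled_in('default', ['boot', 'default'])
assert _enabled_in(['boot', 'default'], ['boot', 'default'])
assert not _enabled_in(['boot', 'some-other-level'], ['boot', 'default'])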
'Test that disabled() returns True if the named service is disabled, False otherwise'
| def test_disabled(self):
| mock = MagicMock(return_value=['name'])
with patch.object(gentoo_service, 'get_disabled', mock):
self.assertTrue(gentoo_service.disabled('name'))
|
'Test to return server version from varnishd -V'
| def test_version(self):
| with patch.dict(varnish.__salt__, {'cmd.run': MagicMock(return_value='(varnish-2.0)')}):
self.assertEqual(varnish.version(), '2.0')
|
'Test to add ban to the varnish cache'
| def test_ban(self):
| with patch.object(varnish, '_run_varnishadm', return_value={'retcode': 0}):
self.assertTrue(varnish.ban('ban_expression'))
|
'Test to list varnish cache current bans'
| def test_ban_list(self):
| with patch.object(varnish, '_run_varnishadm', return_value={'retcode': True}):
self.assertFalse(varnish.ban_list())
with patch.object(varnish, '_run_varnishadm', return_value={'retcode': False, 'stdout': 'A\nB\nC'}):
self.assertEqual(varnish.ban_list(), ['B', 'C'])
|
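A one-line sketch of the parsing behaviour the second ban_list assertion above implies: the first line of the 'ban.list' output (a header) is dropped and the remaining lines are returned. This is inferred from the expected ['B', 'C'] result only, not taken from the varnish module.

stdout = 'A\nB\nC'             # mocked varnishadm ban.list output from the test
bans = stdout.split('\n')[1:]  # drop the header line
assert bans == ['B', 'C']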
'Test to purge the varnish cache'
| def test_purge(self):
| with patch.object(varnish, 'ban', return_value=True):
self.assertTrue(varnish.purge())
|
'Test to set a param in varnish cache'
| def test_param_set(self):
| with patch.object(varnish, '_run_varnishadm', return_value={'retcode': 0}):
self.assertTrue(varnish.param_set('param', 'value'))
|
'Test to show params of varnish cache'
| def test_param_show(self):
| with patch.object(varnish, '_run_varnishadm', return_value={'retcode': True, 'stdout': 'A\nB\nC'}):
self.assertFalse(varnish.param_show('param'))
with patch.object(varnish, '_run_varnishadm', return_value={'retcode': False, 'stdout': 'A .1\nB .2\n'}):
self.assertEqual(varnish.param_show('param'), {'A': '.1'})
|
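A short sketch of what the second param_show assertion above implies: when a specific parameter is requested, only the first line of the 'param.show' output ends up in the result dictionary. Again this is inferred from the expected {'A': '.1'} value, not copied from the varnish module.

stdout = 'A .1\nB .2\n'        # mocked varnishadm output from the test
name, value = stdout.splitlines()[0].split(None, 1)
assert {name: value} == {'A': '.1'}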
'Test if it returns information on a directory located on the Moose'
| def test_dirinfo(self):
| mock = MagicMock(return_value={'stdout': 'Salt:salt'})
with patch.dict(moosefs.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(moosefs.dirinfo('/tmp/salt'), {'Salt': 'salt'})
|
'Test if it returns information on a file located on the Moose'
| def test_fileinfo(self):
| mock = MagicMock(return_value={'stdout': ''})
with patch.dict(moosefs.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(moosefs.fileinfo('/tmp/salt'), {})
|
'Test if it returns a list of current MooseFS mounts'
| def test_mounts(self):
| mock = MagicMock(return_value={'stdout': ''})
with patch.dict(moosefs.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(moosefs.mounts(), {})
|
'Test if it returns goal(s) for a file or directory'
| def test_getgoal(self):
| mock = MagicMock(return_value={'stdout': 'Salt: salt'})
with patch.dict(moosefs.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(moosefs.getgoal('/tmp/salt'), {'goal': 'salt'})
|
'Test packages listing.
:return:'
| def test_list_pkgs(self):
| def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = ['python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471', 'alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475', 'gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477', 'rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477', 'pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478', 'yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479', 'lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479', 'qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480', 'ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480', 'shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481', 'util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484', 'openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485', 'virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486']
with patch.dict(yumpkg.__grains__, {'osarch': 'x86_64'}):
with patch.dict(yumpkg.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}):
with patch.dict(yumpkg.__salt__, {'pkg_resource.add_pkg': _add_data}):
with patch.dict(yumpkg.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}):
with patch.dict(yumpkg.__salt__, {'pkg_resource.stringify': MagicMock()}):
pkgs = yumpkg.list_pkgs(versions_as_list=True)
for (pkg_name, pkg_version) in {'python-urlgrabber': '3.10-8.el7', 'alsa-lib': '1.1.1-1.el7', 'gnupg2': '2.0.22-4.el7', 'rpm-python': '4.11.3-21.el7', 'pygpgme': '0.3-9.el7', 'yum': '3.4.3-150.el7.centos', 'lzo': '2.06-8.el7', 'qrencode-libs': '3.4.1-3.el7', 'ustr': '1.0.4-16.el7', 'shadow-utils': '2:4.1.5.1-24.el7', 'util-linux': '2.23.2-33.el7', 'openssh': '6.6.1p1-33.el7_3', 'virt-what': '1.13-8.el7'}.items():
self.assertTrue(pkgs.get(pkg_name))
self.assertEqual(pkgs[pkg_name], [pkg_version])
|
'Test packages listing with the attr parameter
:return:'
| def test_list_pkgs_with_attr(self):
| def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = ['python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471', 'alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475', 'gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477', 'rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477', 'pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478', 'yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479', 'lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479', 'qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480', 'ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480', 'shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481', 'util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484', 'openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485', 'virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486']
with patch.dict(yumpkg.__grains__, {'osarch': 'x86_64'}):
with patch.dict(yumpkg.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}):
with patch.dict(yumpkg.__salt__, {'pkg_resource.add_pkg': _add_data}):
with patch.dict(yumpkg.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}):
with patch.dict(yumpkg.__salt__, {'pkg_resource.stringify': MagicMock()}):
pkgs = yumpkg.list_pkgs(attr=['arch', 'install_date_time_t'])
for (pkg_name, pkg_attr) in {'python-urlgrabber': {'version': '3.10-8.el7', 'arch': 'noarch', 'install_date_time_t': 1487838471}, 'alsa-lib': {'version': '1.1.1-1.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838475}, 'gnupg2': {'version': '2.0.22-4.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838477}, 'rpm-python': {'version': '4.11.3-21.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838477}, 'pygpgme': {'version': '0.3-9.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838478}, 'yum': {'version': '3.4.3-150.el7.centos', 'arch': 'noarch', 'install_date_time_t': 1487838479}, 'lzo': {'version': '2.06-8.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838479}, 'qrencode-libs': {'version': '3.4.1-3.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838480}, 'ustr': {'version': '1.0.4-16.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838480}, 'shadow-utils': {'version': '2:4.1.5.1-24.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838481}, 'util-linux': {'version': '2.23.2-33.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838484}, 'openssh': {'version': '6.6.1p1-33.el7_3', 'arch': 'x86_64', 'install_date_time_t': 1487838485}, 'virt-what': {'version': '1.13-8.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838486}}.items():
self.assertTrue(pkgs.get(pkg_name))
self.assertEqual(pkgs[pkg_name], [pkg_attr])
|
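The rpm_out fixture lines above use '_|-' as a field separator. The sketch below (an illustration, not Salt's actual yumpkg/pkg_resource code) shows how one of those lines decomposes into the name, epoch, version/release and install-time values asserted in both tests; the sixth, always-'(none)' field is not documented here and is simply ignored.

line = 'shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481'
name, epoch, version, release, arch, _unused, install_time = line.split('_|-')

# Epoch '(none)' (or '0') is dropped; a real epoch is prefixed as 'epoch:'.
full_version = '{0}-{1}'.format(version, release)
if epoch not in ('(none)', '0'):
    full_version = '{0}:{1}'.format(epoch, full_version)

assert name == 'shadow-utils'
assert full_version == '2:4.1.5.1-24.el7'      # matches the expected value in both tests
assert int(install_time) == 1487838481         # the install_date_time_t asserted above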
'Tests checking iot thing type existence when the iot thing type already exists'
| def test_that_when_checking_if_a_thing_type_exists_and_a_thing_type_exists_the_thing_type_exists_method_returns_true(self):
| self.conn.describe_thing_type.return_value = thing_type_ret
result = boto_iot.thing_type_exists(thingTypeName=thing_type_name, **conn_parameters)
self.assertTrue(result['exists'])
|
'Tests checking iot thing type existence when the iot thing type does not exist'
| def test_that_when_checking_if_a_thing_type_exists_and_a_thing_type_does_not_exist_the_thing_type_exists_method_returns_false(self):
| self.conn.describe_thing_type.side_effect = not_found_error
result = boto_iot.thing_type_exists(thingTypeName='non existent thing type', **conn_parameters)
self.assertFalse(result['exists'])
|
'Tests checking iot thing type existence when boto returns an error'
| def test_that_when_checking_if_a_thing_type_exists_and_boto3_returns_an_error_the_thing_type_exists_method_returns_error(self):
| self.conn.describe_thing_type.side_effect = ClientError(error_content, 'describe_thing_type')
result = boto_iot.thing_type_exists(thingTypeName='mythingtype', **conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('describe_thing_type'))
|
'Tests describe thing type for an existing thing type'
| def test_that_when_describing_thing_type_and_thing_type_exists_the_describe_thing_type_method_returns_thing_type(self):
| self.conn.describe_thing_type.return_value = thing_type_ret
result = boto_iot.describe_thing_type(thingTypeName=thing_type_name, **conn_parameters)
self.assertEqual(result.get('thing_type'), thing_type_ret)
|
'Tests describe thing type for a non-existent thing type'
| def test_that_when_describing_thing_type_and_thing_type_does_not_exists_the_describe_thing_type_method_returns_none(self):
| self.conn.describe_thing_type.side_effect = not_found_error
result = boto_iot.describe_thing_type(thingTypeName='non existent thing type', **conn_parameters)
self.assertEqual(result.get('thing_type'), None)
|
'Tests that True is returned when the thing type is created'
| def test_that_when_creating_a_thing_type_succeeds_the_create_thing_type_method_returns_true(self):
| self.conn.create_thing_type.return_value = create_thing_type_ret
result = boto_iot.create_thing_type(thingTypeName=thing_type_name, thingTypeDescription=thing_type_desc, searchableAttributesList=[thing_type_attr_1], **conn_parameters)
self.assertTrue(result['created'])
self.assertEqual(result['thingTypeArn'], thing_type_arn)
|