code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M)
---|---|---|---|---|---|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'fbcode_builder steps to build Facebook Thrift'
import specs.fbthrift as fbthrift
def fbcode_builder_spec(builder):
return {
'depends_on': [fbthrift],
}
config = {
'github_project': 'facebook/fbthrift',
'fbcode_builder_spec': fbcode_builder_spec,
}
| getyourguide/fbthrift | build/fbcode_builder_config.py | Python | apache-2.0 | 449 |
def emptyLayout(layout):
for i in reversed(range(layout.count())):
layout.itemAt(i).widget().setParent(None)
| fireeye/flare-wmi | python-cim/samples/ui/uicommon.py | Python | apache-2.0 | 121 |
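A minimal usage sketch for the `emptyLayout` helper above. The sample does not name its Qt binding, so PyQt5 is assumed here, and the widget labels are invented for illustration.
```python
import sys
from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QWidget

def emptyLayout(layout):
    # Same helper as above: detach every child widget so the layout empties.
    for i in reversed(range(layout.count())):
        layout.itemAt(i).widget().setParent(None)

app = QApplication(sys.argv)
window = QWidget()
layout = QVBoxLayout(window)
for label in ("one", "two", "three"):
    layout.addWidget(QPushButton(label))
assert layout.count() == 3
emptyLayout(layout)
assert layout.count() == 0
```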
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test.utils import override_settings
from sis_provisioner.tests import (
fdao_pws_override, fdao_hrp_override, fdao_bridge_override)
from sis_provisioner.tests.account_managers import set_uw_account
user_file_name_override = override_settings(
BRIDGE_IMPORT_USER_FILENAME="users")
def set_db_records():
affiemp = set_uw_account("affiemp")
javerage = set_uw_account("javerage")
ellen = set_uw_account("ellen")
staff = set_uw_account("staff")
staff.set_disable()
retiree = set_uw_account("retiree")
tyler = set_uw_account("faculty")
leftuw = set_uw_account("leftuw")
leftuw.set_terminate_date()
testid = set_uw_account("testid")
| uw-it-aca/bridge-sis-provisioner | sis_provisioner/tests/csv/__init__.py | Python | apache-2.0 | 784 |
import eventlet
import gettext
import sys
from staccato.common import config
import staccato.openstack.common.wsgi as os_wsgi
import staccato.openstack.common.pastedeploy as os_pastedeploy
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
gettext.install('staccato', unicode=1)
def fail(returncode, e):
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(returncode)
def main():
try:
conf = config.get_config_object()
paste_file = conf.find_file(conf.paste_deploy.config_file)
wsgi_app = os_pastedeploy.paste_deploy_app(paste_file,
'staccato-api',
conf)
server = os_wsgi.Service(wsgi_app, conf.bind_port)
server.start()
server.wait()
except RuntimeError as e:
fail(1, e)
main()
| buzztroll/staccato | staccato/cmd/api.py | Python | apache-2.0 | 899 |
#!/usr/bin/env python
# Use Netmiko to execute 'show arp' on pynet-rtr1, pynet-rtr2, and juniper-srx.
from netmiko import ConnectHandler
def main():
# Definition of routers
rtr1 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': '88newclass',
}
rtr2 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': '88newclass',
'port': 8022,
}
srx = {
'device_type': 'juniper',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': '88newclass',
'port': 9822,
}
# Create a list of all the routers.
all_routers = [rtr1, rtr2, srx]
# Loop through all the routers and show arp.
for a_router in all_routers:
net_connect = ConnectHandler(**a_router)
output = net_connect.send_command("show arp")
print "\n\n>>>>>>>>> Device {0} <<<<<<<<<".format(a_router['device_type'])
print output
print ">>>>>>>>> End <<<<<<<<<"
if __name__ == "__main__":
main()
| dprzybyla/python-ansible | week4/netmiko_sh_arp.py | Python | apache-2.0 | 1,128 |
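The script above is Python 2 (bare `print` statements) and never closes its SSH sessions. A hedged Python 3 sketch of the same loop follows, adding an explicit `disconnect()`; the device dictionaries would be the same lab values used above, not verified endpoints.
```python
from netmiko import ConnectHandler

def show_arp(devices):
    """Run 'show arp' on each device dictionary and print the result."""
    for device in devices:
        net_connect = ConnectHandler(**device)
        try:
            output = net_connect.send_command("show arp")
            print("\n>>>>>>>>> Device {0} <<<<<<<<<".format(device['device_type']))
            print(output)
            print(">>>>>>>>> End <<<<<<<<<")
        finally:
            net_connect.disconnect()  # close the SSH session explicitly
```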
""" IO classes for Omnivor input file
Copyright (C) 2013 DTU Wind Energy
Author: Emmanuel Branlard
Email: [email protected]
Last revision: 25/11/2013
Namelist IO: basic functions to read and parse a Fortran namelist file into a Python dictionary and write it back to a file
The parser was adapted from: fortran-namelist on code.google with the following info:
__author__ = 'Stephane Chamberland ([email protected])'
__version__ = '$Revision: 1.0 $'[11:-2]
__date__ = '$Date: 2006/09/05 21:16:24 $'
__copyright__ = 'Copyright (c) 2006 RPN'
__license__ = 'LGPL'
Recognizes files of the form:
&namelistname
opt1 = value1
...
/
"""
from __future__ import print_function
from we_file_io import WEFileIO, TestWEFileIO
import unittest
import numpy as np
import os.path as path
import sys
import re
import tempfile
import os
__author__ = 'E. Branlard '
class FortranNamelistIO(WEFileIO):
"""
Fortran Namelist IO class
Scan a Fortran Namelist file and put Section/Parameters into a dictionary
Write the file back if needed.
"""
def _write(self):
""" Write a file (overrided)
"""
with open(self.filename, 'w') as f:
for nml in self.data :
f.write('&'+nml+'\n')
# Sorting dictionary data (in the same order as it was created, thanks to id)
SortedList = sorted(self.data[nml].items(), key=lambda(k, v): v['id'])
# for param in self.data[nml]:
for param in map(lambda(k,v):k,SortedList):
f.write(param+'='+','.join(self.data[nml][param]['val']))
if len(self.data[nml][param]['com']) >0:
f.write(' !'+self.data[nml][param]['com'])
f.write('\n')
f.write('/\n')
def _read(self):
""" Read the file (overrided)
"""
with open(self.filename, 'r') as f:
data = f.read()
varname = r'\b[a-zA-Z][a-zA-Z0-9_]*\b'
valueInt = re.compile(r'[+-]?[0-9]+')
valueReal = re.compile(r'[+-]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)')
valueNumber = re.compile(r'\b(([\+\-]?[0-9]+)?\.)?[0-9]*([eE][-+]?[0-9]+)?')
valueBool = re.compile(r"(\.(true|false|t|f)\.)",re.I)
valueTrue = re.compile(r"(\.(true|t)\.)",re.I)
spaces = r'[\s\t]*'
quote = re.compile(r"[\s\t]*[\'\"]")
namelistname = re.compile(r"^[\s\t]*&(" + varname + r")[\s\t]*$")
paramname = re.compile(r"[\s\t]*(" + varname+r')[\s\t]*=[\s\t]*')
namlistend = re.compile(r"^" + spaces + r"/" + spaces + r"$")
#split sections/namelists
mynmlfile = {}
mynmlfileRaw = {}
mynmlname = ''
for item in FortranNamelistIO.clean(data.split("\n"),cleancomma=1):
if re.match(namelistname,item):
mynmlname = re.sub(namelistname,r"\1",item)
mynmlfile[mynmlname] = {}
mynmlfileRaw[mynmlname] = []
elif re.match(namlistend,item):
mynmlname = ''
else:
if mynmlname:
mynmlfileRaw[mynmlname].append(item)
#parse param in each section/namelist
for mynmlname in mynmlfile.keys():
#split strings
bb = []
for item in mynmlfileRaw[mynmlname]:
if item[0]!='!':
# discarding lines that starts with a comment
bb.extend(FortranNamelistIO.splitstring(item))
#split comma and =
aa = []
for item in bb:
if not re.match(quote,item):
aa.extend(re.sub(r"[\s\t]*=",r" =\n",re.sub(r",+",r"\n",item)).split("\n"))
# aa.extend(re.sub(r"[\s\t]*=",r" =\n",item).split("\n"))
else:
aa.append(item)
del(bb)
aa = FortranNamelistIO.clean(aa,cleancomma=1)
myparname = ''
id_cum=0
for item in aa:
if re.search(paramname,item):
#myparname = re.sub(paramname,r"\1",item).lower() ! NO MORE LOWER CASE
myparname = re.sub(paramname,r"\1",item)
id_cum=id_cum+1
mynmlfile[mynmlname][myparname] = {
'val' : [],
'id' : id_cum,
'com' : ''
}
elif paramname:
# Storing comments
item2=item.split('!')
item=item2[0]
if len(item2) > 1 :  # a '!' was present, so store the comment part
mynmlfile[mynmlname][myparname]['com']=''.join(item2[1:])
if re.match(valueBool,item):
if re.match(valueTrue,item):
mynmlfile[mynmlname][myparname]['val'].append('.true.')
else:
mynmlfile[mynmlname][myparname]['val'].append('.false.')
else:
# item2=re.sub(r"(^[\'\"]|[\'\"]$)",r"",item.strip())
mynmlfile[mynmlname][myparname]['val'].append(item.strip())
self.data=mynmlfile
# Accessor and mutator dictionary style
def __getitem__(self, key):
""" Transform the class instance into a dictionary."""
return self.data[key]
def __setitem__(self, key, value):
""" Transform the class instance into a dictionary."""
self.data[key] = value
#==== Helper functions for Parsing of files
@staticmethod
def clean(mystringlist,commentexpr=r"^[\s\t]*\#.*$",spacemerge=0,cleancomma=0):
"""
Remove leading and trailing blanks, comments/empty lines from a list of strings
mystringlist = foo.clean(mystringlist, commentexpr=r"^[\s\t]*\#.*$", spacemerge=0, cleancomma=0)
commentexpr: regular expression matching comment lines to strip
spacemerge: if <>0, merge/collapse multi space
cleancomma: Remove leading and trailing commas
"""
aa = mystringlist
if cleancomma:
aa = [re.sub("(^([\s\t]*\,)+)|((\,[\s\t]*)+$)","",item).strip() for item in aa]
if commentexpr:
aa = [re.sub(commentexpr,"",item).strip() for item in aa]
if spacemerge:
aa = [re.sub("[\s\t]+"," ",item).strip() for item in aa if len(item.strip()) <> 0]
else:
aa = [item.strip() for item in aa if len(item.strip()) <> 0]
return aa
@staticmethod
def splitstring(mystr):
"""
Split a string in a list of strings at quote boundaries
Input: String
Output: list of strings
"""
dquote=r'(^[^\"\']*)(\"[^"]*\")(.*)$'
squote=r"(^[^\"\']*)(\'[^']*\')(.*$)"
mystrarr = re.sub(dquote,r"\1\n\2\n\3",re.sub(squote,r"\1\n\2\n\3",mystr)).split("\n")
#remove zero-length items
mystrarr = [item for item in mystrarr if len(item) <> 0]
if len(mystrarr) > 1:
mystrarr2 = []
for item in mystrarr:
mystrarr2.extend(FortranNamelistIO.splitstring(item))
mystrarr = mystrarr2
return mystrarr
## Do Some testing -------------------------------------------------------
class TestFortranNamelist(TestWEFileIO):
""" Test class for MyFileType class """
test_file = './test/fortran/fortran_namelist.nml'
def test_output_identical(self):
InputFile=FortranNamelistIO(self.test_file)
test_fileout=tempfile.mkstemp()[1]
InputFile.write(test_fileout)
with open(self.test_file, 'r') as f:
data_expected = f.read()
with open(test_fileout, 'r') as f:
data_read = f.read()
try:
self.assertMultiLineEqual(data_read, data_expected)
finally:
os.remove(test_fileout)
def test_duplication(self):
self._test_duplication(FortranNamelistIO, self.test_file)
## Main function ---------------------------------------------------------
if __name__ == '__main__':
""" This is the main fuction that will run the tests automatically
"""
unittest.main()
| DTUWindEnergy/Python4WindEnergy | py4we/fortran_namelist_io.py | Python | apache-2.0 | 8,294 |
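A hedged usage sketch for `FortranNamelistIO` above. The namelist content and file names are invented, and the class as written is Python 2 only (`lambda (k, v)` tuple unpacking, the `<>` operator), so a Python 2 interpreter is assumed; the nested `{'val': [...], 'id': ..., 'com': ...}` layout follows `_read`.
```python
# Hypothetical input file 'wind.nml':
#   &turbine
#   n_blades = 3   ! rotor blade count
#   radius = 63.0
#   /
nml = FortranNamelistIO('wind.nml')        # parsing happens on construction
print(nml['turbine']['n_blades']['val'])   # e.g. ['3']
print(nml['turbine']['radius']['val'])     # e.g. ['63.0']
nml['turbine']['n_blades']['val'] = ['2']  # dict-style access via __getitem__/__setitem__
nml.write('wind_out.nml')                  # re-emits the '&turbine ... /' block
```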
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
IPMI power manager driver.
Uses the 'ipmitool' command (http://ipmitool.sourceforge.net/) to remotely
manage hardware. This includes setting the boot device, getting a
serial-over-LAN console, and controlling the power state of the machine.
NOTE THAT CERTAIN DISTROS MAY INSTALL openipmi BY DEFAULT, INSTEAD OF ipmitool,
WHICH PROVIDES DIFFERENT COMMAND-LINE OPTIONS AND *IS NOT SUPPORTED* BY THIS
DRIVER.
"""
import contextlib
import os
import re
import subprocess
import tempfile
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
CONF = cfg.CONF
CONF.import_opt('retry_timeout',
'ironic.drivers.modules.ipminative',
group='ipmi')
CONF.import_opt('min_command_interval',
'ironic.drivers.modules.ipminative',
group='ipmi')
LOG = logging.getLogger(__name__)
VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER']
VALID_PROTO_VERSIONS = ('2.0', '1.5')
REQUIRED_PROPERTIES = {
'ipmi_address': _("IP address or hostname of the node. Required.")
}
OPTIONAL_PROPERTIES = {
'ipmi_password': _("password. Optional."),
'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of "
"%s. Optional.") % ', '.join(VALID_PRIV_LEVELS),
'ipmi_username': _("username; default is NULL user. Optional."),
'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
"\"dual\", \"no\". Optional."),
'ipmi_transit_channel': _("transit channel for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_transit_address': _("transit address for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_target_channel': _("destination channel for bridged request. "
"Required only if ipmi_bridging is set to "
"\"single\" or \"dual\"."),
'ipmi_target_address': _("destination address for bridged request. "
"Required only if ipmi_bridging is set "
"to \"single\" or \"dual\"."),
'ipmi_local_address': _("local IPMB address for bridged requests. "
"Used only if ipmi_bridging is set "
"to \"single\" or \"dual\". Optional."),
'ipmi_protocol_version': _('the version of the IPMI protocol; default '
'is "2.0". One of "1.5", "2.0". Optional.'),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
BRIDGING_OPTIONS = [('local_address', '-m'),
('transit_channel', '-B'), ('transit_address', '-T'),
('target_channel', '-b'), ('target_address', '-t')]
LAST_CMD_TIME = {}
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None
TMP_DIR_CHECKED = None
ipmitool_command_options = {
'timing': ['ipmitool', '-N', '0', '-R', '0', '-h'],
'single_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0', '-h'],
'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
'-B', '0', '-T', '0', '-h']}
# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
# and is substituted in return for the error code received from the IPMI
# controller. As of 1.8.15, no internationalization support appears to
# be in ipmitool which means the string should always be returned in this
# form regardless of locale.
IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']
def _check_option_support(options):
"""Checks if the specific ipmitool options are supported on host.
This method updates the module-level variables indicating whether
an option is supported so that it is accessible by any driver
interface class in this module. It is intended to be called from
the __init__ method of such classes only.
:param options: list of ipmitool options to be checked
:raises: OSError
"""
for opt in options:
if _is_option_supported(opt) is None:
try:
cmd = ipmitool_command_options[opt]
# NOTE(cinerama): use subprocess.check_call to
# check options & suppress ipmitool output to
# avoid alarming people
with open(os.devnull, 'wb') as nullfile:
subprocess.check_call(cmd, stdout=nullfile,
stderr=nullfile)
except subprocess.CalledProcessError:
LOG.info(_LI("Option %(opt)s is not supported by ipmitool"),
{'opt': opt})
_is_option_supported(opt, False)
else:
LOG.info(_LI("Option %(opt)s is supported by ipmitool"),
{'opt': opt})
_is_option_supported(opt, True)
def _is_option_supported(option, is_supported=None):
"""Indicates whether the particular ipmitool option is supported.
:param option: specific ipmitool option
:param is_supported: Optional Boolean. when specified, this value
is assigned to the module-level variable indicating
whether the option is supported. Used only if a value
is not already assigned.
:returns: True, indicates the option is supported
:returns: False, indicates the option is not supported
:returns: None, indicates that it is not aware whether the option
is supported
"""
global SINGLE_BRIDGE_SUPPORT
global DUAL_BRIDGE_SUPPORT
global TIMING_SUPPORT
if option == 'single_bridge':
if (SINGLE_BRIDGE_SUPPORT is None) and (is_supported is not None):
SINGLE_BRIDGE_SUPPORT = is_supported
return SINGLE_BRIDGE_SUPPORT
elif option == 'dual_bridge':
if (DUAL_BRIDGE_SUPPORT is None) and (is_supported is not None):
DUAL_BRIDGE_SUPPORT = is_supported
return DUAL_BRIDGE_SUPPORT
elif option == 'timing':
if (TIMING_SUPPORT is None) and (is_supported is not None):
TIMING_SUPPORT = is_supported
return TIMING_SUPPORT
def _console_pwfile_path(uuid):
"""Return the file path for storing the ipmi password for a console."""
file_name = "%(uuid)s.pw" % {'uuid': uuid}
return os.path.join(CONF.tempdir, file_name)
@contextlib.contextmanager
def _make_password_file(password):
"""Makes a temporary file that contains the password.
:param password: the password
:returns: the absolute pathname of the temporary file
:raises: PasswordFileFailedToCreate from creating or writing to the
temporary file
"""
f = None
try:
f = tempfile.NamedTemporaryFile(mode='w', dir=CONF.tempdir)
f.write(str(password))
f.flush()
except (IOError, OSError) as exc:
if f is not None:
f.close()
raise exception.PasswordFileFailedToCreate(error=exc)
except Exception:
with excutils.save_and_reraise_exception():
if f is not None:
f.close()
try:
# NOTE(jlvillal): This yield can not be in the try/except block above
# because an exception by the caller of this function would then get
# changed to a PasswordFileFailedToCreate exception which would mislead
# about the problem and its cause.
yield f.name
finally:
if f is not None:
f.close()
def _parse_driver_info(node):
"""Gets the parameters required for ipmitool to access the node.
:param node: the Node of interest.
:returns: dictionary of parameters.
:raises: InvalidParameterValue when an invalid value is specified
:raises: MissingParameterValue when a required ipmi parameter is missing.
"""
info = node.driver_info or {}
bridging_types = ['single', 'dual']
missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
"Missing the following IPMI credentials in node's"
" driver_info: %s.") % missing_info)
address = info.get('ipmi_address')
username = info.get('ipmi_username')
password = info.get('ipmi_password')
port = info.get('ipmi_terminal_port')
priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
bridging_type = info.get('ipmi_bridging', 'no')
local_address = info.get('ipmi_local_address')
transit_channel = info.get('ipmi_transit_channel')
transit_address = info.get('ipmi_transit_address')
target_channel = info.get('ipmi_target_channel')
target_address = info.get('ipmi_target_address')
protocol_version = str(info.get('ipmi_protocol_version', '2.0'))
if protocol_version not in VALID_PROTO_VERSIONS:
valid_versions = ', '.join(VALID_PROTO_VERSIONS)
raise exception.InvalidParameterValue(_(
"Invalid IPMI protocol version value %(version)s, the valid "
"value can be one of %(valid_versions)s") %
{'version': protocol_version, 'valid_versions': valid_versions})
if port:
try:
port = int(port)
except ValueError:
raise exception.InvalidParameterValue(_(
"IPMI terminal port is not an integer."))
# check if ipmi_bridging has proper value
if bridging_type == 'no':
# if bridging is not selected, then set all bridging params to None
(local_address, transit_channel, transit_address, target_channel,
target_address) = (None,) * 5
elif bridging_type in bridging_types:
# check if the particular bridging option is supported on host
if not _is_option_supported('%s_bridge' % bridging_type):
raise exception.InvalidParameterValue(_(
"Value for ipmi_bridging is provided as %s, but IPMI "
"bridging is not supported by the IPMI utility installed "
"on host. Ensure ipmitool version is > 1.8.11"
) % bridging_type)
# ensure that all the required parameters are provided
params_undefined = [param for param, value in [
("ipmi_target_channel", target_channel),
('ipmi_target_address', target_address)] if value is None]
if bridging_type == 'dual':
params_undefined2 = [param for param, value in [
("ipmi_transit_channel", transit_channel),
('ipmi_transit_address', transit_address)
] if value is None]
params_undefined.extend(params_undefined2)
else:
# if single bridging was selected, set dual bridge params to None
transit_channel = transit_address = None
# If the required parameters were not provided,
# raise an exception
if params_undefined:
raise exception.MissingParameterValue(_(
"%(param)s not provided") % {'param': params_undefined})
else:
raise exception.InvalidParameterValue(_(
"Invalid value for ipmi_bridging: %(bridging_type)s,"
" the valid value can be one of: %(bridging_types)s"
) % {'bridging_type': bridging_type,
'bridging_types': bridging_types + ['no']})
if priv_level not in VALID_PRIV_LEVELS:
valid_priv_lvls = ', '.join(VALID_PRIV_LEVELS)
raise exception.InvalidParameterValue(_(
"Invalid privilege level value:%(priv_level)s, the valid value"
" can be one of %(valid_levels)s") %
{'priv_level': priv_level, 'valid_levels': valid_priv_lvls})
return {
'address': address,
'username': username,
'password': password,
'port': port,
'uuid': node.uuid,
'priv_level': priv_level,
'local_address': local_address,
'transit_channel': transit_channel,
'transit_address': transit_address,
'target_channel': target_channel,
'target_address': target_address,
'protocol_version': protocol_version,
}
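# Illustration (not part of the driver): a minimal node.driver_info and the
# dict _parse_driver_info() builds from it. All values are invented examples.
#
#   node.driver_info = {'ipmi_address': '10.0.0.5',
#                       'ipmi_username': 'admin',
#                       'ipmi_password': 'secret'}
#
#   _parse_driver_info(node) ->
#       {'address': '10.0.0.5', 'username': 'admin', 'password': 'secret',
#        'port': None, 'uuid': node.uuid, 'priv_level': 'ADMINISTRATOR',
#        'local_address': None, 'transit_channel': None,
#        'transit_address': None, 'target_channel': None,
#        'target_address': None, 'protocol_version': '2.0'}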
def _exec_ipmitool(driver_info, command):
"""Execute the ipmitool command.
:param driver_info: the ipmitool parameters for accessing a node.
:param command: the ipmitool command to be executed.
:returns: (stdout, stderr) from executing the command.
:raises: PasswordFileFailedToCreate from creating or writing to the
temporary file.
:raises: processutils.ProcessExecutionError from executing the command.
"""
ipmi_version = ('lanplus'
if driver_info['protocol_version'] == '2.0'
else 'lan')
args = ['ipmitool',
'-I',
ipmi_version,
'-H',
driver_info['address'],
'-L', driver_info['priv_level']
]
if driver_info['username']:
args.append('-U')
args.append(driver_info['username'])
for name, option in BRIDGING_OPTIONS:
if driver_info[name] is not None:
args.append(option)
args.append(driver_info[name])
# specify retry timing more precisely, if supported
num_tries = max(
(CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
if _is_option_supported('timing'):
args.append('-R')
args.append(str(num_tries))
args.append('-N')
args.append(str(CONF.ipmi.min_command_interval))
end_time = (time.time() + CONF.ipmi.retry_timeout)
while True:
num_tries = num_tries - 1
# NOTE(deva): ensure that no communications are sent to a BMC more
# often than once every min_command_interval seconds.
time_till_next_poll = CONF.ipmi.min_command_interval - (
time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
if time_till_next_poll > 0:
time.sleep(time_till_next_poll)
# Reset the argument list on every attempt so that the password-file
# arguments appended in a previous iteration are not carried over.
cmd_args = args[:]
# 'ipmitool' command will prompt password if there is no '-f'
# option, we set it to '\0' to write a password file to support
# empty password
with _make_password_file(driver_info['password'] or '\0') as pw_file:
cmd_args.append('-f')
cmd_args.append(pw_file)
cmd_args.extend(command.split(" "))
try:
out, err = utils.execute(*cmd_args)
return out, err
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception() as ctxt:
err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
if x in e.args[0]]
if ((time.time() > end_time) or
(num_tries == 0) or
not err_list):
LOG.error(_LE('IPMI Error while attempting "%(cmd)s"'
'for node %(node)s. Error: %(error)s'), {
'node': driver_info['uuid'],
'cmd': e.cmd, 'error': e
})
else:
ctxt.reraise = False
LOG.warning(_LW('IPMI Error encountered, retrying '
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'), {
'node': driver_info['uuid'],
'cmd': e.cmd, 'error': e
})
finally:
LAST_CMD_TIME[driver_info['address']] = time.time()
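# Illustration (not part of the driver): for a node with ipmi_address
# 10.0.0.5, ipmi_username admin, the defaults above, and timing support
# detected, _exec_ipmitool(driver_info, "power status") runs roughly:
#
#   ipmitool -I lanplus -H 10.0.0.5 -L ADMINISTRATOR -U admin \
#            -R <num_tries> -N <min_command_interval> -f <temp pw file> power status
#
# The temporary password file is generated per call; its name is shown here
# only as a placeholder.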
def _sleep_time(iter):
"""Return the time-to-sleep for the n'th iteration of a retry loop.
The sleep time grows quadratically with the iteration number.
:param iter: iteration number
:returns: number of seconds to sleep
"""
if iter <= 1:
return 1
return iter ** 2
def _set_and_wait(target_state, driver_info):
"""Helper function for DynamicLoopingCall.
This method changes the power state and polls the BMC until the desired
power state is reached, or CONF.ipmi.retry_timeout would be exceeded by the
next iteration.
This method assumes the caller knows the current power state and does not
check it prior to changing the power state. Most BMCs should be fine, but
if a driver is concerned, the state should be checked prior to calling this
method.
:param target_state: desired power state
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states
"""
if target_state == states.POWER_ON:
state_name = "on"
elif target_state == states.POWER_OFF:
state_name = "off"
def _wait(mutable):
try:
# Only issue power change command once
if mutable['iter'] < 0:
_exec_ipmitool(driver_info, "power %s" % state_name)
else:
mutable['power'] = _power_status(driver_info)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError,
exception.IPMIFailure):
# Log failures but keep trying
LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
{'state': state_name, 'node': driver_info['uuid']})
finally:
mutable['iter'] += 1
if mutable['power'] == target_state:
raise loopingcall.LoopingCallDone()
sleep_time = _sleep_time(mutable['iter'])
if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
# Stop if the next loop would exceed maximum retry_timeout
LOG.error(_LE('IPMI power %(state)s timed out after '
'%(tries)s retries on node %(node_id)s.'),
{'state': state_name, 'tries': mutable['iter'],
'node_id': driver_info['uuid']})
mutable['power'] = states.ERROR
raise loopingcall.LoopingCallDone()
else:
mutable['total_time'] += sleep_time
return sleep_time
# Use mutable objects so the looped method can change them.
# Start 'iter' from -1 so that the first two checks are one second apart.
status = {'power': None, 'iter': -1, 'total_time': 0}
timer = loopingcall.DynamicLoopingCall(_wait, status)
timer.start().wait()
return status['power']
def _power_on(driver_info):
"""Turn the power ON for this node.
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states POWER_ON or ERROR.
:raises: IPMIFailure on an error from ipmitool (from _power_status call).
"""
return _set_and_wait(states.POWER_ON, driver_info)
def _power_off(driver_info):
"""Turn the power OFF for this node.
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states POWER_OFF or ERROR.
:raises: IPMIFailure on an error from ipmitool (from _power_status call).
"""
return _set_and_wait(states.POWER_OFF, driver_info)
def _power_status(driver_info):
"""Get the power status for a node.
:param driver_info: the ipmitool access parameters for a node.
:returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
:raises: IPMIFailure on an error from ipmitool.
"""
cmd = "power status"
try:
out_err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW("IPMI power status failed for node %(node_id)s with "
"error: %(error)s."),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.IPMIFailure(cmd=cmd)
if out_err[0] == "Chassis Power is on\n":
return states.POWER_ON
elif out_err[0] == "Chassis Power is off\n":
return states.POWER_OFF
else:
return states.ERROR
def _process_sensor(sensor_data):
sensor_data_fields = sensor_data.split('\n')
sensor_data_dict = {}
for field in sensor_data_fields:
if not field:
continue
kv_value = field.split(':')
if len(kv_value) != 2:
continue
sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
return sensor_data_dict
def _get_sensor_type(node, sensor_data_dict):
# There are only three sensor type name IDs: 'Sensor Type (Analog)',
# 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'
for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
'Sensor Type (Threshold)'):
try:
return sensor_data_dict[key].split(' ', 1)[0]
except KeyError:
continue
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, unknown sensor type"
" data: %(sensors_data)s"),
{'sensors_data': sensor_data_dict}))
def _parse_ipmi_sensors_data(node, sensors_data):
"""Parse the IPMI sensors data and format to the dict grouping by type.
We run 'ipmitool' command with 'sdr -v' options, which can return sensor
details in human-readable format, we need to format them to JSON string
dict-based data for Ceilometer Collector which can be sent it as payload
out via notification bus and consumed by Ceilometer Collector.
:param sensors_data: the sensor data returned by ipmitool command.
:returns: the sensor data with JSON format, grouped by sensor type.
:raises: FailedToParseSensorData when error encountered during parsing.
"""
sensors_data_dict = {}
if not sensors_data:
return sensors_data_dict
sensors_data_array = sensors_data.split('\n\n')
for sensor_data in sensors_data_array:
sensor_data_dict = _process_sensor(sensor_data)
if not sensor_data_dict:
continue
sensor_type = _get_sensor_type(node, sensor_data_dict)
# ignore the sensors which have no current 'Sensor Reading' data
if 'Sensor Reading' in sensor_data_dict:
sensors_data_dict.setdefault(
sensor_type,
{})[sensor_data_dict['Sensor ID']] = sensor_data_dict
# get nothing, no valid sensor data
if not sensors_data_dict:
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, get nothing with input"
" data: %(sensors_data)s")
% {'sensors_data': sensors_data}))
return sensors_data_dict
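# Illustration (not part of the driver): one invented 'sdr -v' block such as
#
#   Sensor ID              : Temp (0x1)
#   Sensor Type (Analog)   : Temperature
#   Sensor Reading         : 30 (+/- 1) degrees C
#
# is turned by _process_sensor() into
#   {'Sensor ID': 'Temp (0x1)', 'Sensor Type (Analog)': 'Temperature',
#    'Sensor Reading': '30 (+/- 1) degrees C'}
# and _parse_ipmi_sensors_data() files it under the 'Temperature' type,
# keyed by its 'Sensor ID'.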
@task_manager.require_exclusive_lock
def send_raw(task, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
node_uuid = task.node.uuid
LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
{'bytes': raw_bytes, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'raw %s' % raw_bytes
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "raw bytes" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def _check_temp_dir():
"""Check for Valid temp directory."""
global TMP_DIR_CHECKED
# because a temporary file is used to pass the password to ipmitool,
# we should check the directory
if TMP_DIR_CHECKED is None:
try:
utils.check_dir()
except (exception.PathNotFound,
exception.DirectoryNotWritable,
exception.InsufficientDiskSpace) as e:
with excutils.save_and_reraise_exception():
TMP_DIR_CHECKED = False
err_msg = (_("Ipmitool drivers need to be able to create "
"temporary files to pass password to ipmitool. "
"Encountered error: %s") % e)
e.message = err_msg
LOG.error(err_msg)
else:
TMP_DIR_CHECKED = True
class IPMIPower(base.PowerInterface):
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Validate driver_info for ipmitool driver.
Check that node['driver_info'] contains IPMI credentials.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if required ipmi parameters are missing.
:raises: MissingParameterValue if a required parameter is missing.
"""
_parse_driver_info(task.node)
# NOTE(deva): don't actually touch the BMC in validate because it is
# called too often, and BMCs are too fragile.
# This is a temporary measure to mitigate problems while
# 1314954 and 1314961 are resolved.
def get_power_state(self, task):
"""Get the current power state of the task's node.
:param task: a TaskManager instance containing the node to act on.
:returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
:raises: InvalidParameterValue if required ipmi parameters are missing.
:raises: MissingParameterValue if a required parameter is missing.
:raises: IPMIFailure on an error from ipmitool (from _power_status
call).
"""
driver_info = _parse_driver_info(task.node)
return _power_status(driver_info)
@task_manager.require_exclusive_lock
def set_power_state(self, task, pstate):
"""Turn the power on or off.
:param task: a TaskManager instance containing the node to act on.
:param pstate: The desired power state, one of ironic.common.states
POWER_ON, POWER_OFF.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: MissingParameterValue if required ipmi parameters are missing
:raises: PowerStateFailure if the power couldn't be set to pstate.
"""
driver_info = _parse_driver_info(task.node)
if pstate == states.POWER_ON:
state = _power_on(driver_info)
elif pstate == states.POWER_OFF:
state = _power_off(driver_info)
else:
raise exception.InvalidParameterValue(
_("set_power_state called "
"with invalid power state %s.") % pstate)
if state != pstate:
raise exception.PowerStateFailure(pstate=pstate)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Cycles the power to the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue if required ipmi parameters are missing.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: PowerStateFailure if the final state of the node is not
POWER_ON.
"""
driver_info = _parse_driver_info(task.node)
_power_off(driver_info)
state = _power_on(driver_info)
if state != states.POWER_ON:
raise exception.PowerStateFailure(pstate=states.POWER_ON)
class IPMIManagement(base.ManagementInterface):
def get_properties(self):
return COMMON_PROPERTIES
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def validate(self, task):
"""Check that 'driver_info' contains IPMI credentials.
Validates whether the 'driver_info' property of the supplied
task's node contains the required credentials information.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if required IPMI parameters
are missing.
:raises: MissingParameterValue if a required parameter is missing.
"""
_parse_driver_info(task.node)
def get_supported_boot_devices(self, task):
"""Get a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
boot_devices.BIOS, boot_devices.SAFE]
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for the task's node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is specified
:raises: MissingParameterValue if required ipmi parameters are missing.
:raises: IPMIFailure on an error from ipmitool.
"""
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
# note(JayF): IPMI spec indicates unless you send these raw bytes the
# boot device setting times out after 60s. Since it's possible it
# could be >60s before a node is rebooted, we should always send them.
# This mimics pyghmi's current behavior, and the "option=timeout"
# setting on newer ipmitool binaries.
timeout_disable = "0x00 0x08 0x03 0x08"
send_raw(task, timeout_disable)
cmd = "chassis bootdev %s" % device
if persistent:
cmd = cmd + " options=persistent"
driver_info = _parse_driver_info(task.node)
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
'when executing "ipmitool %(cmd)s". '
'Error: %(error)s'),
{'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Returns the current boot device of the node.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if required IPMI parameters
are missing.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
cmd = "chassis bootparam get 5"
driver_info = _parse_driver_info(task.node)
response = {'boot_device': None, 'persistent': None}
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
'when executing "ipmitool %(cmd)s". '
'Error: %(error)s'),
{'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
re_obj = re.search('Boot Device Selector : (.+)?\n', out)
if re_obj:
boot_selector = re_obj.groups('')[0]
if 'PXE' in boot_selector:
response['boot_device'] = boot_devices.PXE
elif 'Hard-Drive' in boot_selector:
if 'Safe-Mode' in boot_selector:
response['boot_device'] = boot_devices.SAFE
else:
response['boot_device'] = boot_devices.DISK
elif 'BIOS' in boot_selector:
response['boot_device'] = boot_devices.BIOS
elif 'CD/DVD' in boot_selector:
response['boot_device'] = boot_devices.CDROM
response['persistent'] = 'Options apply to all future boots' in out
return response
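# Illustration (not part of the driver): 'chassis bootparam get 5' output
# containing a line such as 'Boot Device Selector : Force PXE' plus the
# sentence 'Options apply to all future boots' would make get_boot_device()
# return {'boot_device': boot_devices.PXE, 'persistent': True}. The sample
# text is paraphrased from typical ipmitool output, not captured verbatim.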
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: FailedToParseSensorData when parsing sensor data fails.
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: MissingParameterValue if a required parameter is missing.
:returns: returns a dict of sensor data group by sensor type.
"""
driver_info = _parse_driver_info(task.node)
# with '-v' option, we can get the entire sensor data including the
# extended sensor information
cmd = "sdr -v"
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
raise exception.FailedToGetSensorData(node=task.node.uuid,
error=e)
return _parse_ipmi_sensors_data(task.node, out)
class VendorPassthru(base.VendorInterface):
def __init__(self):
try:
_check_option_support(['single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def send_raw(self, task, http_method, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
send_raw(task, raw_bytes)
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def bmc_reset(self, task, http_method, warm=True):
"""Reset BMC with IPMI command 'bmc reset (warm|cold)'.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param warm: boolean parameter to decide on warm or cold reset.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified
"""
node_uuid = task.node.uuid
if warm:
warm_param = 'warm'
else:
warm_param = 'cold'
LOG.debug('Doing %(warm)s BMC reset on node %(node)s',
{'warm': warm_param, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'bmc reset %s' % warm_param
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('bmc reset returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "bmc reset" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task, method, **kwargs):
"""Validate vendor-specific actions.
If invalid, raises an exception; otherwise returns None.
Valid methods:
* send_raw
* bmc_reset
:param task: a task from TaskManager.
:param method: method to be validated
:param kwargs: info for action.
:raises: InvalidParameterValue when an invalid parameter value is
specified.
:raises: MissingParameterValue if a required parameter is missing.
"""
if method == 'send_raw':
if not kwargs.get('raw_bytes'):
raise exception.MissingParameterValue(_(
'Parameter raw_bytes (string of bytes) was not '
'specified.'))
_parse_driver_info(task.node)
class IPMIShellinaboxConsole(base.ConsoleInterface):
"""A ConsoleInterface that uses ipmitool and shellinabox."""
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def get_properties(self):
d = COMMON_PROPERTIES.copy()
d.update(CONSOLE_PROPERTIES)
return d
def validate(self, task):
"""Validate the Node console info.
:param task: a task from TaskManager.
:raises: InvalidParameterValue
:raises: MissingParameterValue when a required parameter is missing
"""
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
raise exception.MissingParameterValue(_(
"Missing 'ipmi_terminal_port' parameter in node's"
" driver_info."))
if driver_info['protocol_version'] != '2.0':
raise exception.InvalidParameterValue(_(
"Serial over lan only works with IPMI protocol version 2.0. "
"Check the 'ipmi_protocol_version' parameter in "
"node's driver_info"))
def start_console(self, task):
"""Start a remote console for the node.
:param task: a task from TaskManager
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: PasswordFileFailedToCreate if unable to create a file
containing the password
:raises: ConsoleError if the directory for the PID file cannot be
created
:raises: ConsoleSubprocessFailed when invoking the subprocess failed
"""
driver_info = _parse_driver_info(task.node)
path = _console_pwfile_path(driver_info['uuid'])
pw_file = console_utils.make_persistent_password_file(
path, driver_info['password'])
ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
" -I lanplus -U %(user)s -f %(pwfile)s"
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': driver_info['address'],
'user': driver_info['username'],
'pwfile': pw_file})
for name, option in BRIDGING_OPTIONS:
if driver_info[name] is not None:
ipmi_cmd = " ".join([ipmi_cmd,
option, driver_info[name]])
if CONF.debug:
ipmi_cmd += " -v"
ipmi_cmd += " sol activate"
try:
console_utils.start_shellinabox_console(driver_info['uuid'],
driver_info['port'],
ipmi_cmd)
except (exception.ConsoleError, exception.ConsoleSubprocessFailed):
with excutils.save_and_reraise_exception():
utils.unlink_without_raise(path)
def stop_console(self, task):
"""Stop the remote console session for the node.
:param task: a task from TaskManager
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: ConsoleError if unable to stop the console
"""
driver_info = _parse_driver_info(task.node)
try:
console_utils.stop_shellinabox_console(driver_info['uuid'])
finally:
utils.unlink_without_raise(
_console_pwfile_path(driver_info['uuid']))
def get_console(self, task):
"""Get the type and connection information about the console."""
driver_info = _parse_driver_info(task.node)
url = console_utils.get_shellinabox_console_url(driver_info['port'])
return {'type': 'shellinabox', 'url': url}
| Tan0/ironic | ironic/drivers/modules/ipmitool.py | Python | apache-2.0 | 44,249 |
#!/usr/bin/env python
from random import choice
from python.decorators import euler_timer
SQUARES = ["GO",
"A1", "CC1", "A2", "T1", "R1", "B1", "CH1", "B2", "B3",
"JAIL",
"C1", "U1", "C2", "C3", "R2", "D1", "CC2", "D2", "D3",
"FP",
"E1", "CH2", "E2", "E3", "R3", "F1", "F2", "U2", "F3",
"G2J",
"G1", "G2", "CC3", "G3", "R4", "CH3", "H1", "T2", "H2"]
def roll_die(size):
first_die = choice(range(1, size + 1))
second_die = choice(range(1, size + 1))
return (first_die + second_die, (first_die == second_die))
def back(square, step):
index = SQUARES.index(square)
new_index = (index - step) % len(SQUARES)
return SQUARES[new_index]
def next_specific(square, next_type):
if next_type not in ["R", "U"]:
raise Exception("next_specific only intended for R and U")
# R1=5, R2=15, R3=25, R4=35
index = SQUARES.index(square)
if next_type == "R":
if 0 <= index < 5 or 35 < index:
return "R1"
elif 5 < index < 15:
return "R2"
elif 15 < index < 25:
return "R3"
elif 25 < index < 35:
return "R4"
else:
raise Exception("Case should not occur")
# U1=12, U2=28
elif next_type == "U":
if 0 <= index < 12 or index > 28:
return "U1"
elif 12 < index < 28:
return "U2"
else:
raise Exception("Case should not occur")
else:
raise Exception("Case should not occur")
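# Illustration: the second Chance square "CH2" sits at index 22, so
# next_specific("CH2", "R") returns "R3" (index 25) and
# next_specific("CH2", "U") returns "U2" (index 28), i.e. the next railway
# or utility clockwise from that square.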
def next_square(landing_square, chance_card, chest_card):
if landing_square not in ["CC1", "CC2", "CC3", "CH1", "CH2", "CH3", "G2J"]:
return (landing_square, chance_card, chest_card)
if landing_square == "G2J":
return ("JAIL", chance_card, chest_card)
elif landing_square in ["CC1", "CC2", "CC3"]:
# 1/16 Go, Jail
# 14/16 Stay
chest_card = (chest_card + 1) % 16
if chest_card == 0:
return ("GO", chance_card, chest_card)
elif chest_card == 1:
return ("JAIL", chance_card, chest_card)
else:
return (landing_square, chance_card, chest_card)
elif landing_square in ["CH1", "CH2", "CH3"]:
# 1/16 Go, Jail, C1, E3, H2, R1, next U, back 3
# 1/8 Next R
chance_card = (chance_card + 1) % 16
if chance_card == 0:
return ("GO", chance_card, chest_card)
elif chance_card == 1:
return ("JAIL", chance_card, chest_card)
elif chance_card == 2:
return ("C1", chance_card, chest_card)
elif chance_card == 3:
return ("E3", chance_card, chest_card)
elif chance_card == 4:
return ("H2", chance_card, chest_card)
elif chance_card == 5:
return ("R1", chance_card, chest_card)
elif chance_card == 6:
return (next_specific(landing_square, "U"),
chance_card, chest_card)
elif chance_card == 7:
return next_square(back(landing_square, 3),
chance_card, chest_card)
elif chance_card in [8, 9]:
return (next_specific(landing_square, "R"),
chance_card, chest_card)
else:
return (landing_square, chance_card, chest_card)
else:
raise Exception("Case should not occur")
def main(verbose=False):
GAME_PLAY = 10 ** 6
dice_size = 4
visited = {"GO": 1}
current = "GO"
chance_card = 0
chest_card = 0
doubles = 0
for place in xrange(GAME_PLAY):
total, double = roll_die(dice_size)
if double:
doubles += 1
else:
doubles = 0
if doubles == 3:
doubles = 0
current = "JAIL"
else:
index = SQUARES.index(current)
landing_square = SQUARES[(index + total) % len(SQUARES)]
(current, chance_card,
chest_card) = next_square(landing_square, chance_card, chest_card)
# if current is not in visited, sets to 1
# (default 0 returned by get)
visited[current] = visited.get(current, 0) + 1
top_visited = sorted(visited.items(),
key=lambda pair: pair[1],
reverse=True)
top_visited = [SQUARES.index(square[0]) for square in top_visited[:3]]
return ''.join(str(index).zfill(2) for index in top_visited)
if __name__ == '__main__':
print euler_timer(84)(main)(verbose=True)
| dhermes/project-euler | python/complete/no084.py | Python | apache-2.0 | 4,565 |
# Copyright 2022 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add tenant_id to lcm_subscriptions and lcm_op_occs
Revision ID: d6ae359ab0d6
Revises: 3ff50553e9d3
Create Date: 2022-01-06 13:35:53.868106
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd6ae359ab0d6'
down_revision = '3ff50553e9d3'
def upgrade(active_plugins=None, options=None):
op.add_column('vnf_lcm_subscriptions',
sa.Column('tenant_id', sa.String(length=64),
nullable=False))
op.add_column('vnf_lcm_op_occs',
sa.Column('tenant_id', sa.String(length=64),
nullable=False))
| stackforge/tacker | tacker/db/migration/alembic_migrations/versions/d6ae359ab0d6_add_tenant_id_to_lcm_subscriptions_and_.py | Python | apache-2.0 | 1,237 |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes and utilities for image datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import contrib
from tensor2tensor.utils import metrics
import tensorflow.compat.v1 as tf
def matplotlib_pyplot():
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
return plt
def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
"""
curr_image = np.asarray(image, dtype=np.uint8)
height, width, n_channels = curr_image.shape
# If monochrome image, then reshape to [height, width]
if n_channels == 1:
curr_image = np.reshape(curr_image, [height, width])
s = io.BytesIO()
matplotlib_pyplot().imsave(s, curr_image, format="png")
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=height, width=width,
colorspace=n_channels)
return tf.Summary.Value(tag=tag, image=img_sum)
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is set, otherwise an empty list.
"""
decode_hparams = hook_args.decode_hparams
if not decode_hparams.display_decoded_images:
return []
predictions = hook_args.predictions[0]
# Display ten random inputs and outputs so that tensorboard does not hang.
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for ind, prediction in enumerate(rand_predictions):
output_summary = image_to_tf_summary_value(
prediction["outputs"], tag="%d_output" % ind)
input_summary = image_to_tf_summary_value(
prediction["inputs"], tag="%d_input" % ind)
all_summaries.append(input_summary)
all_summaries.append(output_summary)
return all_summaries
def resize_by_area(img, size):
"""image resize function used by quite a few image problems."""
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA))
def make_multiscale(image, resolutions,
resize_method=tf.image.ResizeMethod.BICUBIC,
num_channels=3):
"""Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels].
"""
scaled_images = []
for height in resolutions:
scaled_image = tf.image.resize_images(
image,
size=[height, height], # assuming that height = width
method=resize_method)
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([height, height, num_channels])
scaled_images.append(scaled_image)
return scaled_images
def make_multiscale_dilated(image, resolutions, num_channels=3):
"""Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to. The function
assumes VALID padding, so the original image's height must be divisible
by each resolution's height to return the exact resolution size.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels] if resolutions properly
divide the original image's height; otherwise shape height and width is up
to valid skips.
"""
image_height = common_layers.shape_list(image)[0]
scaled_images = []
for height in resolutions:
dilation_rate = image_height // height # assuming height = width
scaled_image = image[::dilation_rate, ::dilation_rate]
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([None, None, num_channels])
scaled_images.append(scaled_image)
return scaled_images
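# Illustration: for a 32x32x3 input, make_multiscale_dilated(image, [8, 16])
# keeps every 4th and every 2nd pixel respectively, giving tensors of size
# [8, 8, 3] and [16, 16, 3] (their static shape is set to [None, None, 3]).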
class ImageProblem(problem.Problem):
"""Base class for problems with images."""
@property
def num_channels(self):
"""Number of color channels."""
return 3
@property
def vocab_size(self):
"""Number of pixel values."""
return 256
def example_reading_spec(self):
data_fields = {
"image/encoded": tf.FixedLenFeature((), tf.string),
"image/format": tf.FixedLenFeature((), tf.string),
}
data_items_to_decoders = {
"inputs":
contrib.slim().tfexample_decoder.Image(
image_key="image/encoded",
format_key="image/format",
channels=self.num_channels),
}
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
if not self._was_reversed:
example["inputs"] = tf.image.per_image_standardization(example["inputs"])
return example
def eval_metrics(self):
eval_metrics = [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
if self._was_reversed:
eval_metrics += [metrics.Metrics.IMAGE_SUMMARY]
return eval_metrics
@property
def decode_hooks(self):
return [convert_predictions_to_image_summaries]
class Image2ClassProblem(ImageProblem):
"""Base class for image classification problems."""
@property
def is_small(self):
raise NotImplementedError()
@property
def num_classes(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
return 1
@property
def class_labels(self):
return ["ID_%d" % i for i in range(self.num_classes)]
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.ImageEncoder(channels=self.num_channels),
"targets": text_encoder.ClassLabelEncoder(self.class_labels)
}
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2ClassProblem, self).example_reading_spec())
data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64)
data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor(
label_key)
return data_fields, data_items_to_decoders
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"inputs": 256,
"targets": self.num_classes}
p.batch_size_multiplier = 4 if self.is_small else 256
p.loss_multiplier = 3.0 if self.is_small else 1.0
if self._was_reversed:
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = problem.SpaceID.IMAGE_LABEL
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def encode_images_as_png(images):
"""Yield images encoded as pngs."""
if tf.executing_eagerly():
for image in images:
yield tf.image.encode_png(image).numpy()
else:
(height, width, channels) = images[0].shape
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for image in images:
enc_string = sess.run(encoded_image_t, feed_dict={image_t: image})
yield enc_string
def image_generator(images, labels):
"""Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as PNG,
* image/format: the string "png" representing image format,
* image/class/label: an integer representing the label,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a singleton list of the corresponding type.
Raises:
ValueError: if images is an empty list.
"""
if not images:
raise ValueError("Must provide some images for the generator.")
width, height, _ = images[0].shape
for (enc_image, label) in zip(encode_images_as_png(images), labels):
yield {
"image/encoded": [enc_image],
"image/format": ["png"],
"image/class/label": [int(label)],
"image/height": [height],
"image/width": [width]
}
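# Illustrative sketch (hypothetical arrays, not from the original code):
#
#   images = [np.zeros((28, 28, 3), dtype=np.uint8) for _ in range(2)]
#   labels = [0, 1]
#   for example in image_generator(images, labels):
#     # example["image/encoded"] holds a single PNG string and
#     # example["image/class/label"] the matching integer label.
#     pass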
class Image2TextProblem(ImageProblem):
"""Base class for image-to-text problems."""
@property
def is_character_level(self):
raise NotImplementedError()
@property
def vocab_problem(self):
raise NotImplementedError() # Not needed if self.is_character_level.
@property
def target_space_id(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
raise NotImplementedError()
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2TextProblem, self).example_reading_spec())
data_fields[label_key] = tf.VarLenFeature(tf.int64)
data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor(
label_key)
return data_fields, data_items_to_decoders
def feature_encoders(self, data_dir):
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
else:
vocab_filename = os.path.join(
data_dir, self.vocab_problem.vocab_filename)
encoder = text_encoder.SubwordTextEncoder(vocab_filename)
input_encoder = text_encoder.ImageEncoder(channels=self.num_channels)
return {"inputs": input_encoder, "targets": encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.SYMBOL}
p.vocab_size = {"inputs": 256,
"targets": self._encoders["targets"].vocab_size}
p.batch_size_multiplier = 256
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = self.target_space_id
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def image_augmentation(images, do_colors=False, crop_size=None):
"""Image augmentation: cropping, flipping, and color transforms."""
if crop_size is None:
crop_size = [299, 299]
images = tf.random_crop(images, crop_size + [3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmentation, but might be slow.
images = tf.image.random_brightness(images, max_delta=32. / 255.)
images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
images = tf.image.random_hue(images, max_delta=0.2)
images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
return images
def cifar_image_augmentation(images):
"""Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
images: a Tensor.
Returns:
Tensor of the same shape as images.
"""
images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)
images = tf.random_crop(images, [32, 32, 3])
images = tf.image.random_flip_left_right(images)
return images
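# Illustrative sketch (assumes a [32, 32, 3] CIFAR-style image): the pipeline
# above pads to 40x40, takes a random 32x32 crop and randomly flips, e.g.
#
#   augmented = cifar_image_augmentation(tf.zeros([32, 32, 3]))
#   # augmented still has shape [32, 32, 3]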
def random_shift(image, wsr=0.1, hsr=0.1):
"""Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the width.
Returns:
images: images translated by the provided wsr and hsr.
"""
height, width, _ = common_layers.shape_list(image)
width_range, height_range = wsr*width, hsr*height
height_translations = tf.random_uniform((1,), -height_range, height_range)
width_translations = tf.random_uniform((1,), -width_range, width_range)
translations = tf.concat((height_translations, width_translations), axis=0)
return contrib.image().translate(image, translations=translations)
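# Illustrative sketch (assumed size): with the default wsr=hsr=0.1, a
# [32, 32, 3] image is translated by up to +/-3.2 pixels in each direction:
#
#   shifted = random_shift(tf.zeros([32, 32, 3]))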
| tensorflow/tensor2tensor | tensor2tensor/data_generators/image_utils.py | Python | apache-2.0 | 14,495 |
"""
Generate a toy dataset for the matrix factorisation case, and store it.
We use dimensions 100 by 50 for the dataset, and 10 latent factors.
As the prior for U and V we use an Exponential(1) prior (lambda = 1 for all entries).
As a result, each entry of R is around 20, with a variance of 100-120.
For contrast, the Sanger dataset of 705 by 140 shifted to nonnegative has mean
31.522999753779082 and variance 243.2427345740027.
We add Gaussian noise of precision tau = 1 (prior for gamma: alpha=1,beta=1).
(Simply using the expectation of our Gamma distribution over tau)
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)
from BNMTF.code.models.distributions.exponential import exponential_draw
from BNMTF.code.models.distributions.normal import normal_draw
from BNMTF.code.cross_validation.mask import generate_M
import numpy, itertools, matplotlib.pyplot as plt
def generate_dataset(I,J,K,lambdaU,lambdaV,tau):
# Generate U, V
U = numpy.zeros((I,K))
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
U[i,k] = exponential_draw(lambdaU[i,k])
V = numpy.zeros((J,K))
for j,k in itertools.product(xrange(0,J),xrange(0,K)):
V[j,k] = exponential_draw(lambdaV[j,k])
# Generate R
true_R = numpy.dot(U,V.T)
R = add_noise(true_R,tau)
return (U,V,tau,true_R,R)
def add_noise(true_R,tau):
if numpy.isinf(tau):
return numpy.copy(true_R)
(I,J) = true_R.shape
R = numpy.zeros((I,J))
for i,j in itertools.product(xrange(0,I),xrange(0,J)):
R[i,j] = normal_draw(true_R[i,j],tau)
return R
def try_generate_M(I,J,fraction_unknown,attempts):
for attempt in range(1,attempts+1):
try:
M = generate_M(I,J,fraction_unknown)
sums_columns = M.sum(axis=0)
sums_rows = M.sum(axis=1)
for i,c in enumerate(sums_rows):
assert c != 0, "Fully unobserved row in M, row %s. Fraction %s." % (i,fraction_unknown)
for j,c in enumerate(sums_columns):
assert c != 0, "Fully unobserved column in M, column %s. Fraction %s." % (j,fraction_unknown)
print "Took %s attempts to generate M." % attempt
return M
except AssertionError:
pass
raise Exception("Tried to generate M %s times, with I=%s, J=%s, fraction=%s, but failed." % (attempts,I,J,fraction_unknown))
##########
if __name__ == "__main__":
output_folder = project_location+"BNMTF/data_toy/bnmf/"
I,J,K = 100, 80, 10 #20, 10, 5 #
fraction_unknown = 0.1
alpha, beta = 1., 1.
lambdaU = numpy.ones((I,K))
    lambdaV = numpy.ones((J,K))
tau = alpha / beta
(U,V,tau,true_R,R) = generate_dataset(I,J,K,lambdaU,lambdaV,tau)
# Try to generate M
M = try_generate_M(I,J,fraction_unknown,attempts=1000)
# Store all matrices in text files
numpy.savetxt(open(output_folder+"U.txt",'w'),U)
numpy.savetxt(open(output_folder+"V.txt",'w'),V)
numpy.savetxt(open(output_folder+"R_true.txt",'w'),true_R)
numpy.savetxt(open(output_folder+"R.txt",'w'),R)
numpy.savetxt(open(output_folder+"M.txt",'w'),M)
print "Mean R: %s. Variance R: %s. Min R: %s. Max R: %s." % (numpy.mean(R),numpy.var(R),R.min(),R.max())
fig = plt.figure()
plt.hist(R.flatten(),bins=range(0,int(R.max())+1))
plt.show() | ThomasBrouwer/BNMTF | data_toy/bnmf/generate_bnmf.py | Python | apache-2.0 | 3,430 |
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.stub_utils
~~~~~~~~~~~~~~~~~~~~~
Test utilities.
"""
import logging
import random
from collections import namedtuple
CommandOutput = namedtuple('CommandOutput', ['stdout', 'stderr'])
# Logger
log = logging.getLogger(__name__)
# The current time.
test_time = 0
def set_time(value):
global test_time
test_time = value
log.debug("Time now set to : %d" % test_time)
def get_time():
return test_time
def get_mac():
"""
Gets a random mac address.
"""
mac = ("%02x:%02x:%02x:%02x:%02x:%02x" %
(random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)))
return mac
# Exception raised when tests reach the end.
class TestOverException(Exception):
pass
| nbartos/calico | calico/felix/test/stub_utils.py | Python | apache-2.0 | 1,545 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.analytics.data_v1alpha.types import analytics_data_api
from .base import AlphaAnalyticsDataTransport, DEFAULT_CLIENT_INFO
from .grpc import AlphaAnalyticsDataGrpcTransport
class AlphaAnalyticsDataGrpcAsyncIOTransport(AlphaAnalyticsDataTransport):
"""gRPC AsyncIO backend transport for AlphaAnalyticsData.
Google Analytics reporting data service.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "analyticsdata.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "analyticsdata.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def run_report(
self,
) -> Callable[
[analytics_data_api.RunReportRequest],
Awaitable[analytics_data_api.RunReportResponse],
]:
r"""Return a callable for the run report method over gRPC.
Returns a customized report of your Google Analytics
event data. Reports contain statistics derived from data
collected by the Google Analytics tracking code. The
data returned from the API is as a table with columns
for the requested dimensions and metrics. Metrics are
individual measurements of user activity on your
property, such as active users or event count.
Dimensions break down metrics across some common
criteria, such as country or event name.
Returns:
Callable[[~.RunReportRequest],
Awaitable[~.RunReportResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_report" not in self._stubs:
self._stubs["run_report"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/RunReport",
request_serializer=analytics_data_api.RunReportRequest.serialize,
response_deserializer=analytics_data_api.RunReportResponse.deserialize,
)
return self._stubs["run_report"]
@property
def run_pivot_report(
self,
) -> Callable[
[analytics_data_api.RunPivotReportRequest],
Awaitable[analytics_data_api.RunPivotReportResponse],
]:
r"""Return a callable for the run pivot report method over gRPC.
Returns a customized pivot report of your Google
Analytics event data. Pivot reports are more advanced
and expressive formats than regular reports. In a pivot
report, dimensions are only visible if they are included
in a pivot. Multiple pivots can be specified to further
dissect your data.
Returns:
Callable[[~.RunPivotReportRequest],
Awaitable[~.RunPivotReportResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_pivot_report" not in self._stubs:
self._stubs["run_pivot_report"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/RunPivotReport",
request_serializer=analytics_data_api.RunPivotReportRequest.serialize,
response_deserializer=analytics_data_api.RunPivotReportResponse.deserialize,
)
return self._stubs["run_pivot_report"]
@property
def batch_run_reports(
self,
) -> Callable[
[analytics_data_api.BatchRunReportsRequest],
Awaitable[analytics_data_api.BatchRunReportsResponse],
]:
r"""Return a callable for the batch run reports method over gRPC.
Returns multiple reports in a batch. All reports must
be for the same Entity.
Returns:
Callable[[~.BatchRunReportsRequest],
Awaitable[~.BatchRunReportsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_run_reports" not in self._stubs:
self._stubs["batch_run_reports"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/BatchRunReports",
request_serializer=analytics_data_api.BatchRunReportsRequest.serialize,
response_deserializer=analytics_data_api.BatchRunReportsResponse.deserialize,
)
return self._stubs["batch_run_reports"]
@property
def batch_run_pivot_reports(
self,
) -> Callable[
[analytics_data_api.BatchRunPivotReportsRequest],
Awaitable[analytics_data_api.BatchRunPivotReportsResponse],
]:
r"""Return a callable for the batch run pivot reports method over gRPC.
Returns multiple pivot reports in a batch. All
reports must be for the same Entity.
Returns:
Callable[[~.BatchRunPivotReportsRequest],
Awaitable[~.BatchRunPivotReportsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_run_pivot_reports" not in self._stubs:
self._stubs["batch_run_pivot_reports"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/BatchRunPivotReports",
request_serializer=analytics_data_api.BatchRunPivotReportsRequest.serialize,
response_deserializer=analytics_data_api.BatchRunPivotReportsResponse.deserialize,
)
return self._stubs["batch_run_pivot_reports"]
@property
def get_metadata(
self,
) -> Callable[
[analytics_data_api.GetMetadataRequest], Awaitable[analytics_data_api.Metadata]
]:
r"""Return a callable for the get metadata method over gRPC.
Returns metadata for dimensions and metrics available in
reporting methods. Used to explore the dimensions and metrics.
In this method, a Google Analytics GA4 Property Identifier is
specified in the request, and the metadata response includes
Custom dimensions and metrics as well as Universal metadata.
For example if a custom metric with parameter name
``levels_unlocked`` is registered to a property, the Metadata
response will contain ``customEvent:levels_unlocked``. Universal
metadata are dimensions and metrics applicable to any property
such as ``country`` and ``totalUsers``.
Returns:
Callable[[~.GetMetadataRequest],
Awaitable[~.Metadata]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_metadata" not in self._stubs:
self._stubs["get_metadata"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/GetMetadata",
request_serializer=analytics_data_api.GetMetadataRequest.serialize,
response_deserializer=analytics_data_api.Metadata.deserialize,
)
return self._stubs["get_metadata"]
@property
def run_realtime_report(
self,
) -> Callable[
[analytics_data_api.RunRealtimeReportRequest],
Awaitable[analytics_data_api.RunRealtimeReportResponse],
]:
r"""Return a callable for the run realtime report method over gRPC.
The Google Analytics Realtime API returns a
customized report of realtime event data for your
property. These reports show events and usage from the
last 30 minutes.
Returns:
Callable[[~.RunRealtimeReportRequest],
Awaitable[~.RunRealtimeReportResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_realtime_report" not in self._stubs:
self._stubs["run_realtime_report"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/RunRealtimeReport",
request_serializer=analytics_data_api.RunRealtimeReportRequest.serialize,
response_deserializer=analytics_data_api.RunRealtimeReportResponse.deserialize,
)
return self._stubs["run_realtime_report"]
__all__ = ("AlphaAnalyticsDataGrpcAsyncIOTransport",)
| googleapis/python-analytics-data | google/analytics/data_v1alpha/services/alpha_analytics_data/transports/grpc_asyncio.py | Python | apache-2.0 | 19,474 |
# Generated by Django 2.1.7 on 2019-04-30 13:20
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='publishablemodel',
name='id',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
]
| flavoi/diventi | diventi/core/migrations/0002_auto_20190430_1520.py | Python | apache-2.0 | 446 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from solum.api.controllers import common_types
from solum.api.controllers.v1.datamodel import types as api_types
#from solum.openstack.common import log as logging
#LOG = logging.getLogger(__name__)
class Extensions(api_types.Base):
"""extensions resource"""
extension_links = [common_types.Link]
"""This attribute contains Links to extension resources that contain
information about the extensions supported by this Platform."""
def __init__(self, **kwds):
# LOG.debug("extensions constructor: %s" % kwds)
super(Extensions, self).__init__(**kwds)
| gilbertpilz/solum | solum/api/controllers/camp/v1_1/datamodel/extensions.py | Python | apache-2.0 | 1,139 |
"""Custom Exception Classes for Phylotyper Module
"""
class PhylotyperError(Exception):
"""Basic exception for errors raised by Phylotyper modules"""
def __init__(self, subtype, msg=None):
if msg is None:
msg = "An error occured for subtype {}".format(subtype)
super(PhylotyperError, self).__init__(msg)
self.subtype = subtype
class ValuesError(PhylotyperError):
"""Unknown subtype"""
def __init__(self, subtype, msg=None):
        super(ValuesError, self).__init__(
            subtype, msg="Unrecognized subtype {}".format(subtype))
class DatabaseError(PhylotyperError):
"""Missing data in Database"""
def __init__(self, subtype, data, msg=None):
m = "Database is missing data {} for {}".format(data, subtype)
        super(DatabaseError, self).__init__(subtype, m)
self.data = data | superphy/backend | app/modules/phylotyper/exceptions.py | Python | apache-2.0 | 874 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from cryptography.fernet import Fernet
from airflow import settings
from airflow.models import Variable, crypto
from tests.test_utils.config import conf_vars
class TestVariable(unittest.TestCase):
def setUp(self):
crypto._fernet = None
def tearDown(self):
crypto._fernet = None
@conf_vars({('core', 'fernet_key'): ''})
def test_variable_no_encryption(self):
"""
Test variables without encryption
"""
Variable.set('key', 'value')
session = settings.Session()
test_var = session.query(Variable).filter(Variable.key == 'key').one()
self.assertFalse(test_var.is_encrypted)
self.assertEqual(test_var.val, 'value')
@conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
def test_variable_with_encryption(self):
"""
Test variables with encryption
"""
Variable.set('key', 'value')
session = settings.Session()
test_var = session.query(Variable).filter(Variable.key == 'key').one()
self.assertTrue(test_var.is_encrypted)
self.assertEqual(test_var.val, 'value')
def test_var_with_encryption_rotate_fernet_key(self):
"""
Tests rotating encrypted variables.
"""
key1 = Fernet.generate_key()
key2 = Fernet.generate_key()
with conf_vars({('core', 'fernet_key'): key1.decode()}):
Variable.set('key', 'value')
session = settings.Session()
test_var = session.query(Variable).filter(Variable.key == 'key').one()
self.assertTrue(test_var.is_encrypted)
self.assertEqual(test_var.val, 'value')
self.assertEqual(Fernet(key1).decrypt(test_var._val.encode()), b'value')
# Test decrypt of old value with new key
with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
crypto._fernet = None
self.assertEqual(test_var.val, 'value')
# Test decrypt of new value with new key
test_var.rotate_fernet_key()
self.assertTrue(test_var.is_encrypted)
self.assertEqual(test_var.val, 'value')
self.assertEqual(Fernet(key2).decrypt(test_var._val.encode()), b'value')
| Fokko/incubator-airflow | tests/models/test_variable.py | Python | apache-2.0 | 3,100 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for download_data."""
from unittest import mock
from google.protobuf import text_format
import tensorflow as tf
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.plugins import base_plugin
from tensorboard.plugins.hparams import api_pb2
from tensorboard.plugins.hparams import backend_context
from tensorboard.plugins.hparams import download_data
EXPERIMENT = """
description: 'Test experiment'
user: 'Test user'
hparam_infos: [
{
name: 'initial_temp'
type: DATA_TYPE_FLOAT64
},
{
name: 'final_temp'
type: DATA_TYPE_FLOAT64
},
{ name: 'string_hparam' },
{ name: 'bool_hparam' },
{ name: 'optional_string_hparam' }
]
metric_infos: [
{ name: { tag: 'current_temp' } },
{ name: { tag: 'delta_temp' } },
{ name: { tag: 'optional_metric' } }
]
"""
SESSION_GROUPS = """
session_groups {
name: "group_1"
hparams { key: "bool_hparam" value { bool_value: true } }
hparams { key: "final_temp" value { number_value: 150.0 } }
hparams { key: "initial_temp" value { number_value: 270.0 } }
hparams { key: "string_hparam" value { string_value: "a string" } }
metric_values {
name { tag: "current_temp" }
value: 10
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: 15
training_step: 2
wall_time_secs: 10.0
}
metric_values { name { tag: "optional_metric" } value: 33
training_step: 20
wall_time_secs: 2.0
}
sessions {
name: "session_1"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_SUCCESS
metric_values {
name { tag: "current_temp" }
value: 10
training_step: 1
wall_time_secs: 1.0
}
metric_values {
name { tag: "delta_temp" }
value: 15
training_step: 2
wall_time_secs: 10.0
}
metric_values {
name { tag: "optional_metric" }
value: 33
training_step: 20
wall_time_secs: 2.0
}
}
}
session_groups {
name: "group_2"
hparams { key: "bool_hparam" value { bool_value: false } }
hparams { key: "final_temp" value { number_value: 100.0 } }
hparams { key: "initial_temp" value { number_value: 280.0 } }
hparams { key: "string_hparam" value { string_value: "AAAAA"}}
metric_values {
name { tag: "current_temp" }
value: 51.0
training_step: 1
wall_time_secs: 1.0
}
metric_values {
name { tag: "delta_temp" }
value: 44.5
training_step: 2
wall_time_secs: 10.3333333
}
sessions {
name: "session_2"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_SUCCESS
metric_values {
name { tag: "current_temp" }
value: 100
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" }
value: 150
training_step: 3
wall_time_secs: 11.0
}
}
sessions {
name: "session_3"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_FAILURE
metric_values {
name { tag: "current_temp" }
value: 1.0
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" }
value: 1.5
training_step: 2
wall_time_secs: 10.0
}
}
sessions {
name: "session_5"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_SUCCESS
metric_values {
name { tag: "current_temp" }
value: 52.0
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" }
value: -18
training_step: 2
wall_time_secs: 10.0
}
}
}
session_groups {
name: "group_3"
hparams { key: "bool_hparam" value { bool_value: true } }
hparams { key: "final_temp" value { number_value: 0.000012 } }
hparams { key: "initial_temp" value { number_value: 300.0 } }
hparams { key: "string_hparam" value { string_value: "a string_3"}}
hparams {
key: 'optional_string_hparam' value { string_value: 'BB' }
}
metric_values {
name { tag: "current_temp" }
value: 101.0
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: -15100000.0
training_step: 2
wall_time_secs: 10.0
}
sessions {
name: "session_4"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_UNKNOWN
metric_values {
name { tag: "current_temp" }
value: 101.0
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: -151000000.0
training_step: 2
wall_time_secs: 10.0
}
}
}
total_size: 3
"""
EXPECTED_LATEX = r"""\begin{table}[tbp]
\begin{tabular}{llllllll}
initial\_temp & final\_temp & string\_hparam & bool\_hparam & optional\_string\_hparam & current\_temp & delta\_temp & optional\_metric \\ \hline
$270$ & $150$ & a string & $1$ & & $10$ & $15$ & $33$ \\
$280$ & $100$ & AAAAA & $0$ & & $51$ & $44.5$ & - \\
$300$ & $1.2\cdot 10^{-5}$ & a string\_3 & $1$ & BB & $101$ & $-1.51\cdot 10^{7}$ & - \\
\hline
\end{tabular}
\end{table}
"""
EXPECTED_CSV = """initial_temp,final_temp,string_hparam,bool_hparam,optional_string_hparam,current_temp,delta_temp,optional_metric\r
270.0,150.0,a string,True,,10.0,15.0,33.0\r
280.0,100.0,AAAAA,False,,51.0,44.5,\r
300.0,1.2e-05,a string_3,True,BB,101.0,-15100000.0,\r
"""
class DownloadDataTest(tf.test.TestCase):
def setUp(self):
self._mock_multiplexer = mock.create_autospec(
plugin_event_multiplexer.EventMultiplexer
)
self._mock_tb_context = base_plugin.TBContext(
multiplexer=self._mock_multiplexer
)
def _run_handler(self, experiment, session_groups, response_format):
experiment_proto = text_format.Merge(experiment, api_pb2.Experiment())
session_groups_proto = text_format.Merge(
session_groups, api_pb2.ListSessionGroupsResponse()
)
num_columns = len(experiment_proto.hparam_infos) + len(
experiment_proto.metric_infos
)
handler = download_data.Handler(
backend_context.Context(self._mock_tb_context),
experiment_proto,
session_groups_proto,
response_format,
[True] * num_columns,
)
return handler.run()
def test_csv(self):
body, mime_type = self._run_handler(
EXPERIMENT, SESSION_GROUPS, download_data.OutputFormat.CSV
)
self.assertEqual("text/csv", mime_type)
self.assertEqual(EXPECTED_CSV, body)
def test_latex(self):
body, mime_type = self._run_handler(
EXPERIMENT, SESSION_GROUPS, download_data.OutputFormat.LATEX
)
self.assertEqual("application/x-latex", mime_type)
self.assertEqual(EXPECTED_LATEX, body)
def test_json(self):
body, mime_type = self._run_handler(
EXPERIMENT, SESSION_GROUPS, download_data.OutputFormat.JSON
)
self.assertEqual("application/json", mime_type)
expected_result = {
"header": [
"initial_temp",
"final_temp",
"string_hparam",
"bool_hparam",
"optional_string_hparam",
"current_temp",
"delta_temp",
"optional_metric",
],
"rows": [
[270.0, 150.0, "a string", True, "", 10.0, 15.0, 33.0],
[280.0, 100.0, "AAAAA", False, "", 51.0, 44.5, None],
[
300.0,
1.2e-05,
"a string_3",
True,
"BB",
101.0,
-15100000.0,
None,
],
],
}
self.assertEqual(expected_result, body)
if __name__ == "__main__":
tf.test.main()
| tensorflow/tensorboard | tensorboard/plugins/hparams/download_data_test.py | Python | apache-2.0 | 8,571 |
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
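# Illustrative sketch (hypothetical task, not part of this module): the wrapper
# above is used like the plain PyCOMPSs @task decorator, e.g.
#
#   @ExaquteTask(returns=1)
#   def square(x):
#       return x * x
#
#   y = square(3)                  # submitted as an asynchronous task
#   y = get_value_from_remote(y)   # synchronises and gathers the result
#   delete_object(y)               # releases the remote object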
| mF2C/COMPSs | compss/programming_model/bindings/python/src/exaqute/ExaquteTaskPyCOMPSs.py | Python | apache-2.0 | 1,613 |
from rest_framework import generics, permissions, views, response,status
from .models import Account
from .serializers import AccountCreateSerializer, AccountSerializer, AuthenticateSerializer, \
UpdateAccountSerializer, AccountRetrieveSerializer
# Create your views here.
class AccountCreateView(generics.CreateAPIView):
queryset = Account.objects.all()
serializer_class = AccountCreateSerializer
permission_classes = [permissions.AllowAny]
class AccountListView(generics.ListAPIView):
queryset = Account.objects.all()
serializer_class = AccountSerializer
permission_classes = [permissions.IsAuthenticated]
class AccountRetrieveView(generics.RetrieveAPIView):
queryset = Account.objects.all()
serializer_class = AccountRetrieveSerializer
class UpdateAccountView(generics.UpdateAPIView):
queryset = Account.objects.all()
serializer_class = UpdateAccountSerializer
# permission_classes = [permissions.IsAuthenticated]
class AccountAuthenticationView(views.APIView):
queryset = Account.objects.all()
serializer_class = AuthenticateSerializer
def post(self, request):
data = request.data
serializer = AuthenticateSerializer(data=data)
if serializer.is_valid(raise_exception=True):
            new_data = serializer.data
            return response.Response(new_data, status=status.HTTP_200_OK)
return response.Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | jiafengwu0301/App_BackEnd | api/views.py | Python | apache-2.0 | 1,474 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'litleleprikon'
from random import randint
FIGURES = ['камень', 'бумага', 'ножницы']
FIG_LEN = len(FIGURES)
class Player:
"""
    Player class stores a randomly chosen tactic and generates figures according to it.
-- Doctests --
    >>> player = Player(1)
>>> player.figure in FIGURES
True
"""
def __init__(self, number: int):
self.name = 'игрок{}'.format(number)
tactic = randint(0, FIG_LEN-1)
self.main_figure = FIGURES[tactic]
self.__figures = [FIGURES[(tactic+i) % FIG_LEN] for i in range(FIG_LEN)]
def __str__(self):
return '{}: {}'.format(self.name, self.main_figure)
@property
def figure(self):
rand = randint(0, FIG_LEN)
return self.__figures[rand % FIG_LEN]
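# Illustrative sketch (not part of the original file): two players with random
# tactics, each producing one figure per round.
#
#   p1, p2 = Player(1), Player(2)
#   print(p1, p2)
#   round_figures = (p1.figure, p2.figure)  # both values come from FIGURES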
| litleleprikon/innopolis_test_task | problem_2_rock_paper_scissor/player.py | Python | apache-2.0 | 846 |
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import collections
import hashlib
import os
import subprocess
import sys
import settings
OUTPUT_DIR = os.path.join(settings.PROJECT_DIR, 'build', 'tests')
Options = collections.namedtuple('Options', ['name', 'build_args', 'test_args'])
Options.__new__.__defaults__ = ([], [])
OPTIONS_PROFILE_MIN = ['--profile=minimal']
OPTIONS_PROFILE_ES51 = [] # NOTE: same as ['--profile=es5.1']
OPTIONS_PROFILE_ES2015 = ['--profile=es2015-subset']
OPTIONS_DEBUG = ['--debug']
OPTIONS_SNAPSHOT = ['--snapshot-save=on', '--snapshot-exec=on', '--jerry-cmdline-snapshot=on']
OPTIONS_UNITTESTS = ['--unittests=on', '--jerry-cmdline=off', '--error-messages=on',
'--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on',
'--line-info=on', '--mem-stats=on']
OPTIONS_DOCTESTS = ['--doctests=on', '--jerry-cmdline=off', '--error-messages=on',
'--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on']
# Test options for unittests
JERRY_UNITTESTS_OPTIONS = [
Options('unittests-es2015_subset',
OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES2015),
Options('unittests-es2015_subset-debug',
OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
Options('doctests-es2015_subset',
OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES2015),
Options('doctests-es2015_subset-debug',
OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
Options('unittests-es5.1',
OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51),
Options('unittests-es5.1-debug',
OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
Options('doctests-es5.1',
OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51),
Options('doctests-es5.1-debug',
OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG)
]
# Test options for jerry-tests
JERRY_TESTS_OPTIONS = [
Options('jerry_tests-es5.1',
OPTIONS_PROFILE_ES51),
Options('jerry_tests-es5.1-snapshot',
OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT,
['--snapshot']),
Options('jerry_tests-es5.1-debug',
OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
Options('jerry_tests-es5.1-debug-snapshot',
OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
['--snapshot']),
Options('jerry_tests-es5.1-debug-cpointer_32bit',
OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + ['--cpointer-32bit=on', '--mem-heap=1024']),
Options('jerry_tests-es5.1-debug-external_context',
OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + ['--jerry-libc=off', '--external-context=on']),
Options('jerry_tests-es2015_subset-debug',
OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
]
# Test options for jerry-test-suite
JERRY_TEST_SUITE_OPTIONS = JERRY_TESTS_OPTIONS[:]
JERRY_TEST_SUITE_OPTIONS.extend([
Options('jerry_test_suite-minimal',
OPTIONS_PROFILE_MIN),
Options('jerry_test_suite-minimal-snapshot',
OPTIONS_PROFILE_MIN + OPTIONS_SNAPSHOT,
['--snapshot']),
Options('jerry_test_suite-minimal-debug',
OPTIONS_PROFILE_MIN + OPTIONS_DEBUG),
Options('jerry_test_suite-minimal-debug-snapshot',
OPTIONS_PROFILE_MIN + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
['--snapshot']),
Options('jerry_test_suite-es2015_subset',
OPTIONS_PROFILE_ES2015),
Options('jerry_test_suite-es2015_subset-snapshot',
OPTIONS_PROFILE_ES2015 + OPTIONS_SNAPSHOT,
['--snapshot']),
Options('jerry_test_suite-es2015_subset-debug-snapshot',
OPTIONS_PROFILE_ES2015 + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
['--snapshot']),
])
# Test options for test262
TEST262_TEST_SUITE_OPTIONS = [
Options('test262_tests')
]
# Test options for jerry-debugger
DEBUGGER_TEST_OPTIONS = [
Options('jerry_debugger_tests',
['--debug', '--jerry-debugger=on', '--jerry-libc=off'])
]
# Test options for buildoption-test
JERRY_BUILDOPTIONS = [
Options('buildoption_test-lto',
['--lto=on']),
Options('buildoption_test-error_messages',
['--error-messages=on']),
Options('buildoption_test-all_in_one',
['--all-in-one=on']),
Options('buildoption_test-valgrind',
['--valgrind=on']),
Options('buildoption_test-mem_stats',
['--mem-stats=on']),
Options('buildoption_test-show_opcodes',
['--show-opcodes=on']),
Options('buildoption_test-show_regexp_opcodes',
['--show-regexp-opcodes=on']),
Options('buildoption_test-compiler_default_libc',
['--jerry-libc=off']),
Options('buildoption_test-cpointer_32bit',
['--jerry-libc=off', '--compile-flag=-m32', '--cpointer-32bit=on', '--system-allocator=on']),
Options('buildoption_test-external_context',
['--jerry-libc=off', '--external-context=on']),
Options('buildoption_test-shared_libs',
['--jerry-libc=off', '--shared-libs=on']),
Options('buildoption_test-cmdline_test',
['--jerry-cmdline-test=on']),
Options('buildoption_test-cmdline_snapshot',
['--jerry-cmdline-snapshot=on']),
]
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--toolchain', metavar='FILE',
help='Add toolchain file')
parser.add_argument('-q', '--quiet', action='store_true',
help='Only print out failing tests')
parser.add_argument('--buildoptions', metavar='LIST',
help='Add a comma separated list of extra build options to each test')
parser.add_argument('--skip-list', metavar='LIST',
help='Add a comma separated list of patterns of the excluded JS-tests')
parser.add_argument('--outdir', metavar='DIR', default=OUTPUT_DIR,
help='Specify output directory (default: %(default)s)')
parser.add_argument('--check-signed-off', metavar='TYPE', nargs='?',
choices=['strict', 'tolerant', 'travis'], const='strict',
help='Run signed-off check (%(choices)s; default type if not given: %(const)s)')
parser.add_argument('--check-cppcheck', action='store_true',
help='Run cppcheck')
parser.add_argument('--check-doxygen', action='store_true',
help='Run doxygen')
parser.add_argument('--check-pylint', action='store_true',
help='Run pylint')
parser.add_argument('--check-vera', action='store_true',
help='Run vera check')
parser.add_argument('--check-license', action='store_true',
help='Run license check')
parser.add_argument('--check-magic-strings', action='store_true',
help='Run "magic string source code generator should be executed" check')
parser.add_argument('--jerry-debugger', action='store_true',
help='Run jerry-debugger tests')
parser.add_argument('--jerry-tests', action='store_true',
help='Run jerry-tests')
parser.add_argument('--jerry-test-suite', action='store_true',
help='Run jerry-test-suite')
parser.add_argument('--test262', action='store_true',
help='Run test262')
parser.add_argument('--unittests', action='store_true',
help='Run unittests (including doctests)')
parser.add_argument('--buildoption-test', action='store_true',
help='Run buildoption-test')
parser.add_argument('--all', '--precommit', action='store_true',
help='Run all tests')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
script_args = parser.parse_args()
return script_args
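# Illustrative invocations (assumed to be run from the repository root; the
# flags are the ones defined in get_arguments() above):
#
#   ./tools/run-tests.py --jerry-tests --quiet
#   ./tools/run-tests.py --unittests --buildoptions=--lto=on
#   ./tools/run-tests.py --all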
BINARY_CACHE = {}
def create_binary(job, options):
build_args = job.build_args[:]
if options.buildoptions:
for option in options.buildoptions.split(','):
if option not in build_args:
build_args.append(option)
build_cmd = [settings.BUILD_SCRIPT] + build_args
build_dir_path = os.path.join(options.outdir, job.name)
build_cmd.append('--builddir=%s' % build_dir_path)
install_dir_path = os.path.join(build_dir_path, 'local')
build_cmd.append('--install=%s' % install_dir_path)
if options.toolchain:
build_cmd.append('--toolchain=%s' % options.toolchain)
sys.stderr.write('Build command: %s\n' % ' '.join(build_cmd))
binary_key = tuple(sorted(build_args))
if binary_key in BINARY_CACHE:
ret, build_dir_path = BINARY_CACHE[binary_key]
sys.stderr.write('(skipping: already built at %s with returncode %d)\n' % (build_dir_path, ret))
return ret, build_dir_path
try:
subprocess.check_output(build_cmd)
ret = 0
except subprocess.CalledProcessError as err:
ret = err.returncode
BINARY_CACHE[binary_key] = (ret, build_dir_path)
return ret, build_dir_path
def get_binary_path(build_dir_path):
return os.path.join(build_dir_path, 'local', 'bin', 'jerry')
def hash_binary(bin_path):
blocksize = 65536
hasher = hashlib.sha1()
with open(bin_path, 'rb') as bin_file:
buf = bin_file.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = bin_file.read(blocksize)
return hasher.hexdigest()
def iterate_test_runner_jobs(jobs, options):
tested_paths = set()
tested_hashes = {}
for job in jobs:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
            yield job, ret_build, None
if build_dir_path in tested_paths:
sys.stderr.write('(skipping: already tested with %s)\n' % build_dir_path)
continue
else:
tested_paths.add(build_dir_path)
bin_path = get_binary_path(build_dir_path)
bin_hash = hash_binary(bin_path)
if bin_hash in tested_hashes:
sys.stderr.write('(skipping: already tested with equivalent %s)\n' % tested_hashes[bin_hash])
continue
else:
tested_hashes[bin_hash] = build_dir_path
test_cmd = [settings.TEST_RUNNER_SCRIPT, bin_path]
yield job, ret_build, test_cmd
def run_check(runnable):
sys.stderr.write('Test command: %s\n' % ' '.join(runnable))
try:
ret = subprocess.check_call(runnable)
except subprocess.CalledProcessError as err:
return err.returncode
return ret
def run_jerry_debugger_tests(options):
ret_build = ret_test = 0
for job in DEBUGGER_TEST_OPTIONS:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
break
for test_file in os.listdir(settings.DEBUGGER_TESTS_DIR):
if test_file.endswith(".cmd"):
test_case, _ = os.path.splitext(test_file)
test_case_path = os.path.join(settings.DEBUGGER_TESTS_DIR, test_case)
test_cmd = [
settings.DEBUGGER_TEST_RUNNER_SCRIPT,
get_binary_path(build_dir_path),
settings.DEBUGGER_CLIENT_SCRIPT,
os.path.relpath(test_case_path, settings.PROJECT_DIR)
]
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_jerry_tests(options):
ret_build = ret_test = 0
for job, ret_build, test_cmd in iterate_test_runner_jobs(JERRY_TESTS_OPTIONS, options):
if ret_build:
break
test_cmd.append(settings.JERRY_TESTS_DIR)
if options.quiet:
test_cmd.append("-q")
skip_list = []
if '--profile=es2015-subset' in job.build_args:
skip_list.append(r"es5.1\/")
else:
skip_list.append(r"es2015\/")
if options.skip_list:
skip_list.append(options.skip_list)
if skip_list:
test_cmd.append("--skip-list=" + ",".join(skip_list))
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_jerry_test_suite(options):
ret_build = ret_test = 0
for job, ret_build, test_cmd in iterate_test_runner_jobs(JERRY_TEST_SUITE_OPTIONS, options):
if ret_build:
break
if '--profile=minimal' in job.build_args:
test_cmd.append(settings.JERRY_TEST_SUITE_MINIMAL_LIST)
elif '--profile=es2015-subset' in job.build_args:
test_cmd.append(settings.JERRY_TEST_SUITE_DIR)
else:
test_cmd.append(settings.JERRY_TEST_SUITE_ES51_LIST)
if options.quiet:
test_cmd.append("-q")
if options.skip_list:
test_cmd.append("--skip-list=" + options.skip_list)
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_test262_test_suite(options):
ret_build = ret_test = 0
for job in TEST262_TEST_SUITE_OPTIONS:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
break
test_cmd = [
settings.TEST262_RUNNER_SCRIPT,
get_binary_path(build_dir_path),
settings.TEST262_TEST_SUITE_DIR
]
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_unittests(options):
ret_build = ret_test = 0
for job in JERRY_UNITTESTS_OPTIONS:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
break
ret_test |= run_check([
settings.UNITTEST_RUNNER_SCRIPT,
os.path.join(build_dir_path, 'tests'),
"-q" if options.quiet else "",
])
return ret_build | ret_test
def run_buildoption_test(options):
for job in JERRY_BUILDOPTIONS:
ret, _ = create_binary(job, options)
if ret:
break
return ret
Check = collections.namedtuple('Check', ['enabled', 'runner', 'arg'])
def main(options):
checks = [
Check(options.check_signed_off, run_check, [settings.SIGNED_OFF_SCRIPT]
+ {'tolerant': ['--tolerant'], 'travis': ['--travis']}.get(options.check_signed_off, [])),
Check(options.check_cppcheck, run_check, [settings.CPPCHECK_SCRIPT]),
Check(options.check_doxygen, run_check, [settings.DOXYGEN_SCRIPT]),
Check(options.check_pylint, run_check, [settings.PYLINT_SCRIPT]),
Check(options.check_vera, run_check, [settings.VERA_SCRIPT]),
Check(options.check_license, run_check, [settings.LICENSE_SCRIPT]),
Check(options.check_magic_strings, run_check, [settings.MAGIC_STRINGS_SCRIPT]),
Check(options.jerry_debugger, run_jerry_debugger_tests, options),
Check(options.jerry_tests, run_jerry_tests, options),
Check(options.jerry_test_suite, run_jerry_test_suite, options),
Check(options.test262, run_test262_test_suite, options),
Check(options.unittests, run_unittests, options),
Check(options.buildoption_test, run_buildoption_test, options),
]
for check in checks:
if check.enabled or options.all:
ret = check.runner(check.arg)
if ret:
sys.exit(ret)
if __name__ == "__main__":
main(get_arguments())
| yichoi/jerryscript | tools/run-tests.py | Python | apache-2.0 | 16,290 |
import math
def isPrime(num):
if num < 2:
        return False  # 0 and 1 are not prime
    # A number cannot have a factor pair in which both factors exceed its
    # square root (e.g. 60 * ? = 100 is impossible), so checking divisors up
    # to sqrt(num) is enough.
boundary = int(math.sqrt(num)) + 1
for i in range(2, boundary):
if num % i == 0:
return False
return True
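# e.g. isPrime(97) -> True, isPrime(100) -> False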
def primeSieve(size):
    sieve = [True] * size  # a slot is set to False once it is known to be a composite
    sieve[0] = False
    sieve[1] = False  # 0 and 1 are not prime
    # As with isPrime above, multiples only need to be crossed out for bases up to sqrt(size).
boundary = int(math.sqrt(size)) + 1
for i in range(2, boundary):
        pointer = i * 2  # start position: for i = 3 (itself prime), its multiples 6, 9, 12, ... are not prime
while pointer < size:
sieve[pointer] = False
pointer += i
    ret = []  # contains all the prime numbers within "size"
for i in range(size):
if sieve[i] == True:
ret.append(str(i))
return ret
if __name__ == '__main__':
primes = primeSieve(100)
primesString = ", ".join(primes)
print("prime : ", primesString)
'''
prime : 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97
''' | songzhw/Hello-kotlin | Python101/src/math/PrimeSieve.py | Python | apache-2.0 | 1,333 |
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pyparsing as pp
uninary_operators = ("not", )
binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", u"ne",
u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠", u"≥",
u"≤", u"like" "in")
multiple_operators = (u"and", u"or", u"∧", u"∨")
operator = pp.Regex(u"|".join(binary_operator))
null = pp.Regex("None|none|null").setParseAction(pp.replaceWith(None))
boolean = "False|True|false|true"
boolean = pp.Regex(boolean).setParseAction(lambda t: t[0].lower() == "true")
hex_string = lambda n: pp.Word(pp.hexnums, exact=n)
uuid = pp.Combine(hex_string(8) + ("-" + hex_string(4)) * 3 +
"-" + hex_string(12))
number = r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?"
number = pp.Regex(number).setParseAction(lambda t: float(t[0]))
identifier = pp.Word(pp.alphas, pp.alphanums + "_")
quoted_string = pp.QuotedString('"') | pp.QuotedString("'")
comparison_term = pp.Forward()
in_list = pp.Group(pp.Suppress('[') +
pp.Optional(pp.delimitedList(comparison_term)) +
pp.Suppress(']'))("list")
comparison_term << (null | boolean | uuid | identifier | number |
quoted_string | in_list)
condition = pp.Group(comparison_term + operator + comparison_term)
expr = pp.operatorPrecedence(condition, [
("not", 1, pp.opAssoc.RIGHT, ),
("and", 2, pp.opAssoc.LEFT, ),
("∧", 2, pp.opAssoc.LEFT, ),
("or", 2, pp.opAssoc.LEFT, ),
("∨", 2, pp.opAssoc.LEFT, ),
])
def _parsed_query2dict(parsed_query):
result = None
while parsed_query:
part = parsed_query.pop()
if part in binary_operator:
result = {part: {parsed_query.pop(): result}}
elif part in multiple_operators:
if result.get(part):
result[part].append(
_parsed_query2dict(parsed_query.pop()))
else:
result = {part: [result]}
elif part in uninary_operators:
result = {part: result}
elif isinstance(part, pp.ParseResults):
kind = part.getName()
if kind == "list":
res = part.asList()
else:
res = _parsed_query2dict(part)
if result is None:
result = res
elif isinstance(result, dict):
list(result.values())[0].append(res)
else:
result = part
return result
def search_query_builder(query):
parsed_query = expr.parseString(query)[0]
return _parsed_query2dict(parsed_query)
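# Illustrative examples (assumed behaviour, derived from the grammar above):
#   search_query_builder("state=active")
#       -> {"=": {"state": "active"}}
#   search_query_builder("state=active and avg>10")
#       -> roughly {"and": [{">": {"avg": 10.0}}, {"=": {"state": "active"}}]}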
def list2cols(cols, objs):
return cols, [tuple([o[k] for k in cols])
for o in objs]
def format_string_list(objs, field):
objs[field] = ", ".join(objs[field])
def format_dict_list(objs, field):
objs[field] = "\n".join(
"- " + ", ".join("%s: %s" % (k, v)
for k, v in elem.items())
for elem in objs[field])
def format_move_dict_to_root(obj, field):
for attr in obj[field]:
obj["%s/%s" % (field, attr)] = obj[field][attr]
del obj[field]
def format_archive_policy(ap):
format_dict_list(ap, "definition")
format_string_list(ap, "aggregation_methods")
def dict_from_parsed_args(parsed_args, attrs):
d = {}
for attr in attrs:
value = getattr(parsed_args, attr)
if value is not None:
d[attr] = value
return d
def dict_to_querystring(objs):
return "&".join(["%s=%s" % (k, v)
for k, v in objs.items()
if v is not None])
| chungg/python-aodhclient | aodhclient/utils.py | Python | apache-2.0 | 4,174 |
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from solum import objects
from solum.objects import extension as abstract_extension
from solum.objects import operation as abstract_operation
from solum.objects import plan as abstract_plan
from solum.objects import sensor as abstract_sensor
from solum.objects import service as abstract_srvc
from solum.objects.sqlalchemy import extension
from solum.objects.sqlalchemy import operation
from solum.objects.sqlalchemy import plan
from solum.objects.sqlalchemy import sensor
from solum.objects.sqlalchemy import service
def load():
"""Activate the sqlalchemy backend."""
objects.registry.add(abstract_plan.Plan, plan.Plan)
objects.registry.add(abstract_plan.PlanList, plan.PlanList)
objects.registry.add(abstract_srvc.Service, service.Service)
objects.registry.add(abstract_srvc.ServiceList, service.ServiceList)
objects.registry.add(abstract_operation.Operation, operation.Operation)
objects.registry.add(abstract_operation.OperationList,
operation.OperationList)
objects.registry.add(abstract_sensor.Sensor, sensor.Sensor)
objects.registry.add(abstract_sensor.SensorList, sensor.SensorList)
objects.registry.add(abstract_extension.Extension, extension.Extension)
objects.registry.add(abstract_extension.ExtensionList,
extension.ExtensionList)
| shakamunyi/solum | solum/objects/sqlalchemy/__init__.py | Python | apache-2.0 | 1,920 |
default_app_config = 'providers.com.dailyssrn.apps.AppConfig'
| zamattiac/SHARE | providers/com/dailyssrn/__init__.py | Python | apache-2.0 | 62 |
# -*- coding:utf-8 -*-
#
# Copyright (c) 2017 mooncake. All Rights Reserved
####
# @brief
# @author Eric Yue ( [email protected] )
# @version 0.0.1
from distutils.core import setup
V = "0.7"
setup(
name = 'mooncake_utils',
packages = ['mooncake_utils'],
version = V,
description = 'just a useful utils for mooncake personal project.',
author = 'mooncake',
author_email = '[email protected]',
url = 'https://github.com/ericyue/mooncake_utils',
download_url = 'https://github.com/ericyue/mooncake_utils/archive/%s.zip' % V,
keywords = ['utils','data','machine-learning'], # arbitrary keywords
classifiers = [],
)
| ericyue/mooncake_utils | setup.py | Python | apache-2.0 | 646 |
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import re
import mock
import six
from osprofiler import profiler
from osprofiler.tests import test
class ProfilerGlobMethodsTestCase(test.TestCase):
def test_get_profiler_not_inited(self):
profiler.clean()
self.assertIsNone(profiler.get())
def test_get_profiler_and_init(self):
p = profiler.init("secret", base_id="1", parent_id="2")
self.assertEqual(profiler.get(), p)
self.assertEqual(p.get_base_id(), "1")
        # NOTE(boris-42): until the first start() there is no trace id, so
        # get_id() falls back to the parent id.
self.assertEqual(p.get_id(), "2")
def test_start_not_inited(self):
profiler.clean()
profiler.start("name")
def test_start(self):
p = profiler.init("secret", base_id="1", parent_id="2")
p.start = mock.MagicMock()
profiler.start("name", info="info")
p.start.assert_called_once_with("name", info="info")
def test_stop_not_inited(self):
profiler.clean()
profiler.stop()
def test_stop(self):
p = profiler.init("secret", base_id="1", parent_id="2")
p.stop = mock.MagicMock()
profiler.stop(info="info")
p.stop.assert_called_once_with(info="info")
class ProfilerTestCase(test.TestCase):
def test_profiler_get_shorten_id(self):
uuid_id = "4e3e0ec6-2938-40b1-8504-09eb1d4b0dee"
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
result = prof.get_shorten_id(uuid_id)
expected = "850409eb1d4b0dee"
self.assertEqual(expected, result)
def test_profiler_get_shorten_id_int(self):
short_id_int = 42
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
result = prof.get_shorten_id(short_id_int)
expected = "2a"
self.assertEqual(expected, result)
def test_profiler_get_base_id(self):
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
self.assertEqual(prof.get_base_id(), "1")
@mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
def test_profiler_get_parent_id(self, mock_generate_uuid):
mock_generate_uuid.return_value = "42"
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
prof.start("test")
self.assertEqual(prof.get_parent_id(), "2")
@mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
def test_profiler_get_base_id_unset_case(self, mock_generate_uuid):
mock_generate_uuid.return_value = "42"
prof = profiler._Profiler("secret")
self.assertEqual(prof.get_base_id(), "42")
self.assertEqual(prof.get_parent_id(), "42")
@mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
def test_profiler_get_id(self, mock_generate_uuid):
mock_generate_uuid.return_value = "43"
prof = profiler._Profiler("secret")
prof.start("test")
self.assertEqual(prof.get_id(), "43")
@mock.patch("osprofiler.profiler.datetime")
@mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
@mock.patch("osprofiler.profiler.notifier.notify")
def test_profiler_start(self, mock_notify, mock_generate_uuid,
mock_datetime):
mock_generate_uuid.return_value = "44"
now = datetime.datetime.utcnow()
mock_datetime.datetime.utcnow.return_value = now
info = {"some": "info"}
payload = {
"name": "test-start",
"base_id": "1",
"parent_id": "2",
"trace_id": "44",
"info": info,
"timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
}
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
prof.start("test", info=info)
mock_notify.assert_called_once_with(payload)
@mock.patch("osprofiler.profiler.datetime")
@mock.patch("osprofiler.profiler.notifier.notify")
def test_profiler_stop(self, mock_notify, mock_datetime):
now = datetime.datetime.utcnow()
mock_datetime.datetime.utcnow.return_value = now
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
prof._trace_stack.append("44")
prof._name.append("abc")
info = {"some": "info"}
prof.stop(info=info)
payload = {
"name": "abc-stop",
"base_id": "1",
"parent_id": "2",
"trace_id": "44",
"info": info,
"timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
}
mock_notify.assert_called_once_with(payload)
self.assertEqual(len(prof._name), 0)
self.assertEqual(prof._trace_stack, collections.deque(["1", "2"]))
def test_profiler_hmac(self):
hmac = "secret"
prof = profiler._Profiler(hmac, base_id="1", parent_id="2")
self.assertEqual(hmac, prof.hmac_key)
class WithTraceTestCase(test.TestCase):
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_trace(self, mock_start, mock_stop):
with profiler.Trace("a", info="a1"):
mock_start.assert_called_once_with("a", info="a1")
mock_start.reset_mock()
with profiler.Trace("b", info="b1"):
mock_start.assert_called_once_with("b", info="b1")
mock_stop.assert_called_once_with()
mock_stop.reset_mock()
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_trace_etype(self, mock_start, mock_stop):
def foo():
with profiler.Trace("foo"):
raise ValueError("bar")
self.assertRaises(ValueError, foo)
mock_start.assert_called_once_with("foo", info=None)
mock_stop.assert_called_once_with(info={
"etype": "ValueError",
"message": "bar"
})
@profiler.trace("function", info={"info": "some_info"})
def traced_func(i):
return i
@profiler.trace("hide_args", hide_args=True)
def trace_hide_args_func(a, i=10):
return (a, i)
@profiler.trace("foo", hide_args=True)
def test_fn_exc():
raise ValueError()
@profiler.trace("hide_result", hide_result=False)
def trace_with_result_func(a, i=10):
return (a, i)
class TraceDecoratorTestCase(test.TestCase):
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_duplicate_trace_disallow(self, mock_start, mock_stop):
@profiler.trace("test")
def trace_me():
pass
self.assertRaises(
ValueError,
profiler.trace("test-again", allow_multiple_trace=False),
trace_me)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_args(self, mock_start, mock_stop):
self.assertEqual(1, traced_func(1))
expected_info = {
"info": "some_info",
"function": {
"name": "osprofiler.tests.unit.test_profiler.traced_func",
"args": str((1,)),
"kwargs": str({})
}
}
mock_start.assert_called_once_with("function", info=expected_info)
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_args(self, mock_start, mock_stop):
self.assertEqual((1, 2), trace_hide_args_func(1, i=2))
expected_info = {
"function": {
"name": "osprofiler.tests.unit.test_profiler"
".trace_hide_args_func"
}
}
mock_start.assert_called_once_with("hide_args", info=expected_info)
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_exception(self, mock_start, mock_stop):
self.assertRaises(ValueError, test_fn_exc)
expected_info = {
"function": {
"name": "osprofiler.tests.unit.test_profiler.test_fn_exc"
}
}
expected_stop_info = {"etype": "ValueError", "message": ""}
mock_start.assert_called_once_with("foo", info=expected_info)
mock_stop.assert_called_once_with(info=expected_stop_info)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_result(self, mock_start, mock_stop):
self.assertEqual((1, 2), trace_with_result_func(1, i=2))
start_info = {
"function": {
"name": "osprofiler.tests.unit.test_profiler"
".trace_with_result_func",
"args": str((1,)),
"kwargs": str({"i": 2})
}
}
stop_info = {
"function": {
"result": str((1, 2))
}
}
mock_start.assert_called_once_with("hide_result", info=start_info)
mock_stop.assert_called_once_with(info=stop_info)
class FakeTracedCls(object):
def method1(self, a, b, c=10):
return a + b + c
def method2(self, d, e):
return d - e
def method3(self, g=10, h=20):
return g * h
def _method(self, i):
return i
@profiler.trace_cls("rpc", info={"a": 10})
class FakeTraceClassWithInfo(FakeTracedCls):
pass
@profiler.trace_cls("a", info={"b": 20}, hide_args=True)
class FakeTraceClassHideArgs(FakeTracedCls):
pass
@profiler.trace_cls("rpc", trace_private=True)
class FakeTracePrivate(FakeTracedCls):
pass
class FakeTraceStaticMethodBase(FakeTracedCls):
@staticmethod
def static_method(arg):
return arg
@profiler.trace_cls("rpc", trace_static_methods=True)
class FakeTraceStaticMethod(FakeTraceStaticMethodBase):
pass
@profiler.trace_cls("rpc")
class FakeTraceStaticMethodSkip(FakeTraceStaticMethodBase):
pass
class FakeTraceClassMethodBase(FakeTracedCls):
@classmethod
def class_method(cls, arg):
return arg
@profiler.trace_cls("rpc")
class FakeTraceClassMethodSkip(FakeTraceClassMethodBase):
pass
def py3_info(info):
# NOTE(boris-42): py33 I hate you.
info_py3 = copy.deepcopy(info)
new_name = re.sub("FakeTrace[^.]*", "FakeTracedCls",
info_py3["function"]["name"])
info_py3["function"]["name"] = new_name
return info_py3
def possible_mock_calls(name, info):
# NOTE(boris-42): py33 I hate you.
return [mock.call(name, info=info), mock.call(name, info=py3_info(info))]
class TraceClsDecoratorTestCase(test.TestCase):
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_args(self, mock_start, mock_stop):
fake_cls = FakeTraceClassWithInfo()
self.assertEqual(30, fake_cls.method1(5, 15))
expected_info = {
"a": 10,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceClassWithInfo.method1"),
"args": str((fake_cls, 5, 15)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_kwargs(self, mock_start, mock_stop):
fake_cls = FakeTraceClassWithInfo()
self.assertEqual(50, fake_cls.method3(g=5, h=10))
expected_info = {
"a": 10,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceClassWithInfo.method3"),
"args": str((fake_cls,)),
"kwargs": str({"g": 5, "h": 10})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_private(self, mock_start, mock_stop):
fake_cls = FakeTraceClassHideArgs()
self.assertEqual(10, fake_cls._method(10))
self.assertFalse(mock_start.called)
self.assertFalse(mock_stop.called)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_args(self, mock_start, mock_stop):
fake_cls = FakeTraceClassHideArgs()
self.assertEqual(40, fake_cls.method1(5, 15, c=20))
expected_info = {
"b": 20,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceClassHideArgs.method1"),
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("a", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_private_methods(self, mock_start, mock_stop):
fake_cls = FakeTracePrivate()
self.assertEqual(5, fake_cls._method(5))
expected_info = {
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTracePrivate._method"),
"args": str((fake_cls, 5)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
@test.testcase.skip(
"Static method tracing was disabled due the bug. This test should be "
"skipped until we find the way to address it.")
def test_static(self, mock_start, mock_stop):
fake_cls = FakeTraceStaticMethod()
self.assertEqual(25, fake_cls.static_method(25))
expected_info = {
"function": {
# fixme(boris-42): Static methods are treated differently in
# Python 2.x and Python 3.x. So in PY2 we
# expect to see method4 because method is
# static and doesn't have reference to class
# - and FakeTraceStatic.method4 in PY3
"name":
"osprofiler.tests.unit.test_profiler"
".method4" if six.PY2 else
"osprofiler.tests.unit.test_profiler.FakeTraceStatic"
".method4",
"args": str((25,)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_static_method_skip(self, mock_start, mock_stop):
self.assertEqual(25, FakeTraceStaticMethodSkip.static_method(25))
self.assertFalse(mock_start.called)
self.assertFalse(mock_stop.called)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_class_method_skip(self, mock_start, mock_stop):
self.assertEqual("foo", FakeTraceClassMethodSkip.class_method("foo"))
self.assertFalse(mock_start.called)
self.assertFalse(mock_stop.called)
@six.add_metaclass(profiler.TracedMeta)
class FakeTraceWithMetaclassBase(object):
__trace_args__ = {"name": "rpc",
"info": {"a": 10}}
def method1(self, a, b, c=10):
return a + b + c
def method2(self, d, e):
return d - e
def method3(self, g=10, h=20):
return g * h
def _method(self, i):
return i
class FakeTraceDummy(FakeTraceWithMetaclassBase):
def method4(self, j):
return j
class FakeTraceWithMetaclassHideArgs(FakeTraceWithMetaclassBase):
__trace_args__ = {"name": "a",
"info": {"b": 20},
"hide_args": True}
def method5(self, k, l):
return k + l
class FakeTraceWithMetaclassPrivate(FakeTraceWithMetaclassBase):
__trace_args__ = {"name": "rpc",
"trace_private": True}
def _new_private_method(self, m):
return 2 * m
class TraceWithMetaclassTestCase(test.TestCase):
def test_no_name_exception(self):
def define_class_with_no_name():
@six.add_metaclass(profiler.TracedMeta)
class FakeTraceWithMetaclassNoName(FakeTracedCls):
pass
self.assertRaises(TypeError, define_class_with_no_name, 1)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_args(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassBase()
self.assertEqual(30, fake_cls.method1(5, 15))
expected_info = {
"a": 10,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceWithMetaclassBase.method1"),
"args": str((fake_cls, 5, 15)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_kwargs(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassBase()
self.assertEqual(50, fake_cls.method3(g=5, h=10))
expected_info = {
"a": 10,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceWithMetaclassBase.method3"),
"args": str((fake_cls,)),
"kwargs": str({"g": 5, "h": 10})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_private(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassHideArgs()
self.assertEqual(10, fake_cls._method(10))
self.assertFalse(mock_start.called)
self.assertFalse(mock_stop.called)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_args(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassHideArgs()
self.assertEqual(20, fake_cls.method5(5, 15))
expected_info = {
"b": 20,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceWithMetaclassHideArgs.method5")
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("a", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_private_methods(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassPrivate()
self.assertEqual(10, fake_cls._new_private_method(5))
expected_info = {
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceWithMetaclassPrivate._new_private_method"),
"args": str((fake_cls, 5)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
| stackforge/osprofiler | osprofiler/tests/unit/test_profiler.py | Python | apache-2.0 | 21,196 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Google Chrome Cache files event formatter."""
import unittest
from plaso.formatters import chrome_cache
from tests.formatters import test_lib
class ChromeCacheEntryEventFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Chrome Cache entry event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = chrome_cache.ChromeCacheEntryEventFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = chrome_cache.ChromeCacheEntryEventFormatter()
expected_attribute_names = [u'original_url']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
if __name__ == '__main__':
unittest.main()
| dc3-plaso/plaso | tests/formatters/chrome_cache.py | Python | apache-2.0 | 925 |
import os
import subprocess
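# Rebuild the binary with `go build`, then compare the new md5/size against the
# previous build and check whether the process is still running: if it died,
# its log is mailed out; if anything changed or it is not running, the old
# process is killed and the freshly built binary is started.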
name = "gobuildmaster"
current_hash = ""
for line in os.popen("md5sum " + name).readlines():
current_hash = line.split(' ')[0]
# Move the old version over
for line in os.popen('cp ' + name + ' old' + name).readlines():
print line.strip()
# Rebuild
for line in os.popen('go build').readlines():
print line.strip()
size_1 = os.path.getsize('./old' + name)
size_2 = os.path.getsize('./' + name)
lines = os.popen('ps -ef | grep ' + name).readlines()
running = False
for line in lines:
if "./" + name in line:
running = True
new_hash = ""
for line in os.popen("md5sum " + name).readlines():
new_hash = line.split(' ')[0]
if size_1 != size_2 or new_hash != current_hash or not running:
if not running:
for line in os.popen('cat out.txt | mail -E -s "Crash Report ' + name + '" [email protected]').readlines():
pass
for line in os.popen('echo "" > out.txt').readlines():
pass
for line in os.popen('killall ' + name).readlines():
pass
subprocess.Popen(['./' + name])
| brotherlogic/gobuildmaster | BuildAndRun.py | Python | apache-2.0 | 1,102 |
import unittest2
import helper
import simplejson as json
from nose.plugins.attrib import attr
# ListsClient was not imported in the original file; the import path below is
# assumed from the package layout (happy/lists.py).
from happy.lists import ListsClient
PORTAL_ID = 62515
class ListsClientTest(unittest2.TestCase):
"""
Unit tests for the HubSpot List API Python wrapper (hapipy) client.
This file contains some unittest tests for the List API.
Questions, comments, etc: http://developers.hubspot.com
"""
def setUp(self):
self.client = ListsClient(**helper.get_options())
def tearDown(self):
pass
@attr('api')
def test_get_list(self):
# create a list to get
dummy_data = json.dumps(dict(
name='try_and_get_me',
dynamic=False,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# make sure it was created
        self.assertTrue(created_list['listId'])
        # the id number of the list the test is trying to get
        id_to_get = created_list['listId']
        # try and get it
        received_lists = self.client.get_list(id_to_get)
        # see if the test got the right list
        self.assertEqual(received_lists['lists'][0]['listId'], created_list['listId'])
        print "Got this list: %s" % json.dumps(received_lists['lists'][0])
# clean up
self.client.delete_list(id_to_get)
@attr('api')
def test_get_batch_lists(self):
# holds the ids of the lists being retrieved
list_ids = []
# make a list to get
dummy_data = json.dumps(dict(
name='first_test_list',
dynamic=False,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# make sure it was actually made
        self.assertTrue(created_list['listId'])
        # put the id of the newly made list in list_ids
        list_ids.append(created_list['listId'])
        # change the data a little and make another list
        dummy_data = json.dumps(dict(
            name='second_test_list',
            dynamic=False,
            portalId=PORTAL_ID
        ))
        created_list = self.client.create_list(dummy_data)
        # make sure it was actually made
        self.assertTrue(created_list['listId'])
        # put the id number in list_ids
        list_ids.append(created_list['listId'])
# try and get them
batch_lists = self.client.get_batch_lists(list_ids)
# make sure you got as many lists as you were searching for
self.assertEqual(len(list_ids), len(batch_lists['lists']))
# clean up
self.client.delete_list(list_ids[0])
self.client.delete_list(list_ids[1])
@attr('api')
def test_get_lists(self):
# try and get lists
        received_lists = self.client.get_lists()
        # see if the test got at least one
        if len(received_lists['lists']) == 0:
            self.fail("Unable to retrieve any lists")
        else:
            print "Got these lists %s" % json.dumps(received_lists)
@attr('api')
def test_get_static_lists(self):
# create a static list to get
dummy_data = json.dumps(dict(
name='static_test_list',
dynamic=False,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# make sure it was actually made
        self.assertTrue(created_list['listId'])
# this call will return 20 lists if not given another value
static_lists = self.client.get_static_lists()
if len(static_lists['lists']) == 0:
self.fail("Unable to retrieve any static lists")
else:
print "Found these static lists: %s" % json.dumps(static_lists)
# clean up
self.client.delete_list(created_list['listId'])
@attr('api')
def test_get_dynamic_lists(self):
# make a dynamic list to get
dummy_data = json.dumps(dict(
name='test_dynamic_list',
dynamic=True,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# make sure the dynamic list was made
self.assertTrue(created_list['listId'])
dynamic_lists = self.client.get_dynamic_lists()
if len(dynamic_lists['lists']) == 0:
self.fail("Unable to retrieve any dynamic lists")
else:
print "Found these dynamic lists: %s" % json.dumps(dynamic_lists)
# clean up
self.client.delete_list(created_list['listId'])
@attr('api')
def test_get_list_contacts(self):
# the id number of the list you want the contacts of
        which_list = None  # placeholder: supply the id of an existing list
        # try and get the contacts
        contacts = self.client.get_list_contacts(which_list)
        # make sure you get at least one
        self.assertTrue(len(contacts['contacts']))
        print "Got these contacts: %s from this list: %s" % (json.dumps(contacts), which_list)
@attr('api')
def test_get_list_contacts_recent(self):
# the id number of the list you want the recent contacts of
        which_list = None  # placeholder: supply the id of an existing list
recent_contacts = self.client.get_list_contacts_recent(which_list)
if len(recent_contacts['lists']) == 0:
self.fail("Did not find any recent contacts")
else:
print "Found these recent contacts: %s" % json.dumps(recent_conacts)
@attr('api')
def test_create_list(self):
# the data for the list the test is making
dummy_data = json.dumps(dict(
            name='test_list',
dynamic=False,
portalId=PORTAL_ID
))
# try and make the list
created_list = self.client.create_list(dummy_data)
# make sure it was created
        if not created_list.get('listId'):
            self.fail("Did not create the list")
        else:
            print "Created this list: %s" % json.dumps(created_list)
        # clean up
        self.client.delete_list(created_list['listId'])
@attr('api')
def test_update_list(self):
# make a list to update
dummy_data = json.dumps(dict(
name='delete_me',
dynamic=False,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# make sure it was actually made
        self.assertTrue(created_list['listId'])
# get the id number of the list
update_list_id = created_list['listId']
# this is the data updating the list
update_data = json.dumps(dict(
list_name='really_delete_me',
))
# try and do the update
http_response = self.client.update_list(update_list_id, update_data)
if http_response >= 400:
self.fail("Unable to update list!")
else:
print("Updated a list!")
# clean up
self.client.delete_list(update_list_id)
@attr('api')
def test_add_contacts_to_list_from_emails(self):
# make a list to add contacts to
dummy_data = json.dumps(dict(
name='give_me_contact_emails',
dynamic=False,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# make sure it was actually made
        self.assertTrue(created_list['listId'])
# the id number of the list being added to
which_list = created_list['listId']
        # the emails of the contacts being added (placeholder: supply real addresses)
        emails = json.dumps(dict(
            emails=[]
        ))
# try and add the contacts
self.client.add_contacts_to_list_from_emails(which_list, emails)
@attr('api')
def test_add_contact_to_list(self):
# make a list to add a contact to
dummy_data = json.dumps(dict(
name='add_a_contact',
dynamic=False,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# make sure it was actually made
self.assertTrue(created_list['listId'])
# the id number of the list the contact is being added to
which_list = created_list['listId']
# the id number of the contact being added to the list
        which_contact = None  # placeholder: supply the id of an existing contact
        added = self.client.add_contact_to_list(which_list, which_contact)
        if added['updated'] == which_contact:
            print "Successfully added contact: %s to list: %s" % (which_contact, which_list)
            # if it worked, clean up
            self.client.delete_list(which_list)
        else:
            self.fail("Did not add contact: %s to list: %s" % (which_contact, which_list))
@attr('api')
def test_remove_contact_from_list(self):
# make a list to remove a contact from
fake_data = json.dumps(dict(
            name='remove_this_contact',
dynamic=False,
portalId=PORTAL_ID
))
created_list = self.client.create_list(fake_data)
# make sure it was actually made
self.assertTrue(created_list['listId'])
# the id number of the list the contact is being deleted from
which_list = created_list['listId']
# the id number of the contact being deleted
        which_contact = None  # placeholder: supply the id of an existing contact
# put the contact in the list so it can be removed
added = self.client.add_contact_to_list(which_list, which_contact)
# make sure it was added
self.assertTrue(added['updated'])
# try and remove it
removed = self.client.remove_contact_from_list(which_list, which_contact)
# check if it was actually removed
if removed['updated'] == which_contact:
print "Succesfully removed contact: %s from list: %s" % which_contact, which_list
# clean up
self.client.delete_list(created_list['listId'])
else:
self.fail("Did not remove contact %s from list: %s" % which_contact, which_list)
@attr('api')
def test_delete_list(self):
# make a list to delete
dummy_data = json.dumps(dict(
name='should_be_deleted',
dynamic=False,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# check if it was actually made
self.assertTrue(created_list['listId'])
# the id number of the list being deleted
id_to_delete = created_list['listId']
# try deleting it
self.client.delete_list(id_to_delete)
# try and get the list that should have been deleted
check = self.client.get_list(id_to_delete)
# check should not have any lists
self.assertEqual(len(check['lists']), 0)
print "Sucessfully deleted a test list"
@attr('api')
def test_refresh_list(self):
# make a dynamic list to refresh
dummy_data = json.dumps(dict(
name='refresh_this_list',
dynamic=True,
portalId=PORTAL_ID
))
created_list = self.client.create_list(dummy_data)
# make sure it actually made the list
self.assertTrue(created_list['listId'])
# do the refresh
refresh_response = self.client.refresh_list(created_list['listId'])
# check if it worked
if refresh_response >= 400:
self.fail("Failed to refresh list: %s" % json.dumps(created_list))
else:
print "Succesfully refreshed list: %s" % json.dumps(created_list)
# clean up
self.client.delete_list(created_list['listId'])
if __name__ == "__main__":
unittest2.main()
| jonathan-s/happy | happy/test/test_lists.py | Python | apache-2.0 | 11,513 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from six.moves.urllib import parse as urllib
from tempest_lib import exceptions as lib_exc
from tempest.api_schema.response.compute.v2_1 import images as schema
from tempest.common import service_client
from tempest.common import waiters
class ImagesClientJSON(service_client.ServiceClient):
def create_image(self, server_id, name, meta=None):
"""Creates an image of the original server."""
post_body = {
'createImage': {
'name': name,
}
}
if meta is not None:
post_body['createImage']['metadata'] = meta
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
self.validate_response(schema.create_image, resp, body)
return service_client.ResponseBody(resp, body)
def list_images(self, params=None):
"""Returns a list of all images filtered by any parameters."""
url = 'images'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_images, resp, body)
return service_client.ResponseBodyList(resp, body['images'])
def list_images_with_detail(self, params=None):
"""Returns a detailed list of images filtered by any parameters."""
url = 'images/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_images_details, resp, body)
return service_client.ResponseBodyList(resp, body['images'])
def show_image(self, image_id):
"""Returns the details of a single image."""
resp, body = self.get("images/%s" % str(image_id))
self.expected_success(200, resp.status)
body = json.loads(body)
self.validate_response(schema.get_image, resp, body)
return service_client.ResponseBody(resp, body['image'])
def delete_image(self, image_id):
"""Deletes the provided image."""
resp, body = self.delete("images/%s" % str(image_id))
self.validate_response(schema.delete, resp, body)
return service_client.ResponseBody(resp, body)
def wait_for_image_status(self, image_id, status):
"""Waits for an image to reach a given status."""
waiters.wait_for_image_status(self, image_id, status)
def list_image_metadata(self, image_id):
"""Lists all metadata items for an image."""
resp, body = self.get("images/%s/metadata" % str(image_id))
body = json.loads(body)
self.validate_response(schema.image_metadata, resp, body)
return service_client.ResponseBody(resp, body['metadata'])
def set_image_metadata(self, image_id, meta):
"""Sets the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.put('images/%s/metadata' % str(image_id), post_body)
body = json.loads(body)
self.validate_response(schema.image_metadata, resp, body)
return service_client.ResponseBody(resp, body['metadata'])
def update_image_metadata(self, image_id, meta):
"""Updates the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('images/%s/metadata' % str(image_id), post_body)
body = json.loads(body)
self.validate_response(schema.image_metadata, resp, body)
return service_client.ResponseBody(resp, body['metadata'])
def get_image_metadata_item(self, image_id, key):
"""Returns the value for a specific image metadata key."""
resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key))
body = json.loads(body)
self.validate_response(schema.image_meta_item, resp, body)
return service_client.ResponseBody(resp, body['meta'])
def set_image_metadata_item(self, image_id, key, meta):
"""Sets the value for a specific image metadata key."""
post_body = json.dumps({'meta': meta})
resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
post_body)
body = json.loads(body)
self.validate_response(schema.image_meta_item, resp, body)
return service_client.ResponseBody(resp, body['meta'])
def delete_image_metadata_item(self, image_id, key):
"""Deletes a single image metadata key/value pair."""
resp, body = self.delete("images/%s/metadata/%s" %
(str(image_id), key))
self.validate_response(schema.delete, resp, body)
return service_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.show_image(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'image'
| yamt/tempest | tempest/services/compute/json/images_client.py | Python | apache-2.0 | 5,741 |
"""A client for the REST API of imeji instances."""
import logging
from collections import OrderedDict
import requests
from six import string_types
from pyimeji import resource
from pyimeji.config import Config
log = logging.getLogger(__name__)
class ImejiError(Exception):
def __init__(self, message, error):
super(ImejiError, self).__init__(message)
self.error = error.get('error') if isinstance(error, dict) else error
class GET(object):
"""Handle GET requests.
This includes requests
- to retrieve single objects,
- to fetch lists of object references (which are returned as `OrderedDict` mapping
object `id` to additional metadata present in the response).
"""
def __init__(self, api, name):
"""Initialize a handler.
:param api: An Imeji API instance.
:param name: Name specifying the kind of object(s) to retrieve. We check whether\
this name has a plural "s" to determine if a list is to be retrieved.
"""
self._list = name.endswith('s')
self.rsc = getattr(resource, (name[:-1] if self._list else name).capitalize())
self.api = api
self.name = name
self.path = name
if not self._list:
self.path += 's'
def __call__(self, id='', **kw):
"""Calling the handler initiates an HTTP request to the imeji server.
:param id: If a single object is to be retrieved it must be specified by id.
:return: An OrderedDict mapping id to additional metadata for lists, a \
:py:class:`pyimeji.resource.Resource` instance for single objects.
"""
if not self._list and not id:
raise ValueError('no id given')
if id:
id = '/' + id
res = self.api._req('/%s%s' % (self.path, id), params=kw)
if not self._list:
return self.rsc(res, self.api)
return OrderedDict([(d['id'], d) for d in res])
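# Illustrative behaviour (mirrors the Imeji docstring below): api.collections()
# yields an OrderedDict keyed by collection id, while api.collection("<id>")
# yields a single Resource instance.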
class Imeji(object):
"""The client.
>>> api = Imeji(service_url='http://demo.imeji.org/imeji/')
>>> collection_id = list(api.collections().keys())[0]
>>> collection = api.collection(collection_id)
>>> collection = api.create('collection', title='the new collection')
>>> item = collection.add_item(fetchUrl='http://example.org')
>>> item.delete()
"""
def __init__(self, cfg=None, service_url=None):
self.cfg = cfg or Config()
self.service_url = service_url or self.cfg.get('service', 'url')
user = self.cfg.get('service', 'user', default=None)
password = self.cfg.get('service', 'password', default=None)
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
def _req(self, path, method='get', json=True, assert_status=200, **kw):
"""Make a request to the API of an imeji instance.
:param path: HTTP path.
:param method: HTTP method.
:param json: Flag signalling whether the response should be treated as JSON.
:param assert_status: Expected HTTP response status of a successful request.
:param kw: Additional keyword parameters will be handed through to the \
appropriate function of the requests library.
:return: The return value of the function of the requests library or a decoded \
JSON object/array.
"""
method = getattr(self.session, method.lower())
res = method(self.service_url + '/rest' + path, **kw)
status_code = res.status_code
if json:
try:
res = res.json()
except ValueError: # pragma: no cover
log.error(res.text[:1000])
raise
if assert_status:
if status_code != assert_status:
log.error(
'got HTTP %s, expected HTTP %s' % (status_code, assert_status))
log.error(res.text[:1000] if hasattr(res, 'text') else res)
raise ImejiError('Unexpected HTTP status code', res)
return res
def __getattr__(self, name):
"""Names of resource classes are accepted and resolved as dynamic attribute names.
This allows convenient retrieval of resources as api.<resource-class>(id=<id>),
or api.<resource-class>s(q='x').
"""
return GET(self, name)
def create(self, rsc, **kw):
if isinstance(rsc, string_types):
cls = getattr(resource, rsc.capitalize())
rsc = cls(kw, self)
return rsc.save()
def delete(self, rsc):
return rsc.delete()
def update(self, rsc, **kw):
for k, v in kw.items():
setattr(rsc, k, v)
return rsc.save()
| xrotwang/pyimeji | pyimeji/api.py | Python | apache-2.0 | 4,739 |
#! /usr/bin/python
import sys
import os
import json
import grpc
import time
import subprocess
from google.oauth2 import service_account
import google.oauth2.credentials
import google.auth.transport.requests
import google.auth.transport.grpc
from google.firestore.v1beta1 import firestore_pb2
from google.firestore.v1beta1 import firestore_pb2_grpc
from google.firestore.v1beta1 import document_pb2
from google.firestore.v1beta1 import document_pb2_grpc
from google.firestore.v1beta1 import common_pb2
from google.firestore.v1beta1 import common_pb2_grpc
from google.firestore.v1beta1 import write_pb2
from google.firestore.v1beta1 import write_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
def first_message(database, write):
messages = [
firestore_pb2.WriteRequest(database = database, writes = [])
]
for msg in messages:
yield msg
def generate_messages(database, writes, stream_id, stream_token):
    # `writes` could be a list and appended across several messages to perform
    # multiple Write operations; this example sends just one.
messages = [
firestore_pb2.WriteRequest(database=database, writes = []),
firestore_pb2.WriteRequest(database=database, writes = [writes], stream_id = stream_id, stream_token = stream_token)
]
for msg in messages:
yield msg
def main():
fl = os.path.dirname(os.path.abspath(__file__))
fn = os.path.join(fl, 'grpc.json')
with open(fn) as grpc_file:
item = json.load(grpc_file)
creds = item["grpc"]["Write"]["credentials"]
credentials = service_account.Credentials.from_service_account_file("{}".format(creds))
scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/datastore'])
http_request = google.auth.transport.requests.Request()
channel = google.auth.transport.grpc.secure_authorized_channel(scoped_credentials, http_request, 'firestore.googleapis.com:443')
stub = firestore_pb2_grpc.FirestoreStub(channel)
database = item["grpc"]["Write"]["database"]
name = item["grpc"]["Write"]["name"]
first_write = write_pb2.Write()
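    # The first WriteRequest carries no writes: it only opens the stream so the
    # server can reply with a stream_id and stream_token, which later requests
    # must echo back (see generate_messages above).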
responses = stub.Write(first_message(database, first_write))
for response in responses:
print("Received message %s" % (response.stream_id))
print(response.stream_token)
value_ = document_pb2.Value(string_value = "foo_boo")
update = document_pb2.Document(name=name, fields={"foo":value_})
writes = write_pb2.Write(update_mask=common_pb2.DocumentMask(field_paths = ["foo"]), update=update)
r2 = stub.Write(generate_messages(database, writes, response.stream_id, response.stream_token))
for r in r2:
print(r.write_results)
if __name__ == "__main__":
main()
| GoogleCloudPlatform/grpc-gcp-python | firestore/examples/end2end/src/Write.py | Python | apache-2.0 | 2,967 |
import sys
sys.path.append("helper")
import web
from helper import session
web.config.debug = False
urls = (
"/", "controller.start.index",
"/1", "controller.start.one",
"/2", "controller.start.two",
)
app = web.application(urls, globals())
sessions = session.Sessions()
if __name__ == "__main__":
app.run()
| 0x00/web.py-jinja2-pyjade-bootstrap | app.py | Python | apache-2.0 | 331 |
from typing import List
class Solution:
def partitionLabels(self, S: str) -> List[int]:
        lastPos, currMax = {}, -1
res = []
for i in range(0, 26):
c = chr(97+i)
lastPos[c] = S.rfind(c)
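        # Greedily extend the current partition to the furthest last occurrence
        # of any character seen so far; once i passes that boundary, a new
        # partition begins and its start index is appended to res (the diffs at
        # the end give the partition sizes).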
for i, c in enumerate(S):
# Encounter new index higher than currMax
if i > currMax:
res.append(currMax+1)
currMax = max(currMax, lastPos[c])
res.append(len(S))
ans = [res[i]-res[i-1] for i in range(1, len(res))]
return ans | saisankargochhayat/algo_quest | leetcode/763. Partition Labels/soln.py | Python | apache-2.0 | 555 |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def mnist_many_cols_gbm_large():
train = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
train.tail()
gbm_mnist = H2OGradientBoostingEstimator(ntrees=1,
max_depth=1,
min_rows=10,
learn_rate=0.01)
gbm_mnist.train(x=range(784), y=784, training_frame=train)
gbm_mnist.show()
if __name__ == "__main__":
pyunit_utils.standalone_test(mnist_many_cols_gbm_large)
else:
mnist_many_cols_gbm_large()
| madmax983/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_mnist_manyCols_gbm_large.py | Python | apache-2.0 | 712 |
#!/Users/wuga/Documents/website/wuga/env/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
duration = im.info.get("duration", 100)
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
duration = im.info.get("duration", 100)
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| wuga214/Django-Wuga | env/bin/player.py | Python | apache-2.0 | 2,120 |
# Copyright 2018 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from oslo_versionedobjects import base as object_base
from cyborg.common import exception
from cyborg.db import api as dbapi
from cyborg.objects import base
from cyborg.objects import fields as object_fields
from cyborg.objects.deployable import Deployable
from cyborg.objects.virtual_function import VirtualFunction
LOG = logging.getLogger(__name__)
@base.CyborgObjectRegistry.register
class PhysicalFunction(Deployable):
# Version 1.0: Initial version
VERSION = '1.0'
virtual_function_list = []
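    # VFs belonging to this PF; populated by get() and add_vf(), persisted
    # together with the PF by save().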
def create(self, context):
# To ensure the creating type is PF
if self.type != 'pf':
raise exception.InvalidDeployType()
super(PhysicalFunction, self).create(context)
def save(self, context):
"""In addition to save the pf, it should also save the
vfs associated with this pf
"""
# To ensure the saving type is PF
if self.type != 'pf':
raise exception.InvalidDeployType()
for exist_vf in self.virtual_function_list:
exist_vf.save(context)
super(PhysicalFunction, self).save(context)
def add_vf(self, vf):
"""add a vf object to the virtual_function_list.
If the vf already exists, it will ignore,
otherwise, the vf will be appended to the list
"""
if not isinstance(vf, VirtualFunction) or vf.type != 'vf':
raise exception.InvalidDeployType()
for exist_vf in self.virtual_function_list:
if base.obj_equal_prims(vf, exist_vf):
LOG.warning("The vf already exists")
return None
vf.parent_uuid = self.uuid
vf.root_uuid = self.root_uuid
vf_copy = copy.deepcopy(vf)
self.virtual_function_list.append(vf_copy)
def delete_vf(self, context, vf):
"""remove a vf from the virtual_function_list
if the vf does not exist, ignore it
"""
        for idx, exist_vf in enumerate(self.virtual_function_list):
if base.obj_equal_prims(vf, exist_vf):
removed_vf = self.virtual_function_list.pop(idx)
removed_vf.destroy(context)
return
LOG.warning("The removing vf does not exist!")
def destroy(self, context):
"""Delete a the pf from the DB."""
del self.virtual_function_list[:]
super(PhysicalFunction, self).destroy(context)
@classmethod
def get(cls, context, uuid):
"""Find a DB Physical Function and return an Obj Physical Function.
In addition, it will also finds all the Virtual Functions associated
with this Physical Function and place them in virtual_function_list
"""
db_pf = cls.dbapi.deployable_get(context, uuid)
obj_pf = cls._from_db_object(cls(context), db_pf)
pf_uuid = obj_pf.uuid
query = {"parent_uuid": pf_uuid, "type": "vf"}
db_vf_list = cls.dbapi.deployable_get_by_filters(context, query)
for db_vf in db_vf_list:
obj_vf = VirtualFunction.get(context, db_vf.uuid)
obj_pf.virtual_function_list.append(obj_vf)
return obj_pf
@classmethod
def get_by_filter(cls, context,
filters, sort_key='created_at',
sort_dir='desc', limit=None,
marker=None, join=None):
obj_dpl_list = []
filters['type'] = 'pf'
db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters,
sort_key=sort_key,
sort_dir=sort_dir,
limit=limit,
marker=marker,
join_columns=join)
for db_dpl in db_dpl_list:
obj_dpl = cls._from_db_object(cls(context), db_dpl)
query = {"parent_uuid": obj_dpl.uuid}
vf_get_list = VirtualFunction.get_by_filter(context,
query)
obj_dpl.virtual_function_list = vf_get_list
obj_dpl_list.append(obj_dpl)
return obj_dpl_list
@classmethod
def _from_db_object(cls, obj, db_obj):
"""Converts a physical function to a formal object.
:param obj: An object of the class.
:param db_obj: A DB model of the object
:return: The object of the class with the database entity added
"""
obj = Deployable._from_db_object(obj, db_obj)
if cls is PhysicalFunction:
obj.virtual_function_list = []
return obj
| openstack/nomad | cyborg/objects/physical_function.py | Python | apache-2.0 | 5,396 |
class Solution:
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
arr_pre_order = preorder.split(',')
stack = []
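        # Whenever the top of the stack is (node, '#', '#'), that subtree is
        # complete, so collapse it into a single '#'; a valid serialization
        # reduces to exactly ['#'] at the end.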
for node in arr_pre_order:
stack.append(node)
while len(stack) > 1 and stack[-1] == '#' and stack[-2] == '#':
stack.pop()
stack.pop()
if len(stack) < 1:
return False
stack[-1] = '#'
if len(stack) == 1 and stack[0] == '#':
return True
return False
| MingfeiPan/leetcode | stack/331.py | Python | apache-2.0 | 615 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: violinsolo
# Created on 08/03/2018
import tensorflow as tf
import numpy as np
x_shape = [5, 3, 3, 2]
x = np.arange(reduce(lambda t, s: t*s, list(x_shape), 1))
print x
x = x.reshape([5, 3, 3, -1])
print x.shape
X = tf.Variable(x)
with tf.Session() as sess:
m = tf.nn.moments(X, axes=[0])
# m = tf.nn.moments(X, axes=[0,1])
# m = tf.nn.moments(X, axes=np.arange(len(x_shape)-1))
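    # Note: with x_shape = [5, 3, 3, 2], axes=[0] reduces only over the batch
    # dimension, so mean/variance have shape (3, 3, 2); axes=[0, 1, 2] would
    # give per-channel statistics of shape (2,), as used by batch normalization.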
mean, variance = m
print(sess.run(m, feed_dict={X: x}))
# print(sess.run(m, feed_dict={X: x})) | iViolinSolo/DeepLearning-GetStarted | TF-Persion-ReID/test/test_tf_nn_moments.py | Python | apache-2.0 | 557 |
# -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
return '%.2fms' % msecs
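# e.g. friendly_time(1500) -> '1s500ms', friendly_time(61000) -> '1m1s'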
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context)
| piglei/uwsgi-sloth | uwsgi_sloth/template.py | Python | apache-2.0 | 1,040 |
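The friendly_time filter above renders the coarsest nonzero unit of a millisecond count; a few worked values (assuming the module above is importable as uwsgi_sloth.template):
from uwsgi_sloth.template import friendly_time
assert friendly_time(5) == '5.00ms'
assert friendly_time(1500) == '1s500ms'
assert friendly_time(65000) == '1m5s'
assert friendly_time(3661000) == '1h1m1s'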
# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import unittest
from zvmsdk.sdkwsgi.handlers import version
from zvmsdk import version as sdk_version
class HandlersRootTest(unittest.TestCase):
def setUp(self):
pass
def test_version(self):
req = mock.Mock()
ver_str = {"rc": 0,
"overallRC": 0,
"errmsg": "",
"modID": None,
"output":
{"api_version": version.APIVERSION,
"min_version": version.APIVERSION,
"version": sdk_version.__version__,
"max_version": version.APIVERSION,
},
"rs": 0}
res = version.version(req)
self.assertEqual('application/json', req.response.content_type)
# version_json = json.dumps(ver_res)
# version_str = utils.to_utf8(version_json)
ver_res = json.loads(req.response.body.decode('utf-8'))
self.assertEqual(ver_str, ver_res)
self.assertEqual('application/json', res.content_type)
| mfcloud/python-zvm-sdk | zvmsdk/tests/unit/sdkwsgi/handlers/test_version.py | Python | apache-2.0 | 1,674 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Journey'
db.create_table('places_journey', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('route', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Route'])),
('external_ref', self.gf('django.db.models.fields.TextField')()),
('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('runs_on_monday', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_on_tuesday', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_on_wednesday', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_on_thursday', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_on_friday', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_on_saturday', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_on_sunday', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_in_termtime', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_in_school_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_on_bank_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_on_non_bank_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
('runs_from', self.gf('django.db.models.fields.DateField')()),
('runs_until', self.gf('django.db.models.fields.DateField')()),
('vehicle', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('places', ['Journey'])
# Adding model 'ScheduledStop'
db.create_table('places_scheduledstop', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('entity', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'])),
('journey', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Journey'])),
('order', self.gf('django.db.models.fields.IntegerField')()),
('sta', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
('std', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
('times_estimated', self.gf('django.db.models.fields.BooleanField')(default=False)),
('fare_stage', self.gf('django.db.models.fields.BooleanField')(default=False)),
('activity', self.gf('django.db.models.fields.CharField')(default='B', max_length=1)),
))
db.send_create_signal('places', ['ScheduledStop'])
def backwards(self, orm):
# Deleting model 'Journey'
db.delete_table('places_journey')
# Deleting model 'ScheduledStop'
db.delete_table('places_scheduledstop')
models = {
'places.entity': {
'Meta': {'object_name': 'Entity'},
'_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Identifier']", 'symmetrical': 'False'}),
'_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'absolute_url': ('django.db.models.fields.TextField', [], {}),
'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.EntityGroup']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']", 'null': 'True'}),
'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityType']", 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"})
},
'places.entitygroup': {
'Meta': {'object_name': 'EntityGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ref_code': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"})
},
'places.entitygroupname': {
'Meta': {'unique_together': "(('entity_group', 'language_code'),)", 'object_name': 'EntityGroupName'},
'entity_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.EntityGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'places.entityname': {
'Meta': {'unique_together': "(('entity', 'language_code'),)", 'object_name': 'EntityName'},
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'places.entitytype': {
'Meta': {'object_name': 'EntityType'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityTypeCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"})
},
'places.entitytypecategory': {
'Meta': {'object_name': 'EntityTypeCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'places.entitytypename': {
'Meta': {'unique_together': "(('entity_type', 'language_code'),)", 'object_name': 'EntityTypeName'},
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.EntityType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'verbose_name': ('django.db.models.fields.TextField', [], {}),
'verbose_name_plural': ('django.db.models.fields.TextField', [], {}),
'verbose_name_singular': ('django.db.models.fields.TextField', [], {})
},
'places.identifier': {
'Meta': {'object_name': 'Identifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'places.journey': {
'Meta': {'object_name': 'Journey'},
'external_ref': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Route']"}),
'runs_from': ('django.db.models.fields.DateField', [], {}),
'runs_in_school_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_in_termtime': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_bank_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_non_bank_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_until': ('django.db.models.fields.DateField', [], {}),
'vehicle': ('django.db.models.fields.TextField', [], {})
},
'places.route': {
'Meta': {'object_name': 'Route'},
'external_ref': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operator': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'service_id': ('django.db.models.fields.TextField', [], {}),
'service_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'stops': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Entity']", 'through': "orm['places.StopOnRoute']", 'symmetrical': 'False'})
},
'places.scheduledstop': {
'Meta': {'ordering': "['order']", 'object_name': 'ScheduledStop'},
'activity': ('django.db.models.fields.CharField', [], {'default': "'B'", 'max_length': '1'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']"}),
'fare_stage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Journey']"}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'sta': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'std': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'times_estimated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'places.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'places.stoponroute': {
'Meta': {'ordering': "['order']", 'object_name': 'StopOnRoute'},
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Route']"})
}
}
complete_apps = ['places']
| mollyproject/mollyproject | molly/apps/places/migrations/0009_auto__add_journey__add_scheduledstop.py | Python | apache-2.0 | 13,531 |
# -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import ee
import math
from cmt.mapclient_qt import addToMap
from cmt.util.miscUtilities import safe_get_info
import modis_utilities
'''
Contains implementations of several simple MODIS-based flood detection algorithms.
'''
#==============================================================
def dem_threshold(domain, b):
'''Just use a height threshold on the DEM!'''
heightLevel = float(domain.algorithm_params['dem_threshold'])
dem = domain.get_dem().image
return dem.lt(heightLevel).select(['elevation'], ['b1'])
#==============================================================
def evi(domain, b):
'''Simple EVI based classifier'''
#no_clouds = b['b3'].lte(2100).select(['sur_refl_b03'], ['b1'])
criteria1 = b['EVI'].lte(0.3).And(b['LSWI'].subtract(b['EVI']).gte(0.05)).select(['sur_refl_b02'], ['b1'])
criteria2 = b['EVI'].lte(0.05).And(b['LSWI'].lte(0.0)).select(['sur_refl_b02'], ['b1'])
#return no_clouds.And(criteria1.Or(criteria2))
return criteria1.Or(criteria2)
def xiao(domain, b):
    '''Method from paper: Xiao, Boles, Frolking, et al. Mapping paddy rice agriculture in South and Southeast Asia using
multi-temporal MODIS images, Remote Sensing of Environment, 2006.
This method implements a very simple decision tree from several standard MODIS data products.
The default constants were tuned for (wet) rice paddy detection.
'''
return b['LSWI'].subtract(b['NDVI']).gte(0.05).Or(b['LSWI'].subtract(b['EVI']).gte(0.05)).select(['sur_refl_b02'], ['b1']);
#==============================================================
def get_diff(b):
'''Just the internals of the difference method'''
return b['b2'].subtract(b['b1']).select(['sur_refl_b02'], ['b1'])
def diff_learned(domain, b):
'''modis_diff but with the threshold calculation included (training image required)'''
    if domain.unflooded_domain is None:
print('No unflooded training domain provided.')
return None
unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
water_mask = modis_utilities.get_permanent_water_mask()
threshold = modis_utilities.compute_binary_threshold(get_diff(unflooded_b), water_mask, domain.bounds)
return modis_diff(domain, b, threshold)
def modis_diff(domain, b, threshold=None):
'''Compute (b2-b1) < threshold, a simple water detection index.
This method may be all that is needed in cases where the threshold can be hand tuned.
'''
    if threshold is None: # If no threshold value passed in, load it based on the data set.
threshold = float(domain.algorithm_params['modis_diff_threshold'])
return get_diff(b).lte(threshold)
#==============================================================
def get_dartmouth(b):
A = 500
B = 2500
return b['b2'].add(A).divide(b['b1'].add(B)).select(['sur_refl_b02'], ['b1'])
def dart_learned(domain, b):
'''The dartmouth method but with threshold calculation included (training image required)'''
    if domain.unflooded_domain is None:
print('No unflooded training domain provided.')
return None
unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
water_mask = modis_utilities.get_permanent_water_mask()
threshold = modis_utilities.compute_binary_threshold(get_dartmouth(unflooded_b), water_mask, domain.bounds)
return dartmouth(domain, b, threshold)
def dartmouth(domain, b, threshold=None):
'''A flood detection method from the Dartmouth Flood Observatory.
This method is a refinement of the simple b2-b1 detection method.
'''
    if threshold is None:
threshold = float(domain.algorithm_params['dartmouth_threshold'])
return get_dartmouth(b).lte(threshold)
#==============================================================
def get_mod_ndwi(b):
return b['b6'].subtract(b['b4']).divide(b['b4'].add(b['b6'])).select(['sur_refl_b06'], ['b1'])
def mod_ndwi_learned(domain, b):
    if domain.unflooded_domain is None:
print('No unflooded training domain provided.')
return None
unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
water_mask = modis_utilities.get_permanent_water_mask()
threshold = modis_utilities.compute_binary_threshold(get_mod_ndwi(unflooded_b), water_mask, domain.bounds)
return mod_ndwi(domain, b, threshold)
def mod_ndwi(domain, b, threshold=None):
    if threshold is None:
threshold = float(domain.algorithm_params['mod_ndwi_threshold'])
return get_mod_ndwi(b).lte(threshold)
#==============================================================
def get_fai(b):
'''Just the internals of the FAI method'''
return b['b2'].subtract(b['b1'].add(b['b5'].subtract(b['b1']).multiply((859.0 - 645) / (1240 - 645)))).select(['sur_refl_b02'], ['b1'])
def fai_learned(domain, b):
    if domain.unflooded_domain is None:
print('No unflooded training domain provided.')
return None
unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
water_mask = modis_utilities.get_permanent_water_mask()
threshold = modis_utilities.compute_binary_threshold(get_fai(unflooded_b), water_mask, domain.bounds)
return fai(domain, b, threshold)
def fai(domain, b, threshold=None):
''' Floating Algae Index. Method from paper: Feng, Hu, Chen, Cai, Tian, Gan,
Assessment of inundation changes of Poyang Lake using MODIS observations
between 2000 and 2010. Remote Sensing of Environment, 2012.
'''
    if threshold is None:
threshold = float(domain.algorithm_params['fai_threshold'])
return get_fai(b).lte(threshold)
| nasa/CrisisMappingToolkit | cmt/modis/simple_modis_algorithms.py | Python | apache-2.0 | 6,672 |
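The detectors above are per-pixel band arithmetic wrapped in Earth Engine calls; a NumPy sketch of the modis_diff and Dartmouth indices on toy reflectance values (band values and thresholds here are illustrative, not tuned):
import numpy as np
b1 = np.array([500.0, 800.0, 1200.0])     # red surface reflectance (sur_refl_b01)
b2 = np.array([300.0, 900.0, 2600.0])     # NIR surface reflectance (sur_refl_b02)
diff = b2 - b1                            # modis_diff index: water where diff <= threshold
dartmouth = (b2 + 500.0) / (b1 + 2500.0)  # Dartmouth ratio with A=500, B=2500
print(diff <= 300.0)      # hand-tuned threshold, as modis_diff would apply
print(dartmouth <= 0.75)  # threshold would normally come from algorithm_params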
# -*- coding:utf-8 -*-
__author__ = 'q00222219@huawei'
import time
from heat.openstack.common import log as logging
import heat.engine.resources.cloudmanager.commonutils as commonutils
import heat.engine.resources.cloudmanager.constant as constant
import heat.engine.resources.cloudmanager.exception as exception
import pdb
LOG = logging.getLogger(__name__)
class CascadedConfiger(object):
def __init__(self, public_ip_api, api_ip, domain, user, password,
cascading_domain, cascading_api_ip, cascaded_domain,
cascaded_api_ip, cascaded_api_subnet_gateway):
self.public_ip_api = public_ip_api
self.api_ip = api_ip
self.domain = domain
self.user = user
self.password = password
self.cascading_domain = cascading_domain
self.cascading_api_ip = cascading_api_ip
self.cascaded_domain = cascaded_domain
self.cascaded_ip = cascaded_api_ip
self.gateway = cascaded_api_subnet_gateway
def do_config(self):
start_time = time.time()
#pdb.set_trace()
LOG.info("start config cascaded, cascaded: %s" % self.domain)
        # wait until the cascaded host is reachable over the tunnel
commonutils.check_host_status(host=self.public_ip_api,
user=self.user,
password=self.password,
retry_time=500, interval=1)
# config cascaded host
self._config_az_cascaded()
cost_time = time.time() - start_time
LOG.info("first config success, cascaded: %s, cost time: %d"
% (self.domain, cost_time))
# check config result
for i in range(3):
try:
# check 90s
commonutils.check_host_status(
host=self.public_ip_api,
user=constant.VcloudConstant.ROOT,
password=constant.VcloudConstant.ROOT_PWD,
retry_time=15,
interval=1)
LOG.info("cascaded api is ready..")
break
except exception.CheckHostStatusFailure:
if i == 2:
LOG.error("check cascaded api failed ...")
break
LOG.error("check cascaded api error, "
"retry config cascaded ...")
self._config_az_cascaded()
cost_time = time.time() - start_time
LOG.info("config cascaded success, cascaded: %s, cost_time: %d"
% (self.domain, cost_time))
def _config_az_cascaded(self):
LOG.info("start config cascaded host, host: %s" % self.api_ip)
# modify dns server address
address = "/%(cascading_domain)s/%(cascading_ip)s,/%(cascaded_domain)s/%(cascaded_ip)s" \
% {"cascading_domain": self.cascading_domain,
"cascading_ip": self.cascading_api_ip,
"cascaded_domain":self.cascaded_domain,
"cascaded_ip":self.cascaded_ip}
for i in range(30):
try:
commonutils.execute_cmd_without_stdout(
host=self.public_ip_api,
user=self.user,
password=self.password,
cmd='cd %(dir)s; source /root/adminrc; sh %(script)s replace %(address)s'
% {"dir": constant.PublicConstant.SCRIPTS_DIR,
"script": constant.PublicConstant.
MODIFY_DNS_SERVER_ADDRESS,
"address": address})
break
except exception.SSHCommandFailure as e:
LOG.error("modify cascaded dns address error, cascaded: "
"%s, error: %s"
% (self.domain, e.format_message()))
time.sleep(1)
LOG.info(
"config cascaded dns address success, cascaded: %s"
% self.public_ip_api)
return True
| hgqislub/hybird-orchard | code/cloudmanager/install/hws/hws_cascaded_configer.py | Python | apache-2.0 | 4,081 |
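The DNS step in _config_az_cascaded reduces to formatting a '/domain/ip,/domain/ip' address argument for the remote modify-DNS script; a standalone sketch of that formatting with made-up domains and addresses:
def build_dns_address(cascading_domain, cascading_ip, cascaded_domain, cascaded_ip):
    # Same format string the configer passes as the 'replace' argument.
    return "/%(cascading_domain)s/%(cascading_ip)s,/%(cascaded_domain)s/%(cascaded_ip)s" % {
        "cascading_domain": cascading_domain,
        "cascading_ip": cascading_ip,
        "cascaded_domain": cascaded_domain,
        "cascaded_ip": cascaded_ip,
    }
print(build_dns_address("cascading.example.com", "10.0.0.2",
                        "az1.cascaded.example.com", "10.0.1.2"))
# /cascading.example.com/10.0.0.2,/az1.cascaded.example.com/10.0.1.2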
"""Neural network operations."""
from __future__ import absolute_import as _abs
from . import _make
def conv2d(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
weight_layout="OIHW",
out_layout="",
out_dtype=""):
r"""2D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.
In the default case, where the data_layout is `NCHW`
and weight_layout is `OIHW`, conv2d takes in
a data Tensor with shape `(batch_size, in_channels, height, width)`,
and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1])`
to produce an output Tensor with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
\mbox{data}[b, k, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] *
\mbox{weight}[c, k, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(`NCHW` for data and `OIHW` for weight), perform the computation,
then convert to the out_layout.
Parameters
----------
data : relay.Expr
The input data to the operator.
weight : relay.Expr
The weight expressions.
strides : tuple of int, optional
        The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial of the convolution kernel.
data_layout : str, optional
Layout of the input.
weight_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.conv2d(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
weight_layout, out_layout, out_dtype)
def softmax(data, axis):
r"""Computes softmax.
.. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: relay.Expr
The input data to the operator.
axis: int
The axis to sum over when computing softmax
"""
return _make.softmax(data, axis)
def log_softmax(data, axis):
r"""Computes log softmax.
.. math::
\text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: relay.Expr
The input data to the operator.
axis: int
The axis to sum over when computing softmax
"""
return _make.log_softmax(data, axis)
def max_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False):
r"""2D maximum pooling operator.
This operator takes data as input and does 2D max value calculation
    within a pool_size sized window, with striding defined by stride
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w) and pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
This operator accepts data layout specification.
Parameters
----------
data : relay.Expr
The input data to the operator.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.max_pool2d(data, pool_size, strides, padding,
layout, ceil_mode)
def avg_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False,
count_include_pad=False):
r"""2D average pooling operator.
This operator takes data as input and does 2D average value calculation
    within a pool_size sized window, with striding defined by stride
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w), pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
count_include_pad indicates including or excluding padded input values in computation.
This operator accepts data layout specification.
Parameters
----------
data : relay.Expr
The input data to the operator.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
To include padding to compute the average.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.avg_pool2d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)
def global_max_pool2d(data,
layout="NCHW"):
r"""2D global maximum pooling operator.
This operator takes data as input and does 2D max value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
\mbox{data}(b, c, m, n)
Parameters
----------
data : relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.global_max_pool2d(data, layout)
def global_avg_pool2d(data,
layout="NCHW"):
r"""2D global average pooling operator.
This operator takes data as input and does 2D average value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
\mbox{data}(b, c, m, n)
Parameters
----------
data : relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.global_avg_pool2d(data, layout)
def upsampling(data,
scale=1,
layout="NCHW",
method="NEAREST_NEIGHBOR"):
"""Upsampling.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w)
out will have a shape (n, c, h*scale, w*scale)
    method indicates the algorithm to be used while calculating the output value
and method can be one of ("BILINEAR", "NEAREST_NEIGHBOR")
Parameters
----------
data : relay.Expr
The input data to the operator.
scale : relay.Expr
The scale factor for upsampling.
layout : str, optional
Layout of the input.
method : str, optional
Scale method to used [NEAREST_NEIGHBOR, BILINEAR].
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.upsampling(data, scale, layout, method)
def batch_flatten(data):
"""BatchFlatten.
    This operator flattens all the dimensions except for the batch dimension,
    which results in a 2D output.
For data with shape ``(d1, d2, ..., dk)``
batch_flatten(data) returns reshaped output of shape ``(d1, d2*...*dk)``.
Parameters
----------
data : relay.Expr
The input data to the operator.
Returns
-------
result: relay.Expr
The Flattened result.
"""
return _make.batch_flatten(data)
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/relay/op/nn/nn.py | Python | apache-2.0 | 10,085 |
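The conv2d and pooling wrappers above leave output shapes implicit; the usual NCHW height/width arithmetic is the sketch below (plain Python, not TVM code; the kernel, stride and padding values are just examples):
def conv2d_out_hw(h, w, kernel, strides=(1, 1), padding=(0, 0), dilation=(1, 1)):
    # Standard output-size formula for a strided, padded, dilated 2D window.
    kh, kw = kernel
    out_h = (h + 2 * padding[0] - dilation[0] * (kh - 1) - 1) // strides[0] + 1
    out_w = (w + 2 * padding[1] - dilation[1] * (kw - 1) - 1) // strides[1] + 1
    return out_h, out_w
print(conv2d_out_hw(224, 224, (3, 3), strides=(2, 2), padding=(1, 1)))  # (112, 112)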
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: [email protected]
Repository: https://github.com/wateraccounting/wa
Module: Collect/MOD17
Description:
This module downloads MOD17 GPP data from
http://e4ftl01.cr.usgs.gov/. Use the MOD17.GPP_8daily function to
download and create 8 daily GPP images in Gtiff format.
The data is available between 2000-02-18 till present.
Examples:
from wa.Collect import MOD17
MOD17.GPP_8daily(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20',
latlim=[41, 45], lonlim=[-8, -5])
MOD17.NPP_yearly(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20',
latlim=[41, 45], lonlim=[-8, -5])
"""
from .GPP_8daily import main as GPP_8daily
from .NPP_yearly import main as NPP_yearly
__all__ = ['GPP_8daily', 'NPP_yearly']
__version__ = '0.1'
| wateraccounting/wa | Collect/MOD17/__init__.py | Python | apache-2.0 | 860 |
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Term aggregations."""
from __future__ import unicode_literals
from timesketch.lib.aggregators import manager
from timesketch.lib.aggregators import interface
def get_spec(field, limit=10, query='', query_dsl=''):
"""Returns aggregation specs for a term of filtered events.
The aggregation spec will summarize values of an attribute
whose events fall under a filter.
Args:
field (str): this denotes the event attribute that is used
for aggregation.
limit (int): How many buckets to return, defaults to 10.
query (str): the query field to run on all documents prior to
aggregating the results.
query_dsl (str): the query DSL field to run on all documents prior
to aggregating the results (optional). Either a query string
or a query DSL has to be present.
Raises:
ValueError: if neither query_string or query_dsl is provided.
Returns:
a dict value that can be used as an aggregation spec.
"""
if query:
query_filter = {
'bool': {
'must': [
{
'query_string': {
'query': query
}
}
]
}
}
elif query_dsl:
query_filter = query_dsl
else:
raise ValueError('Neither query nor query DSL provided.')
return {
'query': query_filter,
'aggs': {
'aggregation': {
'terms': {
'field': field,
'size': limit
}
}
}
}
class FilteredTermsAggregation(interface.BaseAggregator):
"""Query Filter Term Aggregation."""
NAME = 'query_bucket'
DISPLAY_NAME = 'Filtered Terms Aggregation'
DESCRIPTION = 'Aggregating values of a field after applying a filter'
SUPPORTED_CHARTS = frozenset(
['barchart', 'circlechart', 'hbarchart', 'linechart', 'table'])
FORM_FIELDS = [
{
'type': 'ts-dynamic-form-select-input',
'name': 'supported_charts',
'label': 'Chart type to render',
'options': list(SUPPORTED_CHARTS),
'display': True
},
{
'name': 'query_string',
'type': 'ts-dynamic-form-text-input',
'label': 'The filter query to narrow down the result set',
'placeholder': 'Query',
'default_value': '',
'display': True
},
{
'name': 'query_dsl',
'type': 'ts-dynamic-form-text-input',
'label': 'The filter query DSL to narrow down the result',
'placeholder': 'Query DSL',
'default_value': '',
'display': False
},
{
'name': 'field',
'type': 'ts-dynamic-form-text-input',
'label': 'What field to aggregate.',
'display': True
},
{
'type': 'ts-dynamic-form-datetime-input',
'name': 'start_time',
'label': (
'ISO formatted timestamp for the start time '
'of the aggregated data'),
'placeholder': 'Enter a start date for the aggregation',
'default_value': '',
'display': True
},
{
'type': 'ts-dynamic-form-datetime-input',
'name': 'end_time',
'label': 'ISO formatted end time for the aggregation',
'placeholder': 'Enter an end date for the aggregation',
'default_value': '',
'display': True
},
{
'type': 'ts-dynamic-form-text-input',
'name': 'limit',
'label': 'Number of results to return',
'placeholder': 'Enter number of results to return',
'default_value': '10',
'display': True
}
]
@property
def chart_title(self):
"""Returns a title for the chart."""
if self.field:
return 'Top filtered results for "{0:s}"'.format(self.field)
return 'Top results for an unknown field after filtering'
# pylint: disable=arguments-differ
def run(
self, field, query_string='', query_dsl='',
supported_charts='table', start_time='', end_time='', limit=10):
"""Run the aggregation.
Args:
field (str): this denotes the event attribute that is used
for aggregation.
query_string (str): the query field to run on all documents prior to
aggregating the results.
query_dsl (str): the query DSL field to run on all documents prior
to aggregating the results. Either a query string or a query
DSL has to be present.
supported_charts: Chart type to render. Defaults to table.
start_time: Optional ISO formatted date string that limits the time
range for the aggregation.
end_time: Optional ISO formatted date string that limits the time
range for the aggregation.
limit (int): How many buckets to return, defaults to 10.
Returns:
Instance of interface.AggregationResult with aggregation result.
Raises:
ValueError: if neither query_string or query_dsl is provided.
"""
if not (query_string or query_dsl):
raise ValueError('Both query_string and query_dsl are missing')
self.field = field
formatted_field_name = self.format_field_by_type(field)
aggregation_spec = get_spec(
field=formatted_field_name, limit=limit, query=query_string,
query_dsl=query_dsl)
aggregation_spec = self._add_query_to_aggregation_spec(
aggregation_spec, start_time=start_time, end_time=end_time)
# Encoding information for Vega-Lite.
encoding = {
'x': {
'field': field,
'type': 'nominal',
'sort': {
'op': 'sum',
'field': 'count',
'order': 'descending'
}
},
'y': {'field': 'count', 'type': 'quantitative'},
'tooltip': [
{'field': field, 'type': 'nominal'},
{'field': 'count', 'type': 'quantitative'}],
}
response = self.opensearch_aggregation(aggregation_spec)
aggregations = response.get('aggregations', {})
aggregation = aggregations.get('aggregation', {})
buckets = aggregation.get('buckets', [])
values = []
for bucket in buckets:
d = {
field: bucket.get('key', 'N/A'),
'count': bucket.get('doc_count', 0)
}
values.append(d)
if query_string:
extra_query_url = 'AND {0:s}'.format(query_string)
else:
extra_query_url = ''
return interface.AggregationResult(
encoding=encoding, values=values, chart_type=supported_charts,
sketch_url=self._sketch_url, field=field,
extra_query_url=extra_query_url)
manager.AggregatorManager.register_aggregator(FilteredTermsAggregation)
| google/timesketch | timesketch/lib/aggregators/term.py | Python | apache-2.0 | 7,953 |
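get_spec above simply nests a query_string filter around a terms aggregation; written out by hand for a sample field and query (both values made up), the spec it builds looks like this:
spec = {
    'query': {
        'bool': {
            'must': [
                {'query_string': {'query': 'data_type:"fs:stat"'}}
            ]
        }
    },
    'aggs': {
        'aggregation': {
            'terms': {'field': 'domain', 'size': 5}
        }
    }
}
# i.e. what get_spec('domain', limit=5, query='data_type:"fs:stat"') would return.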
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from textwrap import dedent
import unittest
from eventlet.green import ssl
import mock
from six.moves.configparser import NoSectionError, NoOptionError
from swift.common.middleware import memcache
from swift.common.memcached import MemcacheRing
from swift.common.swob import Request
from swift.common.wsgi import loadapp
from test.unit import with_tempdir, patch_policies
class FakeApp(object):
def __call__(self, env, start_response):
return env
class ExcConfigParser(object):
def read(self, path):
raise Exception('read called with %r' % path)
class EmptyConfigParser(object):
def read(self, path):
return False
def get_config_parser(memcache_servers='1.2.3.4:5',
memcache_serialization_support='1',
memcache_max_connections='4',
section='memcache'):
_srvs = memcache_servers
_sers = memcache_serialization_support
_maxc = memcache_max_connections
_section = section
class SetConfigParser(object):
def items(self, section_name):
if section_name != section:
raise NoSectionError(section_name)
return {
'memcache_servers': memcache_servers,
'memcache_serialization_support':
memcache_serialization_support,
'memcache_max_connections': memcache_max_connections,
}
def read(self, path):
return True
def get(self, section, option):
if _section == section:
if option == 'memcache_servers':
if _srvs == 'error':
raise NoOptionError(option, section)
return _srvs
elif option == 'memcache_serialization_support':
if _sers == 'error':
raise NoOptionError(option, section)
return _sers
elif option in ('memcache_max_connections',
'max_connections'):
if _maxc == 'error':
raise NoOptionError(option, section)
return _maxc
else:
raise NoOptionError(option, section)
else:
raise NoSectionError(option)
return SetConfigParser
def start_response(*args):
pass
class TestCacheMiddleware(unittest.TestCase):
def setUp(self):
self.app = memcache.MemcacheMiddleware(FakeApp(), {})
def test_cache_middleware(self):
req = Request.blank('/something', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertTrue('swift.cache' in resp)
self.assertTrue(isinstance(resp['swift.cache'], MemcacheRing))
def test_conf_default_read(self):
with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
for d in ({},
{'memcache_servers': '6.7.8.9:10'},
{'memcache_serialization_support': '0'},
{'memcache_max_connections': '30'},
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0'},
{'memcache_servers': '6.7.8.9:10',
'memcache_max_connections': '30'},
{'memcache_serialization_support': '0',
'memcache_max_connections': '30'}
):
with self.assertRaises(Exception) as catcher:
memcache.MemcacheMiddleware(FakeApp(), d)
self.assertEqual(
str(catcher.exception),
"read called with '/etc/swift/memcache.conf'")
def test_conf_set_no_read(self):
with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
exc = None
try:
memcache.MemcacheMiddleware(
FakeApp(), {'memcache_servers': '1.2.3.4:5',
'memcache_serialization_support': '2',
'memcache_max_connections': '30'})
except Exception as err:
exc = err
self.assertIsNone(exc)
def test_conf_default(self):
with mock.patch.object(memcache, 'ConfigParser', EmptyConfigParser):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, False)
self.assertEqual(
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_inline(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0',
'memcache_max_connections': '5'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 5)
def test_conf_inline_ratelimiting(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'error_suppression_limit': '5',
'error_suppression_interval': '2.5'})
self.assertEqual(app.memcache._error_limit_count, 5)
self.assertEqual(app.memcache._error_limit_time, 2.5)
self.assertEqual(app.memcache._error_limit_duration, 2.5)
def test_conf_inline_tls(self):
fake_context = mock.Mock()
with mock.patch.object(ssl, 'create_default_context',
return_value=fake_context):
with mock.patch.object(memcache, 'ConfigParser',
get_config_parser()):
memcache.MemcacheMiddleware(
FakeApp(),
{'tls_enabled': 'true',
'tls_cafile': 'cafile',
'tls_certfile': 'certfile',
'tls_keyfile': 'keyfile'})
ssl.create_default_context.assert_called_with(cafile='cafile')
fake_context.load_cert_chain.assert_called_with('certfile',
'keyfile')
def test_conf_extra_no_section(self):
with mock.patch.object(memcache, 'ConfigParser',
get_config_parser(section='foobar')):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, False)
self.assertEqual(
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_extra_no_option(self):
replacement_parser = get_config_parser(
memcache_servers='error', memcache_serialization_support='error',
memcache_max_connections='error')
with mock.patch.object(memcache, 'ConfigParser', replacement_parser):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, False)
self.assertEqual(
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_inline_other_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0',
'max_connections': '5'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 5)
def test_conf_inline_bad_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0',
'max_connections': 'bad42'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 4)
def test_conf_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '1.2.3.4:5')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['1.2.3.4:5'].max_size, 4)
def test_conf_from_extra_conf_bad_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser(
memcache_max_connections='bad42')):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '1.2.3.4:5')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['1.2.3.4:5'].max_size, 2)
def test_conf_from_inline_and_maxc_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 4)
def test_conf_from_inline_and_sers_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_max_connections': '42'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 42)
def test_filter_factory(self):
factory = memcache.filter_factory({'max_connections': '3'},
memcache_servers='10.10.10.10:10',
memcache_serialization_support='1')
thefilter = factory('myapp')
self.assertEqual(thefilter.app, 'myapp')
self.assertEqual(thefilter.memcache_servers, '10.10.10.10:10')
self.assertEqual(thefilter.memcache._allow_pickle, False)
self.assertEqual(thefilter.memcache._allow_unpickle, True)
self.assertEqual(
thefilter.memcache._client_cache['10.10.10.10:10'].max_size, 3)
@patch_policies
def _loadapp(self, proxy_config_path):
"""
Load a proxy from an app.conf to get the memcache_ring
:returns: the memcache_ring of the memcache middleware filter
"""
with mock.patch('swift.proxy.server.Ring'):
app = loadapp(proxy_config_path)
memcache_ring = None
while True:
memcache_ring = getattr(app, 'memcache', None)
if memcache_ring:
break
app = app.app
return memcache_ring
@with_tempdir
def test_real_config(self, tempdir):
config = """
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
"""
config_path = os.path.join(tempdir, 'test.conf')
with open(config_path, 'w') as f:
f.write(dedent(config))
memcache_ring = self._loadapp(config_path)
# only one server by default
self.assertEqual(list(memcache_ring._client_cache.keys()),
['127.0.0.1:11211'])
# extra options
self.assertEqual(memcache_ring._connect_timeout, 0.3)
self.assertEqual(memcache_ring._pool_timeout, 1.0)
# tries is limited to server count
self.assertEqual(memcache_ring._tries, 1)
self.assertEqual(memcache_ring._io_timeout, 2.0)
@with_tempdir
def test_real_config_with_options(self, tempdir):
config = """
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
10.0.0.4:11211
connect_timeout = 1.0
pool_timeout = 0.5
tries = 4
io_timeout = 1.0
tls_enabled = true
"""
config_path = os.path.join(tempdir, 'test.conf')
with open(config_path, 'w') as f:
f.write(dedent(config))
memcache_ring = self._loadapp(config_path)
self.assertEqual(sorted(memcache_ring._client_cache.keys()),
['10.0.0.%d:11211' % i for i in range(1, 5)])
# extra options
self.assertEqual(memcache_ring._connect_timeout, 1.0)
self.assertEqual(memcache_ring._pool_timeout, 0.5)
# tries is limited to server count
self.assertEqual(memcache_ring._tries, 4)
self.assertEqual(memcache_ring._io_timeout, 1.0)
self.assertEqual(memcache_ring._error_limit_count, 10)
self.assertEqual(memcache_ring._error_limit_time, 60)
self.assertEqual(memcache_ring._error_limit_duration, 60)
self.assertIsInstance(
list(memcache_ring._client_cache.values())[0]._tls_context,
ssl.SSLContext)
@with_tempdir
def test_real_memcache_config(self, tempdir):
proxy_config = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
connect_timeout = 1.0
""" % tempdir
proxy_config_path = os.path.join(tempdir, 'test.conf')
with open(proxy_config_path, 'w') as f:
f.write(dedent(proxy_config))
memcache_config = """
[memcache]
memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
10.0.0.4:11211
connect_timeout = 0.5
io_timeout = 1.0
error_suppression_limit = 0
error_suppression_interval = 1.5
"""
memcache_config_path = os.path.join(tempdir, 'memcache.conf')
with open(memcache_config_path, 'w') as f:
f.write(dedent(memcache_config))
memcache_ring = self._loadapp(proxy_config_path)
self.assertEqual(sorted(memcache_ring._client_cache.keys()),
['10.0.0.%d:11211' % i for i in range(1, 5)])
# proxy option takes precedence
self.assertEqual(memcache_ring._connect_timeout, 1.0)
# default tries are not limited by servers
self.assertEqual(memcache_ring._tries, 3)
# memcache conf options are defaults
self.assertEqual(memcache_ring._io_timeout, 1.0)
self.assertEqual(memcache_ring._error_limit_count, 0)
self.assertEqual(memcache_ring._error_limit_time, 1.5)
self.assertEqual(memcache_ring._error_limit_duration, 1.5)
if __name__ == '__main__':
unittest.main()
| swiftstack/swift | test/unit/common/middleware/test_memcache.py | Python | apache-2.0 | 17,061 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from cinder.api.contrib import volume_type_access as type_access
from cinder.api.v2 import types as types_api_v2
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
def generate_type(type_id, is_public):
return {
'id': type_id,
'name': u'test',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
'updated_at': None,
'deleted_at': None,
'is_public': bool(is_public)
}
VOLUME_TYPES = {
fake.VOLUME_TYPE_ID: generate_type(fake.VOLUME_TYPE_ID, True),
fake.VOLUME_TYPE2_ID: generate_type(fake.VOLUME_TYPE2_ID, True),
fake.VOLUME_TYPE3_ID: generate_type(fake.VOLUME_TYPE3_ID, False),
fake.VOLUME_TYPE4_ID: generate_type(fake.VOLUME_TYPE4_ID, False)}
PROJ1_UUID = fake.PROJECT_ID
PROJ2_UUID = fake.PROJECT2_ID
PROJ3_UUID = fake.PROJECT3_ID
ACCESS_LIST = [{'volume_type_id': fake.VOLUME_TYPE3_ID,
'project_id': PROJ2_UUID},
{'volume_type_id': fake.VOLUME_TYPE3_ID,
'project_id': PROJ3_UUID},
{'volume_type_id': fake.VOLUME_TYPE4_ID,
'project_id': PROJ3_UUID}]
def fake_volume_type_get(context, id, inactive=False, expected_fields=None):
vol = VOLUME_TYPES[id]
if expected_fields and 'projects' in expected_fields:
vol['projects'] = [a['project_id']
for a in ACCESS_LIST if a['volume_type_id'] == id]
return vol
def _has_type_access(type_id, project_id):
for access in ACCESS_LIST:
if access['volume_type_id'] == type_id and \
access['project_id'] == project_id:
return True
return False
def fake_volume_type_get_all(context, inactive=False, filters=None,
marker=None, limit=None, sort_keys=None,
sort_dirs=None, offset=None, list_result=False):
if filters is None or filters['is_public'] is None:
if list_result:
return list(VOLUME_TYPES.values())
return VOLUME_TYPES
res = {}
for k, v in VOLUME_TYPES.items():
if filters['is_public'] and _has_type_access(k, context.project_id):
res.update({k: v})
continue
if v['is_public'] == filters['is_public']:
res.update({k: v})
if list_result:
return list(res.values())
return res
class FakeResponse(object):
obj = {'volume_type': {'id': fake.VOLUME_TYPE_ID},
'volume_types': [
{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE3_ID}]}
def attach(self, **kwargs):
pass
class FakeRequest(object):
environ = {"cinder.context": context.get_admin_context()}
def cached_resource_by_id(self, resource_id, name=None):
return VOLUME_TYPES[resource_id]
class VolumeTypeAccessTest(test.TestCase):
def setUp(self):
super(VolumeTypeAccessTest, self).setUp()
self.type_controller_v2 = types_api_v2.VolumeTypesController()
self.type_access_controller = type_access.VolumeTypeAccessController()
self.type_action_controller = type_access.VolumeTypeActionController()
self.req = FakeRequest()
self.context = self.req.environ['cinder.context']
self.stubs.Set(db, 'volume_type_get',
fake_volume_type_get)
self.stubs.Set(db, 'volume_type_get_all',
fake_volume_type_get_all)
def assertVolumeTypeListEqual(self, expected, observed):
self.assertEqual(len(expected), len(observed))
expected = sorted(expected, key=lambda item: item['id'])
observed = sorted(observed, key=lambda item: item['id'])
for d1, d2 in zip(expected, observed):
self.assertEqual(d1['id'], d2['id'])
def test_list_type_access_public(self):
"""Querying os-volume-type-access on public type should return 404."""
req = fakes.HTTPRequest.blank('/v2/%s/types/os-volume-type-access' %
fake.PROJECT_ID,
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.type_access_controller.index,
req, fake.VOLUME_TYPE2_ID)
def test_list_type_access_private(self):
expected = {'volume_type_access': [
{'volume_type_id': fake.VOLUME_TYPE3_ID,
'project_id': PROJ2_UUID},
{'volume_type_id': fake.VOLUME_TYPE3_ID,
'project_id': PROJ3_UUID}]}
result = self.type_access_controller.index(self.req,
fake.VOLUME_TYPE3_ID)
self.assertEqual(expected, result)
def test_list_with_no_context(self):
req = fakes.HTTPRequest.blank('/v2/flavors/%s/flavors' %
fake.PROJECT_ID)
def fake_authorize(context, target=None, action=None):
raise exception.PolicyNotAuthorized(action='index')
self.stubs.Set(type_access, 'authorize', fake_authorize)
self.assertRaises(exception.PolicyNotAuthorized,
self.type_access_controller.index,
req, fake.PROJECT_ID)
def test_list_type_with_admin_default_proj1(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ1_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_default_proj2(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID},
{'id': fake.VOLUME_TYPE3_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types' % PROJ2_UUID,
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ2_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_true(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true' %
fake.PROJECT_ID,
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_false(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID},
{'id': fake.VOLUME_TYPE4_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
fake.PROJECT_ID,
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_false_proj2(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID},
{'id': fake.VOLUME_TYPE4_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
fake.PROJECT_ID,
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ2_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_none(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID},
{'id': fake.VOLUME_TYPE3_ID},
{'id': fake.VOLUME_TYPE4_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none' %
fake.PROJECT_ID,
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_default(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_true(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true' %
fake.PROJECT_ID,
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_false(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
fake.PROJECT_ID,
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_none(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none' %
fake.PROJECT_ID,
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_show(self):
resp = FakeResponse()
self.type_action_controller.show(self.req, resp, fake.VOLUME_TYPE_ID)
self.assertEqual({'id': fake.VOLUME_TYPE_ID,
'os-volume-type-access:is_public': True},
resp.obj['volume_type'])
def test_detail(self):
resp = FakeResponse()
self.type_action_controller.detail(self.req, resp)
self.assertEqual(
[{'id': fake.VOLUME_TYPE_ID,
'os-volume-type-access:is_public': True},
{'id': fake.VOLUME_TYPE3_ID,
'os-volume-type-access:is_public': False}],
resp.obj['volume_types'])
def test_create(self):
resp = FakeResponse()
self.type_action_controller.create(self.req, {}, resp)
self.assertEqual({'id': fake.VOLUME_TYPE_ID,
'os-volume-type-access:is_public': True},
resp.obj['volume_type'])
def test_add_project_access(self):
def stub_add_volume_type_access(context, type_id, project_id):
self.assertEqual(fake.VOLUME_TYPE4_ID, type_id, "type_id")
self.assertEqual(PROJ2_UUID, project_id, "project_id")
self.stubs.Set(db, 'volume_type_access_add',
stub_add_volume_type_access)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
use_admin_context=True)
result = self.type_action_controller._addProjectAccess(
req, fake.VOLUME_TYPE4_ID, body)
self.assertEqual(202, result.status_code)
def test_add_project_access_with_no_admin_user(self):
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
use_admin_context=False)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
self.assertRaises(exception.PolicyNotAuthorized,
self.type_action_controller._addProjectAccess,
req, fake.VOLUME_TYPE3_ID, body)
def test_add_project_access_with_already_added_access(self):
def stub_add_volume_type_access(context, type_id, project_id):
raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
project_id=project_id)
self.stubs.Set(db, 'volume_type_access_add',
stub_add_volume_type_access)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True)
self.assertRaises(webob.exc.HTTPConflict,
self.type_action_controller._addProjectAccess,
req, fake.VOLUME_TYPE3_ID, body)
def test_remove_project_access_with_bad_access(self):
def stub_remove_volume_type_access(context, type_id, project_id):
raise exception.VolumeTypeAccessNotFound(volume_type_id=type_id,
project_id=project_id)
self.stubs.Set(db, 'volume_type_access_remove',
stub_remove_volume_type_access)
body = {'removeProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.type_action_controller._removeProjectAccess,
req, fake.VOLUME_TYPE4_ID, body)
def test_remove_project_access_with_no_admin_user(self):
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=False)
body = {'removeProjectAccess': {'project': PROJ2_UUID}}
self.assertRaises(exception.PolicyNotAuthorized,
self.type_action_controller._removeProjectAccess,
req, fake.VOLUME_TYPE3_ID, body)
| bswartz/cinder | cinder/tests/unit/api/contrib/test_volume_type_access.py | Python | apache-2.0 | 15,995 |
# Copyright 2020 Department of Computational Biology for Infection Research - Helmholtz Centre for Infection Research
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from src.utils import labels as utils_labels
from src.utils import load_ncbi_taxinfo
from src import binning_classes
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
import numpy as np
import os, sys, inspect
import pandas as pd
from collections import OrderedDict
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
def create_colors_list():
colors_list = []
for color in plt.cm.tab10(np.linspace(0, 1, 10))[:-1]:
colors_list.append(tuple(color))
colors_list.append("black")
for color in plt.cm.Set2(np.linspace(0, 1, 8)):
colors_list.append(tuple(color))
for color in plt.cm.Set3(np.linspace(0, 1, 12)):
colors_list.append(tuple(color))
return colors_list
def create_legend(color_indices, available_tools, output_dir):
colors_list = create_colors_list()
if color_indices:
colors_list = [colors_list[i] for i in color_indices]
colors_iter = iter(colors_list)
circles = [Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=10, markerfacecolor=next(colors_iter)) for label in available_tools]
fig = plt.figure(figsize=(0.5, 0.5))
fig.legend(circles, available_tools, loc='center', frameon=False, ncol=5, handletextpad=0.1)
fig.savefig(os.path.join(output_dir, 'genome', 'legend.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_precision_vs_bin_size(pd_bins, output_dir):
pd_plot = pd_bins[pd_bins[utils_labels.TOOL] != utils_labels.GS]
for tool_label, pd_tool in pd_plot.groupby(utils_labels.TOOL):
fig, axs = plt.subplots(figsize=(5, 4.5))
axs.scatter(np.log(pd_tool['total_length']), pd_tool['precision_bp'], marker='o')
axs.set_xlim([None, np.log(pd_tool['total_length'].max())])
axs.set_ylim([0.0, 1.0])
axs.set_title(tool_label, fontsize=12)
plt.ylabel('Purity per bin (%)', fontsize=12)
plt.xlabel('Bin size [log(# bp)]', fontsize=12)
fig.savefig(os.path.join(output_dir, 'genome', tool_label, 'purity_vs_bin_size.png'), dpi=200, format='png', bbox_inches='tight')
plt.close(fig)
def plot_by_genome_coverage(pd_bins, pd_target_column, available_tools, output_dir):
colors_list = create_colors_list()
if len(available_tools) > len(colors_list):
raise RuntimeError("Plot only supports 29 colors")
fig, axs = plt.subplots(figsize=(5, 4.5))
for i, (color, tool) in enumerate(zip(colors_list, available_tools)):
pd_tool = pd_bins[pd_bins[utils_labels.TOOL] == tool].sort_values(by=['genome_index'])
axs.scatter(pd_tool['genome_coverage'], pd_tool[pd_target_column], marker='o', color=colors_list[i], s=[3] * pd_tool.shape[0])
window = 50
rolling_mean = pd_tool[pd_target_column].rolling(window=window, min_periods=10).mean()
axs.plot(pd_tool['genome_coverage'], rolling_mean, color=colors_list[i])
axs.set_ylim([-0.01, 1.01])
axs.set_xticklabels(['{:,.1f}'.format(np.exp(x)) for x in axs.get_xticks()], fontsize=12)
axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in axs.get_yticks()], fontsize=12)
axs.tick_params(axis='x', labelsize=12)
if pd_target_column == 'precision_bp':
ylabel = 'Purity per bin (%)'
file_name = 'purity_by_genome_coverage'
else:
ylabel = 'Completeness per genome (%)'
file_name = 'completeness_by_genome_coverage'
plt.ylabel(ylabel, fontsize=15)
plt.xlabel('Average genome coverage', fontsize=15)
colors_iter = iter(colors_list)
circles = []
for x in range(len(available_tools)):
circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
lgd = plt.legend(circles, available_tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=14)
fig.savefig(os.path.join(output_dir, 'genome', file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
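# Illustrative sketch (added for clarity, not called by AMBER itself): the trend lines
# drawn above are plain rolling means over the per-bin values ordered by genome
# coverage; a minimal standalone equivalent of that smoothing step looks like this.
def _example_rolling_trend(values, window=50):
    # Same parameters as above: a 50-wide window that starts averaging after 10 points.
    return pd.Series(values).rolling(window=window, min_periods=10).mean()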
def get_pd_genomes_recall(sample_id_to_queries_list):
pd_genomes_recall = pd.DataFrame()
for sample_id in sample_id_to_queries_list:
for query in sample_id_to_queries_list[sample_id]:
if not isinstance(query, binning_classes.GenomeQuery):
continue
recall_df = query.recall_df_cami1[['genome_id', 'recall_bp']].copy()
recall_df[utils_labels.TOOL] = query.label
recall_df['sample_id'] = sample_id
recall_df = recall_df.reset_index().set_index(['sample_id', utils_labels.TOOL])
pd_genomes_recall = pd.concat([pd_genomes_recall, recall_df])
return pd_genomes_recall
def plot_precision_recall_by_coverage(sample_id_to_queries_list, pd_bins_g, coverages_pd, available_tools, output_dir):
# compute average genome coverage if coverages for multiple samples were provided
coverages_pd = coverages_pd.groupby(['GENOMEID']).mean()
coverages_pd.rename(columns={'GENOMEID': 'genome_id'})
coverages_pd = coverages_pd.sort_values(by=['COVERAGE'])
coverages_pd['rank'] = coverages_pd['COVERAGE'].rank()
pd_genomes_recall = get_pd_genomes_recall(sample_id_to_queries_list)
pd_genomes_recall['genome_index'] = pd_genomes_recall['genome_id'].map(coverages_pd['rank'].to_dict())
pd_genomes_recall = pd_genomes_recall.reset_index()
pd_genomes_recall['genome_coverage'] = np.log(pd_genomes_recall['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
plot_by_genome_coverage(pd_genomes_recall, 'recall_bp', available_tools, output_dir)
pd_bins_precision = pd_bins_g[[utils_labels.TOOL, 'precision_bp', 'genome_id']].copy().dropna(subset=['precision_bp'])
pd_bins_precision['genome_index'] = pd_bins_precision['genome_id'].map(coverages_pd['rank'].to_dict())
pd_bins_precision['genome_coverage'] = np.log(pd_bins_precision['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
plot_by_genome_coverage(pd_bins_precision, 'precision_bp', available_tools, output_dir)
def plot_heatmap(df_confusion, sample_id, output_dir, label, separate_bar=False, log_scale=False):
if log_scale:
df_confusion = df_confusion.apply(np.log10, inplace=True).replace(-np.inf, 0)
fig, axs = plt.subplots(figsize=(10, 8))
fontsize = 20
# replace columns and rows labels by numbers
d = {value: key for (key, value) in enumerate(df_confusion.columns.tolist(), 1)}
df_confusion = df_confusion.rename(index=str, columns=d)
df_confusion.index = range(1, len(df_confusion) + 1)
xticklabels = int(round(df_confusion.shape[1] / 10, -1))
yticklabels = int(round(df_confusion.shape[0] / 10, -1))
sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=xticklabels, yticklabels=yticklabels, cbar=False, rasterized=True)
# sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=False, yticklabels=False, cbar=True, rasterized=True)
sns_plot.set_xlabel("Genomes", fontsize=fontsize)
sns_plot.set_ylabel("Predicted bins", fontsize=fontsize)
plt.yticks(fontsize=12, rotation=0)
plt.xticks(fontsize=12)
mappable = sns_plot.get_children()[0]
cbar_ax = fig.add_axes([.915, .11, .017, .77])
cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical')
if log_scale:
cbar.set_label(fontsize=fontsize, label='log$_{10}$(# bp)')
else:
fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical', format=ticker.FuncFormatter(fmt))
cbar.set_label(fontsize=fontsize, label='Millions of base pairs')
cbar.ax.tick_params(labelsize=fontsize)
cbar.outline.set_edgecolor(None)
axs.set_title(label, fontsize=fontsize, pad=10)
axs.set_ylim([len(df_confusion), 0])
# plt.yticks(fontsize=14, rotation=0)
# plt.xticks(fontsize=14)
output_dir = os.path.join(output_dir, 'genome', label)
fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.png'), dpi=200, format='png', bbox_inches='tight')
plt.close(fig)
if not separate_bar:
return
# create separate figure for bar
fig = plt.figure(figsize=(6, 6))
mappable = sns_plot.get_children()[0]
fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
cbar = plt.colorbar(mappable, orientation='vertical', label='[millions of base pairs]', format=ticker.FuncFormatter(fmt))
text = cbar.ax.yaxis.label
font = matplotlib.font_manager.FontProperties(size=16)
text.set_font_properties(font)
cbar.outline.set_visible(False)
cbar.ax.tick_params(labelsize=14)
# store separate bar figure
plt.gca().set_visible(False)
fig.savefig(os.path.join(output_dir, 'heatmap_bar.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_boxplot(sample_id_to_queries_list, metric_name, output_dir, available_tools):
pd_bins = pd.DataFrame()
for sample_id in sample_id_to_queries_list:
for query in sample_id_to_queries_list[sample_id]:
metric_df = getattr(query, metric_name.replace('_bp', '_df')).copy()
metric_df[utils_labels.TOOL] = query.label
metric_df['sample_id'] = sample_id
metric_df = metric_df.reset_index().set_index(['sample_id', utils_labels.TOOL])
pd_bins = pd.concat([pd_bins, metric_df])
metric_all = []
for tool in available_tools:
pd_tool = pd_bins.iloc[pd_bins.index.get_level_values(utils_labels.TOOL) == tool]
metric_all.append(pd_tool[metric_name][pd_tool[metric_name].notnull()].tolist())
fig, axs = plt.subplots(figsize=(6, 5))
medianprops = dict(linewidth=2.5, color='gold')
bplot = axs.boxplot(metric_all, notch=0, vert=0, patch_artist=True, labels=available_tools, medianprops=medianprops, sym='k.')
colors_iter = iter(create_colors_list())
# turn on grid
axs.grid(which='major', linestyle=':', linewidth='0.5', color='lightgrey')
# force axes to be from 0 to 100%
axs.set_xlim([-0.01, 1.01])
# transform plot_labels to percentages
vals = axs.get_xticks()
axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals])
# enable code to rotate labels
tick_labels = axs.get_yticklabels()
plt.setp(tick_labels, fontsize=13) ## rotation=55
for box in bplot['boxes']:
box.set(facecolor=next(colors_iter), linewidth=0.1)
plt.ylim(plt.ylim()[::-1])
if metric_name == 'precision_bp':
axs.set_xlabel('Purity per bin (%)', fontsize=13)
metric_name = 'purity_bp'
else:
axs.set_xlabel('Completeness per genome (%)', fontsize=13)
metric_name = 'completeness_bp'
fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.png'), dpi=200, format='png', bbox_inches='tight')
# remove labels but keep grid
# axs.get_yaxis().set_ticklabels([])
# for tic in axs.yaxis.get_major_ticks():
# tic.tick1line.set_visible(False)
# tic.tick2line.set_visible(False)
# tic.label1.set_visible(False)
# tic.label2.set_visible(False)
# fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '_wo_legend.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_summary(color_indices, df_results, labels, output_dir, rank, plot_type, file_name, xlabel, ylabel):
available_tools = df_results[utils_labels.TOOL].unique()
tools = [tool for tool in labels if tool in available_tools]
colors_list = create_colors_list()
if color_indices:
colors_list = [colors_list[i] for i in color_indices]
df_mean = df_results.groupby(utils_labels.TOOL).mean().reindex(tools)
binning_type = df_results[utils_labels.BINNING_TYPE].iloc[0]
if len(df_mean) > len(colors_list):
raise RuntimeError("Plot only supports 29 colors")
fig, axs = plt.subplots(figsize=(5, 4.5))
# force axes to be from 0 to 100%
axs.set_xlim([0.0, 1.0])
axs.set_ylim([0.0, 1.0])
if plot_type == 'e':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.errorbar(df_row[utils_labels.AVG_PRECISION_BP], df_row[utils_labels.AVG_RECALL_BP], xerr=df_row['avg_precision_bp_var'], yerr=df_row['avg_recall_bp_var'],
fmt='o',
ecolor=colors_list[i],
mec=colors_list[i],
mfc=colors_list[i],
capsize=3,
markersize=8)
if plot_type == 'f':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.errorbar(df_row[utils_labels.AVG_PRECISION_SEQ], df_row[utils_labels.AVG_RECALL_SEQ], xerr=df_row[utils_labels.AVG_PRECISION_SEQ_SEM], yerr=df_row[utils_labels.AVG_RECALL_SEQ_SEM],
fmt='o',
ecolor=colors_list[i],
mec=colors_list[i],
mfc=colors_list[i],
capsize=3,
markersize=8)
if plot_type == 'w':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.PRECISION_PER_BP], df_row[utils_labels.RECALL_PER_BP], marker='o', color=colors_list[i], markersize=10)
if plot_type == 'x':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.PRECISION_PER_SEQ], df_row[utils_labels.RECALL_PER_SEQ], marker='o', color=colors_list[i], markersize=10)
elif plot_type == 'p':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.ARI_BY_BP], df_row[utils_labels.PERCENTAGE_ASSIGNED_BPS], marker='o', color=colors_list[i], markersize=10)
# turn on grid
# axs.minorticks_on()
axs.grid(which='major', linestyle=':', linewidth='0.5')
# axs.grid(which='minor', linestyle=':', linewidth='0.5')
# transform plot_labels to percentages
if plot_type != 'p':
vals = axs.get_xticks()
axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
else:
axs.tick_params(axis='x', labelsize=12)
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
if rank:
file_name = rank + '_' + file_name
plt.title(rank)
ylabel = ylabel.replace('genome', 'taxon')
plt.xlabel(xlabel, fontsize=13)
plt.ylabel(ylabel, fontsize=13)
plt.tight_layout()
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.eps'), dpi=100, format='eps', bbox_inches='tight')
colors_iter = iter(colors_list)
circles = []
for x in range(len(df_mean)):
circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
lgd = plt.legend(circles, tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=12)
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def plot_avg_precision_recall(colors, df_results, labels, output_dir, rank=None):
plot_summary(colors,
df_results,
labels,
output_dir,
rank,
'e',
'avg_purity_completeness_bp',
'Average purity per bin (%)',
'Average completeness per genome (%)')
plot_summary(colors,
df_results,
labels,
output_dir,
rank,
'f',
'avg_purity_completeness_seq',
'Average purity per bin (%)',
'Average completeness per genome (%)')
def plot_precision_recall(colors, summary_per_query, labels, output_dir, rank=None):
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'w',
'purity_recall_bp',
'Purity for sample (%)',
'Completeness for sample (%)')
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'x',
'purity_completeness_seq',
'Purity for sample (%)',
'Completeness for sample (%)')
def plot_adjusted_rand_index_vs_assigned_bps(colors, summary_per_query, labels, output_dir, rank=None):
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'p',
'ari_vs_assigned_bps',
'Adjusted Rand index',
'Percentage of binned base pairs')
def plot_taxonomic_results(df_summary_t, metrics_list, errors_list, file_name, output_dir):
colors_list = ["#006cba", "#008000", "#ba9e00", "red"]
for tool, pd_results in df_summary_t.groupby(utils_labels.TOOL):
dict_metric_list = []
for metric in metrics_list:
rank_to_metric = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
dict_metric_list.append(rank_to_metric)
dict_error_list = []
for error in errors_list:
rank_to_metric_error = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
dict_error_list.append(rank_to_metric_error)
for index, row in pd_results.iterrows():
for rank_to_metric, metric in zip(dict_metric_list, metrics_list):
rank_to_metric[row[utils_labels.RANK]] = .0 if np.isnan(row[metric]) else row[metric]
for rank_to_metric_error, error in zip(dict_error_list, errors_list):
rank_to_metric_error[row[utils_labels.RANK]] = .0 if np.isnan(row[error]) else row[error]
fig, axs = plt.subplots(figsize=(6, 5))
# force axes to be from 0 to 100%
axs.set_xlim([0, 7])
axs.set_ylim([0.0, 1.0])
x_values = range(len(load_ncbi_taxinfo.RANKS))
y_values_list = []
for rank_to_metric, color in zip(dict_metric_list, colors_list):
y_values = list(rank_to_metric.values())
axs.plot(x_values, y_values, color=color)
y_values_list.append(y_values)
for rank_to_metric_error, y_values, color in zip(dict_error_list, y_values_list, colors_list):
sem = list(rank_to_metric_error.values())
plt.fill_between(x_values, np.subtract(y_values, sem).tolist(), np.add(y_values, sem).tolist(), color=color, alpha=0.5)
plt.xticks(x_values, load_ncbi_taxinfo.RANKS, rotation='vertical')
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
lgd = plt.legend(metrics_list, loc=1, borderaxespad=0., handlelength=2, frameon=False)
plt.tight_layout()
fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def create_contamination_column(pd_tool_bins):
pd_tool_bins['newcolumn'] = 1 - pd_tool_bins['precision_bp']
def create_completeness_minus_contamination_column(pd_tool_bins):
pd_tool_bins['newcolumn'] = pd_tool_bins['recall_bp'] + pd_tool_bins['precision_bp'] - 1
def plot_contamination(pd_bins, binning_type, title, xlabel, ylabel, create_column_function, output_dir):
if len(pd_bins) == 0:
return
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
create_column_function(pd_bins_copy)
colors_list = create_colors_list()
fig, axs = plt.subplots(figsize=(6, 5))
tools = pd_bins_copy[utils_labels.TOOL].unique().tolist()
for color, tool in zip(colors_list, tools):
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
pd_tool_bins = pd_tool_bins.sort_values(by='newcolumn', ascending=False).reset_index()
pd_tool_bins = pd_tool_bins.drop(['index'], axis=1)
axs.plot(list(range(1, len(pd_tool_bins) + 1)), pd_tool_bins['newcolumn'], color=color)
min_value = pd_bins_copy['newcolumn'].min()
axs.set_ylim(min_value if min_value < 1.0 else .9, 1.0)
axs.set_xlim(1, None)
axs.grid(which='major', linestyle='-', linewidth='0.5', color='lightgrey')
# transform plot_labels to percentages
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}'.format(y * 100) for y in vals])
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel + ' [%]', fontsize=14)
lgd = plt.legend(tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=1, frameon=False, fontsize=12)
plt.tight_layout()
file_name = title.lower().replace(' ', '_').replace('-', 'minus').replace('|', '')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def get_number_of_hq_bins(tools, pd_bins):
pd_counts = pd.DataFrame()
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
for tool in tools:
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
x50 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .5) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
x70 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .7) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
x90 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .9) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
pd_tool_counts = pd.DataFrame([[x90, x70, x50]], columns=['>90%', '>70%', '>50%'], index=[tool])
pd_counts = pd_counts.append(pd_tool_counts)
return pd_counts
def get_number_of_hq_bins_by_score(tools, pd_bins):
pd_counts = pd.DataFrame()
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
pd_bins_copy['newcolumn'] = pd_bins_copy['recall_bp'] + 5 * (pd_bins_copy['precision_bp'] - 1)
for tool in tools:
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
x50 = pd_tool_bins[pd_tool_bins['newcolumn'] > .5].shape[0]
x70 = pd_tool_bins[pd_tool_bins['newcolumn'] > .7].shape[0]
x90 = pd_tool_bins[pd_tool_bins['newcolumn'] > .9].shape[0]
x50 -= x70
x70 -= x90
pd_tool_counts = pd.DataFrame([[x90, x70, x50]], columns=['>90', '>70', '>50'], index=[tool])
pd_counts = pd_counts.append(pd_tool_counts)
return pd_counts
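# Illustrative sketch (added for clarity, not called by AMBER itself): the score used
# above is completeness minus five times contamination. For example, a bin with
# recall_bp 0.95 and precision_bp 0.98 scores 0.95 + 5 * (0.98 - 1.0) = 0.85, so it
# falls into the ">70" bucket but not the ">90" one.
def _example_hq_score(recall_bp=0.95, precision_bp=0.98):
    return recall_bp + 5 * (precision_bp - 1)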
def plot_counts(pd_bins, tools, output_dir, output_file, get_bin_counts_function):
pd_counts = get_bin_counts_function(tools, pd_bins)
fig, axs = plt.subplots(figsize=(11, 5))
if output_file == 'bin_counts':
fig = pd_counts.plot.bar(ax=axs, stacked=False, color=['#28334AFF', '#FBDE44FF', '#F65058FF'], width=.8, legend=None).get_figure()
else:
fig = pd_counts.plot.bar(ax=axs, stacked=True, color=['#9B4A97FF', '#FC766AFF', '#F9A12EFF'], width=.8, legend=None).get_figure()
axs.tick_params(axis='x', labelrotation=45, length=0)
axs.set_xticklabels(tools, horizontalalignment='right', fontsize=14)
axs.set_xlabel(None)
# axs.yaxis.set_major_locator(MaxNLocator(integer=True))
h, l = axs.get_legend_handles_labels()
axs.set_ylabel('#genome bins', fontsize=14)
# axs.grid(which='major', linestyle=':', linewidth='0.5')
# axs.grid(which='minor', linestyle=':', linewidth='0.5')
ph = [plt.plot([], marker='', ls='')[0]]
handles = ph + h
if output_file == 'bin_counts':
labels = ['Contamination < 10% Completeness '] + l
bbox_to_anchor = (0.49, 1.02)
else:
labels = ['Score '] + l
y_values = (pd_counts['>90'] + pd_counts['>70'] + pd_counts['>50']).tolist()
for i, v in enumerate(y_values):
axs.text(i - .25, v + 5, str(v), color='black', fontweight='bold')
bbox_to_anchor = (0.47, 1.02)
lgd = plt.legend(handles, labels, bbox_to_anchor=bbox_to_anchor, columnspacing=.5, loc=8, borderaxespad=0., handlelength=1, frameon=False, fontsize=14, ncol=5)
# plt.subplots_adjust(hspace=0.6, wspace=0.2)
fig.savefig(os.path.join(output_dir, 'genome', output_file + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'genome', output_file + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
| CAMI-challenge/AMBER | src/plots.py | Python | apache-2.0 | 26,475 |
import os
import datetime
from jinja2 import Environment,PackageLoader,TemplateNotFound
from hotzenplotz.openstack.common import cfg
from hotzenplotz.openstack.common import log as logging
from hotzenplotz.openstack.common import utils
from hotzenplotz.common import exception
from hotzenplotz.api import validator
LOG = logging.getLogger(__name__)
class CronHandler(object):
"""Handler Cron Resource
"""
def __init__(self, **kwargs):
env = Environment(loader=PackageLoader('hotzenplotz.worker','templates'))
self.template = env.get_template('cron')
self.dir_path = None
# @utils.synchronized('haproxy')
def do_config(self, request):
try:
self._validate_request(request)
except exception.BadRequest as e:
LOG.warn('Bad request: %s' % e)
raise exception.CronConfigureError(explanation=str(e))
cmd = request['method']
msg = request['cron_resource']
if cmd == 'create_cron':
try:
self._create_cron(msg)
except exception.CronCreateError as e:
raise exception.CronConfigureError(explanation=str(e))
elif cmd == 'delete_cron':
try:
self._delete_cron(msg)
except exception.HaproxyDeleteError as e:
raise exception.CronConfigureError(explanation=str(e))
elif cmd == 'update_cron':
try:
self._update_cron(msg)
except exception.CronUpdateError as e:
raise exception.CronConfigureError(explanation=str(e))
def _create_cron(self,msg,syntax_check=False):
try:
output = self.template.render(cron_resource=msg)
except TemplateNotFound as e:
raise TemplateNotFound(str(e))
try:
if not self.dir_path:
self.dir_path = '/etc/puppet/modules/cron/'
cron_name = msg['title']
file_path = self.dir_path + cron_name
            if not os.path.exists(file_path):
with open(file_path,'a') as f:
f.write(output)
except exception.CronCreateError as e:
raise exception.CronCreateError(explanation=str(e))
if syntax_check:
try:
self._test_syntax(file_path)
except exception.ProcessExecutionError as e:
raise exception.CronCreateError(explanation=str(e))
LOG.debug("Created the new cron successfully")
def _delete_cron(self, msg):
LOG.debug("Deleting cron for NAME:%s USER: %s PROJECT:%s" %
(msg['id'], msg['user_id'], msg['project_id']))
try:
new_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
except exception.HaproxyLBNotExists as e:
LOG.warn('%s', e)
return
##raise exception.HaproxyDeleteError(explanation=str(e))
try:
self._test_haproxy_config(new_cfg_path)
except exception.ProcessExecutionError as e:
raise exception.HaproxyDeleteError(explanation=str(e))
rc, backup_path = self._backup_original_cfg()
if rc != 0:
raise exception.HaproxyDeleteError(explanation=backup_path)
rc, strerror = self._replace_original_cfg_with_new(new_cfg_path)
if rc != 0:
raise exception.HaproxyDeleteError(explanation=strerror)
if self._reload_haproxy_cfg(backup_path) != 0:
e = 'Failed to reload haproxy'
raise exception.HaproxyDeleteError(explanation=str(e))
LOG.debug("Deleted the new load balancer successfully")
def _update_cron(self, msg):
LOG.debug("Updating the haproxy load "
"balancer for NAME:%s USER: %s PROJECT:%s" %
(msg['uuid'], msg['user_id'], msg['project_id']))
try:
lb_deleted_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
except exception.HaproxyLBNotExists as e:
LOG.warn('%s', e)
raise exception.HaproxyUpdateError(explanation=str(e))
try:
new_cfg_path = self._create_lb_haproxy_cfg(
msg, base_cfg_path=lb_deleted_cfg_path)
except exception.HaproxyCreateCfgError as e:
raise exception.HaproxyUpdateError(explanation=str(e))
try:
self._test_haproxy_config(new_cfg_path)
except exception.ProcessExecutionError as e:
raise exception.HaproxyUpdateError(explanation=str(e))
LOG.debug("Updated the new load balancer successfully")
def _validate_request(self, request):
        validator.check_tcp_request(request)
def _get_lb_name(self, msg):
# TODO(wenjianhn): utf-8 support, base64
##return "%s_%s" % (msg['project_id'],
return "%s" % msg['uuid']
def _is_lb_in_use(self, lb_name,
base_cfg_path='/etc/haproxy/haproxy.cfg'):
with open(base_cfg_path) as cfg:
lines = cfg.readlines()
try:
in_use_lb_name = [line.split()[1] for line in lines
if line.startswith('listen')]
except IndexError:
LOG.error("No item was found after listen directive,"
"is the haproxy configuraion file valid?")
raise
return lb_name in in_use_lb_name
def _test_syntax(self, cfile_path):
LOG.info('Testing the new puppet configuration file')
cmd = "puppet parser validate %s" % cfile_path
try:
            utils.execute(*cmd.split())
except exception.ProcessExecutionError as e:
LOG.warn('Did not pass the configuration syntax test: %s', e)
raise
def _get_one_lb_info(self, line_all, line_index, line_total):
value = []
for i in range(line_index, line_total):
line = line_all[i]
if line.startswith('\t'):
value.append(line)
elif line.startswith('listen'):
return i, value
return line_total - 1, value
| NewpTone/hotzenplotz | hotzenplotz/worker/driver/cron.py | Python | apache-2.0 | 6,103 |
from must import MustHavePatterns
from successor import Successor
class TestSuccessor(object):
@classmethod
def setup_class(cls):
cls.test_patterns = MustHavePatterns(Successor)
def test_successor(self):
try:
self.test_patterns.create(Successor)
raise Exception("Recursive structure did not explode.")
except RuntimeError as re:
assert str(re).startswith("maximum recursion depth")
| umaptechnologies/must | examples/miscExamples/test_successor.py | Python | apache-2.0 | 457 |
#-*- encoding: utf-8 -*-
import csv, math, time, re, threading, sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
class ErAPI():
    # Constructor: performs the basic configuration setup and creates the helper objects
def __init__(self):
self.data = {}
# Data format: {'XXCiro|BNC': {'id': 123456, 'nick': 'XXCiro', 'level': 49, 'strength': 532.5, 'rank_points': 1233354, 'citizenship': 'Argentina'}}
        # Dictionary mapping rank -> required rank points
self.rank_required_points = {
"Recruit": 0,
"Private": 15,
"Private*": 45,
"Private**": 80,
"Private***": 120,
"Corporal": 170,
"Corporal*": 250,
"Corporal**": 350,
"Corporal***": 450,
"Sergeant": 600,
"Sergeant*": 800,
"Sergeant**": 1000,
"Sergeant***": 1400,
"Lieutenant": 1850,
"Lieutenant*": 2350,
"Lieutenant**": 3000,
"Lieutenant***": 3750,
"Captain": 5000,
"Captain*": 6500,
"Captain**": 9000,
"Captain***": 12000,
"Major": 15500,
"Major*": 20000,
"Major**": 25000,
"Major***": 31000,
"Commander": 40000,
"Commander*": 52000,
"Commander**": 67000,
"Commander***": 85000,
"Lt Colonel": 110000,
"Lt Colonel*": 140000,
"Lt Colonel**": 180000,
"Lt Colonel***": 225000,
"Colonel": 285000,
"Colonel*": 355000,
"Colonel**": 435000,
"Colonel***": 540000,
"General": 660000,
"General*": 800000,
"General**": 950000,
"General***": 1140000,
"Field Marshal": 1350000,
"Field Marshal*": 1600000,
"Field Marshal**": 1875000,
"Field Marshal***": 2185000,
"Supreme Marshal": 2550000,
"Supreme Marshal*": 3000000,
"Supreme Marshal**": 3500000,
"Supreme Marshal***": 4150000,
"National Force": 4900000,
"National Force*": 5800000,
"National Force**": 7000000,
"National Force***": 9000000,
"World Class Force": 11500000,
"World Class Force*": 14500000,
"World Class Force**": 18000000,
"World Class Force***": 22000000,
"Legendary Force": 26500000,
"Legendary Force*": 31500000,
"Legendary Force**": 37000000,
"Legendary Force***": 42000000,
"God of War": 50000000,
"God of War*": 100000000 ,
"God of War**": 200000000,
"God of War***": 500000000,
"Titan": 1000000000,
"Titan*": 2000000000,
"Titan**": 4000000000,
"Titan***": 10000000000}
        # List of ranks ordered by seniority
self.rank_to_pos = [
"Recruit",
"Private",
"Private*",
"Private**",
"Private***",
"Corporal",
"Corporal*",
"Corporal**",
"Corporal***",
"Sergeant",
"Sergeant*",
"Sergeant**",
"Sergeant***",
"Lieutenant",
"Lieutenant*",
"Lieutenant**",
"Lieutenant***",
"Captain",
"Captain*",
"Captain**",
"Captain***",
"Major",
"Major*",
"Major**",
"Major***",
"Commander",
"Commander*",
"Commander**",
"Commander***",
"Lt Colonel",
"Lt Colonel*",
"Lt Colonel**",
"Lt Colonel***",
"Colonel",
"Colonel*",
"Colonel**",
"Colonel***",
"General",
"General*",
"General**",
"General***",
"Field Marshal",
"Field Marshal*",
"Field Marshal**",
"Field Marshal***",
"Supreme Marshal",
"Supreme Marshal*",
"Supreme Marshal**",
"Supreme Marshal***",
"National Force",
"National Force*",
"National Force**",
"National Force***",
"World Class Force",
"World Class Force*",
"World Class Force**",
"World Class Force***",
"Legendary Force",
"Legendary Force*",
"Legendary Force**",
"Legendary Force***",
"God of War",
"God of War*",
"God of War**",
"God of War***",
"Titan",
"Titan*",
"Titan**",
"Titan***",]
        # Run flag, useful if the data update/save threads ever have to be stopped manually
self.run = True
        # Data loading is offloaded to a new thread, daemonized so it dies with its parent on a premature exit
th = threading.Thread(target=self.data_loader)
th.daemon = True
th.start()
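    # Illustrative sketch (added for clarity, not part of the original API): the cache
    # above is keyed by lowercase IRC nick, so a hypothetical caller that registered an
    # id via reg_nick_write() can read the scraped fields back through the getters below.
    def example_record_fields(self, irc_nick):
        # Returns the field names stored for one cached citizen record.
        return sorted(self.data[irc_nick.lower()].keys())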
    # Launcher method: loads the data and spawns the save/update threads; only called from the constructor
def data_loader(self):
self.load_data()
self.data_saver_th = threading.Thread(target=self.data_saver)
self.data_saver_th.daemon = True
self.data_saver_th.start()
self.data_updater_th = threading.Thread(target=self.data_updater)
self.data_updater_th.daemon = True
self.data_updater_th.start()
    # Periodically dumps the in-memory data to disk; only called from data_loader
def data_saver(self):
while self.run:
self.save_data()
time.sleep(60)
    # Periodically refreshes the cached data; only called from data_loader
def data_updater(self):
while self.run:
for irc_nick in self.data:
self.update_data(irc_nick)
time.sleep(30)
time.sleep(600)
# ---------------------------------------------------------------------------------- #
# @ PUBLIC METHODS #
# ---------------------------------------------------------------------------------- #
    # Load the object's local data from file
def load_data(self):
try:
f = open('data/er_nick-data.csv', 'rt')
reader = csv.reader(f)
for nick_irc,id,nick_er,level,strength,rank_points,citizenship in reader:
self.data[nick_irc] = {'id': int(id), 'nick': nick_er, 'level': int(level), 'strength': float(strength), 'rank_points': int(rank_points), 'citizenship': citizenship}
f.close()
except:
pass
    # Save the object's local data to file
def save_data(self):
try:
f = open('data/er_nick-data.csv', 'wt')
writer = csv.writer(f)
for u in self.data:
writer.writerow([u, self.data[u]['id'], self.data[u]['nick'], self.data[u]['level'], self.data[u]['strength'], self.data[u]['rank_points'], self.data[u]['citizenship']])
f.close()
except:
pass
    # Scraper that refreshes the local data for the given IRC nick
def update_data(self, irc_nick):
try:
id = self.data[irc_nick]['id']
c = urlopen('http://www.erepublik.com/es/citizen/profile/%d' % id)
page = c.read()
c.close()
self.data[irc_nick]['nick'] = re.search('<meta name="title" content="(.+?) - Ciudadano del Nuevo Mundo" \/>', page.decode('utf-8')).group(1)
self.data[irc_nick]['level'] = int(re.search('<strong class="citizen_level">(.+?)<\/strong>', page.decode('utf-8'), re.DOTALL).group(1))
self.data[irc_nick]['strength'] = float(re.search('<span class="military_box_info mb_bottom">(.+?)</span>', page.decode('utf-8'), re.DOTALL).group(1).strip('\r\n\t ').replace(',',''))
self.data[irc_nick]['rank_points'] = int(re.search('<span class="rank_numbers">(.+?) \/', page.decode('utf-8'), re.DOTALL).group(1).replace(',',''))
self.data[irc_nick]['citizenship'] = re.search('<a href="http\:\/\/www.erepublik.com\/es\/country\/society\/([^ \t\n\x0B\f\r]+?)">', page.decode('utf-8')).group(1)
except:
pass
    # Register the given IRC nick and citizen id, forcing an immediate data refresh
def reg_nick_write(self, nick, id):
if(nick.lower() in self.data.keys()):
self.data[nick.lower()]['id'] = int(id)
else:
self.data[nick.lower()] = {'id': int(id), 'nick': nick, 'level': 1, 'strength': 0, 'rank_points': 0, 'citizenship': ''}
self.update_data(nick.lower())
    # Get the ID for the given IRC nick
def get_id(self, nick):
return self.data[nick.lower()]['id']
    # Get the LEVEL for the given IRC nick
def get_level(self, nick):
return self.data[nick.lower()]['level']
    # Get the STRENGTH for the given IRC nick
def get_strength(self, nick):
return self.data[nick.lower()]['strength']
    # Get the RANK POINTS for the given IRC nick
def get_rank_points(self, nick):
return self.data[nick.lower()]['rank_points']
    # Get the CITIZENSHIP for the given IRC nick
def get_citizenship(self, nick):
return self.data[nick.lower()]['citizenship']
    # Get the in-game NICK for the given IRC nick
def get_nick(self, nick):
return self.data[nick.lower()]['nick']
    # Compute the RANK NAME for the given amount of rank points
def calculate_rank_name(self, rank_points):
index = 0
for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
if(self.rank_to_pos.index(k) > index):
index = self.rank_to_pos.index(k)
return self.rank_to_pos[index]
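    # Illustrative sketch (added for clarity, not part of the original API): the rank
    # tables above are consumed by calculate_rank_name(), which returns the highest
    # rank whose required points are strictly below the given total.
    def example_rank_lookup(self):
        # 46 rank points clear "Private*" (45 required) but not "Private**" (80),
        # so the lookup resolves to "Private*".
        return self.calculate_rank_name(46)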
    # Compute the DAMAGE dealt for the given rank points, strength, weapon power, level and bonus
def calculate_damage(self, rank_points, strength, weapon_power, level, bonus):
index = 0
for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
if(self.rank_to_pos.index(k) > index):
index = self.rank_to_pos.index(k)
        return(math.trunc(((index / 20) + 0.3) * ((strength / 10) + 40) * (1 + (weapon_power / 100)) * (1.1 if level > 99 else 1) * bonus))
| CPedrini/TateTRES | erapi.py | Python | apache-2.0 | 11,009 |
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
return configuration
class FakeVolume(object):
def __init__(self, size=0):
self.size = size
self.id = hash(self)
self.name = None
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
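# Illustrative usage sketch (added for clarity, not referenced by the tests below):
# FakeResponse mimics the minimal NetApp API reply shape the driver code inspects.
def _example_fake_response_usage():
    ok, bad = FakeResponse('passed'), FakeResponse('failed')
    # Only a failed response carries a Reason that the driver can surface in errors.
    return ok.Status, bad.Status, bad.Reason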
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
def setUp(self):
super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
setattr(drv, 'ssc_enabled', False)
mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
netapp_nfs.NetAppDirectNfsDriver._check_flags()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_get_if_info_by_ip')
mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
mox.StubOutWithMock(drv, '_clone_file')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
drv._get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
drv._clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._post_prov_deprov_in_ssc(IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return api.NaElement(response_el).get_children()
def test_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
share = 'ip:/share'
drv._clone_volume(volume_name, clone_name, volume_id, share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
if not result:
pass
else:
self.fail('Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if (share == 'testshare' and file_name == 'img-cache-id'):
pass
else:
LOG.warn(_("Share %(share)s and file name %(file_name)s")
% {'share': share, 'file_name': file_name})
self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == 0:
pass
else:
self.fail('No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == len(r_files):
for f in res:
r_files.remove(f)
else:
self.fail('Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._delete_file('/mnt/img-cache-2').AndReturn(True)
drv._delete_file('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image(volume, ('image_location', None), 'image_id', {})
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
def test_clone_image_cloneableshare_nospace(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
mox.ReplayAll()
        (prop, cloned) = drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
if not cloned and not prop['provider_location']:
pass
else:
self.fail('Expected not cloned, got cloned.')
def test_clone_image_cloneableshare_raw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('raw'))
drv._clone_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
        drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_cloneableshare_notraw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
        drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_file_not_discovered(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
        vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_clone_image_resizefails(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file(
IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
        vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
                 'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
[{'metadata':
{'share_location': 'nfs://host/path',
'mount_point': '/opt/stack/data/glance',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id'}])
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {}
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
volume_info = self._driver.create_volume(FakeVolume(1))
self.assertEqual(volume_info.get('provider_location'),
fake_share)
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_volume = FakeVolume(1)
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
with mock.patch.object(drv,
'_set_qos_policy_group_on_volume'
) as mock_set_qos:
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual(volume_info.get('provider_location'),
'localhost:myshare')
mock_set_qos.assert_called_once_with(fake_volume,
fake_share,
fake_qos_policy)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value="")
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
        drv._check_get_nfs_path_segs = mock.Mock(
            return_value=('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
        drv._check_get_nfs_path_segs = mock.Mock(
            return_value=('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file.call_count == 2
        assert drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
"""Test direct NetApp C Mode driver."""
def _custom_setup(self):
self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
configuration=create_configuration())
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_check_for_setup_error_version(self):
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
# check exception raises when version not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
drv._client.set_api_version(1, 8)
# check exception raises when not supported version
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
drv._client.set_api_version(1, 9)
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
mox.StubOutWithMock(drv, '_get_actual_path_for_export')
mox.StubOutWithMock(drv, '_start_clone')
mox.StubOutWithMock(drv, '_wait_for_clone_finish')
if status == 'fail':
mox.StubOutWithMock(drv, '_clear_clone')
drv._get_export_ip_path(
IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
if status == 'fail':
drv._wait_for_clone_finish('1', '2').AndRaise(
api.NaApiError('error', 'error'))
drv._clear_clone('1')
else:
drv._wait_for_clone_finish('1', '2')
return mox
def test_clone_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
try:
drv._clone_volume(volume_name, clone_name, volume_id)
except Exception as e:
if isinstance(e, api.NaApiError):
pass
else:
raise
mox.VerifyAll()
| github-borat/cinder | cinder/tests/test_netapp_nfs.py | Python | apache-2.0 | 47,799 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
import six
import yaml
from heat.common import config
from heat.common import exception
from heat.common import template_format
from heat.tests.common import HeatTestCase
from heat.tests import utils
class JsonToYamlTest(HeatTestCase):
def setUp(self):
super(JsonToYamlTest, self).setUp()
self.expected_test_count = 2
self.longMessage = True
self.maxDiff = None
def test_convert_all_templates(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates')
template_test_count = 0
for (json_str,
yml_str,
file_name) in self.convert_all_json_to_yaml(path):
self.compare_json_vs_yaml(json_str, yml_str, file_name)
template_test_count += 1
if template_test_count >= self.expected_test_count:
break
self.assertTrue(template_test_count >= self.expected_test_count,
'Expected at least %d templates to be tested, not %d' %
(self.expected_test_count, template_test_count))
def compare_json_vs_yaml(self, json_str, yml_str, file_name):
yml = template_format.parse(yml_str)
self.assertEqual(u'2012-12-12', yml[u'HeatTemplateFormatVersion'],
file_name)
self.assertFalse(u'AWSTemplateFormatVersion' in yml, file_name)
del(yml[u'HeatTemplateFormatVersion'])
jsn = template_format.parse(json_str)
if u'AWSTemplateFormatVersion' in jsn:
del(jsn[u'AWSTemplateFormatVersion'])
self.assertEqual(yml, jsn, file_name)
def convert_all_json_to_yaml(self, dirpath):
for path in os.listdir(dirpath):
if not path.endswith('.template') and not path.endswith('.json'):
continue
f = open(os.path.join(dirpath, path), 'r')
json_str = f.read()
yml_str = template_format.convert_json_to_yaml(json_str)
yield (json_str, yml_str, f.name)
class YamlMinimalTest(HeatTestCase):
def _parse_template(self, tmpl_str, msg_str):
parse_ex = self.assertRaises(ValueError,
template_format.parse,
tmpl_str)
self.assertIn(msg_str, six.text_type(parse_ex))
def test_long_yaml(self):
template = {'HeatTemplateFormatVersion': '2012-12-12'}
config.cfg.CONF.set_override('max_template_size', 1024)
        template['Resources'] = ['a'] * (config.cfg.CONF.max_template_size // 3)
limit = config.cfg.CONF.max_template_size
long_yaml = yaml.safe_dump(template)
self.assertTrue(len(long_yaml) > limit)
ex = self.assertRaises(exception.RequestLimitExceeded,
template_format.parse, long_yaml)
msg = ('Request limit exceeded: Template exceeds maximum allowed size '
'(1024 bytes)')
self.assertEqual(msg, six.text_type(ex))
def test_parse_no_version_format(self):
yaml = ''
self._parse_template(yaml, 'Template format version not found')
yaml2 = '''Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
self._parse_template(yaml2, 'Template format version not found')
def test_parse_string_template(self):
tmpl_str = 'just string'
msg = 'The template is not a JSON object or YAML mapping.'
self._parse_template(tmpl_str, msg)
def test_parse_invalid_yaml_and_json_template(self):
tmpl_str = '{test'
msg = 'line 1, column 1'
self._parse_template(tmpl_str, msg)
def test_parse_json_document(self):
tmpl_str = '["foo" , "bar"]'
msg = 'The template is not a JSON object or YAML mapping.'
self._parse_template(tmpl_str, msg)
def test_parse_empty_json_template(self):
tmpl_str = '{}'
msg = 'Template format version not found'
self._parse_template(tmpl_str, msg)
def test_parse_yaml_template(self):
tmpl_str = 'heat_template_version: 2013-05-23'
expected = {'heat_template_version': '2013-05-23'}
self.assertEqual(expected, template_format.parse(tmpl_str))
class YamlParseExceptions(HeatTestCase):
scenarios = [
('scanner', dict(raised_exception=yaml.scanner.ScannerError())),
('parser', dict(raised_exception=yaml.parser.ParserError())),
('reader',
dict(raised_exception=yaml.reader.ReaderError('', '', '', '', ''))),
]
def test_parse_to_value_exception(self):
text = 'not important'
with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = self.raised_exception
self.assertRaises(ValueError,
template_format.parse, text)
class JsonYamlResolvedCompareTest(HeatTestCase):
def setUp(self):
super(JsonYamlResolvedCompareTest, self).setUp()
self.longMessage = True
self.maxDiff = None
def load_template(self, file_name):
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates', file_name)
f = open(filepath)
t = template_format.parse(f.read())
f.close()
return t
def compare_stacks(self, json_file, yaml_file, parameters):
t1 = self.load_template(json_file)
t2 = self.load_template(yaml_file)
del(t1[u'AWSTemplateFormatVersion'])
t1[u'HeatTemplateFormatVersion'] = t2[u'HeatTemplateFormatVersion']
stack1 = utils.parse_stack(t1, parameters)
stack2 = utils.parse_stack(t2, parameters)
# compare resources separately so that resolved static data
# is compared
t1nr = dict(stack1.t.t)
del(t1nr['Resources'])
t2nr = dict(stack2.t.t)
del(t2nr['Resources'])
self.assertEqual(t1nr, t2nr)
self.assertEqual(set(stack1.keys()), set(stack2.keys()))
for key in stack1:
self.assertEqual(stack1[key].t, stack2[key].t)
def test_neutron_resolved(self):
self.compare_stacks('Neutron.template', 'Neutron.yaml', {})
def test_wordpress_resolved(self):
self.compare_stacks('WordPress_Single_Instance.template',
'WordPress_Single_Instance.yaml',
{'KeyName': 'test'})
| redhat-openstack/heat | heat/tests/test_template_format.py | Python | apache-2.0 | 7,015 |
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
import requests
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
ENV = {
'__actions': {
'std.http': {
'auth': 'librarian:password123',
'timeout': 30,
}
}
}
EXPECTED_ENV_AUTH = ('librarian', 'password123')
WORKFLOW1 = """
---
version: "2.0"
wf1:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books"
publish:
result: <% $ %>
"""
WORKFLOW2 = """
---
version: "2.0"
wf2:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books" timeout=60
publish:
result: <% $ %>
"""
WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"
wf1_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %>
publish:
result: <% $ %>
"""
WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"
wf2_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %> timeout=60
publish:
result: <% $ %>
"""
class ActionDefaultTest(base.EngineTestCase):
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1)
wf_ex = self.engine.start_workflow('wf1', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=ENV['__actions']['std.http']['timeout'])
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2)
wf_ex = self.engine.start_workflow('wf2', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=60
)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf1_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=ENV['__actions']['std.http']['timeout'])
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf2_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=60)
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
| StackStorm/mistral | mistral/tests/unit/engine/test_action_defaults.py | Python | apache-2.0 | 6,915 |
import re
import unicodedata
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Union
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models.query import QuerySet
from django.forms.models import model_to_dict
from django.utils.translation import gettext as _
from typing_extensions import TypedDict
from zulip_bots.custom_exceptions import ConfigValidationError
from zerver.lib.avatar import avatar_url, get_avatar_field
from zerver.lib.cache import (
bulk_cached_fetch,
realm_user_dict_fields,
user_profile_by_id_cache_key,
user_profile_cache_key_id,
)
from zerver.lib.exceptions import OrganizationAdministratorRequired
from zerver.lib.request import JsonableError
from zerver.lib.timezone import canonicalize_timezone
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
Realm,
Service,
UserProfile,
get_realm_user_dicts,
get_user_profile_by_id_in_realm,
)
def check_full_name(full_name_raw: str) -> str:
full_name = full_name_raw.strip()
if len(full_name) > UserProfile.MAX_NAME_LENGTH:
raise JsonableError(_("Name too long!"))
if len(full_name) < UserProfile.MIN_NAME_LENGTH:
raise JsonableError(_("Name too short!"))
for character in full_name:
if unicodedata.category(character)[0] == "C" or character in UserProfile.NAME_INVALID_CHARS:
raise JsonableError(_("Invalid characters in name!"))
# Names ending with e.g. `|15` could be ambiguous for
# sloppily-written parsers of our Markdown syntax for mentioning
# users with ambiguous names, and likely have no real use, so we
# ban them.
if re.search(r"\|\d+$", full_name_raw):
raise JsonableError(_("Invalid format!"))
return full_name
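# For instance, check_full_name("Cordelia|15") raises JsonableError("Invalid
# format!") because of the trailing "|<digits>" check above, while an ordinary
# name is returned with surrounding whitespace stripped.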
# NOTE: We don't try to absolutely prevent 2 bots from having the same
# name (e.g. you can get there by reactivating a deactivated bot after
# making a new bot with the same name). This is just a check designed
# to make it unlikely to happen by accident.
def check_bot_name_available(realm_id: int, full_name: str) -> None:
dup_exists = UserProfile.objects.filter(
realm_id=realm_id,
full_name=full_name.strip(),
is_active=True,
).exists()
if dup_exists:
raise JsonableError(_("Name is already in use!"))
def check_short_name(short_name_raw: str) -> str:
short_name = short_name_raw.strip()
if len(short_name) == 0:
raise JsonableError(_("Bad name or username"))
return short_name
def check_valid_bot_config(bot_type: int, service_name: str, config_data: Dict[str, str]) -> None:
if bot_type == UserProfile.INCOMING_WEBHOOK_BOT:
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
config_options = None
for integration in WEBHOOK_INTEGRATIONS:
if integration.name == service_name:
# key: validator
config_options = {c[1]: c[2] for c in integration.config_options}
break
if not config_options:
raise JsonableError(_("Invalid integration '{}'.").format(service_name))
missing_keys = set(config_options.keys()) - set(config_data.keys())
if missing_keys:
raise JsonableError(
_("Missing configuration parameters: {}").format(
missing_keys,
)
)
for key, validator in config_options.items():
value = config_data[key]
error = validator(key, value)
if error:
raise JsonableError(_("Invalid {} value {} ({})").format(key, value, error))
elif bot_type == UserProfile.EMBEDDED_BOT:
try:
from zerver.lib.bot_lib import get_bot_handler
bot_handler = get_bot_handler(service_name)
if hasattr(bot_handler, "validate_config"):
bot_handler.validate_config(config_data)
except ConfigValidationError:
# The exception provides a specific error message, but that
# message is not tagged translatable, because it is
# triggered in the external zulip_bots package.
# TODO: Think of some clever way to provide a more specific
# error message.
raise JsonableError(_("Invalid configuration data!"))
# Adds an outgoing webhook or embedded bot service.
def add_service(
name: str,
user_profile: UserProfile,
base_url: Optional[str] = None,
interface: Optional[int] = None,
token: Optional[str] = None,
) -> None:
Service.objects.create(
name=name, user_profile=user_profile, base_url=base_url, interface=interface, token=token
)
def check_bot_creation_policy(user_profile: UserProfile, bot_type: int) -> None:
# Realm administrators can always add bot
if user_profile.is_realm_admin:
return
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_EVERYONE:
return
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_ADMINS_ONLY:
raise OrganizationAdministratorRequired()
if (
user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
and bot_type == UserProfile.DEFAULT_BOT
):
raise OrganizationAdministratorRequired()
def check_valid_bot_type(user_profile: UserProfile, bot_type: int) -> None:
if bot_type not in user_profile.allowed_bot_types:
raise JsonableError(_("Invalid bot type"))
def check_valid_interface_type(interface_type: Optional[int]) -> None:
if interface_type not in Service.ALLOWED_INTERFACE_TYPES:
raise JsonableError(_("Invalid interface type"))
def is_administrator_role(role: int) -> bool:
return role in {UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_REALM_OWNER}
def bulk_get_users(
    emails: List[str],
    realm: Optional[Realm],
    base_query: Optional["QuerySet[UserProfile]"] = None,
) -> Dict[str, UserProfile]:
if base_query is None:
assert realm is not None
query = UserProfile.objects.filter(realm=realm, is_active=True)
realm_id = realm.id
else:
# WARNING: Currently, this code path only really supports one
# version of `base_query` being used (because otherwise,
# they'll share the cache, which can screw up the filtering).
# If you're using this flow, you'll need to re-do any filters
# in base_query in the code itself; base_query is just a perf
# optimization.
query = base_query
realm_id = 0
def fetch_users_by_email(emails: List[str]) -> List[UserProfile]:
# This should be just
#
# UserProfile.objects.select_related("realm").filter(email__iexact__in=emails,
# realm=realm)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
where_clause = "upper(zerver_userprofile.email::text) IN (SELECT upper(email) FROM unnest(%s) AS email)"
return query.select_related("realm").extra(where=[where_clause], params=(emails,))
def user_to_email(user_profile: UserProfile) -> str:
return user_profile.email.lower()
return bulk_cached_fetch(
# Use a separate cache key to protect us from conflicts with
# the get_user cache.
lambda email: "bulk_get_users:" + user_profile_cache_key_id(email, realm_id),
fetch_users_by_email,
[email.lower() for email in emails],
id_fetcher=user_to_email,
)
def get_user_id(user: UserProfile) -> int:
return user.id
def user_ids_to_users(user_ids: Sequence[int], realm: Realm) -> List[UserProfile]:
# TODO: Consider adding a flag to control whether deactivated
# users should be included.
def fetch_users_by_id(user_ids: List[int]) -> List[UserProfile]:
return list(UserProfile.objects.filter(id__in=user_ids).select_related())
user_profiles_by_id: Dict[int, UserProfile] = bulk_cached_fetch(
cache_key_function=user_profile_by_id_cache_key,
query_function=fetch_users_by_id,
object_ids=user_ids,
id_fetcher=get_user_id,
)
found_user_ids = user_profiles_by_id.keys()
missed_user_ids = [user_id for user_id in user_ids if user_id not in found_user_ids]
if missed_user_ids:
raise JsonableError(_("Invalid user ID: {}").format(missed_user_ids[0]))
user_profiles = list(user_profiles_by_id.values())
for user_profile in user_profiles:
if user_profile.realm != realm:
raise JsonableError(_("Invalid user ID: {}").format(user_profile.id))
return user_profiles
def access_bot_by_id(user_profile: UserProfile, user_id: int) -> UserProfile:
try:
target = get_user_profile_by_id_in_realm(user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such bot"))
if not target.is_bot:
raise JsonableError(_("No such bot"))
if not user_profile.can_admin_user(target):
raise JsonableError(_("Insufficient permission"))
return target
def access_user_by_id(
user_profile: UserProfile,
target_user_id: int,
*,
allow_deactivated: bool = False,
allow_bots: bool = False,
for_admin: bool,
) -> UserProfile:
"""Master function for accessing another user by ID in API code;
verifies the user ID is in the same realm, and if requested checks
for administrative privileges, with flags for various special
cases.
"""
try:
target = get_user_profile_by_id_in_realm(target_user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such user"))
if target.is_bot and not allow_bots:
raise JsonableError(_("No such user"))
if not target.is_active and not allow_deactivated:
raise JsonableError(_("User is deactivated"))
if not for_admin:
# Administrative access is not required just to read a user.
return target
if not user_profile.can_admin_user(target):
raise JsonableError(_("Insufficient permission"))
return target
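# Illustrative call shapes for access_user_by_id (a minimal sketch; the names
# and IDs below are hypothetical, not part of this module):
#
#     # Administrative write path: requires admin rights over the target.
#     target = access_user_by_id(acting_user, target_user_id, for_admin=True)
#
#     # Read-only path: no admin check, but deactivated users are still
#     # rejected unless allow_deactivated=True is passed.
#     target = access_user_by_id(
#         acting_user, target_user_id, allow_deactivated=True, for_admin=False
#     )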
class Accounts(TypedDict):
realm_name: str
realm_id: int
full_name: str
avatar: Optional[str]
def get_accounts_for_email(email: str) -> List[Accounts]:
profiles = (
UserProfile.objects.select_related("realm")
.filter(
delivery_email__iexact=email.strip(),
is_active=True,
realm__deactivated=False,
is_bot=False,
)
.order_by("date_joined")
)
accounts: List[Accounts] = []
for profile in profiles:
accounts.append(
dict(
realm_name=profile.realm.name,
realm_id=profile.realm.id,
full_name=profile.full_name,
avatar=avatar_url(profile),
)
)
return accounts
def get_api_key(user_profile: UserProfile) -> str:
return user_profile.api_key
def get_all_api_keys(user_profile: UserProfile) -> List[str]:
# Users can only have one API key for now
return [user_profile.api_key]
def validate_user_custom_profile_field(
realm_id: int, field: CustomProfileField, value: Union[int, str, List[int]]
) -> Union[int, str, List[int]]:
validators = CustomProfileField.FIELD_VALIDATORS
field_type = field.field_type
var_name = f"{field.name}"
if field_type in validators:
validator = validators[field_type]
return validator(var_name, value)
elif field_type == CustomProfileField.SELECT:
choice_field_validator = CustomProfileField.SELECT_FIELD_VALIDATORS[field_type]
field_data = field.field_data
# Put an assertion so that mypy doesn't complain.
assert field_data is not None
return choice_field_validator(var_name, field_data, value)
elif field_type == CustomProfileField.USER:
user_field_validator = CustomProfileField.USER_FIELD_VALIDATORS[field_type]
return user_field_validator(realm_id, value, False)
else:
raise AssertionError("Invalid field type")
def validate_user_custom_profile_data(
realm_id: int, profile_data: List[Dict[str, Union[int, str, List[int]]]]
) -> None:
# This function validate all custom field values according to their field type.
for item in profile_data:
field_id = item["id"]
try:
field = CustomProfileField.objects.get(id=field_id)
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
try:
validate_user_custom_profile_field(realm_id, field, item["value"])
except ValidationError as error:
raise JsonableError(error.message)
def can_access_delivery_email(user_profile: UserProfile) -> bool:
realm = user_profile.realm
if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS:
return user_profile.is_realm_admin
if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_MODERATORS:
return user_profile.is_realm_admin or user_profile.is_moderator
return False
def format_user_row(
realm: Realm,
acting_user: Optional[UserProfile],
row: Dict[str, Any],
client_gravatar: bool,
user_avatar_url_field_optional: bool,
custom_profile_field_data: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Formats a user row returned by a database fetch using
.values(*realm_user_dict_fields) into a dictionary representation
of that user for API delivery to clients. The acting_user
argument is used for permissions checks.
"""
is_admin = is_administrator_role(row["role"])
is_owner = row["role"] == UserProfile.ROLE_REALM_OWNER
is_guest = row["role"] == UserProfile.ROLE_GUEST
is_bot = row["is_bot"]
result = dict(
email=row["email"],
user_id=row["id"],
avatar_version=row["avatar_version"],
is_admin=is_admin,
is_owner=is_owner,
is_guest=is_guest,
is_billing_admin=row["is_billing_admin"],
role=row["role"],
is_bot=is_bot,
full_name=row["full_name"],
timezone=canonicalize_timezone(row["timezone"]),
is_active=row["is_active"],
date_joined=row["date_joined"].isoformat(),
)
# Zulip clients that support using `GET /avatar/{user_id}` as a
# fallback if we didn't send an avatar URL in the user object pass
# user_avatar_url_field_optional in client_capabilities.
#
# This is a major network performance optimization for
# organizations with 10,000s of users where we would otherwise
# send avatar URLs in the payload (either because most users have
# uploaded avatars or because EMAIL_ADDRESS_VISIBILITY_ADMINS
# prevents the older client_gravatar optimization from helping).
    # The performance impact is large, largely because the hashes in
    # avatar URLs structurally cannot compress well.
    #
    # The user_avatar_url_field_optional gives the server sole
    # discretion in deciding for which users we want to send the
    # avatar URL (which saves clients an RTT at the cost of some
# bandwidth). At present, the server looks at `long_term_idle` to
# decide which users to include avatars for, piggy-backing on a
# different optimization for organizations with 10,000s of users.
include_avatar_url = not user_avatar_url_field_optional or not row["long_term_idle"]
if include_avatar_url:
result["avatar_url"] = get_avatar_field(
user_id=row["id"],
realm_id=realm.id,
email=row["delivery_email"],
avatar_source=row["avatar_source"],
avatar_version=row["avatar_version"],
medium=False,
client_gravatar=client_gravatar,
)
if acting_user is not None and can_access_delivery_email(acting_user):
result["delivery_email"] = row["delivery_email"]
if is_bot:
result["bot_type"] = row["bot_type"]
if row["email"] in settings.CROSS_REALM_BOT_EMAILS:
result["is_cross_realm_bot"] = True
# Note that bot_owner_id can be None with legacy data.
result["bot_owner_id"] = row["bot_owner_id"]
elif custom_profile_field_data is not None:
result["profile_data"] = custom_profile_field_data
return result
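# Minimal sketch of how format_user_row is meant to be fed (assumed wiring that
# mirrors get_raw_user_data below; `realm` and `acting_user` are placeholders):
#
#     rows = UserProfile.objects.filter(realm=realm).values(*realm_user_dict_fields)
#     payload = [
#         format_user_row(
#             realm,
#             acting_user=acting_user,
#             row=row,
#             client_gravatar=True,
#             user_avatar_url_field_optional=False,
#         )
#         for row in rows
#     ]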
def user_profile_to_user_row(user_profile: UserProfile) -> Dict[str, Any]:
# What we're trying to do is simulate the user_profile having been
# fetched from a QuerySet using `.values(*realm_user_dict_fields)`
# even though we fetched UserProfile objects. This is messier
# than it seems.
#
# What we'd like to do is just call model_to_dict(user,
# fields=realm_user_dict_fields). The problem with this is
# that model_to_dict has a different convention than
# `.values()` in its handling of foreign keys, naming them as
# e.g. `bot_owner`, not `bot_owner_id`; we work around that
# here.
#
# This could be potentially simplified in the future by
# changing realm_user_dict_fields to name the bot owner with
# the less readable `bot_owner` (instead of `bot_owner_id`).
user_row = model_to_dict(user_profile, fields=[*realm_user_dict_fields, "bot_owner"])
user_row["bot_owner_id"] = user_row["bot_owner"]
del user_row["bot_owner"]
return user_row
def get_cross_realm_dicts() -> List[Dict[str, Any]]:
users = bulk_get_users(
list(settings.CROSS_REALM_BOT_EMAILS),
None,
base_query=UserProfile.objects.filter(realm__string_id=settings.SYSTEM_BOT_REALM),
).values()
result = []
for user in users:
        # Important: We filter here, in addition to in
# `base_query`, because of how bulk_get_users shares its
# cache with other UserProfile caches.
if user.realm.string_id != settings.SYSTEM_BOT_REALM: # nocoverage
continue
user_row = user_profile_to_user_row(user)
        # Because we want to avoid clients being exposed to the
# implementation detail that these bots are self-owned, we
# just set bot_owner_id=None.
user_row["bot_owner_id"] = None
result.append(
format_user_row(
user.realm,
acting_user=user,
row=user_row,
client_gravatar=False,
user_avatar_url_field_optional=False,
custom_profile_field_data=None,
)
)
return result
def get_custom_profile_field_values(
custom_profile_field_values: List[CustomProfileFieldValue],
) -> Dict[int, Dict[str, Any]]:
profiles_by_user_id: Dict[int, Dict[str, Any]] = defaultdict(dict)
for profile_field in custom_profile_field_values:
user_id = profile_field.user_profile_id
if profile_field.field.is_renderable():
profiles_by_user_id[user_id][str(profile_field.field_id)] = {
"value": profile_field.value,
"rendered_value": profile_field.rendered_value,
}
else:
profiles_by_user_id[user_id][str(profile_field.field_id)] = {
"value": profile_field.value,
}
return profiles_by_user_id
def get_raw_user_data(
realm: Realm,
acting_user: Optional[UserProfile],
*,
target_user: Optional[UserProfile] = None,
client_gravatar: bool,
user_avatar_url_field_optional: bool,
include_custom_profile_fields: bool = True,
) -> Dict[int, Dict[str, str]]:
"""Fetches data about the target user(s) appropriate for sending to
acting_user via the standard format for the Zulip API. If
target_user is None, we fetch all users in the realm.
"""
profiles_by_user_id = None
custom_profile_field_data = None
# target_user is an optional parameter which is passed when user data of a specific user
# is required. It is 'None' otherwise.
if target_user is not None:
user_dicts = [user_profile_to_user_row(target_user)]
else:
user_dicts = get_realm_user_dicts(realm.id)
if include_custom_profile_fields:
base_query = CustomProfileFieldValue.objects.select_related("field")
# TODO: Consider optimizing this query away with caching.
if target_user is not None:
custom_profile_field_values = base_query.filter(user_profile=target_user)
else:
custom_profile_field_values = base_query.filter(field__realm_id=realm.id)
profiles_by_user_id = get_custom_profile_field_values(custom_profile_field_values)
result = {}
for row in user_dicts:
if profiles_by_user_id is not None:
custom_profile_field_data = profiles_by_user_id.get(row["id"], {})
result[row["id"]] = format_user_row(
realm,
acting_user=acting_user,
row=row,
client_gravatar=client_gravatar,
user_avatar_url_field_optional=user_avatar_url_field_optional,
custom_profile_field_data=custom_profile_field_data,
)
return result
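# Example call shape for a single user (hypothetical `some_user`; mirrors how
# API views are expected to use this helper):
#
#     user_dict = get_raw_user_data(
#         realm,
#         acting_user,
#         target_user=some_user,
#         client_gravatar=True,
#         user_avatar_url_field_optional=False,
#     )[some_user.id]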
| punchagan/zulip | zerver/lib/users.py | Python | apache-2.0 | 21,312 |
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import hashlib
import os
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import bulk_index
from warehouse.utils import AttributeDict
class Index(object):
_index = "warehouse"
def __init__(self, models, config):
self.models = models
self.config = config
self.es = Elasticsearch(
hosts=self.config.hosts,
**self.config.get("client_options", {})
)
self.types = AttributeDict()
def register(self, type_):
obj = type_(self)
self.types[obj._type] = obj
def reindex(self, index=None, alias=True, keep_old=False):
# Generate an Index Name for Warehouse
index = "".join([
index if index is not None else self._index,
hashlib.md5(os.urandom(16)).hexdigest()[:8],
])
# Create this index
self.es.indices.create(index, {
"mappings": {
doc_type._type: doc_type.get_mapping()
for doc_type in self.types.values()
},
})
# Index everything into the new index
for doc_type in self.types.values():
doc_type.index_all(index=index)
# Update the alias unless we've been told not to
if alias:
self.update_alias(self._index, index, keep_old=keep_old)
def update_alias(self, alias, index, keep_old=False):
# Get the old index from ElasticSearch
try:
            old_index = list(self.es.indices.get_alias(self._index))[0]
except TransportError as exc:
if not exc.status_code == 404:
raise
old_index = None
# Remove the alias to the old index if it exists
if old_index is not None:
actions = [{"remove": {"index": old_index, "alias": alias}}]
else:
actions = []
# Add the alias to the new index
actions += [{"add": {"index": index, "alias": alias}}]
# Update To the New Index
self.es.indices.update_aliases({"actions": actions})
# Delete the old index if it exists and unless we're keeping it
if not keep_old and old_index is not None:
self.es.indices.delete(old_index)
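# Illustrative wiring sketch (hypothetical config and mapping type; the real
# registration happens in application setup, not in this module):
#
#     index = Index(models, AttributeDict(hosts=["localhost:9200"]))
#     index.register(ProjectMapping)  # a BaseMapping subclass, see sketch below
#     index.reindex()                 # build a fresh index, then swap the alias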
class BaseMapping(object):
SEARCH_LIMIT = 25
def __init__(self, index):
self.index = index
def get_mapping(self):
raise NotImplementedError
def get_indexable(self):
raise NotImplementedError
def extract_id(self, item):
raise NotImplementedError
def extract_document(self, item):
raise NotImplementedError
def index_all(self, index=None):
# Determine which index we are indexing into
_index = index if index is not None else self.index._index
# Bulk Index our documents
bulk_index(
self.index.es,
[
{
"_index": _index,
"_type": self._type,
"_id": self.extract_id(item),
"_source": self.extract_document(item),
}
for item in self.get_indexable()
],
)
def search(self, query):
raise NotImplementedError
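# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a minimal BaseMapping
# subclass showing how the abstract hooks above fit together with Index. The
# "example_project" type, its mapping, and the hard-coded documents are
# hypothetical and exist only to demonstrate the expected return shapes.
# ---------------------------------------------------------------------------
class ExampleProjectMapping(BaseMapping):
    _type = "example_project"
    def get_mapping(self):
        # Elasticsearch mapping for this document type
        return {"properties": {"name": {"type": "string"}}}
    def get_indexable(self):
        # A real mapping would pull rows from self.index.models
        return [{"name": "warehouse"}, {"name": "pip"}]
    def extract_id(self, item):
        return item["name"]
    def extract_document(self, item):
        return {"name": item["name"]}
if __name__ == "__main__":
    # Hypothetical configuration; a real deployment supplies hosts and
    # client_options through the warehouse configuration system.
    example_index = Index(models=None, config=AttributeDict(hosts=["localhost:9200"]))
    example_index.register(ExampleProjectMapping)
    example_index.reindex()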
| mattrobenolt/warehouse | warehouse/search/indexes.py | Python | apache-2.0 | 3,926 |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
import wx
import threading
import lcm
import random
import Forseti
import configurator
BLUE = (24, 25, 141)
GOLD = (241, 169, 50)
class TeamPanel(wx.Panel):
def __init__(self, remote, letter, number, name, colour, *args, **kwargs):
super(TeamPanel, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI(letter, number, name, colour)
def InitUI(self, letter, number, name, colour=None):
if colour is not None:
self.SetBackgroundColour(colour)
dc = wx.ScreenDC()
self.num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2, dc.GetCharHeight()))
self.num_ctrl.AppendText(str(number))
self.get_button = wx.Button(self, label='Get', size=(dc.GetCharWidth() * 2, dc.GetCharHeight()))
self.get_button.Bind(wx.EVT_BUTTON, self.do_get_name)
self.name_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 16,
dc.GetCharHeight()))
self.name_ctrl.AppendText(name)
name_num_box = wx.BoxSizer(wx.HORIZONTAL)
name_num_box.Add(wx.StaticText(self, label=letter,
size=(dc.GetCharWidth() * 0.6, dc.GetCharHeight())))
name_num_box.Add(self.num_ctrl)
name_num_box.Add(self.get_button)
name_num_box.Add(self.name_ctrl)
#button_box = wx.BoxSizer(wx.HORIZONTAL)
#button_box.Add(wx.Button(self, label='Reset'))
#button_box.Add(wx.Button(self, label='Configure'))
#button_box.Add(wx.Button(self, label='Disable'))
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(name_num_box, flag=wx.CENTER)
#vbox.Add(button_box, flag=wx.CENTER)
self.SetSizer(self.vbox)
self.Show(True)
def do_get_name(self, event):
self.name = configurator.get_team_name(self.number)
@property
def name(self):
return self.name_ctrl.GetValue()
@name.setter
def name(self, val):
self.name_ctrl.SetValue(val)
@property
def number(self):
try:
return int(self.num_ctrl.GetValue())
except ValueError:
return 0
@number.setter
def number(self, val):
self.num_ctrl.SetValue(str(val))
class MatchControl(wx.Panel):
def __init__(self, remote, *args, **kwargs):
super(MatchControl, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI()
def InitUI(self):
vbox = wx.BoxSizer(wx.VERTICAL)
dc = wx.ScreenDC()
match_number = wx.BoxSizer(wx.HORIZONTAL)
match_number.Add(wx.StaticText(self, label='Match #'.format(1)))
self.match_num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2,
dc.GetCharHeight()))
match_number.Add(self.match_num_ctrl)
vbox.Add(match_number, flag=wx.CENTER)
teamSizer = wx.GridSizer(3, 2)
self.team_panels = [
TeamPanel(self.remote, 'A', 0, 'Unknown Team', BLUE, self),
TeamPanel(self.remote, 'C', 0, 'Unknown Team', GOLD, self),
TeamPanel(self.remote, 'B', 0, 'Unknown Team', BLUE, self),
TeamPanel(self.remote, 'D', 0, 'Unknown Team', GOLD, self),
]
teamSizer.AddMany(
[wx.StaticText(self, label='Blue Team'),
wx.StaticText(self, label='Gold Team')] +
[(panel, 0) for panel in self.team_panels])
vbox.Add(teamSizer, flag=wx.CENTER)
buttons = wx.BoxSizer(wx.HORIZONTAL)
self.init_button = wx.Button(self, label='Init')
self.init_button.Bind(wx.EVT_BUTTON, self.do_init)
self.go_button = wx.Button(self, label='GO!')
self.go_button.Bind(wx.EVT_BUTTON, self.do_go)
self.pause_button = wx.Button(self, label='Pause')
self.pause_button.Bind(wx.EVT_BUTTON, self.do_pause)
#self.save_button = wx.Button(self, label='Save')
#self.save_button.Bind(wx.EVT_BUTTON, self.do_save)
self.time_text = wx.StaticText(self, label='0:00')
self.stage_text = wx.StaticText(self, label='Unknown')
self.remote.time_text = self.time_text
#buttons.Add(self.save_button, flag=wx.LEFT)
buttons.Add(self.init_button)
buttons.Add(self.go_button)
buttons.Add(self.pause_button)
buttons.Add(self.time_text)
buttons.Add(self.stage_text)
vbox.Add(buttons, flag=wx.CENTER)
self.SetSizer(vbox)
self.Show(True)
def do_go(self, e):
self.remote.do_go()
def do_pause(self, e):
self.remote.do_pause()
def do_save(self, e):
self.remote.do_save(self.get_match())
def do_init(self, e):
self.remote.do_init(self.get_match())
def _set_match_panel(self, match, team_idx, panel_idx):
match.team_numbers[team_idx] = self.team_panels[panel_idx].number
match.team_names[team_idx] = self.team_panels[panel_idx].name
def _set_panel_match(self, match, team_idx, panel_idx):
self.team_panels[panel_idx].number = match.team_numbers[team_idx]
self.team_panels[panel_idx].name = match.team_names[team_idx]
def get_match(self):
match = Forseti.Match()
self._set_match_panel(match, 0, 0)
self._set_match_panel(match, 1, 2)
self._set_match_panel(match, 2, 1)
self._set_match_panel(match, 3, 3)
try:
match.match_number = int(self.match_num_ctrl.GetValue())
except ValueError:
match.match_number = random.getrandbits(31)
return match
def set_match(self, match):
self._set_panel_match(match, 0, 0)
self._set_panel_match(match, 1, 2)
self._set_panel_match(match, 2, 1)
self._set_panel_match(match, 3, 3)
self.match_num_ctrl.SetValue(str(match.match_number))
def set_time(self, match):
self.time_text.SetLabel(format_time(match.game_time_so_far))
self.stage_text.SetLabel(match.stage_name)
class ScheduleControl(wx.Panel):
def __init__(self, remote, match_control, *args, **kwargs):
self.remote = remote
super(ScheduleControl, self).__init__(*args, **kwargs)
self.InitUI()
self.remote.match_list_box = self.match_list
self.match_control = match_control
def InitUI(self):
self.match_list = wx.ListBox(self)
self.match_list.Bind(wx.EVT_LISTBOX, self.choose_match)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.load_button = wx.Button(self, label='Load All')
self.load_button.Bind(wx.EVT_BUTTON, self.do_load)
hbox.Add(self.load_button)
self.clear_first = wx.CheckBox(self, label='Clear first')
self.clear_first.SetValue(True)
hbox.Add(self.clear_first)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.match_list, 1, wx.EXPAND)
vbox.Add(hbox)
self.SetSizer(vbox)
self.Show(True)
def do_load(self, e):
self.remote.do_load(self.clear_first.GetValue())
def choose_match(self, event):
self.match_control.set_match(event.GetClientData())
class MainWindow(wx.Frame):
def __init__(self, remote, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI()
def InitUI(self):
menubar = wx.MenuBar()
fileMenu = wx.Menu()
fitem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
menubar.Append(fileMenu, '&File')
self.SetMenuBar(menubar)
match_control = MatchControl(self.remote, self)
schedule_control = ScheduleControl(self.remote, match_control, self)
self.remote.match_control = match_control
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(match_control, 0, wx.ALIGN_CENTER | wx.ALIGN_TOP, 8)
vbox.Add(schedule_control, 1, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, 8)
self.Bind(wx.EVT_MENU, self.OnQuit, fitem)
self.SetSize((800, 600))
self.SetSizer(vbox)
self.SetTitle('Forseti Dashboard')
self.Centre()
self.Show(True)
def OnQuit(self, e):
self.Close()
def format_match(match):
print(match.match_number)
print(match.team_names)
print(match.team_numbers)
return '{}: {} ({}) & {} ({}) vs. {} ({}) & {} ({})'.format(
match.match_number,
match.team_names[0], match.team_numbers[0],
match.team_names[1], match.team_numbers[1],
match.team_names[2], match.team_numbers[2],
match.team_names[3], match.team_numbers[3],
)
class Remote(object):
def __init__(self):
self.lc = lcm.LCM('udpm://239.255.76.67:7667?ttl=1')
self.lc.subscribe('Schedule/Schedule', self.handle_schedule)
self.lc.subscribe('Timer/Time', self.handle_time)
self.match_list_box = None
self.match_control = None
self.thread = threading.Thread(target=self._loop)
self.thread.daemon = True
def start(self):
self.thread.start()
def _loop(self):
while True:
try:
self.lc.handle()
except Exception as ex:
print('Got exception while handling lcm message', ex)
def handle_schedule(self, channel, data):
msg = Forseti.Schedule.decode(data)
for i in range(msg.num_matches):
self.match_list_box.Insert(format_match(msg.matches[i]), i,
msg.matches[i])
def handle_time(self, channel, data):
msg = Forseti.Time.decode(data)
#wx.CallAfter(self.time_text.SetLabel, format_time(msg.game_time_so_far))
wx.CallAfter(self.match_control.set_time, msg)
def do_load(self, clear_first):
if clear_first:
self.match_list_box.Clear()
msg = Forseti.ScheduleLoadCommand()
msg.clear_first = clear_first
print('Requesting load')
self.lc.publish('Schedule/Load', msg.encode())
def do_save(self, match):
self.lc.publish('Match/Save', match.encode())
def do_init(self, match):
self.lc.publish('Match/Init', match.encode())
def do_time_ctrl(self, command):
msg = Forseti.TimeControl()
msg.command_name = command
self.lc.publish('Timer/Control', msg.encode())
def do_go(self):
self.do_time_ctrl('start')
def do_pause(self):
self.do_time_ctrl('pause')
def format_time(seconds):
return '{}:{:02}'.format(seconds // 60,
seconds % 60)
def main():
app = wx.App()
remote = Remote()
MainWindow(remote, None)
remote.start()
remote.do_load(False)
app.MainLoop()
if __name__ == '__main__':
main()
| pioneers/forseti | wxdash.py | Python | apache-2.0 | 10,763 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1", manifest={"SpecialistPool",},
)
class SpecialistPool(proto.Message):
r"""SpecialistPool represents customers' own workforce to work on
their data labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for managing the
workers in this pool as well as customers' data labeling jobs
associated with this pool. Customers create specialist pool as
well as start data labeling jobs on Cloud, managers and workers
handle the jobs using CrowdCompute console.
Attributes:
name (str):
Required. The resource name of the
SpecialistPool.
display_name (str):
Required. The user-defined name of the
SpecialistPool. The name can be up to 128
characters long and can be consist of any UTF-8
characters.
This field should be unique on project-level.
specialist_managers_count (int):
Output only. The number of managers in this
SpecialistPool.
specialist_manager_emails (Sequence[str]):
The email addresses of the managers in the
SpecialistPool.
pending_data_labeling_jobs (Sequence[str]):
Output only. The resource name of the pending
data labeling jobs.
specialist_worker_emails (Sequence[str]):
The email addresses of workers in the
SpecialistPool.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
specialist_managers_count = proto.Field(proto.INT32, number=3,)
specialist_manager_emails = proto.RepeatedField(proto.STRING, number=4,)
pending_data_labeling_jobs = proto.RepeatedField(proto.STRING, number=5,)
specialist_worker_emails = proto.RepeatedField(proto.STRING, number=7,)
__all__ = tuple(sorted(__protobuf__.manifest))
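# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the generated file: constructing a
# SpecialistPool message locally. The resource name, display name, and email
# below are made up; in practice these messages are created by or passed to
# the SpecialistPoolService API client.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_pool = SpecialistPool(
        name="projects/example-project/locations/us-central1/specialistPools/123",
        display_name="example-labeling-pool",
        specialist_manager_emails=["[email protected]"],
    )
    # proto-plus messages expose fields as attributes; repeated fields act
    # like sequences.
    print(example_pool.display_name)
    print(list(example_pool.specialist_manager_emails))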
| googleapis/python-aiplatform | google/cloud/aiplatform_v1/types/specialist_pool.py | Python | apache-2.0 | 2,601 |
# -*- coding: utf-8 -*-
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Prints the env_setup banner for cmd.exe.
This is done from Python as activating colors and printing ASCII art are not
easy to do in cmd.exe. Activated colors also don't persist in the parent
process.
"""
from __future__ import print_function
import argparse
import os
import sys
from .colors import Color, enable_colors # type: ignore
_PIGWEED_BANNER = u'''
▒█████▄ █▓ ▄███▒ ▒█ ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
▒█░ █░ ░█▒ ██▒ ▀█▒ ▒█░ █ ▒█ ▒█ ▀ ▒█ ▀ ▒█ ▀█▌
▒█▄▄▄█░ ░█▒ █▓░ ▄▄░ ▒█░ █ ▒█ ▒███ ▒███ ░█ █▌
▒█▀ ░█░ ▓█ █▓ ░█░ █ ▒█ ▒█ ▄ ▒█ ▄ ░█ ▄█▌
▒█ ░█░ ░▓███▀ ▒█▓▀▓█░ ░▓████▒ ░▓████▒ ▒▓████▀
'''
def print_banner(bootstrap, no_shell_file):
"""Print the Pigweed or project-specific banner"""
enable_colors()
print(Color.green('\n WELCOME TO...'))
print(Color.magenta(_PIGWEED_BANNER))
if bootstrap:
print(
Color.green('\n BOOTSTRAP! Bootstrap may take a few minutes; '
'please be patient'))
print(
Color.green(
' On Windows, this stage is extremely slow (~10 minutes).\n'))
else:
print(
Color.green(
'\n ACTIVATOR! This sets your console environment variables.\n'
))
if no_shell_file:
print(Color.bold_red('Error!\n'))
print(
Color.red(' Your Pigweed environment does not seem to be'
' configured.'))
print(Color.red(' Run bootstrap.bat to perform initial setup.'))
return 0
def parse():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--bootstrap', action='store_true')
parser.add_argument('--no-shell-file', action='store_true')
return parser.parse_args()
def main():
"""Script entry point."""
if os.name != 'nt':
return 1
return print_banner(**vars(parse()))
if __name__ == '__main__':
sys.exit(main())
| google/pigweed | pw_env_setup/py/pw_env_setup/windows_env_start.py | Python | apache-2.0 | 2,955 |
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_registry
short_description: Module to manage openshift registry
description:
- Manage openshift registry programmatically.
options:
state:
description:
- The desired action when managing openshift registry
- present - update or create the registry
- absent - tear down the registry service and deploymentconfig
    - list - returns the current representation of a registry
required: false
default: False
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the registry
required: false
default: None
aliases: []
namespace:
description:
- The selector when filtering on node labels
required: false
default: None
aliases: []
images:
description:
- The image to base this registry on - ${component} will be replaced with --type
      required: false
      default: 'openshift3/ose-${component}:${version}'
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
enforce_quota:
description:
- If set, the registry will refuse to write blobs if they exceed quota limits
required: False
default: False
aliases: []
mount_host:
description:
- If set, the registry volume will be created as a host-mount at this path.
required: False
default: False
aliases: []
ports:
description:
- A comma delimited list of ports or port pairs to expose on the registry pod. The default is set for 5000.
required: False
default: [5000]
aliases: []
replicas:
description:
- The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the registry pod.
required: False
default: 'registry'
aliases: []
tls_certificate:
description:
- An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
required: false
default: None
aliases: []
tls_key:
description:
- An optional path to a PEM encoded private key for serving over TLS
required: false
default: None
aliases: []
volume_mounts:
description:
- The volume mounts for the registry.
required: false
default: None
aliases: []
daemonset:
description:
- Use a daemonset instead of a deployment config.
required: false
default: False
aliases: []
edits:
description:
- A list of modifications to make on the deploymentconfig
required: false
default: None
aliases: []
env_vars:
description:
- A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
required: false
default: None
aliases: []
force:
description:
- Force a registry update.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create a secure registry
oc_adm_registry:
name: docker-registry
service_account: registry
replicas: 2
namespace: default
selector: type=infra
images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
env_vars:
REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
REGISTRY_HTTP_SECRET: supersecret
volume_mounts:
- path: /etc/secrets
name: dockercerts
type: secret
secret_name: registry-secret
- path: /etc/registryconfig
name: dockersecrets
type: secret
secret_name: docker-registry-config
edits:
- key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: update
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: update
register: registryout
'''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it is has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
        # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
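# Illustrative sketch, not part of the original module: typical programmatic
# use of the Yedit class above. The keys and values are made up; put/get
# operate on dotted (or custom-separator) key paths within a YAML document.
#
#   example_yedit = Yedit(content={'metadata': {'name': 'docker-registry'}})
#   example_yedit.put('spec.replicas', 2)        # creates the nested 'spec' dict
#   example_yedit.get('metadata.name')           # -> 'docker-registry'
#   example_yedit.separator = '#'
#   example_yedit.get('metadata#name')           # -> 'docker-registry'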
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
        # By default, "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
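# Illustrative sketch, not part of the original module: how OpenShiftCLIConfig
# renders its options hash as CLI flags. The option names and values are made
# up to show the {'value': ..., 'include': ...} structure and the ascommalist
# behaviour used for dict-valued options such as labels.
#
#   example_config = OpenShiftCLIConfig(
#       'docker-registry', 'default', '/etc/origin/master/admin.kubeconfig',
#       {'replicas': {'value': 2, 'include': True},
#        'labels': {'value': {'tier': 'infra'}, 'include': True}})
#   example_config.to_option_list(ascommalist='labels')
#   # -> ['--labels=tier=infra', '--replicas=2']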
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
        '''return a single environment variable '''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
        '''return the environment variables '''
return self.get(DeploymentConfig.env_path) or []
    def delete_env_var(self, keys):
        '''delete a list of keys '''
        if not isinstance(keys, list):
            keys = [keys]
        env_vars_array = self.get_env_vars()
        modified = False
        for key in keys:
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            if idx is not None:
                modified = True
                del env_vars_array[idx]
        if modified:
            return True
        return False
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
        if idx is not None:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
'''return volume mount information '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
if del_idx != None:
del exist_volumes[del_idx]
modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
if del_idx != None:
            del exist_volume_mounts[del_idx]
modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
'''place an env in the env var list'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
if update_idx != None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
'''place an env in the env var list'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
def needs_update_volume(self, volume, volume_mount):
''' verify a volume update is needed '''
exist_volume = self.find_volume_by_name(volume)
exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
results = []
results.append(exist_volume['name'] == volume['name'])
if 'secret' in volume:
results.append('secret' in exist_volume)
results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
results.append(exist_volume_mount['name'] == volume_mount['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'emptyDir' in volume:
results.append(exist_volume_mount['name'] == volume['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'persistentVolumeClaim' in volume:
pvc = 'persistentVolumeClaim'
results.append(pvc in exist_volume)
if results[-1]:
results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
if 'claimSize' in volume[pvc]:
results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
elif 'hostpath' in volume:
results.append('hostPath' in exist_volume)
results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
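# Illustrative result of SecretConfig.create_dict (values here are hypothetical):
#   SecretConfig('mysecret', 'default', '/etc/origin/master/admin.kubeconfig',
#                secrets={'username': 'YWRtaW4='}).data
#   == {'apiVersion': 'v1', 'kind': 'Secret',
#       'metadata': {'name': 'mysecret', 'namespace': 'default'},
#       'data': {'username': 'YWRtaW4='}}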
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
''' Class to wrap the oc command line tools '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
    @secrets.setter
    def secrets(self, value):
        '''secret property setter'''
        self._secrets = value
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
''' Handle service options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
ports,
selector=None,
labels=None,
cluster_ip=None,
portal_ip=None,
session_affinity=None,
service_type=None,
external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
self.ports = ports
self.selector = selector
self.labels = labels
self.cluster_ip = cluster_ip
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
self.external_ips = external_ips
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a service dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Service'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
self.data['metadata']['labels'] = {}
for lab, lab_value in self.labels.items():
self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
self.data['spec']['ports'] = self.ports
else:
self.data['spec']['ports'] = []
if self.selector:
self.data['spec']['selector'] = self.selector
self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
if self.cluster_ip:
self.data['spec']['clusterIP'] = self.cluster_ip
if self.portal_ip:
self.data['spec']['portalIP'] = self.portal_ip
if self.service_type:
self.data['spec']['type'] = self.service_type
if self.external_ips:
self.data['spec']['externalIPs'] = self.external_ips
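# For reference (assumed input shape, mirroring what `oc` emits for a Service): each
# entry in `ports` is a dict such as
#   {'name': '5000-tcp', 'port': 5000, 'protocol': 'TCP', 'targetPort': 5000}
# and is copied verbatim into self.data['spec']['ports'] by create_dict above.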
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
super(Service, self).__init__(content=content)
def get_ports(self):
''' get a list of ports '''
return self.get(Service.port_path) or []
def get_selector(self):
''' get the service selector'''
return self.get(Service.selector_path) or {}
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get_ports()
if not ports:
self.put(Service.port_path, inc_ports)
else:
ports.extend(inc_ports)
return True
def find_ports(self, inc_port):
''' find a specific port '''
for port in self.get_ports():
if port['port'] == inc_port['port']:
return port
return None
def delete_ports(self, inc_ports):
''' remove a port from a service '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get(Service.port_path) or []
if not ports:
return True
removed = False
for inc_port in inc_ports:
port = self.find_ports(inc_port)
if port:
ports.remove(port)
removed = True
return removed
def add_cluster_ip(self, sip):
'''add cluster ip'''
self.put(Service.cluster_ip, sip)
def add_portal_ip(self, pip):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
def get_external_ips(self):
''' get a list of external_ips '''
return self.get(Service.external_ips) or []
def add_external_ips(self, inc_external_ips):
''' add an external_ip to the external_ips list '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get_external_ips()
if not external_ips:
self.put(Service.external_ips, inc_external_ips)
else:
external_ips.extend(inc_external_ips)
return True
def find_external_ips(self, inc_external_ip):
''' find a specific external IP '''
val = None
try:
idx = self.get_external_ips().index(inc_external_ip)
val = self.get_external_ips()[idx]
except ValueError:
pass
return val
def delete_external_ips(self, inc_external_ips):
''' remove an external IP from a service '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get(Service.external_ips) or []
if not external_ips:
return True
removed = False
for inc_external_ip in inc_external_ips:
external_ip = self.find_external_ips(inc_external_ip)
if external_ip:
external_ips.remove(external_ip)
removed = True
return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
class Volume(object):
''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
}
volumes_path = {"pod": "spec.volumes",
"dc": "spec.template.spec.volumes",
"rc": "spec.template.spec.volumes",
}
@staticmethod
def create_volume_structure(volume_info):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
volume_type = volume_info['type'].lower()
if volume_type == 'secret':
volume['secret'] = {}
volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
elif volume_type == 'configmap':
volume['configMap'] = {}
volume['configMap']['name'] = volume_info['configmap_name']
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
return (volume, volume_mount)
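# Worked example for create_volume_structure (input values are hypothetical):
#   Volume.create_volume_structure({'name': 'registry-certs', 'type': 'secret',
#                                   'secret_name': 'registry-secret', 'path': '/etc/secrets'})
#   returns
#   ({'name': 'registry-certs', 'secret': {'secretName': 'registry-secret'}},
#    {'mountPath': '/etc/secrets', 'name': 'registry-certs'})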
# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
debug):
''' Constructor for OCVersion '''
super(OCVersion, self).__init__(None, config)
self.debug = debug
def get(self):
'''get and return version information '''
results = {}
version_results = self._version()
if version_results['returncode'] == 0:
filtered_vers = Utils.filter_versions(version_results['results'])
custom_vers = Utils.add_custom_versions(filtered_vers)
results['returncode'] = version_results['returncode']
results.update(filtered_vers)
results.update(custom_vers)
return results
raise OpenShiftCLIError('Problem detecting openshift version.')
@staticmethod
def run_ansible(params):
'''run the idempotent ansible code'''
oc_version = OCVersion(params['kubeconfig'], params['debug'])
if params['state'] == 'list':
#pylint: disable=protected-access
result = oc_version.get()
return {'state': params['state'],
'results': result,
'changed': False}
# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*-
class RegistryException(Exception):
''' Registry Exception Class '''
pass
class RegistryConfig(OpenShiftCLIConfig):
''' RegistryConfig is a DTO for the registry. '''
def __init__(self, rname, namespace, kubeconfig, registry_options):
super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
class Registry(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
volume_path = 'spec.template.spec.volumes'
env_path = 'spec.template.spec.containers[0].env'
def __init__(self,
registry_config,
verbose=False):
''' Constructor for Registry
        a registry consists of at least the following parts
- dc/docker-registry
- svc/docker-registry
Parameters:
:registry_config:
:verbose:
'''
super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
self.version = OCVersion(registry_config.kubeconfig, verbose)
self.svc_ip = None
self.portal_ip = None
self.config = registry_config
self.verbose = verbose
self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
]
self.__prepared_registry = None
self.volume_mounts = []
self.volumes = []
if self.config.config_options['volume_mounts']['value']:
for volume in self.config.config_options['volume_mounts']['value']:
volume_info = {'secret_name': volume.get('secret_name', None),
'name': volume.get('name', None),
'type': volume.get('type', None),
'path': volume.get('path', None),
'claimName': volume.get('claim_name', None),
'claimSize': volume.get('claim_size', None),
}
vol, vol_mount = Volume.create_volume_structure(volume_info)
self.volumes.append(vol)
self.volume_mounts.append(vol_mount)
self.dconfig = None
self.svc = None
@property
def deploymentconfig(self):
''' deploymentconfig property '''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for deploymentconfig property '''
self.dconfig = config
@property
def service(self):
''' service property '''
return self.svc
@service.setter
def service(self, config):
''' setter for service property '''
self.svc = config
@property
def prepared_registry(self):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
if not results or ('returncode' in results and results['returncode'] != 0):
raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@prepared_registry.setter
def prepared_registry(self, data):
''' setter method for prepared_registry attribute '''
self.__prepared_registry = data
def get(self):
''' return the self.registry_parts '''
self.deploymentconfig = None
self.service = None
rval = 0
for part in self.registry_parts:
result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
def exists(self):
'''does the object exist?'''
if self.deploymentconfig and self.service:
return True
return False
def delete(self, complete=True):
'''return all pods '''
parts = []
for part in self.registry_parts:
if not complete and part['kind'] == 'svc':
continue
parts.append(self._delete(part['kind'], part['name']))
# Clean up returned results
rval = 0
for part in parts:
# pylint: disable=invalid-sequence-index
if 'returncode' in part and part['returncode'] != 0:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def prepare_registry(self):
''' prepare a registry for instantiation '''
options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
if results['returncode'] != 0 and 'items' not in results['results']:
raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
deploymentconfig = DeploymentConfig(res)
elif res['kind'] == 'Service':
service = Service(res)
# Verify we got a service and a deploymentconfig
if not service or not deploymentconfig:
return results
# results will need to get parsed here and modifications added
deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
# modify service ip
if self.svc_ip:
service.put('spec.clusterIP', self.svc_ip)
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
# the dry-run doesn't apply the selector correctly
if self.service:
service.put('spec.selector', self.service.get_selector())
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
return {"service": service,
"service_file": service_file,
"service_update": False,
"deployment": deploymentconfig,
"deployment_file": deployment_file,
"deployment_update": False}
def create(self):
'''Create a registry'''
results = []
self.needs_update()
# if the object is none, then we need to create it
# if the object needs an update, then we should call replace
# Handle the deploymentconfig
if self.deploymentconfig is None:
results.append(self._create(self.prepared_registry['deployment_file']))
elif self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
# Handle the service
if self.service is None:
results.append(self._create(self.prepared_registry['service_file']))
elif self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
# pylint: disable=invalid-sequence-index
if 'returncode' in result and result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
if svcip:
self.svc_ip = svcip
portip = self.service.get('spec.portalIP')
if portip:
self.portal_ip = portip
results = []
if self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
if self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
# The environment variable for REGISTRY_HTTP_SECRET is autogenerated
# We should set the generated deploymentconfig to the in memory version
# the following modifications will overwrite if needed
if self.deploymentconfig:
result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
if result:
deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
for key, value in self.config.config_options['env_vars'].get('value', {}).items():
if not deploymentconfig.exists_env_key(key):
deploymentconfig.add_env_value(key, value)
else:
deploymentconfig.update_env_var(key, value)
# Modification 2
# we need specific volume variables to be set
for volume in self.volumes:
deploymentconfig.update_volume(volume)
for vol_mount in self.volume_mounts:
deploymentconfig.update_volume_mount(vol_mount)
# Modification 3
# Edits
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig.yaml_dict
def needs_update(self):
''' check to see if we need to update '''
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
if self.service is None or \
not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
self.service.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath',
'securityContext',
'imagePullPolicy',
                        'protocol', # ports.protocol: TCP
'type', # strategy: {'type': 'rolling'}
'defaultMode', # added on secrets
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
self.deploymentconfig.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
# In the future, we would like to break out each ansible state into a function.
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
registry_options = {'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'mount_host': {'value': params['mount_host'], 'include': True},
'env_vars': {'value': params['env_vars'], 'include': False},
'volume_mounts': {'value': params['volume_mounts'], 'include': False},
'edits': {'value': params['edits'], 'include': False},
'tls_key': {'value': params['tls_key'], 'include': True},
'tls_certificate': {'value': params['tls_certificate'], 'include': True},
}
# Do not always pass the daemonset and enforce-quota parameters because they are not understood
# by old versions of oc.
# Default value is false. So, it's safe to not pass an explicit false value to oc versions which
# understand these parameters.
if params['daemonset']:
registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
if params['enforce_quota']:
registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
registry_options)
ocregistry = Registry(rconfig, params['debug'])
api_rval = ocregistry.get()
state = params['state']
########
# get
########
if state == 'list':
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocregistry.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# Unsure as to why this is angry with the return type.
# pylint: disable=redefined-variable-type
api_rval = ocregistry.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocregistry.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocregistry.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not params['force'] and not ocregistry.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocregistry.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*-
def main():
'''
ansible oc module for registry
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
env_vars=dict(default={}, type='dict'),
edits=dict(default=[], type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
tls_key=dict(default=None, type='str'),
tls_certificate=dict(default=None, type='str'),
),
supports_check_mode=True,
)
results = Registry.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
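# Illustrative playbook usage of this module (task values are assumptions, not from the source):
#
#   - name: deploy the integrated docker registry
#     oc_adm_registry:
#       name: docker-registry
#       namespace: default
#       selector: region=infra
#       replicas: 2
#       images: "registry.example.com/openshift3/ose-${component}:${version}"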
# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*-
| DG-i/openshift-ansible | roles/lib_openshift/library/oc_adm_registry.py | Python | apache-2.0 | 94,103 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import sys
import time
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
# For Cloud environment, add parent directory for imports
sys.path.append(os.path.dirname(os.path.abspath(sys.path[0])))
from official.resnet import imagenet_input # pylint: disable=g-import-not-at-top
from official.resnet import resnet_main
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.python.estimator import estimator
FLAGS = tf.flags.FLAGS
CKPT_PATTERN = r'model\.ckpt-(?P<gs>[0-9]+)\.data'
flags.DEFINE_string(
'data_dir_small', default=None,
help=('The directory where the resized (160x160) ImageNet input data is '
'stored. This is only to be used in conjunction with the '
'resnet_benchmark.py script.'))
flags.DEFINE_bool(
'use_fast_lr', default=False,
help=('Enabling this uses a faster learning rate schedule along with '
'different image sizes in the input pipeline. This is only to be '
'used in conjunction with the resnet_benchmark.py script.'))
# Number of training and evaluation images in the standard ImageNet dataset
NUM_TRAIN_IMAGES = 1281167
NUM_EVAL_IMAGES = 50000
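# Example invocation (paths and TPU name are hypothetical; only flags referenced in this
# file are shown; the remaining flags are defined in resnet_main.py):
#   python resnet_benchmark.py --tpu=my-tpu --mode=train \
#       --data_dir=gs://my-bucket/imagenet --data_dir_small=gs://my-bucket/imagenet-160 \
#       --model_dir=gs://my-bucket/resnet-model --use_fast_lr=true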
def main(unused_argv):
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=FLAGS.iterations_per_loop,
keep_checkpoint_max=None,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_cores,
per_host_input_for_training=contrib_tpu.InputPipelineConfig.PER_HOST_V2)) # pylint: disable=line-too-long
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = imagenet_input.ImageNetInput(
is_training=True,
data_dir=FLAGS.data_dir,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
imagenet_eval = imagenet_input.ImageNetInput(
is_training=False,
data_dir=FLAGS.data_dir,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
if FLAGS.use_fast_lr:
resnet_main.LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 4), (0.1, 21), (0.01, 35), (0.001, 43)
]
imagenet_train_small = imagenet_input.ImageNetInput(
is_training=True,
image_size=128,
data_dir=FLAGS.data_dir_small,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input,
cache=True)
imagenet_eval_small = imagenet_input.ImageNetInput(
is_training=False,
image_size=128,
data_dir=FLAGS.data_dir_small,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input,
cache=True)
imagenet_train_large = imagenet_input.ImageNetInput(
is_training=True,
image_size=288,
data_dir=FLAGS.data_dir,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
imagenet_eval_large = imagenet_input.ImageNetInput(
is_training=False,
image_size=288,
data_dir=FLAGS.data_dir,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
resnet_classifier = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=resnet_main.resnet_model_fn,
config=config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.mode == 'train':
current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long
batches_per_epoch = NUM_TRAIN_IMAGES / FLAGS.train_batch_size
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
' step %d.' % (FLAGS.train_steps,
FLAGS.train_steps / batches_per_epoch,
current_step))
start_timestamp = time.time() # This time will include compilation time
# Write a dummy file at the start of training so that we can measure the
# runtime at each checkpoint from the file write time.
tf.gfile.MkDir(FLAGS.model_dir)
if not tf.gfile.Exists(os.path.join(FLAGS.model_dir, 'START')):
with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'START'), 'w') as f:
f.write(str(start_timestamp))
if FLAGS.use_fast_lr:
small_steps = int(18 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
normal_steps = int(41 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
large_steps = int(min(50 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size,
FLAGS.train_steps))
resnet_classifier.train(
input_fn=imagenet_train_small.input_fn, max_steps=small_steps)
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=normal_steps)
resnet_classifier.train(
input_fn=imagenet_train_large.input_fn,
max_steps=large_steps)
else:
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)
else:
assert FLAGS.mode == 'eval'
start_timestamp = tf.gfile.Stat(
os.path.join(FLAGS.model_dir, 'START')).mtime_nsec
results = []
eval_steps = NUM_EVAL_IMAGES // FLAGS.eval_batch_size
ckpt_steps = set()
all_files = tf.gfile.ListDirectory(FLAGS.model_dir)
for f in all_files:
mat = re.match(CKPT_PATTERN, f)
if mat is not None:
ckpt_steps.add(int(mat.group('gs')))
ckpt_steps = sorted(list(ckpt_steps))
tf.logging.info('Steps to be evaluated: %s' % str(ckpt_steps))
for step in ckpt_steps:
ckpt = os.path.join(FLAGS.model_dir, 'model.ckpt-%d' % step)
batches_per_epoch = NUM_TRAIN_IMAGES // FLAGS.train_batch_size
current_epoch = step // batches_per_epoch
if FLAGS.use_fast_lr:
if current_epoch < 18:
eval_input_fn = imagenet_eval_small.input_fn
if current_epoch >= 18 and current_epoch < 41:
eval_input_fn = imagenet_eval.input_fn
if current_epoch >= 41: # 41:
eval_input_fn = imagenet_eval_large.input_fn
else:
eval_input_fn = imagenet_eval.input_fn
end_timestamp = tf.gfile.Stat(ckpt + '.index').mtime_nsec
elapsed_hours = (end_timestamp - start_timestamp) / (1e9 * 3600.0)
tf.logging.info('Starting to evaluate.')
eval_start = time.time() # This time will include compilation time
eval_results = resnet_classifier.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
eval_time = int(time.time() - eval_start)
tf.logging.info('Eval results: %s. Elapsed seconds: %d' %
(eval_results, eval_time))
results.append([
current_epoch,
elapsed_hours,
'%.2f' % (eval_results['top_1_accuracy'] * 100),
'%.2f' % (eval_results['top_5_accuracy'] * 100),
])
time.sleep(60)
with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'results.tsv'), 'wb') as tsv_file: # pylint: disable=line-too-long
writer = csv.writer(tsv_file, delimiter='\t')
writer.writerow(['epoch', 'hours', 'top1Accuracy', 'top5Accuracy'])
writer.writerows(results)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| tensorflow/tpu | models/official/resnet/benchmark/resnet_benchmark.py | Python | apache-2.0 | 8,651 |
from socket import inet_ntoa
from struct import pack
def calcDottedNetmask(mask):
bits = 0
for i in xrange(32 - mask, 32):
bits |= (1 << i)
packed_value = pack('!I', bits)
addr = inet_ntoa(packed_value)
return addr
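# Worked example: calcDottedNetmask(24) sets bits 8..31, i.e. 0xFFFFFF00,
# which inet_ntoa renders as '255.255.255.0'.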
| openbmc/openbmc-test-automation | lib/pythonutil.py | Python | apache-2.0 | 245 |
import pytest
import salt.engines
from tests.support.mock import MagicMock, patch
def test_engine_module_name():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
assert engine.name == "foobar"
def test_engine_title_set():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
with patch("salt.utils.process.appendproctitle", MagicMock()) as mm:
with pytest.raises(KeyError):
# The method does not exist so a KeyError will be raised.
engine.run()
mm.assert_called_with("foobar")
| saltstack/salt | tests/pytests/unit/engines/test_engines.py | Python | apache-2.0 | 595 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class VolumesActionsTest(base.BaseVolumeTest):
"""Test volume actions"""
create_default_network = True
@classmethod
def resource_setup(cls):
super(VolumesActionsTest, cls).resource_setup()
# Create a test shared volume for attach/detach tests
cls.volume = cls.create_volume()
@decorators.idempotent_id('fff42874-7db5-4487-a8e1-ddda5fb5288d')
@decorators.attr(type='smoke')
@utils.services('compute')
def test_attach_detach_volume_to_instance(self):
"""Test attaching and detaching volume to instance"""
# Create a server
server = self.create_server()
# Volume is attached and detached successfully from an instance
self.volumes_client.attach_volume(self.volume['id'],
instance_uuid=server['id'],
mountpoint='/dev/%s' %
CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'in-use')
self.volumes_client.detach_volume(self.volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
@decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
def test_volume_bootable(self):
"""Test setting and retrieving bootable flag of a volume"""
for bool_bootable in [True, False]:
self.volumes_client.set_bootable_volume(self.volume['id'],
bootable=bool_bootable)
fetched_volume = self.volumes_client.show_volume(
self.volume['id'])['volume']
# Get Volume information
# NOTE(masayukig): 'bootable' is "true" or "false" in the current
# cinder implementation. So we need to cast boolean values to str
# and make it lower to compare here.
self.assertEqual(str(bool_bootable).lower(),
fetched_volume['bootable'])
@decorators.idempotent_id('9516a2c8-9135-488c-8dd6-5677a7e5f371')
@utils.services('compute')
def test_get_volume_attachment(self):
"""Test getting volume attachments
Attach a volume to a server, and then retrieve volume's attachments
info.
"""
# Create a server
server = self.create_server()
# Verify that a volume's attachment information is retrieved
self.volumes_client.attach_volume(self.volume['id'],
instance_uuid=server['id'],
mountpoint='/dev/%s' %
CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'],
'in-use')
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client,
self.volume['id'], 'available')
self.addCleanup(self.volumes_client.detach_volume, self.volume['id'])
volume = self.volumes_client.show_volume(self.volume['id'])['volume']
attachment = volume['attachments'][0]
self.assertEqual('/dev/%s' %
CONF.compute.volume_device_name,
attachment['device'])
self.assertEqual(server['id'], attachment['server_id'])
self.assertEqual(self.volume['id'], attachment['id'])
self.assertEqual(self.volume['id'], attachment['volume_id'])
@decorators.idempotent_id('d8f1ca95-3d5b-44a3-b8ca-909691c9532d')
@utils.services('image')
def test_volume_upload(self):
"""Test uploading volume to create an image"""
# NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
# it is shared with the other tests. After it is uploaded in Glance,
# there is no way to delete it from Cinder, so we delete it from Glance
# using the Glance images_client and from Cinder via tearDownClass.
image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')
body = self.volumes_client.upload_volume(
self.volume['id'], image_name=image_name,
disk_format=CONF.volume.disk_format)['os-volume_upload_image']
image_id = body["image_id"]
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.images_client.delete_image,
image_id)
waiters.wait_for_image_status(self.images_client, image_id, 'active')
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
image_info = self.images_client.show_image(image_id)
self.assertEqual(image_name, image_info['name'])
self.assertEqual(CONF.volume.disk_format, image_info['disk_format'])
@decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
def test_reserve_unreserve_volume(self):
"""Test reserving and unreserving volume"""
# Mark volume as reserved.
self.volumes_client.reserve_volume(self.volume['id'])
# To get the volume info
body = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('attaching', body['status'])
# Unmark volume as reserved.
self.volumes_client.unreserve_volume(self.volume['id'])
# To get the volume info
body = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('available', body['status'])
@decorators.idempotent_id('fff74e1e-5bd3-4b33-9ea9-24c103bc3f59')
def test_volume_readonly_update(self):
"""Test updating and retrieve volume's readonly flag"""
for readonly in [True, False]:
# Update volume readonly
self.volumes_client.update_volume_readonly(self.volume['id'],
readonly=readonly)
# Get Volume information
fetched_volume = self.volumes_client.show_volume(
self.volume['id'])['volume']
# NOTE(masayukig): 'readonly' is "True" or "False" in the current
# cinder implementation. So we need to cast boolean values to str
# to compare here.
self.assertEqual(str(readonly),
fetched_volume['metadata']['readonly'])
| openstack/tempest | tempest/api/volume/test_volumes_actions.py | Python | apache-2.0 | 7,582 |
#!/usr/bin/env python
"""A flow to run checks for a host."""
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib.checks import checks
from grr.proto import flows_pb2
class CheckFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.CheckFlowArgs
class CheckRunner(flow.GRRFlow):
"""This flow runs checks on a host.
CheckRunner:
- Identifies what checks should be run for a host.
- Identifies the artifacts that need to be collected to perform those checks.
- Orchestrates collection of the host data.
- Routes host data to the relevant checks.
- Returns check data ready for reporting.
"""
friendly_name = "Run Checks"
category = "/Checks/"
behaviours = flow.GRRFlow.behaviours + "BASIC"
@flow.StateHandler(next_state=["MapArtifactData"])
def Start(self):
"""."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
self.state.Register("knowledge_base",
client.Get(client.Schema.KNOWLEDGE_BASE))
self.state.Register("labels", client.GetLabels())
self.state.Register("artifacts_wanted", set())
self.state.Register("artifacts_fetched", set())
self.state.Register("checks_run", [])
self.state.Register("checks_with_findings", [])
self.state.Register("results_store", None)
self.state.Register("host_data", {})
self.CallState(next_state="MapArtifactData")
@flow.StateHandler(next_state=["AddResponses", "RunChecks"])
def MapArtifactData(self, responses):
"""Get processed data, mapped to artifacts."""
self.state.artifacts_wanted = checks.CheckRegistry.SelectArtifacts(
os=self.state.knowledge_base.os)
# Fetch Artifacts and map results to the artifacts that generated them.
# This is an inefficient collection, but necessary because results need to
# be mapped to the originating artifact. An alternative would be to have
# rdfvalues labeled with originating artifact ids.
for artifact_id in self.state.artifacts_wanted:
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_id],
request_data={"artifact_id": artifact_id},
next_state="AddResponses")
self.CallState(next_state="RunChecks")
@flow.StateHandler()
def AddResponses(self, responses):
artifact_id = responses.request_data["artifact_id"]
# TODO(user): Check whether artifact collection succeeded.
self.state.host_data[artifact_id] = list(responses)
@flow.StateHandler(next_state=["Done"])
def RunChecks(self, responses):
if not responses.success:
raise RuntimeError("Checks did not run successfully.")
# Hand host data across to checks. Do this after all data has been collected
# in case some checks require multiple artifacts/results.
for finding in checks.CheckHost(self.state.host_data,
os=self.state.knowledge_base.os):
self.state.checks_run.append(finding.check_id)
if finding.anomaly:
self.state.checks_with_findings.append(finding.check_id)
self.SendReply(finding)
| ojengwa/grr | lib/flows/general/checks.py | Python | apache-2.0 | 3,088 |
#!/usr/bin/python3
################################################################################
#
# Copyright 2014 Stjepan Henc <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import scipy.io.wavfile as wav
import numpy as np
import copy
class Signal:
# Data loaders
def LoadFromFile(self, file):
self.fs, self.s = wav.read(file)
self.sLength, self.nChans = self.s.shape
def LoadWF(self, waveform, fs):
self.s = waveform
self.fs = fs
self.sLength, self.nChans = self.s.shape
def __init__(self, *args):
#signal properties
self.singlePrecision = 0
self.s = np.array([])
self.fs = 44100
self.sLength = 0
self.nChans = 0
self.weightingFunction = np.hamming #FIXME
#STFT properties
self.S = np.array([])
self.windowLength = 60
self.nfft = 0
self.nfftUtil = 0
self.overlapRatio = 0.5
self.framesPositions = np.array([])
self.nFrames = 0
self.weightingWindow = np.array([])
self.overlap = 0
# Windowing properties
self.sWin = np.array([])
self.sWeights = np.array([])
self.sWin = np.array([])
self.sWeights = np.array([])
if len(args) == 1:
if type(args[0]) == type(''): # it's a filename
self.LoadFromFile(args[0])
            elif type(args[0]) == type(self): # copy data from other signal
self.__dict__ = copy.deepcopy(args[0].__dict__)
        elif len(args) == 2: # args[0] is a waveform, args[1] is sample freq.
            self.LoadWF(args[0], args[1])
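# Illustrative usage (file name is hypothetical):
#   sig = Signal('mixture.wav') # read samples and sample rate from a WAV file
#   sig_copy = Signal(sig) # deep copy of an existing Signal
#   raw = Signal(); raw.LoadWF(stereo_array, 44100) # wrap an existing (N, nChans) numpy array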
| sthenc/pyKAM | Signal.py | Python | apache-2.0 | 2,314 |
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for StatementVisitor."""
from __future__ import unicode_literals
import re
import subprocess
import textwrap
import unittest
from grumpy_tools.compiler import block
from grumpy_tools.compiler import imputil
from grumpy_tools.compiler import shard_test
from grumpy_tools.compiler import stmt
from grumpy_tools.compiler import util
from grumpy_tools.vendor import pythonparser
from grumpy_tools.vendor.pythonparser import ast
class StatementVisitorTest(unittest.TestCase):
def testAssertNoMsg(self):
self.assertEqual((0, 'AssertionError()\n'), _GrumpRun(textwrap.dedent("""\
try:
assert False
except AssertionError as e:
print repr(e)""")))
def testAssertMsg(self):
want = (0, "AssertionError('foo',)\n")
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
try:
assert False, 'foo'
except AssertionError as e:
print repr(e)""")))
def testBareAssert(self):
# Assertion errors at the top level of a block should raise:
# https://github.com/google/grumpy/issues/18
want = (0, 'ok\n')
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
def foo():
assert False
try:
foo()
except AssertionError:
print 'ok'
else:
print 'bad'""")))
def testAssignAttribute(self):
self.assertEqual((0, '123\n'), _GrumpRun(textwrap.dedent("""\
e = Exception()
e.foo = 123
print e.foo""")))
def testAssignName(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
foo = 'bar'
print foo""")))
def testAssignMultiple(self):
self.assertEqual((0, 'baz baz\n'), _GrumpRun(textwrap.dedent("""\
foo = bar = 'baz'
print foo, bar""")))
def testAssignSubscript(self):
self.assertEqual((0, "{'bar': None}\n"), _GrumpRun(textwrap.dedent("""\
foo = {}
foo['bar'] = None
print foo""")))
def testAssignTuple(self):
self.assertEqual((0, 'a b\n'), _GrumpRun(textwrap.dedent("""\
baz = ('a', 'b')
foo, bar = baz
print foo, bar""")))
def testAugAssign(self):
self.assertEqual((0, '42\n'), _GrumpRun(textwrap.dedent("""\
foo = 41
foo += 1
print foo""")))
def testAugAssignBitAnd(self):
self.assertEqual((0, '3\n'), _GrumpRun(textwrap.dedent("""\
foo = 7
foo &= 3
print foo""")))
def testAugAssignPow(self):
self.assertEqual((0, '64\n'), _GrumpRun(textwrap.dedent("""\
foo = 8
foo **= 2
print foo""")))
def testClassDef(self):
self.assertEqual((0, "<type 'type'>\n"), _GrumpRun(textwrap.dedent("""\
class Foo(object):
pass
print type(Foo)""")))
def testClassDefWithVar(self):
self.assertEqual((0, 'abc\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'abc'
print Foo.bar""")))
def testDeleteAttribute(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 42
del Foo.bar
print hasattr(Foo, 'bar')""")))
def testDeleteClassLocal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'baz'
del bar
print hasattr(Foo, 'bar')""")))
def testDeleteGlobal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
foo = 42
del foo
print 'foo' in globals()""")))
def testDeleteLocal(self):
self.assertEqual((0, 'ok\n'), _GrumpRun(textwrap.dedent("""\
def foo():
bar = 123
del bar
try:
print bar
raise AssertionError
except UnboundLocalError:
print 'ok'
foo()""")))
def testDeleteNonexistentLocal(self):
self.assertRaisesRegexp(
util.ParseError, 'cannot delete nonexistent local',
_ParseAndVisit, 'def foo():\n del bar')
def testDeleteSubscript(self):
self.assertEqual((0, '{}\n'), _GrumpRun(textwrap.dedent("""\
foo = {'bar': 'baz'}
del foo['bar']
print foo""")))
def testExprCall(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
print 'bar'
foo()""")))
def testExprNameGlobal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
foo""")))
def testExprNameLocal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
def bar():
foo
bar()""")))
def testFor(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i""")))
def testForBreak(self):
self.assertEqual((0, '1\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
break""")))
def testForContinue(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
continue
raise AssertionError""")))
def testForElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
for i in (1,):
print 'foo'
else:
print 'bar'""")))
def testForElseBreakNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testForElseContinueNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testFunctionDecorator(self):
self.assertEqual((0, '<b>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def bold(fn):
return lambda: '<b>' + fn() + '</b>'
@bold
def foo():
return 'foo'
print foo()""")))
def testFunctionDecoratorWithArg(self):
self.assertEqual((0, '<b id=red>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def tag(name):
def bold(fn):
return lambda: '<b id=' + name + '>' + fn() + '</b>'
return bold
@tag('red')
def foo():
return 'foo'
print foo()""")))
def testFunctionDef(self):
self.assertEqual((0, 'bar baz\n'), _GrumpRun(textwrap.dedent("""\
def foo(a, b):
print a, b
foo('bar', 'baz')""")))
def testFunctionDefGenerator(self):
self.assertEqual((0, "['foo', 'bar']\n"), _GrumpRun(textwrap.dedent("""\
def gen():
yield 'foo'
yield 'bar'
print list(gen())""")))
def testFunctionDefGeneratorReturnValue(self):
self.assertRaisesRegexp(
util.ParseError, 'returning a value in a generator function',
_ParseAndVisit, 'def foo():\n yield 1\n return 2')
def testFunctionDefLocal(self):
self.assertEqual((0, 'baz\n'), _GrumpRun(textwrap.dedent("""\
def foo():
def bar():
print 'baz'
bar()
foo()""")))
def testIf(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
if 123:
print 'foo'
if '':
print 'bar'""")))
def testIfElif(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
elif False:
print 'bar'
if False:
print 'foo'
elif True:
print 'bar'""")))
def testIfElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
else:
print 'bar'
if False:
print 'foo'
else:
print 'bar'""")))
def testImport(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
import sys
print type(sys.modules)""")))
def testImportFutureLateRaises(self):
regexp = 'from __future__ imports must occur at the beginning of the file'
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'foo = bar\nfrom __future__ import print_function')
def testFutureUnicodeLiterals(self):
want = "u'foo'\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
print repr('foo')""")))
def testImportMember(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
from sys import modules
print type(modules)""")))
def testImportConflictingPackage(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import time
from "__go__/time" import Now""")))
def testImportNative(self):
self.assertEqual((0, '1 1000000000\n'), _GrumpRun(textwrap.dedent("""\
from "__go__/time" import Nanosecond, Second
print Nanosecond, Second""")))
def testImportGrumpy(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
from "__go__/grumpy" import Assert
Assert(__frame__(), True, 'bad')""")))
def testImportNativeType(self):
self.assertEqual((0, "<type 'Duration'>\n"), _GrumpRun(textwrap.dedent("""\
from "__go__/time" import Duration
print Duration""")))
def testImportWildcardMemberRaises(self):
regexp = 'wildcard member import is not implemented'
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'from foo import *')
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'from "__go__/foo" import *')
def testPrintStatement(self):
self.assertEqual((0, 'abc 123\nfoo bar\n'), _GrumpRun(textwrap.dedent("""\
print 'abc',
print '123'
print 'foo', 'bar'""")))
def testPrintFunction(self):
want = "abc\n123\nabc 123\nabcx123\nabc 123 "
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
"module docstring is ok to proceed __future__"
from __future__ import print_function
print('abc')
print(123)
print('abc', 123)
print('abc', 123, sep='x')
print('abc', 123, end=' ')""")))
def testRaiseExitStatus(self):
self.assertEqual(1, _GrumpRun('raise Exception')[0])
def testRaiseInstance(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise RuntimeError('foo')
print 'bad'
except RuntimeError as e:
print e""")))
def testRaiseTypeAndArg(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise KeyError('foo')
print 'bad'
except KeyError as e:
print e""")))
def testRaiseAgain(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
try:
raise AssertionError('foo')
except AssertionError:
raise
except Exception as e:
print e""")))
def testRaiseTraceback(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import sys
try:
try:
raise Exception
except:
e, _, tb = sys.exc_info()
raise e, None, tb
except:
e2, _, tb2 = sys.exc_info()
assert e is e2
assert tb is tb2""")))
def testReturn(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
return 'bar'
print foo()""")))
def testTryBareExcept(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except:
pass""")))
def testTryElse(self):
self.assertEqual((0, 'foo baz\n'), _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
except:
print 'bar'
else:
print 'baz'""")))
def testTryMultipleExcept(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except RuntimeError:
print 'foo'
except AssertionError:
print 'bar'
except:
print 'baz'""")))
def testTryFinally(self):
result = _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
finally:
print 'bar'
try:
print 'foo',
raise Exception
finally:
print 'bar'"""))
self.assertEqual(1, result[0])
self.assertIn('foo bar\nfoo bar\n', result[1])
self.assertIn('Exception\n', result[1])
def testWhile(self):
self.assertEqual((0, '2\n1\n'), _GrumpRun(textwrap.dedent("""\
i = 2
while i:
print i
i -= 1""")))
def testWhileElse(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
while False:
print 'foo'
else:
print 'bar'""")))
def testWith(self):
self.assertEqual((0, 'enter\n1\nexit\nenter\n2\nexit\n3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
print "enter"
def __exit__(self, exc_type, value, traceback):
print "exit"
a = ContextManager()
with a:
print 1
try:
with a:
print 2
raise RuntimeError
except RuntimeError:
print 3
""")))
def testWithAs(self):
self.assertEqual((0, '1 2 3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
return (1, (2, 3))
def __exit__(self, *args):
pass
with ContextManager() as [x, (y, z)]:
print x, y, z
""")))
def testWriteExceptDispatcherBareExcept(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
ast.ExceptHandler(type=None)]
self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
'exc', 'tb', handlers), [1, 2])
expected = re.compile(r'ResolveGlobal\(.*foo.*\bIsInstance\(.*'
r'goto Label1.*goto Label2', re.DOTALL)
self.assertRegexpMatches(visitor.writer.getvalue(), expected)
def testWriteExceptDispatcherBareExceptionNotLast(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=None),
ast.ExceptHandler(type=ast.Name(id='foo'))]
self.assertRaisesRegexp(util.ParseError, r"default 'except:' must be last",
visitor._write_except_dispatcher, # pylint: disable=protected-access
'exc', 'tb', handlers)
def testWriteExceptDispatcherMultipleExcept(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
ast.ExceptHandler(type=ast.Name(id='bar'))]
self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
'exc', 'tb', handlers), [1, 2])
expected = re.compile(
r'ResolveGlobal\(.*foo.*\bif .*\bIsInstance\(.*\{.*goto Label1.*'
r'ResolveGlobal\(.*bar.*\bif .*\bIsInstance\(.*\{.*goto Label2.*'
r'\bRaise\(exc\.ToObject\(\), nil, tb\.ToObject\(\)\)', re.DOTALL)
self.assertRegexpMatches(visitor.writer.getvalue(), expected)
def _MakeModuleBlock():
return block.ModuleBlock(None, '__main__', '<test>', '',
imputil.FutureFeatures())
def _ParseAndVisit(source):
mod = pythonparser.parse(source)
_, future_features = imputil.parse_future_features(mod)
importer = imputil.Importer(None, 'foo', 'foo.py', False)
b = block.ModuleBlock(importer, '__main__', '<test>',
source, future_features)
visitor = stmt.StatementVisitor(b)
visitor.visit(mod)
return visitor
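# Pipes `cmd` as Python source to the `grumpy run` CLI and returns a
# (returncode, output) tuple, where output combines stdout and stderr.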
def _GrumpRun(cmd):
p = subprocess.Popen(['grumpy', 'run'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = p.communicate(cmd)
return p.returncode, out
if __name__ == '__main__':
shard_test.main()
| corona10/grumpy | grumpy-tools-src/grumpy_tools/compiler/stmt_test.py | Python | apache-2.0 | 16929 |
"""Translation helper functions."""
import locale
import os
import re
import sys
import gettext as gettext_module
from cStringIO import StringIO
from django.utils.importlib import import_module
from django.utils.safestring import mark_safe, SafeData
from django.utils.thread_support import currentThread
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = {}
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
(?:;q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
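# Examples (illustrative):
#   to_locale('en-us')                -> 'en_US'
#   to_locale('en-us', to_lower=True) -> 'en_us'
#   to_locale('de')                   -> 'de'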
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
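# Examples (illustrative):
#   to_language('en_US') -> 'en-us'
#   to_language('de')    -> 'de'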
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset. Django uses a defined DEFAULT_CHARSET as the output charset on
Python 2.4. With Python 2.3, use DjangoTranslation23.
"""
def __init__(self, *args, **kw):
from django.conf import settings
gettext_module.GNUTranslations.__init__(self, *args, **kw)
# Starting with Python 2.4, there's a function to define
# the output charset. Before 2.4, the output charset is
# identical with the translation file charset.
try:
self.set_output_charset('utf-8')
except AttributeError:
pass
self.django_output_charset = 'utf-8'
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
def language(self):
return self.__language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
class DjangoTranslation23(DjangoTranslation):
"""
Compatibility class that is only used with Python 2.3.
Python 2.3 doesn't support set_output_charset on translation objects and
needs this wrapper class to make sure input charsets from translation files
are correctly translated to output charsets.
With a full switch to Python 2.4, this can be removed from the source.
"""
def gettext(self, msgid):
res = self.ugettext(msgid)
return res.encode(self.django_output_charset)
def ngettext(self, msgid1, msgid2, n):
res = self.ungettext(msgid1, msgid2, n)
return res.encode(self.django_output_charset)
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
# set up the right translation class
klass = DjangoTranslation
if sys.version_info < (2, 4):
klass = DjangoTranslation23
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if settings.SETTINGS_MODULE is not None:
parts = settings.SETTINGS_MODULE.split('.')
project = import_module(parts[0])
projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
else:
projectpath = None
def _fetch(lang, fallback=None):
global _translations
loc = to_locale(lang)
res = _translations.get(lang, None)
if res is not None:
return res
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], klass)
t.set_language(lang)
return t
except IOError, e:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in _translations]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for localepath in settings.LOCALE_PATHS:
if os.path.isdir(localepath):
res = _merge(localepath)
if projectpath and os.path.isdir(projectpath):
res = _merge(projectpath)
for appname in settings.INSTALLED_APPS:
app = import_module(appname)
apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
Fetches the translation object for a given tuple of application name and
language and installs it as the current translation object for the current
thread.
"""
_active[currentThread()] = translation(language)
def deactivate():
"""
    Uninstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
global _active
if currentThread() in _active:
del _active[currentThread()]
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active[currentThread()] = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = _active.get(currentThread(), None)
if t is not None:
try:
return to_language(t.language())
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
False = left-to-right layout
True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
result = getattr(t, translation_function)(message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
return do_translate(message, 'gettext')
def ugettext(message):
return do_translate(message, 'ugettext')
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a UTF-8 bytestring of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
def ungettext(singular, plural, number):
"""
    Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies or
session.
"""
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None:
return True
else:
return False
def get_language_from_request(request):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
"""
global _accepted
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
supported = dict(settings.LANGUAGES)
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code and lang_code in supported and check_for_language(lang_code):
return lang_code
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# We have a very restricted form for our language files (no encoding
# specifier, since they all must be UTF-8 and only one possible
        # language each time). So we avoid the overhead of gettext.find() and
# work out the MO file manually.
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
for lang, dirname in ((accept_lang, normalized),
(accept_lang.split('-')[0], normalized.split('_')[0])):
if lang.lower() not in supported:
continue
langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES',
'django.mo')
if os.path.exists(langfile):
_accepted[normalized] = lang
return lang
return settings.LANGUAGE_CODE
def get_date_formats():
"""
Checks whether translation files provide a translation for some technical
message ID to store date and time formats. If it doesn't contain one, the
formats provided in the settings will be used.
"""
from django.conf import settings
date_format = ugettext('DATE_FORMAT')
datetime_format = ugettext('DATETIME_FORMAT')
time_format = ugettext('TIME_FORMAT')
if date_format == 'DATE_FORMAT':
date_format = settings.DATE_FORMAT
if datetime_format == 'DATETIME_FORMAT':
datetime_format = settings.DATETIME_FORMAT
if time_format == 'TIME_FORMAT':
time_format = settings.TIME_FORMAT
return date_format, datetime_format, time_format
def get_partial_date_formats():
"""
Checks whether translation files provide a translation for some technical
message ID to store partial date formats. If it doesn't contain one, the
formats provided in the settings will be used.
"""
from django.conf import settings
year_month_format = ugettext('YEAR_MONTH_FORMAT')
month_day_format = ugettext('MONTH_DAY_FORMAT')
if year_month_format == 'YEAR_MONTH_FORMAT':
year_month_format = settings.YEAR_MONTH_FORMAT
if month_day_format == 'MONTH_DAY_FORMAT':
month_day_format = settings.MONTH_DAY_FORMAT
return year_month_format, month_day_format
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
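# Example (illustrative): blankout('foo bar', 'X') -> 'XXX XXX'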
inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""")
block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK
out = StringIO()
intrans = False
inplural = False
singular = []
plural = []
for t in Lexer(src, None).tokenize():
if intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
raise SyntaxError("Translation blocks must not include other block tags: %s" % t.contents)
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
if inplural:
plural.append(t.contents)
else:
singular.append(t.contents)
else:
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"': g = g.strip('"')
elif g[0] == "'": g = g.strip("'")
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format errors in lang_string results in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
priority = priority and float(priority) or 1.0
result.append((lang, priority))
result.sort(lambda x, y: -cmp(x[1], y[1]))
return result
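# Example (illustrative):
#   parse_accept_lang_header('da, en-gb;q=0.8, en;q=0.7')
#   -> [('da', 1.0), ('en-gb', 0.8), ('en', 0.7)]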
| greggian/TapdIn | django/utils/translation/trans_real.py | Python | apache-2.0 | 20192 |
"""
Support for EBox.
Get data from the 'My Usage Page': https://client.ebox.ca/myusage
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ebox/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_USERNAME,
CONF_PASSWORD,
CONF_NAME,
CONF_MONITORED_VARIABLES,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.exceptions import PlatformNotReady
_LOGGER = logging.getLogger(__name__)
GIGABITS = "Gb"
PRICE = "CAD"
DAYS = "days"
PERCENT = "%"
DEFAULT_NAME = "EBox"
REQUESTS_TIMEOUT = 15
SCAN_INTERVAL = timedelta(minutes=15)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
SENSOR_TYPES = {
"usage": ["Usage", PERCENT, "mdi:percent"],
"balance": ["Balance", PRICE, "mdi:square-inc-cash"],
"limit": ["Data limit", GIGABITS, "mdi:download"],
"days_left": ["Days left", DAYS, "mdi:calendar-today"],
"before_offpeak_download": ["Download before offpeak", GIGABITS, "mdi:download"],
"before_offpeak_upload": ["Upload before offpeak", GIGABITS, "mdi:upload"],
"before_offpeak_total": ["Total before offpeak", GIGABITS, "mdi:download"],
"offpeak_download": ["Offpeak download", GIGABITS, "mdi:download"],
"offpeak_upload": ["Offpeak Upload", GIGABITS, "mdi:upload"],
"offpeak_total": ["Offpeak Total", GIGABITS, "mdi:download"],
"download": ["Download", GIGABITS, "mdi:download"],
"upload": ["Upload", GIGABITS, "mdi:upload"],
"total": ["Total", GIGABITS, "mdi:download"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_VARIABLES): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
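# Illustrative configuration.yaml entry for this platform (credentials are
# placeholders; monitored_variables must be keys of SENSOR_TYPES above):
#
#   sensor:
#     - platform: ebox
#       username: YOUR_EBOX_USERNAME
#       password: YOUR_EBOX_PASSWORD
#       monitored_variables:
#         - usage
#         - days_left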
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the EBox sensor."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
httpsession = hass.helpers.aiohttp_client.async_get_clientsession()
ebox_data = EBoxData(username, password, httpsession)
name = config.get(CONF_NAME)
from pyebox.client import PyEboxError
try:
await ebox_data.async_update()
except PyEboxError as exp:
_LOGGER.error("Failed login: %s", exp)
raise PlatformNotReady
sensors = []
for variable in config[CONF_MONITORED_VARIABLES]:
sensors.append(EBoxSensor(ebox_data, variable, name))
async_add_entities(sensors, True)
class EBoxSensor(Entity):
"""Implementation of a EBox sensor."""
def __init__(self, ebox_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.ebox_data = ebox_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
async def async_update(self):
"""Get the latest data from EBox and update the state."""
await self.ebox_data.async_update()
if self.type in self.ebox_data.data:
self._state = round(self.ebox_data.data[self.type], 2)
class EBoxData:
"""Get data from Ebox."""
def __init__(self, username, password, httpsession):
"""Initialize the data object."""
from pyebox import EboxClient
self.client = EboxClient(username, password, REQUESTS_TIMEOUT, httpsession)
self.data = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Ebox."""
from pyebox.client import PyEboxError
try:
await self.client.fetch_data()
except PyEboxError as exp:
_LOGGER.error("Error on receive last EBox data: %s", exp)
return
# Update data
self.data = self.client.get_data()
| Cinntax/home-assistant | homeassistant/components/ebox/sensor.py | Python | apache-2.0 | 4756 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the output module field formatting helper."""
import unittest
from dfdatetime import semantic_time as dfdatetime_semantic_time
from dfvfs.path import fake_path_spec
from plaso.containers import events
from plaso.lib import definitions
from plaso.output import formatting_helper
from tests.containers import test_lib as containers_test_lib
from tests.output import test_lib
class TestFieldFormattingHelper(formatting_helper.FieldFormattingHelper):
"""Field formatter helper for testing purposes."""
_FIELD_FORMAT_CALLBACKS = {'zone': '_FormatTimeZone'}
class FieldFormattingHelperTest(test_lib.OutputModuleTestCase):
"""Test the output module field formatting helper."""
# pylint: disable=protected-access
_TEST_EVENTS = [
{'data_type': 'test:event',
'filename': 'log/syslog.1',
'hostname': 'ubuntu',
'path_spec': fake_path_spec.FakePathSpec(
location='log/syslog.1'),
'text': (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_CHANGE}]
def testFormatDateTime(self):
"""Tests the _FormatDateTime function with dynamic time."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, '2012-06-27T18:17:01.000000+00:00')
output_mediator.SetTimezone('Europe/Amsterdam')
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, '2012-06-27T20:17:01.000000+02:00')
output_mediator.SetTimezone('UTC')
event.date_time = dfdatetime_semantic_time.InvalidTime()
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, 'Invalid')
def testFormatDateTimeWithoutDynamicTime(self):
"""Tests the _FormatDateTime function without dynamic time."""
output_mediator = self._CreateOutputMediator(dynamic_time=False)
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
# Test with event.date_time
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, '2012-06-27T18:17:01.000000+00:00')
output_mediator.SetTimezone('Europe/Amsterdam')
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, '2012-06-27T20:17:01.000000+02:00')
output_mediator.SetTimezone('UTC')
event.date_time = dfdatetime_semantic_time.InvalidTime()
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, '0000-00-00T00:00:00.000000+00:00')
# Test with event.timestamp
event.date_time = None
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, '2012-06-27T18:17:01.000000+00:00')
event.timestamp = 0
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, '0000-00-00T00:00:00.000000+00:00')
event.timestamp = -9223372036854775808
date_time_string = test_helper._FormatDateTime(
event, event_data, event_data_stream)
self.assertEqual(date_time_string, '0000-00-00T00:00:00.000000+00:00')
def testFormatDisplayName(self):
"""Tests the _FormatDisplayName function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
display_name_string = test_helper._FormatDisplayName(
event, event_data, event_data_stream)
self.assertEqual(display_name_string, 'FAKE:log/syslog.1')
def testFormatFilename(self):
"""Tests the _FormatFilename function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
filename_string = test_helper._FormatFilename(
event, event_data, event_data_stream)
self.assertEqual(filename_string, 'log/syslog.1')
def testFormatHostname(self):
"""Tests the _FormatHostname function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
hostname_string = test_helper._FormatHostname(
event, event_data, event_data_stream)
self.assertEqual(hostname_string, 'ubuntu')
def testFormatInode(self):
"""Tests the _FormatInode function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
inode_string = test_helper._FormatInode(
event, event_data, event_data_stream)
self.assertEqual(inode_string, '-')
def testFormatMACB(self):
"""Tests the _FormatMACB function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
macb_string = test_helper._FormatMACB(event, event_data, event_data_stream)
self.assertEqual(macb_string, '..C.')
def testFormatMessage(self):
"""Tests the _FormatMessage function."""
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
message_string = test_helper._FormatMessage(
event, event_data, event_data_stream)
expected_message_string = (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session closed '
'for user root)')
self.assertEqual(message_string, expected_message_string)
def testFormatMessageShort(self):
"""Tests the _FormatMessageShort function."""
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
message_short_string = test_helper._FormatMessageShort(
event, event_data, event_data_stream)
expected_message_short_string = (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session closed '
'for user root)')
self.assertEqual(message_short_string, expected_message_short_string)
def testFormatSource(self):
"""Tests the _FormatSource function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
source_string = test_helper._FormatSource(
event, event_data, event_data_stream)
self.assertEqual(source_string, 'Test log file')
def testFormatSourceShort(self):
"""Tests the _FormatSourceShort function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
source_short_string = test_helper._FormatSourceShort(
event, event_data, event_data_stream)
self.assertEqual(source_short_string, 'FILE')
def testFormatTag(self):
"""Tests the _FormatTag function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
tag_string = test_helper._FormatTag(None)
self.assertEqual(tag_string, '-')
event_tag = events.EventTag()
event_tag.AddLabel('one')
event_tag.AddLabel('two')
tag_string = test_helper._FormatTag(event_tag)
self.assertEqual(tag_string, 'one two')
def testFormatTime(self):
"""Tests the _FormatTime function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
# Test with event.date_time
time_string = test_helper._FormatTime(
event, event_data, event_data_stream)
self.assertEqual(time_string, '18:17:01')
output_mediator.SetTimezone('Europe/Amsterdam')
time_string = test_helper._FormatTime(
event, event_data, event_data_stream)
self.assertEqual(time_string, '20:17:01')
output_mediator.SetTimezone('UTC')
# Test with event.timestamp
event.date_time = None
time_string = test_helper._FormatTime(
event, event_data, event_data_stream)
self.assertEqual(time_string, '18:17:01')
event.timestamp = 0
time_string = test_helper._FormatTime(
event, event_data, event_data_stream)
self.assertEqual(time_string, '--:--:--')
event.timestamp = -9223372036854775808
time_string = test_helper._FormatTime(
event, event_data, event_data_stream)
self.assertEqual(time_string, '--:--:--')
def testFormatTimeZone(self):
"""Tests the _FormatTimeZone function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
zone_string = test_helper._FormatTimeZone(
event, event_data, event_data_stream)
self.assertEqual(zone_string, 'UTC')
def testFormatUsername(self):
"""Tests the _FormatUsername function."""
output_mediator = self._CreateOutputMediator()
test_helper = formatting_helper.FieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
username_string = test_helper._FormatUsername(
event, event_data, event_data_stream)
self.assertEqual(username_string, '-')
# TODO: add coverage for _ReportEventError
def testGetFormattedField(self):
"""Tests the GetFormattedField function."""
output_mediator = self._CreateOutputMediator()
test_helper = TestFieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
zone_string = test_helper.GetFormattedField(
'zone', event, event_data, event_data_stream, None)
self.assertEqual(zone_string, 'UTC')
if __name__ == '__main__':
unittest.main()
| kiddinn/plaso | tests/output/formatting_helper.py | Python | apache-2.0 | 12029 |
from turbo.flux import Mutation, register, dispatch, register_dispatch
import mutation_types
@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
pass
def decrease(rank):
return dispatch('user', mutation_types.DECREASE, rank)
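# Illustrative usage, assuming turbo.flux wires the decorated/dispatching
# functions to registered 'user'/'metric' stores:
#   increase(1)  # dispatches mutation_types.INCREASE to the 'user' store
#   decrease(1)  # dispatches mutation_types.DECREASE to the 'user' store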
@register_dispatch('metric', 'inc_qps')
def inc_qps():
    pass
| tao12345666333/app-turbo | demos/helloworld/store/actions.py | Python | apache-2.0 | 322 |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
"""Test migration operations supported by admin user"""
@classmethod
def setup_clients(cls):
super(MigrationsAdminTest, cls).setup_clients()
cls.client = cls.os_admin.migrations_client
@decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
def test_list_migrations(self):
"""Test admin user can get the migrations list"""
self.client.list_migrations()
@decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_list_migrations_in_flavor_resize_situation(self):
"""Admin can get the migrations list containing the resized server"""
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
self.resize_server(server_id, self.flavor_ref_alt)
body = self.client.list_migrations()['migrations']
instance_uuids = [x['instance_uuid'] for x in body]
self.assertIn(server_id, instance_uuids)
def _flavor_clean_up(self, flavor_id):
try:
self.admin_flavors_client.delete_flavor(flavor_id)
self.admin_flavors_client.wait_for_resource_deletion(flavor_id)
except exceptions.NotFound:
pass
@decorators.idempotent_id('33f1fec3-ba18-4470-8e4e-1d888e7c3593')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_revert_deleted_flavor(self):
"""Test reverting resized server with original flavor deleted
Tests that we can revert the resize on an instance whose original
flavor has been deleted.
"""
# First we have to create a flavor that we can delete so make a copy
# of the normal flavor from which we'd create a server.
flavor = self.admin_flavors_client.show_flavor(
self.flavor_ref)['flavor']
flavor = self.admin_flavors_client.create_flavor(
name=data_utils.rand_name('test_resize_flavor_'),
ram=flavor['ram'],
disk=flavor['disk'],
vcpus=flavor['vcpus']
)['flavor']
self.addCleanup(self._flavor_clean_up, flavor['id'])
# Set extra specs same as self.flavor_ref for the created flavor,
# because the environment may need some special extra specs to
# create server which should have been contained in
# self.flavor_ref.
extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
self.flavor_ref)['extra_specs']
if extra_spec_keys:
self.admin_flavors_client.set_flavor_extra_spec(
flavor['id'], **extra_spec_keys)
# Now boot a server with the copied flavor.
server = self.create_test_server(
wait_until='ACTIVE', flavor=flavor['id'])
server = self.servers_client.show_server(server['id'])['server']
# If 'id' not in server['flavor'], we can only compare the flavor
# details, so here we should save the to-be-deleted flavor's details,
# for the flavor comparison after the server resizing.
if not server['flavor'].get('id'):
pre_flavor = {}
body = self.flavors_client.show_flavor(flavor['id'])['flavor']
for key in ['name', 'ram', 'vcpus', 'disk']:
pre_flavor[key] = body[key]
# Delete the flavor we used to boot the instance.
self._flavor_clean_up(flavor['id'])
# Now resize the server and wait for it to go into verify state.
self.servers_client.resize_server(server['id'], self.flavor_ref_alt)
waiters.wait_for_server_status(self.servers_client, server['id'],
'VERIFY_RESIZE')
# Now revert the resize, it should be OK even though the original
# flavor used to boot the server was deleted.
self.servers_client.revert_resize_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
server = self.servers_client.show_server(server['id'])['server']
if server['flavor'].get('id'):
            msg = ('server flavor is not the same as the original flavor!')
self.assertEqual(flavor['id'], server['flavor']['id'], msg)
else:
self.assertEqual(pre_flavor['name'],
server['flavor']['original_name'],
"original_name in server flavor is not same as "
"flavor name!")
for key in ['ram', 'vcpus', 'disk']:
                msg = ('attribute %s in server flavor is not the same as '
                       'the original flavor!' % key)
self.assertEqual(pre_flavor[key], server['flavor'][key], msg)
def _test_cold_migrate_server(self, revert=False):
if CONF.compute.min_compute_nodes < 2:
msg = "Less than 2 compute nodes, skipping multinode tests."
raise self.skipException(msg)
server = self.create_test_server(wait_until="ACTIVE")
src_host = self.get_host_for_server(server['id'])
self.admin_servers_client.migrate_server(server['id'])
waiters.wait_for_server_status(self.servers_client,
server['id'], 'VERIFY_RESIZE')
if revert:
self.servers_client.revert_resize_server(server['id'])
assert_func = self.assertEqual
else:
self.servers_client.confirm_resize_server(server['id'])
assert_func = self.assertNotEqual
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
dst_host = self.get_host_for_server(server['id'])
assert_func(src_host, dst_host)
@decorators.idempotent_id('4bf0be52-3b6f-4746-9a27-3143636fe30d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_cold_migration(self):
"""Test cold migrating server and then confirm the migration"""
self._test_cold_migrate_server(revert=False)
@decorators.idempotent_id('caa1aa8b-f4ef-4374-be0d-95f001c2ac2d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_revert_cold_migration(self):
"""Test cold migrating server and then revert the migration"""
self._test_cold_migrate_server(revert=True)
| openstack/tempest | tempest/api/compute/admin/test_migrations.py | Python | apache-2.0 | 7577 |
from functools import wraps
import json
import os
import traceback
import validators
from jinja2 import Environment, PackageLoader
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
import requests
from requests.auth import HTTPBasicAuth
env = Environment(
loader=PackageLoader('saagie', 'jinja2'),
)
SAAGIE_ROOT_URL = os.environ.get("SAAGIE_ROOT_URL", None)
SAAGIE_USERNAME = None
PLATFORMS_URL = None
SAAGIE_BASIC_AUTH_TOKEN = None
JOBS_URL_PATTERN = None
JOB_URL_PATTERN = None
JOB_UPGRADE_URL_PATTERN = None
SCRIPT_UPLOAD_URL_PATTERN = None
def get_absolute_saagie_url(saagie_url):
if saagie_url.startswith('/'):
return SAAGIE_ROOT_URL + saagie_url
return saagie_url
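# Example (illustrative, assuming SAAGIE_ROOT_URL = 'https://saagie.example.com'):
#   get_absolute_saagie_url('/api/v1/platform')
#   -> 'https://saagie.example.com/api/v1/platform'
# URLs that do not start with '/' are returned unchanged.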
class ResponseError(Exception):
def __init__(self, status_code):
self.status_code = status_code
super(ResponseError, self).__init__(status_code)
class SaagieHandler(IPythonHandler):
def handle_request(self, method):
data = {k: v[0].decode() for k, v in self.request.arguments.items()}
if 'view' not in data:
self.send_error(404)
return
view_name = data.pop('view')
notebook_path = data.pop('notebook_path', None)
notebook_json = data.pop('notebook_json', None)
notebook = Notebook(notebook_path, notebook_json)
try:
template_name, template_data = views.render(
view_name, notebook=notebook, data=data, method=method)
except ResponseError as e:
self.send_error(e.status_code)
return
except:
template_name = 'internal_error.html'
template_data = {'error': traceback.format_exc()}
self.set_status(500)
template_data.update(
notebook=notebook,
)
template = env.get_template(template_name)
self.finish(template.render(template_data))
def get(self):
self.handle_request('GET')
def post(self):
self.handle_request('POST')
def check_xsrf_cookie(self):
return
class SaagieCheckHandler(IPythonHandler):
def get(self):
self.finish()
class SaagieJobRun:
def __init__(self, job, run_data):
self.job = job
self.id = run_data['id']
self.status = run_data['status']
self.stderr = run_data.get('logs_err', '')
self.stdout = run_data.get('logs_out', '')
class SaagieJob:
@classmethod
def from_id(cls, notebook, platform_id, job_id):
return SaagieJob(
notebook,
requests.get(JOB_URL_PATTERN % (platform_id, job_id), auth=SAAGIE_BASIC_AUTH_TOKEN).json())
def __init__(self, notebook, job_data):
self.notebook = notebook
self.data = job_data
self.platform_id = job_data['platform_id']
self.capsule_type = job_data['capsule_code']
self.id = job_data['id']
self.name = job_data['name']
self.last_run = None
def set_as_current(self):
self.notebook.current_job = self
@property
def url(self):
return (JOBS_URL_PATTERN + '/%s') % (self.platform_id, self.id)
@property
def admin_url(self):
return get_absolute_saagie_url('/#/manager/%s/job/%s'
% (self.platform_id, self.id))
@property
def logs_url(self):
return self.admin_url + '/logs'
@property
def is_started(self):
return self.last_run is not None
def fetch_logs(self):
job_data = requests.get(self.url, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
run_data = job_data.get('last_instance')
if run_data is None or run_data['status'] not in ('SUCCESS', 'FAILED'):
return
run_data = requests.get(
get_absolute_saagie_url('/api/v1/jobtask/%s'
% run_data['id']), auth=SAAGIE_BASIC_AUTH_TOKEN).json()
self.last_run = SaagieJobRun(self, run_data)
@property
def details_template_name(self):
return 'include/python_job_details.html'
def __str__(self):
return self.name
def __eq__(self, other):
if other is None:
return False
return self.platform_id == other.platform_id and self.id == other.id
def __lt__(self, other):
if other is None:
return False
return self.id < other.id
class SaagiePlatform:
SUPPORTED_CAPSULE_TYPES = {'python'}
def __init__(self, notebook, platform_data):
self.notebook = notebook
self.id = platform_data['id']
self.name = platform_data['name']
self.capsule_types = {c['code'] for c in platform_data['capsules']}
@property
def is_supported(self):
return not self.capsule_types.isdisjoint(self.SUPPORTED_CAPSULE_TYPES)
def get_jobs(self):
if not self.is_supported:
return []
jobs_data = requests.get(JOBS_URL_PATTERN % self.id, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
return [SaagieJob(self.notebook, job_data) for job_data in jobs_data
if job_data['category'] == 'processing' and
job_data['capsule_code'] in self.SUPPORTED_CAPSULE_TYPES]
def __eq__(self, other):
return self.id == other.id
class Notebook:
CACHE = {}
def __new__(cls, path, json):
if path in cls.CACHE:
return cls.CACHE[path]
cls.CACHE[path] = new = super(Notebook, cls).__new__(cls)
return new
def __init__(self, path, json_data):
if path is None:
path = 'Untitled.ipynb'
if json_data is None:
json_data = json.dumps({
'cells': [],
'metadata': {'kernelspec': {'name': 'python3'}}})
self.path = path
self.json = json.loads(json_data)
# In cached instances, current_job is already defined.
if not hasattr(self, 'current_job'):
self.current_job = None
@property
def name(self):
return os.path.splitext(os.path.basename(self.path))[0]
@property
def kernel_name(self):
return self.json['metadata']['kernelspec']['name']
@property
def kernel_display_name(self):
return self.json['metadata']['kernelspec']['display_name']
def get_code_cells(self):
return [cell['source'] for cell in self.json['cells']
if cell['cell_type'] == 'code']
def get_code(self, indices=None):
cells = self.get_code_cells()
if indices is None:
indices = list(range(len(cells)))
return '\n\n\n'.join([cells[i] for i in indices])
def get_platforms(self):
return [SaagiePlatform(self, platform_data)
for platform_data in requests.get(PLATFORMS_URL, auth=SAAGIE_BASIC_AUTH_TOKEN).json()]
class ViewsCollection(dict):
def add(self, func):
self[func.__name__] = func
return func
def render(self, view_name, notebook, data=None, method='GET', **kwargs):
if data is None:
data = {}
try:
view = views[view_name]
except KeyError:
raise ResponseError(404)
template_data = view(method, notebook, data, **kwargs)
if isinstance(template_data, tuple):
template_name, template_data = template_data
else:
template_name = view.__name__ + '.html'
return template_name, template_data
views = ViewsCollection()
@views.add
def modal(method, notebook, data):
return {}
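# Pattern note: `@views.add` registers a view under its function name, and
# `views.render('<view_name>', notebook, ...)` calls it with (method, notebook,
# data) and renders '<view_name>.html' unless the view returns an explicit
# (template_name, template_data) tuple.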
def clear_basic_auth_token():
global SAAGIE_BASIC_AUTH_TOKEN
SAAGIE_BASIC_AUTH_TOKEN = None
# Init an empty Basic Auth token on first launch
clear_basic_auth_token()
def is_logged():
if SAAGIE_ROOT_URL is None or SAAGIE_BASIC_AUTH_TOKEN is None:
return False
else:
# Check if Basic token is still valid
is_logged_in = False
try:
response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current', auth=SAAGIE_BASIC_AUTH_TOKEN, allow_redirects=False)
is_logged_in = response.ok
except (requests.ConnectionError, requests.RequestException, requests.HTTPError, requests.Timeout) as err:
print ('Error while trying to connect to Saagie: ', err)
if is_logged_in is not True:
# Remove Basic Auth token from globals. It will force a new login phase.
clear_basic_auth_token()
return is_logged_in
def define_globals(saagie_root_url, saagie_username):
if saagie_root_url is not None:
global SAAGIE_ROOT_URL
global SAAGIE_USERNAME
global PLATFORMS_URL
global JOBS_URL_PATTERN
global JOB_URL_PATTERN
global JOB_UPGRADE_URL_PATTERN
global SCRIPT_UPLOAD_URL_PATTERN
SAAGIE_USERNAME = saagie_username
SAAGIE_ROOT_URL = saagie_root_url.strip("/")
PLATFORMS_URL = SAAGIE_ROOT_URL + '/api/v1/platform'
JOBS_URL_PATTERN = PLATFORMS_URL + '/%s/job'
JOB_URL_PATTERN = JOBS_URL_PATTERN + '/%s'
JOB_UPGRADE_URL_PATTERN = JOBS_URL_PATTERN + '/%s/version'
SCRIPT_UPLOAD_URL_PATTERN = JOBS_URL_PATTERN + '/upload'
@views.add
def login_form(method, notebook, data):
if method == 'POST':
# check if the given Saagie URL is well formed
if not validators.url(data['saagie_root_url']):
return {'error': 'Invalid URL', 'saagie_root_url': data['saagie_root_url'] or '', 'username': data['username'] or ''}
define_globals(data['saagie_root_url'], data['username'])
try:
basic_token = HTTPBasicAuth(data['username'], data['password'])
current_user_response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current', auth=basic_token, allow_redirects=False)
if current_user_response.ok:
# Login succeeded, keep the basic token for future API calls
global SAAGIE_BASIC_AUTH_TOKEN
SAAGIE_BASIC_AUTH_TOKEN = basic_token
except (requests.ConnectionError, requests.RequestException, requests.HTTPError, requests.Timeout) as err:
print ('Error while trying to connect to Saagie: ', err)
return {'error': 'Connection error', 'saagie_root_url': SAAGIE_ROOT_URL, 'username': SAAGIE_USERNAME or ''}
if SAAGIE_BASIC_AUTH_TOKEN is not None:
return views.render('capsule_type_chooser', notebook)
return {'error': 'Invalid URL, username or password.', 'saagie_root_url': SAAGIE_ROOT_URL, 'username': SAAGIE_USERNAME or ''}
if is_logged():
return views.render('capsule_type_chooser', notebook)
return {'error': None, 'saagie_root_url': SAAGIE_ROOT_URL or '', 'username': SAAGIE_USERNAME or ''}
def login_required(view):
@wraps(view)
def inner(method, notebook, data, *args, **kwargs):
if not is_logged():
return views.render('login_form', notebook)
return view(method, notebook, data, *args, **kwargs)
return inner
@views.add
@login_required
def capsule_type_chooser(method, notebook, data):
return {'username': SAAGIE_USERNAME}
def get_job_form(method, notebook, data):
context = {'platforms': notebook.get_platforms()}
context['values'] = ({'current': {'options': {}}} if notebook.current_job is None
else notebook.current_job.data)
return context
def create_job_base_data(data):
return {
'platform_id': data['saagie-platform'],
'category': 'processing',
'name': data['job-name'],
'description': data['description'],
'current': {
'cpu': data['cpu'],
'disk': data['disk'],
'memory': data['ram'],
'isInternalSubDomain': False,
'isInternalPort': False,
'options': {}
}
}
def upload_python_script(notebook, data):
code = notebook.get_code(map(int, data.get('code-lines', '').split('|')))
files = {'file': (data['job-name'] + '.py', code)}
return requests.post(
SCRIPT_UPLOAD_URL_PATTERN % data['saagie-platform'],
files=files, auth=SAAGIE_BASIC_AUTH_TOKEN).json()['fileName']
@views.add
@login_required
def python_job_form(method, notebook, data):
if method == 'POST':
platform_id = data['saagie-platform']
job_data = create_job_base_data(data)
job_data['capsule_code'] = 'python'
job_data['always_email'] = False
job_data['manual'] = True
job_data['retry'] = ''
current = job_data['current']
current['options']['language_version'] = data['language-version']
current['releaseNote'] = data['release-note']
current['template'] = data['shell-command']
current['file'] = upload_python_script(notebook, data)
new_job_data = requests.post(JOBS_URL_PATTERN % platform_id,
json=job_data, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
job = SaagieJob(notebook, new_job_data)
job.set_as_current()
return views.render('starting_job', notebook, {'job': job})
context = get_job_form(method, notebook, data)
context['action'] = '/saagie?view=python_job_form'
context['username'] = SAAGIE_USERNAME
return context
@views.add
@login_required
def update_python_job(method, notebook, data):
if method == 'POST':
job = notebook.current_job
platform_id = job.platform_id
data['saagie-platform'] = platform_id
data['job-name'] = job.name
data['description'] = ''
current = create_job_base_data(data)['current']
current['options']['language_version'] = data['language-version']
current['releaseNote'] = data['release-note']
current['template'] = data['shell-command']
current['file'] = upload_python_script(notebook, data)
requests.post(JOB_UPGRADE_URL_PATTERN % (platform_id, job.id),
json={'current': current}, auth=SAAGIE_BASIC_AUTH_TOKEN)
job.last_run = None
return views.render('starting_job', notebook, {'job': job})
context = get_job_form(method, notebook, data)
context['action'] = '/saagie?view=update_python_job'
context['username'] = SAAGIE_USERNAME
return context
@views.add
@login_required
def select_python_job(method, notebook, data):
if method == 'POST':
platform_id, job_id = data['job'].split('-')
notebook.current_job = SaagieJob.from_id(notebook, platform_id, job_id)
return views.render('update_python_job', notebook, data)
jobs_by_platform = []
for platform in notebook.get_platforms():
jobs = platform.get_jobs()
if jobs:
jobs_by_platform.append((platform,
list(sorted(jobs, reverse=True))))
return {'jobs_by_platform': jobs_by_platform,
'action': '/saagie?view=select_python_job', 'username': SAAGIE_USERNAME}
@views.add
@login_required
def unsupported_kernel(method, notebook, data):
return {'username': SAAGIE_USERNAME}
@views.add
@login_required
def starting_job(method, notebook, data):
job = notebook.current_job
job.fetch_logs()
if job.is_started:
return views.render('started_job', notebook, {'job': job})
return {'job': job, 'username': SAAGIE_USERNAME}
@views.add
@login_required
def started_job(method, notebook, data):
return {'job': notebook.current_job, 'username': SAAGIE_USERNAME}
@views.add
def logout(method, notebook, data):
global SAAGIE_BASIC_AUTH_TOKEN
global SAAGIE_ROOT_URL
global SAAGIE_USERNAME
SAAGIE_BASIC_AUTH_TOKEN = None
SAAGIE_ROOT_URL = None
SAAGIE_USERNAME = None
return {}
def load_jupyter_server_extension(nb_app):
web_app = nb_app.web_app
base_url = web_app.settings['base_url']
route_pattern = url_path_join(base_url, '/saagie')
web_app.add_handlers('.*$', [(route_pattern, SaagieHandler)])
route_pattern = url_path_join(base_url, '/saagie/check')
web_app.add_handlers('.*$', [(route_pattern, SaagieCheckHandler)])
| saagie/jupyter-saagie-plugin | saagie/server_extension.py | Python | apache-2.0 | 16090 |
import numpy as np
class WordClusters(object):
def __init__(self, vocab, clusters):
self.vocab = vocab
self.clusters = clusters
def ix(self, word):
"""
        Returns the index into self.vocab and self.clusters for 'word'
"""
temp = np.where(self.vocab == word)[0]
if temp.size == 0:
raise KeyError("Word not in vocabulary")
else:
return temp[0]
def __getitem__(self, word):
return self.get_cluster(word)
def get_cluster(self, word):
"""
Returns the cluster number for a word in the vocabulary
"""
idx = self.ix(word)
return self.clusters[idx]
def get_words_on_cluster(self, cluster):
return self.vocab[self.clusters == cluster]
@classmethod
def from_text(cls, fname):
vocab = np.genfromtxt(fname, dtype=str, delimiter=" ", usecols=0)
clusters = np.genfromtxt(fname, dtype=int, delimiter=" ", usecols=1)
return cls(vocab=vocab, clusters=clusters)
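# A minimal usage sketch (the file name and words below are hypothetical, not
# part of this module); it assumes a clusters text file produced by word2vec's
# clustering mode, with one "<word> <cluster-id>" pair per line.
if __name__ == "__main__":
    clusters = WordClusters.from_text("clusters.txt")
    cluster_id = clusters["dog"]
    print(cluster_id)
    print(clusters.get_words_on_cluster(cluster_id)[:10])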
| danielfrg/word2vec | word2vec/wordclusters.py | Python | apache-2.0 | 1,041 |
"""api_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
version = 'v1.0'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'api/%s/' % version, include('apis.urls'))
]
| AutohomeOps/Assets_Report | api_server/api_server/urls.py | Python | apache-2.0 | 846 |
import os,json
from cgi import escape
def unescape(s):
    s = s.replace("&lt;", "<")
    s = s.replace("&gt;", ">")
    # this has to be last:
    s = s.replace("&amp;", "&")
return s
class FilesystemMixin:
def h_fs_get(_,path,eltName=''):
from stat import S_ISDIR
data = (escape(open(path).read())
if not S_ISDIR(os.stat(path).st_mode)
else [(p,S_ISDIR(os.stat(path+'/'+p).st_mode))
for p in os.listdir(path)])
_.ws.send(json.dumps({"method":"fs_get","result":[path,data,eltName]}))
pass
def h_fs_put(_,path,data):
f=open(path,'w')
for x in data: f.write(unescape(x))
f.close()
pass
def h_fs_system(_,path,eltName='',cwd=None):
import subprocess as sp
import shlex
data=sp.Popen(shlex.split(path),cwd=cwd,stdout=sp.PIPE, stderr=sp.PIPE).communicate()
_.ws.send(json.dumps({"method":"fs_system","result":[path,data,eltName]}));
pass
def h_fs_mkdir (_,path): os.mkdir(path)
def h_fs_rmdir (_,path): os.rmdir(path)
def h_fs_touch (_,path): open(path,'w').close()
def h_fs_unlink(_,path): os.unlink(path)
pass
class FsApp(FilesystemMixin):
def __init__(_,ws):_.ws=ws
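# Illustrative sketch of how these handlers can be driven (_DummySocket and the
# calls below are assumptions for demonstration, not part of the original
# module): FsApp only needs an object exposing send(), and each h_fs_* handler
# pushes its result back over that socket as a JSON message.
class _DummySocket(object):
    def send(self, payload):
        print(payload)
def _example_fs_usage():
    app = FsApp(_DummySocket())
    app.h_fs_get('.')                        # directory listing as JSON
    app.h_fs_touch('/tmp/example.txt')
    app.h_fs_put('/tmp/example.txt', ['hello world'])
    app.h_fs_unlink('/tmp/example.txt')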
| val314159/framist | fssvr/fs.py | Python | apache-2.0 | 1,267 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
tests for catalog module
"""
import os
import fabric.api
from fabric.operations import _AttributeString
from mock import patch
from prestoadmin import catalog
from prestoadmin.util import constants
from prestoadmin.util.exception import ConfigurationError, \
ConfigFileNotFoundError
from prestoadmin.standalone.config import PRESTO_STANDALONE_USER_GROUP
from prestoadmin.util.local_config_util import get_catalog_directory
from tests.unit.base_unit_case import BaseUnitCase
class TestCatalog(BaseUnitCase):
def setUp(self):
super(TestCatalog, self).setUp(capture_output=True)
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_not_exist(self, isfile_mock):
isfile_mock.return_value = False
self.assertRaisesRegexp(ConfigurationError,
'Configuration for catalog dummy not found',
catalog.add, 'dummy')
@patch('prestoadmin.catalog.validate')
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_exists(self, isfile_mock, deploy_mock, validate_mock):
isfile_mock.return_value = True
catalog.add('tpch')
filenames = ['tpch.properties']
deploy_mock.assert_called_with(filenames,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
validate_mock.assert_called_with(filenames)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.validate')
def test_add_all(self, mock_validate, listdir_mock, isdir_mock,
deploy_mock):
catalogs = ['tpch.properties', 'another.properties']
listdir_mock.return_value = catalogs
catalog.add()
deploy_mock.assert_called_with(catalogs,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_all_fails_if_dir_not_there(self, isdir_mock, deploy_mock):
isdir_mock.return_value = False
self.assertRaisesRegexp(ConfigFileNotFoundError,
r'Cannot add catalogs because directory .+'
r' does not exist',
catalog.add)
self.assertFalse(deploy_mock.called)
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
@patch('prestoadmin.catalog.os.remove')
def test_remove(self, local_rm_mock, exists_mock, sudo_mock):
script = ('if [ -f /etc/presto/catalog/tpch.properties ] ; '
'then rm /etc/presto/catalog/tpch.properties ; '
'else echo "Could not remove catalog \'tpch\'. '
'No such file \'/etc/presto/catalog/tpch.properties\'"; fi')
exists_mock.return_value = True
fabric.api.env.host = 'localhost'
catalog.remove('tpch')
sudo_mock.assert_called_with(script)
local_rm_mock.assert_called_with(get_catalog_directory() +
'/tpch.properties')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_failure(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
out = _AttributeString()
out.succeeded = False
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] Failed to remove catalog tpch.',
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_no_such_file(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
error_msg = ('Could not remove catalog tpch: No such file ' +
os.path.join(get_catalog_directory(), 'tpch.properties'))
out = _AttributeString(error_msg)
out.succeeded = True
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] %s' % error_msg,
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_warning_if_connector_dir_empty(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
listdir_mock.return_value = []
catalog.add()
self.assertEqual('\nWarning: Directory %s is empty. No catalogs will'
' be deployed\n\n' % get_catalog_directory(),
self.test_stderr.getvalue())
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_permission_denied(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
error_msg = ('Permission denied')
listdir_mock.side_effect = OSError(13, error_msg)
fabric.api.env.host = 'localhost'
self.assertRaisesRegexp(SystemExit, '\[localhost\] %s' % error_msg,
catalog.add)
@patch('prestoadmin.catalog.os.remove')
@patch('prestoadmin.catalog.remove_file')
def test_remove_os_error(self, remove_file_mock, remove_mock):
fabric.api.env.host = 'localhost'
error = OSError(13, 'Permission denied')
remove_mock.side_effect = error
self.assertRaisesRegexp(OSError, 'Permission denied',
catalog.remove, 'tpch')
@patch('prestoadmin.catalog.secure_create_directory')
@patch('prestoadmin.util.fabricapi.put')
def test_deploy_files(self, put_mock, create_dir_mock):
local_dir = '/my/local/dir'
remote_dir = '/my/remote/dir'
catalog.deploy_files(['a', 'b'], local_dir, remote_dir,
PRESTO_STANDALONE_USER_GROUP)
create_dir_mock.assert_called_with(remote_dir, PRESTO_STANDALONE_USER_GROUP)
put_mock.assert_any_call('/my/local/dir/a', remote_dir, use_sudo=True,
mode=0600)
put_mock.assert_any_call('/my/local/dir/b', remote_dir, use_sudo=True,
mode=0600)
@patch('prestoadmin.catalog.os.path.isfile')
@patch("__builtin__.open")
def test_validate(self, open_mock, is_file_mock):
is_file_mock.return_value = True
file_obj = open_mock.return_value.__enter__.return_value
file_obj.read.return_value = 'connector.noname=example'
self.assertRaisesRegexp(ConfigurationError,
'Catalog configuration example.properties '
'does not contain connector.name',
catalog.add, 'example')
@patch('prestoadmin.catalog.os.path.isfile')
def test_validate_fail(self, is_file_mock):
is_file_mock.return_value = True
self.assertRaisesRegexp(
SystemExit,
'Error validating ' + os.path.join(get_catalog_directory(), 'example.properties') + '\n\n'
'Underlying exception:\n No such file or directory',
catalog.add, 'example')
@patch('prestoadmin.catalog.get')
@patch('prestoadmin.catalog.files.exists')
@patch('prestoadmin.catalog.ensure_directory_exists')
@patch('prestoadmin.catalog.os.path.exists')
def test_gather_connectors(self, path_exists, ensure_dir_exists,
files_exists, get_mock):
fabric.api.env.host = 'any_host'
path_exists.return_value = False
files_exists.return_value = True
catalog.gather_catalogs('local_config_dir')
get_mock.assert_called_once_with(
constants.REMOTE_CATALOG_DIR, 'local_config_dir/any_host/catalog', use_sudo=True)
# if remote catalog dir does not exist
get_mock.reset_mock()
files_exists.return_value = False
results = catalog.gather_catalogs('local_config_dir')
self.assertEqual([], results)
self.assertFalse(get_mock.called)
| prestodb/presto-admin | tests/unit/test_catalog.py | Python | apache-2.0 | 9,157 |
"""
Installs and configures MySQL
"""
import uuid
import logging
from packstack.installer import validators
from packstack.installer import utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-MySQL"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding MySQL OpenStack configuration")
paramsList = [
{"CMD_OPTION" : "mysql-host",
"USAGE" : "The IP address of the server on which to install MySQL",
"PROMPT" : "Enter the IP address of the MySQL server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_MYSQL_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "mysql-user",
"USAGE" : "Username for the MySQL admin user",
"PROMPT" : "Enter the username for the MySQL admin user",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "root",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_MYSQL_USER",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "mysql-pw",
"USAGE" : "Password for the MySQL admin user",
"PROMPT" : "Enter the password for the MySQL admin user",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_MYSQL_PW",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : True,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "MYSQL",
"DESCRIPTION" : "MySQL Config parameters",
"PRE_CONDITION" : lambda x: 'yes',
"PRE_CONDITION_MATCH" : "yes",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
mysqlsteps = [
{'title': 'Adding MySQL manifest entries',
'functions':[createmanifest]}
]
controller.addSequence("Installing MySQL", [], [], mysqlsteps)
def createmanifest(config):
if config['CONFIG_MYSQL_INSTALL'] == 'y':
install = True
suffix = 'install'
else:
install = False
suffix = 'noinstall'
# In case we are not installing MySQL server, mysql* manifests have
# to be run from Keystone host
host = install and config['CONFIG_MYSQL_HOST'] \
or config['CONFIG_KEYSTONE_HOST']
manifestfile = "%s_mysql.pp" % host
manifestdata = [getManifestTemplate('mysql_%s.pp' % suffix)]
def append_for(module, suffix):
# Modules have to be appended to the existing mysql.pp
# otherwise pp will fail for some of them saying that
# Mysql::Config definition is missing.
template = "mysql_%s_%s.pp" % (module, suffix)
manifestdata.append(getManifestTemplate(template))
append_for("keystone", suffix)
hosts = set()
for mod in ['nova', 'cinder', 'glance', 'neutron', 'heat']:
if config['CONFIG_%s_INSTALL' % mod.upper()] == 'y':
append_for(mod, suffix)
            # Check which modules are enabled so we can allow their
# hosts on the firewall
if mod != 'nova' and mod != 'neutron':
hosts.add(config.get('CONFIG_%s_HOST' % mod.upper()).strip())
elif mod == 'neutron':
hosts.add(config.get('CONFIG_NEUTRON_SERVER_HOST').strip())
elif config['CONFIG_NOVA_INSTALL'] != 'n':
        # In the remote case that we have lots of nova hosts
hosts.add(config.get('CONFIG_NOVA_API_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_CERT_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_VNCPROXY_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_CONDUCTOR_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_SCHED_HOST').strip())
if config['CONFIG_NEUTRON_INSTALL'] != 'y':
dbhosts = split_hosts(config['CONFIG_NOVA_NETWORK_HOSTS'])
hosts |= dbhosts
for host in config.get('CONFIG_NOVA_COMPUTE_HOSTS').split(','):
hosts.add(host.strip())
config['FIREWALL_ALLOWED'] = ",".join(["'%s'" % i for i in hosts])
config['FIREWALL_SERVICE_NAME'] = "mysql"
config['FIREWALL_PORTS'] = "'3306'"
manifestdata.append(getManifestTemplate("firewall.pp"))
appendManifestFile(manifestfile, "\n".join(manifestdata), 'pre')
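# For illustration (hosts are hypothetical): with glance on 192.168.0.2 and
# cinder on 192.168.0.3 enabled, the code above would leave
# config['FIREWALL_ALLOWED'] = "'192.168.0.2','192.168.0.3'",
# config['FIREWALL_SERVICE_NAME'] = "mysql" and config['FIREWALL_PORTS'] = "'3306'",
# values that are consumed by the firewall.pp template appended to the manifest.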
| tangfeixiong/packstack | packstack/plugins/mysql_001.py | Python | apache-2.0 | 5,760 |
def power_digit_sum(exponent):
power_of_2 = str(2 ** exponent)
return sum([int(x) for x in power_of_2]) | plilja/project-euler | problem_16/power_digit_sum.py | Python | apache-2.0 | 111 |
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for music processing in Magenta."""
# Meter-related constants.
DEFAULT_QUARTERS_PER_MINUTE = 120.0
DEFAULT_STEPS_PER_BAR = 16 # 4/4 music sampled at 4 steps per quarter note.
DEFAULT_STEPS_PER_QUARTER = 4
# Default absolute quantization.
DEFAULT_STEPS_PER_SECOND = 100
# Standard pulses per quarter.
# https://en.wikipedia.org/wiki/Pulses_per_quarter_note
STANDARD_PPQ = 220
# Special melody events.
NUM_SPECIAL_MELODY_EVENTS = 2
MELODY_NOTE_OFF = -1
MELODY_NO_EVENT = -2
# Other melody-related constants.
MIN_MELODY_EVENT = -2
MAX_MELODY_EVENT = 127
MIN_MIDI_PITCH = 0 # Inclusive.
MAX_MIDI_PITCH = 127 # Inclusive.
NUM_MIDI_PITCHES = MAX_MIDI_PITCH - MIN_MIDI_PITCH + 1
NOTES_PER_OCTAVE = 12
# Velocity-related constants.
MIN_MIDI_VELOCITY = 1 # Inclusive.
MAX_MIDI_VELOCITY = 127 # Inclusive.
# Program-related constants.
MIN_MIDI_PROGRAM = 0
MAX_MIDI_PROGRAM = 127
# MIDI programs that typically sound unpitched.
UNPITCHED_PROGRAMS = (
list(range(96, 104)) + list(range(112, 120)) + list(range(120, 128)))
# Chord symbol for "no chord".
NO_CHORD = 'N.C.'
# The indices of the pitch classes in a major scale.
MAJOR_SCALE = [0, 2, 4, 5, 7, 9, 11]
# NOTE_KEYS[note] = The major keys that note belongs to.
# ex. NOTE_KEYS[0] lists all the major keys that contain the note C,
# which are:
# [0, 1, 3, 5, 7, 8, 10]
# [C, C#, D#, F, G, G#, A#]
#
# 0 = C
# 1 = C#
# 2 = D
# 3 = D#
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = A#
# 11 = B
#
# NOTE_KEYS can be generated using the code below, but is explicitly declared
# for readability:
# NOTE_KEYS = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
# for i in range(12)]
NOTE_KEYS = [
[0, 1, 3, 5, 7, 8, 10],
[1, 2, 4, 6, 8, 9, 11],
[0, 2, 3, 5, 7, 9, 10],
[1, 3, 4, 6, 8, 10, 11],
[0, 2, 4, 5, 7, 9, 11],
[0, 1, 3, 5, 6, 8, 10],
[1, 2, 4, 6, 7, 9, 11],
[0, 2, 3, 5, 7, 8, 10],
[1, 3, 4, 6, 8, 9, 11],
[0, 2, 4, 5, 7, 9, 10],
[1, 3, 5, 6, 8, 10, 11],
[0, 2, 4, 6, 7, 9, 11]
]
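# Sanity-check sketch for the relationship documented in the comment above;
# the assertion is illustrative and not part of the original module.
assert NOTE_KEYS == [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
                     for i in range(12)]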
| magenta/note-seq | note_seq/constants.py | Python | apache-2.0 | 2,620 |
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to AWS CloudWatch Logs."""
import json
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
class LogGroup(resource.BaseResource):
"""Class representing a CloudWatch log group."""
def __init__(self, region, name, retention_in_days=7):
super(LogGroup, self).__init__()
self.region = region
self.name = name
self.retention_in_days = retention_in_days
def _Create(self):
"""Create the log group."""
create_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'create-log-group',
'--log-group-name', self.name
]
vm_util.IssueCommand(create_cmd)
def _Delete(self):
"""Delete the log group."""
delete_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'delete-log-group',
'--log-group-name', self.name
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def Exists(self):
"""Returns True if the log group exists."""
describe_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'describe-log-groups',
'--log-group-name-prefix', self.name,
'--no-paginate'
]
stdout, _, _ = vm_util.IssueCommand(describe_cmd)
log_groups = json.loads(stdout)['logGroups']
group = next((group for group in log_groups
if group['logGroupName'] == self.name), None)
return bool(group)
def _PostCreate(self):
"""Set the retention policy."""
put_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'put-retention-policy',
'--log-group-name', self.name,
'--retention-in-days', str(self.retention_in_days)
]
vm_util.IssueCommand(put_cmd)
def GetLogs(region, stream_name, group_name, token=None):
"""Fetches the JSON formatted log stream starting at the token."""
get_cmd = util.AWS_PREFIX + [
'--region', region,
'logs', 'get-log-events',
'--start-from-head',
'--log-group-name', group_name,
'--log-stream-name', stream_name,
]
if token:
get_cmd.extend(['--next-token', token])
stdout, _, _ = vm_util.IssueCommand(get_cmd)
return json.loads(stdout)
def GetLogStreamAsString(region, stream_name, log_group):
"""Returns the messages of the log stream as a string."""
log_lines = []
token = None
events = []
while token is None or events:
response = GetLogs(region, stream_name, log_group, token)
events = response['events']
token = response['nextForwardToken']
for event in events:
log_lines.append(event['message'])
return '\n'.join(log_lines)
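# Minimal usage sketch (region, group and stream names are hypothetical, and
# the Create()/Delete() lifecycle helpers are assumed to be provided by
# resource.BaseResource): create a log group, then read one of its streams
# back as a single string.
def _ExampleUsage():
  group = LogGroup('us-east-1', 'pkb-example-group')
  group.Create()
  try:
    print(GetLogStreamAsString('us-east-1', 'example-stream',
                               'pkb-example-group'))
  finally:
    group.Delete()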
| GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/providers/aws/aws_logs.py | Python | apache-2.0 | 3,293 |
"""Auto-generated file, do not edit by hand. BM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BM = PhoneMetadata(id='BM', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='(?:441|[58]\\d\\d|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='441(?:[46]\\d\\d|5(?:4\\d|60|89))\\d{4}', example_number='4414123456', possible_length=(10,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='441(?:[2378]\\d|5[0-39])\\d{5}', example_number='4413701234', possible_length=(10,), possible_length_local_only=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
personal_number=PhoneNumberDesc(national_number_pattern='52(?:3(?:[2-46-9][02-9]\\d|5(?:[02-46-9]\\d|5[0-46-9]))|4(?:[2-478][02-9]\\d|5(?:[034]\\d|2[024-9]|5[0-46-9])|6(?:0[1-9]|[2-9]\\d)|9(?:[05-9]\\d|2[0-5]|49)))\\d{4}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[12]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
national_prefix='1',
national_prefix_for_parsing='1|([2-8]\\d{6})$',
national_prefix_transform_rule='441\\1',
leading_digits='441',
mobile_number_portable_region=True)
| daviddrysdale/python-phonenumbers | python/phonenumbers/data/region_BM.py | Python | apache-2.0 | 1,530 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from past.builtins import basestring
from datetime import datetime
import logging
from urllib.parse import urlparse
from time import sleep
import airflow
from airflow import hooks, settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout, AirflowSkipException
from airflow.models import BaseOperator, TaskInstance, Connection as DB
from airflow.hooks.base_hook import BaseHook
from airflow.utils.state import State
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator):
'''
    Sensor operators are derived from this class and inherit these attributes.
    Sensor operators keep executing at a time interval and succeed when
    a criterion is met and fail if and when they time out.
:param soft_fail: Set to true to mark the task as SKIPPED on failure
:type soft_fail: bool
:param poke_interval: Time in seconds that the job should wait in
        between each try
:type poke_interval: int
:param timeout: Time, in seconds before the task times out and fails.
:type timeout: int
'''
ui_color = '#e6f1f2'
@apply_defaults
def __init__(
self,
poke_interval=60,
timeout=60*60*24*7,
soft_fail=False,
*args, **kwargs):
super(BaseSensorOperator, self).__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
def poke(self, context):
'''
Function that the sensors defined while deriving this class should
override.
'''
raise AirflowException('Override me.')
def execute(self, context):
started_at = datetime.now()
while not self.poke(context):
if (datetime.now() - started_at).total_seconds() > self.timeout:
if self.soft_fail:
raise AirflowSkipException('Snap. Time is OUT.')
else:
raise AirflowSensorTimeout('Snap. Time is OUT.')
sleep(self.poke_interval)
logging.info("Success criteria met. Exiting.")
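# Minimal sketch of deriving a concrete sensor from the base class above
# (FileSensorExample and its filepath argument are illustrative, not part of
# Airflow): only poke() needs to be overridden; execute() keeps calling it
# every poke_interval seconds until it returns True or the timeout is hit.
class FileSensorExample(BaseSensorOperator):
    @apply_defaults
    def __init__(self, filepath, *args, **kwargs):
        super(FileSensorExample, self).__init__(*args, **kwargs)
        self.filepath = filepath
    def poke(self, context):
        import os
        logging.info('Checking for {0}'.format(self.filepath))
        return os.path.exists(self.filepath)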
class SqlSensor(BaseSensorOperator):
"""
    Runs a sql statement repeatedly until a criterion is met. It keeps poking
    as long as the sql returns no rows, or the first cell is in (0, '0', '').
:param conn_id: The connection to run the sensor against
:type conn_id: string
:param sql: The sql to run. To pass, it needs to return at least one cell
that contains a non-zero / empty string value.
"""
template_fields = ('sql',)
template_ext = ('.hql', '.sql',)
@apply_defaults
def __init__(self, conn_id, sql, *args, **kwargs):
self.sql = sql
self.conn_id = conn_id
super(SqlSensor, self).__init__(*args, **kwargs)
def poke(self, context):
hook = BaseHook.get_connection(self.conn_id).get_hook()
logging.info('Poking: ' + self.sql)
records = hook.get_records(self.sql)
if not records:
return False
else:
if str(records[0][0]) in ('0', '',):
return False
else:
return True
class MetastorePartitionSensor(SqlSensor):
"""
    An alternative to the HivePartitionSensor that talks directly to the
    MySQL db. This was created as a result of observing sub-optimal
    queries generated by the Metastore thrift service when hitting
    subpartitioned tables. The Thrift service's queries were written in a
    way that would not leverage the indexes.
:param schema: the schema
:type schema: str
:param table: the table
:type table: str
:param partition_name: the partition name, as defined in the PARTITIONS
table of the Metastore. Order of the fields does matter.
Examples: ``ds=2016-01-01`` or
``ds=2016-01-01/sub=foo`` for a sub partitioned table
:type partition_name: str
:param mysql_conn_id: a reference to the MySQL conn_id for the metastore
:type mysql_conn_id: str
"""
template_fields = ('partition_name', 'table', 'schema')
@apply_defaults
def __init__(
self, table, partition_name, schema="default",
mysql_conn_id="metastore_mysql",
*args, **kwargs):
self.partition_name = partition_name
self.table = table
self.schema = schema
self.first_poke = True
self.conn_id = mysql_conn_id
super(SqlSensor, self).__init__(*args, **kwargs)
def poke(self, context):
if self.first_poke:
self.first_poke = False
if '.' in self.table:
self.schema, self.table = self.table.split('.')
self.sql = """
SELECT 'X'
FROM PARTITIONS A0
LEFT OUTER JOIN TBLS B0 ON A0.TBL_ID = B0.TBL_ID
LEFT OUTER JOIN DBS C0 ON B0.DB_ID = C0.DB_ID
WHERE
B0.TBL_NAME = '{self.table}' AND
C0.NAME = '{self.schema}' AND
A0.PART_NAME = '{self.partition_name}';
""".format(self=self)
return super(MetastorePartitionSensor, self).poke(context)
class ExternalTaskSensor(BaseSensorOperator):
"""
Waits for a task to complete in a different DAG
:param external_dag_id: The dag_id that contains the task you want to
wait for
:type external_dag_id: string
:param external_task_id: The task_id that contains the task you want to
wait for
:type external_task_id: string
:param allowed_states: list of allowed states, default is ``['success']``
:type allowed_states: list
:param execution_delta: time difference with the previous execution to
look at, the default is the same execution_date as the current task.
For yesterday, use [positive!] datetime.timedelta(days=1). Either
execution_delta or execution_date_fn can be passed to
ExternalTaskSensor, but not both.
:type execution_delta: datetime.timedelta
:param execution_date_fn: function that receives the current execution date
and returns the desired execution date to query. Either execution_delta
or execution_date_fn can be passed to ExternalTaskSensor, but not both.
:type execution_date_fn: callable
"""
@apply_defaults
def __init__(
self,
external_dag_id,
external_task_id,
allowed_states=None,
execution_delta=None,
execution_date_fn=None,
*args, **kwargs):
super(ExternalTaskSensor, self).__init__(*args, **kwargs)
self.allowed_states = allowed_states or [State.SUCCESS]
if execution_delta is not None and execution_date_fn is not None:
raise ValueError(
                'Only one of `execution_delta` or `execution_date_fn` may '
                'be provided to ExternalTaskSensor; not both.')
self.execution_delta = execution_delta
self.execution_date_fn = execution_date_fn
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
def poke(self, context):
if self.execution_delta:
dttm = context['execution_date'] - self.execution_delta
elif self.execution_date_fn:
dttm = self.execution_date_fn(context['execution_date'])
else:
dttm = context['execution_date']
logging.info(
'Poking for '
'{self.external_dag_id}.'
'{self.external_task_id} on '
'{dttm} ... '.format(**locals()))
TI = TaskInstance
session = settings.Session()
count = session.query(TI).filter(
TI.dag_id == self.external_dag_id,
TI.task_id == self.external_task_id,
TI.state.in_(self.allowed_states),
TI.execution_date == dttm,
).count()
session.commit()
session.close()
return count
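# Illustrative instantiation (dag and task ids are hypothetical): wait for
# yesterday's run of another DAG's final task, using the execution_delta
# convention described in the docstring above.
def _external_task_sensor_example():
    from datetime import timedelta
    return ExternalTaskSensor(
        task_id='wait_for_upstream',
        external_dag_id='upstream_dag',
        external_task_id='final_task',
        execution_delta=timedelta(days=1))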
class NamedHivePartitionSensor(BaseSensorOperator):
"""
Waits for a set of partitions to show up in Hive.
:param partition_names: List of fully qualified names of the
partitions to wait for. A fully qualified name is of the
form schema.table/pk1=pv1/pk2=pv2, for example,
default.users/ds=2016-01-01. This is passed as is to the metastore
Thrift client "get_partitions_by_name" method. Note that
you cannot use logical operators as in HivePartitionSensor.
:type partition_names: list of strings
:param metastore_conn_id: reference to the metastore thrift service
connection id
:type metastore_conn_id: str
"""
template_fields = ('partition_names', )
@apply_defaults
def __init__(
self,
partition_names,
metastore_conn_id='metastore_default',
poke_interval=60*3,
*args,
**kwargs):
super(NamedHivePartitionSensor, self).__init__(
poke_interval=poke_interval, *args, **kwargs)
if isinstance(partition_names, basestring):
raise TypeError('partition_names must be an array of strings')
self.metastore_conn_id = metastore_conn_id
self.partition_names = partition_names
self.next_poke_idx = 0
def parse_partition_name(self, partition):
try:
schema, table_partition = partition.split('.')
table, partition = table_partition.split('/', 1)
return schema, table, partition
except ValueError as e:
raise ValueError('Could not parse ' + partition)
def poke(self, context):
if not hasattr(self, 'hook'):
self.hook = airflow.hooks.hive_hooks.HiveMetastoreHook(
metastore_conn_id=self.metastore_conn_id)
def poke_partition(partition):
schema, table, partition = self.parse_partition_name(partition)
logging.info(
'Poking for {schema}.{table}/{partition}'.format(**locals())
)
return self.hook.check_for_named_partition(
schema, table, partition)
while self.next_poke_idx < len(self.partition_names):
if poke_partition(self.partition_names[self.next_poke_idx]):
self.next_poke_idx += 1
else:
return False
return True
class HivePartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in Hive.
Note: Because @partition supports general logical operators, it
can be inefficient. Consider using NamedHivePartitionSensor instead if
you don't need the full flexibility of HivePartitionSensor.
:param table: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:type table: string
:param partition: The partition clause to wait for. This is passed as
is to the metastore Thrift client "get_partitions_by_filter" method,
        and apparently supports SQL-like notation as in `ds='2015-01-01'
        AND type='value'` and > < signs as in "ds>=2015-01-01"
:type partition: string
:param metastore_conn_id: reference to the metastore thrift service
connection id
:type metastore_conn_id: str
"""
template_fields = ('schema', 'table', 'partition',)
@apply_defaults
def __init__(
self,
table, partition="ds='{{ ds }}'",
metastore_conn_id='metastore_default',
schema='default',
poke_interval=60*3,
*args, **kwargs):
super(HivePartitionSensor, self).__init__(
poke_interval=poke_interval, *args, **kwargs)
if not partition:
partition = "ds='{{ ds }}'"
self.metastore_conn_id = metastore_conn_id
self.table = table
self.partition = partition
self.schema = schema
def poke(self, context):
if '.' in self.table:
self.schema, self.table = self.table.split('.')
logging.info(
'Poking for table {self.schema}.{self.table}, '
'partition {self.partition}'.format(**locals()))
if not hasattr(self, 'hook'):
self.hook = airflow.hooks.hive_hooks.HiveMetastoreHook(
metastore_conn_id=self.metastore_conn_id)
return self.hook.check_for_partition(
self.schema, self.table, self.partition)
class HdfsSensor(BaseSensorOperator):
"""
Waits for a file or folder to land in HDFS
"""
template_fields = ('filepath',)
@apply_defaults
def __init__(
self,
filepath,
hdfs_conn_id='hdfs_default',
*args, **kwargs):
super(HdfsSensor, self).__init__(*args, **kwargs)
self.filepath = filepath
self.hdfs_conn_id = hdfs_conn_id
def poke(self, context):
import airflow.hooks.hdfs_hook
sb = airflow.hooks.hdfs_hook.HDFSHook(self.hdfs_conn_id).get_conn()
logging.getLogger("snakebite").setLevel(logging.WARNING)
logging.info(
'Poking for file {self.filepath} '.format(**locals()))
try:
files = [f for f in sb.ls([self.filepath])]
except:
return False
return True
class WebHdfsSensor(BaseSensorOperator):
"""
Waits for a file or folder to land in HDFS
"""
template_fields = ('filepath',)
@apply_defaults
def __init__(
self,
filepath,
webhdfs_conn_id='webhdfs_default',
*args, **kwargs):
super(WebHdfsSensor, self).__init__(*args, **kwargs)
self.filepath = filepath
self.webhdfs_conn_id = webhdfs_conn_id
def poke(self, context):
c = airflow.hooks.webhdfs_hook.WebHDFSHook(self.webhdfs_conn_id)
logging.info(
'Poking for file {self.filepath} '.format(**locals()))
return c.check_for_path(hdfs_path=self.filepath)
class S3KeySensor(BaseSensorOperator):
"""
    Waits for a key (a file-like instance on S3) to be present in an S3 bucket.
    S3 being a key/value store, it does not support folders. The path is just
    a key to a resource.
:param bucket_key: The key being waited on. Supports full s3:// style url
or relative path from root level.
:type bucket_key: str
:param bucket_name: Name of the S3 bucket
:type bucket_name: str
:param wildcard_match: whether the bucket_key should be interpreted as a
Unix wildcard pattern
:type wildcard_match: bool
:param s3_conn_id: a reference to the s3 connection
:type s3_conn_id: str
"""
template_fields = ('bucket_key', 'bucket_name')
@apply_defaults
def __init__(
self, bucket_key,
bucket_name=None,
wildcard_match=False,
s3_conn_id='s3_default',
*args, **kwargs):
super(S3KeySensor, self).__init__(*args, **kwargs)
session = settings.Session()
db = session.query(DB).filter(DB.conn_id == s3_conn_id).first()
if not db:
raise AirflowException("conn_id doesn't exist in the repository")
# Parse
if bucket_name is None:
parsed_url = urlparse(bucket_key)
if parsed_url.netloc == '':
raise AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
if parsed_url.path[0] == '/':
bucket_key = parsed_url.path[1:]
else:
bucket_key = parsed_url.path
self.bucket_name = bucket_name
self.bucket_key = bucket_key
self.wildcard_match = wildcard_match
self.s3_conn_id = s3_conn_id
session.commit()
session.close()
def poke(self, context):
import airflow.hooks.S3_hook
hook = airflow.hooks.S3_hook.S3Hook(s3_conn_id=self.s3_conn_id)
full_url = "s3://" + self.bucket_name + "/" + self.bucket_key
logging.info('Poking for key : {full_url}'.format(**locals()))
if self.wildcard_match:
return hook.check_for_wildcard_key(self.bucket_key,
self.bucket_name)
else:
return hook.check_for_key(self.bucket_key, self.bucket_name)
class S3PrefixSensor(BaseSensorOperator):
"""
Waits for a prefix to exist. A prefix is the first part of a key,
thus enabling checking of constructs similar to glob airfl* or
    SQL LIKE 'airfl%'. A delimiter can be specified to indicate the
    hierarchy of keys, meaning that the match will stop at that
delimiter. Current code accepts sane delimiters, i.e. characters that
are NOT special characters in the Python regex engine.
:param bucket_name: Name of the S3 bucket
:type bucket_name: str
:param prefix: The prefix being waited on. Relative path from bucket root level.
:type prefix: str
:param delimiter: The delimiter intended to show hierarchy.
Defaults to '/'.
:type delimiter: str
"""
template_fields = ('prefix', 'bucket_name')
@apply_defaults
def __init__(
self, bucket_name,
prefix, delimiter='/',
s3_conn_id='s3_default',
*args, **kwargs):
super(S3PrefixSensor, self).__init__(*args, **kwargs)
session = settings.Session()
db = session.query(DB).filter(DB.conn_id == s3_conn_id).first()
if not db:
raise AirflowException("conn_id doesn't exist in the repository")
# Parse
self.bucket_name = bucket_name
self.prefix = prefix
self.delimiter = delimiter
self.full_url = "s3://" + bucket_name + '/' + prefix
self.s3_conn_id = s3_conn_id
session.commit()
session.close()
def poke(self, context):
logging.info('Poking for prefix : {self.prefix}\n'
'in bucket s3://{self.bucket_name}'.format(**locals()))
import airflow.hooks.S3_hook
hook = airflow.hooks.S3_hook.S3Hook(s3_conn_id=self.s3_conn_id)
return hook.check_for_prefix(
prefix=self.prefix,
delimiter=self.delimiter,
bucket_name=self.bucket_name)
class TimeSensor(BaseSensorOperator):
"""
Waits until the specified time of the day.
:param target_time: time after which the job succeeds
:type target_time: datetime.time
"""
template_fields = tuple()
@apply_defaults
def __init__(self, target_time, *args, **kwargs):
super(TimeSensor, self).__init__(*args, **kwargs)
self.target_time = target_time
def poke(self, context):
logging.info(
'Checking if the time ({0}) has come'.format(self.target_time))
return datetime.now().time() > self.target_time
class TimeDeltaSensor(BaseSensorOperator):
"""
Waits for a timedelta after the task's execution_date + schedule_interval.
In Airflow, the daily task stamped with ``execution_date``
2016-01-01 can only start running on 2016-01-02. The timedelta here
represents the time after the execution period has closed.
:param delta: time length to wait after execution_date before succeeding
:type delta: datetime.timedelta
"""
template_fields = tuple()
@apply_defaults
def __init__(self, delta, *args, **kwargs):
super(TimeDeltaSensor, self).__init__(*args, **kwargs)
self.delta = delta
def poke(self, context):
dag = context['dag']
target_dttm = dag.following_schedule(context['execution_date'])
target_dttm += self.delta
logging.info('Checking if the time ({0}) has come'.format(target_dttm))
return datetime.now() > target_dttm
class HttpSensor(BaseSensorOperator):
"""
    Executes an HTTP GET request and returns False on failure:
404 not found or response_check function returned False
:param http_conn_id: The connection to run the sensor against
:type http_conn_id: string
:param endpoint: The relative part of the full url
:type endpoint: string
:param params: The parameters to be added to the GET url
:type params: a dictionary of string key/value pairs
:param headers: The HTTP headers to be added to the GET request
:type headers: a dictionary of string key/value pairs
:param response_check: A check against the 'requests' response object.
Returns True for 'pass' and False otherwise.
:type response_check: A lambda or defined function.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:type extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
"""
template_fields = ('endpoint',)
@apply_defaults
def __init__(self,
endpoint,
http_conn_id='http_default',
params=None,
headers=None,
response_check=None,
extra_options=None, *args, **kwargs):
super(HttpSensor, self).__init__(*args, **kwargs)
self.endpoint = endpoint
self.http_conn_id = http_conn_id
self.params = params or {}
self.headers = headers or {}
self.extra_options = extra_options or {}
self.response_check = response_check
self.hook = hooks.http_hook.HttpHook(method='GET', http_conn_id=http_conn_id)
def poke(self, context):
logging.info('Poking: ' + self.endpoint)
try:
response = self.hook.run(self.endpoint,
data=self.params,
headers=self.headers,
extra_options=self.extra_options)
if self.response_check:
# run content check on response
return self.response_check(response)
except AirflowException as ae:
if str(ae).startswith("404"):
return False
raise ae
return True
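# Illustrative instantiation (connection id, endpoint and check are
# hypothetical): poll an endpoint until the response body contains a marker,
# using response_check as described in the docstring above.
def _http_sensor_example():
    return HttpSensor(
        task_id='wait_for_service',
        http_conn_id='http_default',
        endpoint='health',
        response_check=lambda response: 'ok' in response.text,
        poke_interval=30)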
| d-lee/airflow | airflow/operators/sensors.py | Python | apache-2.0 | 22,890 |
"""Let's Encrypt constants."""
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Setuptools entry point group name for plugins."""
CLI_DEFAULTS = dict(
config_files=["/etc/letsencrypt/cli.ini"],
verbose_count=-(logging.WARNING / 10),
server="https://www.letsencrypt-demo.org/acme/new-reg",
rsa_key_size=2048,
rollback_checkpoints=0,
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
backup_dir="/var/lib/letsencrypt/backups",
key_dir="/etc/letsencrypt/keys",
certs_dir="/etc/letsencrypt/certs",
cert_path="/etc/letsencrypt/certs/cert-letsencrypt.pem",
chain_path="/etc/letsencrypt/certs/chain-letsencrypt.pem",
renewer_config_file="/etc/letsencrypt/renewer.conf",
no_verify_ssl=False,
dvsni_port=challenges.DVSNI.PORT,
)
"""Defaults for CLI flags and `.IConfig` attributes."""
RENEWER_DEFAULTS = dict(
renewer_config_file="/etc/letsencrypt/renewer.conf",
renewal_configs_dir="/etc/letsencrypt/configs",
archive_dir="/etc/letsencrypt/archive",
live_dir="/etc/letsencrypt/live",
renewer_enabled="yes",
renew_before_expiry="30 days",
deploy_before_expiry="20 days",
)
"""Defaults for renewer script."""
EXCLUSIVE_CHALLENGES = frozenset([frozenset([
challenges.DVSNI, challenges.SimpleHTTP])])
"""Mutually exclusive challenges."""
ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`letsencrypt.interfaces.IInstaller`
enhancements.
List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: TODO
- spdy: TODO
"""
CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to IConfig.work_dir)."""
IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
IConfig.work_dir)."""
CERT_KEY_BACKUP_DIR = "keys-certs"
"""Directory where all certificates and keys are stored (relative to
IConfig.work_dir. Used for easy revocation."""
ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""
ACCOUNT_KEYS_DIR = "keys"
"""Directory where account keys are saved. Relative to ACCOUNTS_DIR."""
REC_TOKEN_DIR = "recovery_tokens"
"""Directory where all recovery tokens are saved (relative to
IConfig.work_dir)."""
| digideskio/lets-encrypt-preview | letsencrypt/constants.py | Python | apache-2.0 | 2,420 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods in the action registry."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import action_registry
from core.tests import test_utils
class ActionRegistryUnitTests(test_utils.GenericTestBase):
"""Test for the action registry."""
def test_action_registry(self):
"""Do some sanity checks on the action registry."""
self.assertEqual(
len(action_registry.Registry.get_all_actions()), 3)
| prasanna08/oppia | core/domain/action_registry_test.py | Python | apache-2.0 | 1,192 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This submodule contains helper functions for parsing and printing the
contents of describe hashes for various DNAnexus entities (projects,
containers, dataobjects, apps, and jobs).
'''
from __future__ import print_function, unicode_literals, division, absolute_import
import datetime, time, json, math, sys, copy
import locale
import subprocess
from collections import defaultdict
import dxpy
from .printing import (RED, GREEN, BLUE, YELLOW, WHITE, BOLD, UNDERLINE, ENDC, DELIMITER, get_delimiter, fill)
from ..compat import basestring, USING_PYTHON2
def JOB_STATES(state):
if state == 'failed':
return BOLD() + RED() + state + ENDC()
elif state == 'done':
return BOLD() + GREEN() + state + ENDC()
elif state in ['running', 'in_progress']:
return GREEN() + state + ENDC()
elif state == 'partially_failed':
return RED() + state + ENDC()
else:
return YELLOW() + state + ENDC()
def DATA_STATES(state):
if state == 'open':
return YELLOW() + state + ENDC()
elif state == 'closing':
return YELLOW() + state + ENDC()
elif state == 'closed':
return GREEN() + state + ENDC()
else:
return state
SIZE_LEVEL = ['bytes', 'KB', 'MB', 'GB', 'TB']
def get_size_str(size):
"""
Formats a byte size as a string.
The returned string is no more than 9 characters long.
"""
if size is None:
return "0 " + SIZE_LEVEL[0]
if size == 0:
magnitude = 0
level = 0
else:
magnitude = math.floor(math.log(size, 10))
level = int(min(math.floor(magnitude // 3), 4))
return ('%d' if level == 0 else '%.2f') % (float(size) / 2**(level*10)) + ' ' + SIZE_LEVEL[level]
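# For illustration (values follow from the formula above, shown as a sketch
# rather than authoritative output): get_size_str(None) and get_size_str(0)
# both render as "0 bytes", get_size_str(500) as "500 bytes", and
# get_size_str(1536) as "1.50 KB" (1536 / 2**10).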
def parse_typespec(thing):
if isinstance(thing, basestring):
return thing
elif '$and' in thing:
return '(' + ' AND '.join(map(parse_typespec, thing['$and'])) + ')'
elif '$or' in thing:
return '(' + ' OR '.join(map(parse_typespec, thing['$or'])) + ')'
else:
return 'Type spec could not be parsed'
def get_io_desc(parameter, include_class=True, show_opt=True, app_help_version=False):
# For interactive help, format array:CLASS inputs as:
# -iNAME=CLASS [-iNAME=... [...]] # If input is required (needs >=1 inputs)
    #   [-iNAME=CLASS [...]]             # If input is optional (needs >=0 inputs)
if app_help_version and parameter["class"].startswith("array"):
scalar_parameter = parameter.copy()
# Munge the parameter dict (strip off "array:" to turn it into a
# scalar) and recurse
scalar_parameter["class"] = scalar_parameter["class"][6:]
if "default" in parameter or parameter.get("optional"):
return "[" + get_io_desc(scalar_parameter, include_class=include_class, show_opt=False, app_help_version=app_help_version) + " [-i%s=... [...]]]" % (parameter["name"],)
else:
return get_io_desc(scalar_parameter, include_class=include_class, show_opt=False, app_help_version=app_help_version) + " [-i%s=... [...]]" % (parameter["name"],)
desc = ""
is_optional = False
if show_opt:
if "default" in parameter or parameter.get("optional"):
is_optional = True
desc += "["
desc += ('-i' if app_help_version else '') + parameter["name"]
include_parens = include_class or 'type' in parameter or 'default' in parameter
if include_parens:
desc += ("=" if app_help_version else " ") + "("
is_first = True
if include_class:
desc += parameter["class"]
is_first = False
if "type" in parameter:
if not is_first:
desc += ", "
else:
is_first = False
desc += "type " + parse_typespec(parameter["type"])
if "default" in parameter:
if not is_first:
desc += ', '
desc += 'default=' + json.dumps(parameter['default'])
if include_parens:
desc += ")"
if show_opt and is_optional:
desc += "]"
return desc
def get_io_spec(spec, skip_fields=None):
if spec is None:
return 'null'
if skip_fields is None:
skip_fields = []
filtered_spec = [param for param in spec if param["name"] not in skip_fields]
groups = defaultdict(list)
for param in filtered_spec:
groups[param.get('group')].append(param)
list_of_params = []
for param in groups.get(None, []):
list_of_params.append(get_io_desc(param))
for group in groups:
if group is None:
continue
list_of_params.append("{g}:".format(g=group))
for param in groups[group]:
list_of_params.append(" "+get_io_desc(param))
if len(skip_fields) > 0:
list_of_params.append("<advanced inputs hidden; use --verbose to see more>")
if len(list_of_params) == 0:
return '-'
if get_delimiter() is not None:
return ('\n' + get_delimiter()).join(list_of_params)
else:
return ('\n' + ' '*16).join([fill(param,
subsequent_indent=' '*18,
width_adjustment=-18) for param in list_of_params])
def is_job_ref(thing, reftype=dict):
'''
:param thing: something that might be a job-based object reference hash
:param reftype: type that a job-based object reference would be (default is dict)
'''
return isinstance(thing, reftype) and \
((len(thing) == 2 and \
isinstance(thing.get('field'), basestring) and \
isinstance(thing.get('job'), basestring)) or \
(len(thing) == 1 and \
isinstance(thing.get('$dnanexus_link'), reftype) and \
isinstance(thing['$dnanexus_link'].get('field'), basestring) and \
isinstance(thing['$dnanexus_link'].get('job'), basestring)))
def get_job_from_jbor(thing):
'''
:returns: Job ID from a JBOR
Assumes :func:`is_job_ref` evaluates to True
'''
if '$dnanexus_link' in thing:
return thing['$dnanexus_link']['job']
else:
return thing['job']
def get_field_from_jbor(thing):
'''
:returns: Output field name from a JBOR
Assumes :func:`is_job_ref` evaluates to True
'''
if '$dnanexus_link' in thing:
return thing['$dnanexus_link']['field']
else:
return thing['field']
def get_index_from_jbor(thing):
'''
:returns: Array index of the JBOR if applicable; None otherwise
Assumes :func:`is_job_ref` evaluates to True
'''
if '$dnanexus_link' in thing:
return thing['$dnanexus_link'].get('index')
else:
return None
def is_metadata_ref(thing, reftype=dict):
return isinstance(thing, reftype) and \
len(thing) == 1 and \
isinstance(thing.get('$dnanexus_link'), reftype) and \
isinstance(thing['$dnanexus_link'].get('metadata'), basestring)
def jbor_to_str(val):
ans = get_job_from_jbor(val) + ':' + get_field_from_jbor(val)
index = get_index_from_jbor(val)
if index is not None:
ans += "." + str(index)
return ans
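# Example of the job-based object reference (JBOR) shape these helpers accept,
# written with a made-up job ID purely for illustration: both the bare
# {"job", "field"} form and the {"$dnanexus_link": {...}} form satisfy
# is_job_ref above.
_EXAMPLE_JBOR = {"$dnanexus_link": {"job": "job-xxxx", "field": "reads", "index": 0}}
# jbor_to_str(_EXAMPLE_JBOR) would render as "job-xxxx:reads.0".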
def io_val_to_str(val):
if is_job_ref(val):
# Job-based object references
return jbor_to_str(val)
elif isinstance(val, dict) and '$dnanexus_link' in val:
# DNAnexus link
if isinstance(val['$dnanexus_link'], basestring):
# simple link
return val['$dnanexus_link']
elif 'project' in val['$dnanexus_link'] and 'id' in val['$dnanexus_link']:
return val['$dnanexus_link']['project'] + ':' + val['$dnanexus_link']['id']
else:
return json.dumps(val)
elif isinstance(val, list):
if len(val) == 0:
return '[]'
else:
return '[ ' + ', '.join([io_val_to_str(item) for item in val]) + ' ]'
elif isinstance(val, dict):
return '{ ' + ', '.join([key + ': ' + io_val_to_str(value) for key, value in val.items()]) + ' }'
else:
return json.dumps(val)
def job_output_to_str(job_output, prefix='\n', title="Output: ", title_len=None):
if len(job_output) == 0:
return prefix + title + "-"
else:
if title_len is None:
title_len = len(title)
return prefix + title + (prefix+' '*title_len).join([fill(key + ' = ' + io_val_to_str(value),
subsequent_indent=' '*9,
break_long_words=False) for key, value in job_output.items()])
def get_io_field(io_hash, defaults=None, delim='=', highlight_fields=()):
def highlight_value(key, value):
if key in highlight_fields:
return YELLOW() + value + ENDC()
else:
return value
if defaults is None:
defaults = {}
if io_hash is None:
return '-'
if len(io_hash) == 0 and len(defaults) == 0:
return '-'
if get_delimiter() is not None:
return ('\n' + get_delimiter()).join([(key + delim + highlight_value(key, io_val_to_str(value))) for key, value in io_hash.items()] +
[('[' + key + delim + io_val_to_str(value) + ']') for key, value in defaults.items()])
else:
lines = [fill(key + ' ' + delim + ' ' + highlight_value(key, io_val_to_str(value)),
initial_indent=' ' * FIELD_NAME_WIDTH,
subsequent_indent=' ' * (FIELD_NAME_WIDTH + 1),
break_long_words=False)
for key, value in io_hash.items()]
lines.extend([fill('[' + key + ' ' + delim + ' ' + io_val_to_str(value) + ']',
initial_indent=' ' * FIELD_NAME_WIDTH,
subsequent_indent=' ' * (FIELD_NAME_WIDTH + 1),
break_long_words=False)
for key, value in defaults.items()])
return '\n'.join(lines)[FIELD_NAME_WIDTH:]
def get_resolved_jbors(resolved_thing, orig_thing, resolved_jbors):
if resolved_thing == orig_thing:
return
if is_job_ref(orig_thing):
jbor_str = jbor_to_str(orig_thing)
if jbor_str not in resolved_jbors:
try:
from dxpy.api import job_describe
job_output = job_describe(get_job_from_jbor(orig_thing)).get('output')
if job_output is not None:
field_value = job_output.get(get_field_from_jbor(orig_thing))
jbor_index = get_index_from_jbor(orig_thing)
if jbor_index is not None:
if isinstance(field_value, list):
resolved_jbors[jbor_str] = field_value[jbor_index]
else:
resolved_jbors[jbor_str] = field_value
except:
# Just don't report any resolved JBORs if there are
# any problems
pass
elif isinstance(orig_thing, list):
for i in range(len(orig_thing)):
get_resolved_jbors(resolved_thing[i], orig_thing[i], resolved_jbors)
elif isinstance(orig_thing, dict) and '$dnanexus_link' not in orig_thing:
for key in orig_thing:
get_resolved_jbors(resolved_thing[key], orig_thing[key], resolved_jbors)
def render_bundleddepends(thing):
from ..bindings.search import find_one_data_object
from ..exceptions import DXError
bundles = []
for item in thing:
bundle_asset_record = dxpy.DXFile(item["id"]["$dnanexus_link"]).get_properties().get("AssetBundle")
asset = None
if bundle_asset_record:
asset = dxpy.DXRecord(bundle_asset_record)
if asset:
try:
bundles.append(asset.describe().get("name") + " (" + asset.get_id() + ")")
except DXError:
asset = None
if not asset:
bundles.append(item["name"] + " (" + item["id"]["$dnanexus_link"] + ")")
return bundles
def render_execdepends(thing):
rendered = []
for item in thing:
dep = copy.copy(item)
dep.setdefault('package_manager', 'apt')
dep['version'] = ' = '+dep['version'] if 'version' in dep else ''
rendered.append("{package_manager}: {name}{version}".format(**dep))
return rendered
def render_stage(title, stage, as_stage_of=None):
lines_to_print = []
if stage['name'] is not None:
lines_to_print.append((title, "{name} ({id})".format(name=stage['name'], id=stage['id'])))
else:
lines_to_print.append((title, stage['id']))
lines_to_print.append((' Executable', stage['executable'] + \
(" (" + RED() + "inaccessible" + ENDC() + ")" \
if stage.get('accessible') is False else "")))
if 'execution' in stage:
is_cached_result = as_stage_of is not None and 'parentAnalysis' in stage['execution'] and \
stage['execution']['parentAnalysis'] != as_stage_of
execution_id_str = stage['execution']['id']
if is_cached_result:
execution_id_str = "[" + execution_id_str + "]"
if 'state' in stage['execution']:
lines_to_print.append((' Execution', execution_id_str + ' (' + JOB_STATES(stage['execution']['state']) + ')'))
else:
lines_to_print.append((' Execution', execution_id_str))
if is_cached_result:
lines_to_print.append((' Cached from', stage['execution']['parentAnalysis']))
for line in lines_to_print:
print_field(line[0], line[1])
def render_short_timestamp(timestamp):
return str(datetime.datetime.fromtimestamp(timestamp//1000))
def render_timestamp(timestamp):
return datetime.datetime.fromtimestamp(timestamp//1000).ctime()
FIELD_NAME_WIDTH = 22
def print_field(label, value):
if get_delimiter() is not None:
sys.stdout.write(label + get_delimiter() + value + '\n')
else:
sys.stdout.write(
label + " " * (FIELD_NAME_WIDTH-len(label)) + fill(value,
subsequent_indent=' '*FIELD_NAME_WIDTH,
width_adjustment=-FIELD_NAME_WIDTH) +
'\n')
def print_nofill_field(label, value):
sys.stdout.write(label + DELIMITER(" " * (FIELD_NAME_WIDTH - len(label))) + value + '\n')
def print_list_field(label, values):
print_field(label, ('-' if len(values) == 0 else DELIMITER(', ').join(values)))
def print_json_field(label, json_value):
print_field(label, json.dumps(json_value, ensure_ascii=False))
def print_project_desc(desc, verbose=False):
recognized_fields = [
'id', 'class', 'name', 'summary', 'description', 'protected', 'restricted', 'created', 'modified',
'dataUsage', 'sponsoredDataUsage', 'tags', 'level', 'folders', 'objects', 'permissions', 'properties',
'appCaches', 'billTo', 'version', 'createdBy', 'totalSponsoredEgressBytes', 'consumedSponsoredEgressBytes',
        'containsPHI', 'databaseUIViewOnly', 'region', 'storageCost', 'pendingTransfer', 'atSpendingLimit',
# Following are app container-specific
'destroyAt', 'project', 'type', 'app', 'appName'
]
# Basic metadata
print_field("ID", desc["id"])
print_field("Class", desc["class"])
if "name" in desc:
print_field("Name", desc["name"])
if 'summary' in desc:
print_field("Summary", desc["summary"])
if 'description' in desc and (verbose or 'summary' not in desc):
print_field("Description", desc['description'])
if 'version' in desc and verbose:
print_field("Version", str(desc['version']))
# Ownership and permissions
if 'billTo' in desc:
print_field("Billed to", desc['billTo'][5 if desc['billTo'].startswith('user-') else 0:])
if 'pendingTransfer' in desc and (verbose or desc['pendingTransfer'] is not None):
print_json_field('Pending transfer to', desc['pendingTransfer'])
if "level" in desc:
print_field("Access level", desc["level"])
if 'region' in desc:
print_field('Region', desc['region'])
# Project settings
if 'protected' in desc:
print_json_field("Protected", desc["protected"])
if 'restricted' in desc:
print_json_field("Restricted", desc["restricted"])
if 'containsPHI' in desc:
print_json_field('Contains PHI', desc['containsPHI'])
if 'databaseUIViewOnly' in desc and desc['databaseUIViewOnly']:
print_json_field('Database UI View Only', desc['databaseUIViewOnly'])
# Usage
print_field("Created", render_timestamp(desc['created']))
if 'createdBy' in desc:
print_field("Created by", desc['createdBy']['user'][desc['createdBy']['user'].find('-') + 1:])
print_field("Last modified", render_timestamp(desc['modified']))
print_field("Data usage", ('%.2f' % desc["dataUsage"]) + ' GB')
if 'sponsoredDataUsage' in desc:
print_field("Sponsored data", ('%.2f' % desc["sponsoredDataUsage"]) + ' GB')
if 'storageCost' in desc:
print_field("Storage cost", "$%.3f/month" % desc["storageCost"])
if 'totalSponsoredEgressBytes' in desc or 'consumedSponsoredEgressBytes' in desc:
total_egress_str = '%.2f GB' % (desc['totalSponsoredEgressBytes'] / 1073741824.,) \
if 'totalSponsoredEgressBytes' in desc else '??'
consumed_egress_str = '%.2f GB' % (desc['consumedSponsoredEgressBytes'] / 1073741824.,) \
if 'consumedSponsoredEgressBytes' in desc else '??'
print_field('Sponsored egress',
('%s used of %s total' % (consumed_egress_str, total_egress_str)))
if 'atSpendingLimit' in desc:
print_json_field("At spending limit?", desc['atSpendingLimit'])
# Misc metadata
if "objects" in desc:
print_field("# Files", str(desc["objects"]))
if "folders" in desc:
print_list_field("Folders", desc["folders"])
if "permissions" in desc:
print_list_field(
"Permissions",
[key[5 if key.startswith('user-') else 0:] + ':' + value for key, value in desc["permissions"].items()]
)
if 'tags' in desc:
print_list_field("Tags", desc["tags"])
if "properties" in desc:
print_list_field("Properties", [key + '=' + value for key, value in desc["properties"].items()])
if "appCaches" in desc:
print_json_field("App caches", desc["appCaches"])
# Container-specific
if 'type' in desc:
print_field("Container type", desc["type"])
if 'project' in desc:
print_field("Associated project", desc["project"])
if 'destroyAt' in desc:
print_field("To be destroyed", render_timestamp(desc['modified']))
if 'app' in desc:
print_field("Associated App ID", desc["app"])
if 'appName' in desc:
print_field("Associated App", desc["appName"])
for field in desc:
if field not in recognized_fields:
print_json_field(field, desc[field])
def get_advanced_inputs(desc, verbose):
details = desc.get("details")
if not verbose and isinstance(details, dict):
return details.get("advancedInputs", [])
return []
def print_app_desc(desc, verbose=False):
    recognized_fields = [
        'id', 'class', 'name', 'version', 'aliases', 'createdBy', 'created', 'modified', 'deleted',
        'published', 'title', 'subtitle', 'description', 'categories', 'access', 'dxapi', 'inputSpec',
        'outputSpec', 'runSpec', 'resources', 'billTo', 'installed', 'openSource', 'summary', 'applet',
        'installs', 'billing', 'details', 'developerNotes', 'authorizedUsers'
    ]
print_field("ID", desc["id"])
print_field("Class", desc["class"])
if 'billTo' in desc:
print_field("Billed to", desc['billTo'][5 if desc['billTo'].startswith('user-') else 0:])
print_field("Name", desc["name"])
print_field("Version", desc["version"])
print_list_field("Aliases", desc["aliases"])
print_field("Created by", desc["createdBy"][5 if desc['createdBy'].startswith('user-') else 0:])
print_field("Created", render_timestamp(desc['created']))
print_field("Last modified", render_timestamp(desc['modified']))
print_field("Created from", desc["applet"])
print_json_field('Installed', desc['installed'])
print_json_field('Open source', desc['openSource'])
print_json_field('Deleted', desc['deleted'])
if not desc['deleted']:
advanced_inputs = []
details = desc["details"]
if isinstance(details, dict) and "advancedInputs" in details:
if not verbose:
advanced_inputs = details["advancedInputs"]
del details["advancedInputs"]
if 'published' not in desc or desc["published"] < 0:
print_field("Published", "-")
else:
print_field("Published", render_timestamp(desc['published']))
if "title" in desc and desc['title'] is not None:
print_field("Title", desc["title"])
if "subtitle" in desc and desc['subtitle'] is not None:
print_field("Subtitle", desc["subtitle"])
if 'summary' in desc and desc['summary'] is not None:
print_field("Summary", desc['summary'])
print_list_field("Categories", desc["categories"])
if 'details' in desc:
print_json_field("Details", desc["details"])
print_json_field("Access", desc["access"])
print_field("API version", desc["dxapi"])
if 'inputSpec' in desc:
print_nofill_field("Input Spec", get_io_spec(desc["inputSpec"], skip_fields=advanced_inputs))
print_nofill_field("Output Spec", get_io_spec(desc["outputSpec"]))
print_field("Interpreter", desc["runSpec"]["interpreter"])
if "resources" in desc["runSpec"]:
print_json_field("Resources", desc["runSpec"]["resources"])
if "bundledDepends" in desc["runSpec"]:
print_list_field("bundledDepends", render_bundleddepends(desc["runSpec"]["bundledDepends"]))
if "execDepends" in desc["runSpec"]:
print_list_field("execDepends", render_execdepends(desc["runSpec"]["execDepends"]))
if "systemRequirements" in desc['runSpec']:
print_json_field('Sys Requirements', desc['runSpec']['systemRequirements'])
if 'resources' in desc:
print_field("Resources", desc['resources'])
if 'installs' in desc:
print_field('# Installs', str(desc['installs']))
if 'authorizedUsers' in desc:
print_list_field('AuthorizedUsers', desc["authorizedUsers"])
for field in desc:
if field not in recognized_fields:
print_json_field(field, desc[field])
def print_globalworkflow_desc(desc, verbose=False):
recognized_fields = ['id', 'class', 'name', 'version', 'aliases', 'createdBy', 'created',
'modified', 'deleted', 'published', 'title', 'description',
'categories', 'dxapi', 'billTo', 'summary', 'billing', 'developerNotes',
'authorizedUsers', 'regionalOptions']
is_locked_workflow = False
print_field("ID", desc["id"])
print_field("Class", desc["class"])
if 'billTo' in desc:
print_field("Billed to", desc['billTo'][5 if desc['billTo'].startswith('user-') else 0:])
print_field("Name", desc["name"])
print_field("Version", desc["version"])
print_list_field("Aliases", desc["aliases"])
print_field("Created by", desc["createdBy"][5 if desc['createdBy'].startswith('user-') else 0:])
print_field("Created", render_timestamp(desc['created']))
print_field("Last modified", render_timestamp(desc['modified']))
# print_json_field('Open source', desc['openSource'])
print_json_field('Deleted', desc.get('deleted', False))
if not desc.get('deleted', False):
if 'published' not in desc or desc["published"] < 0:
print_field("Published", "-")
else:
print_field("Published", render_timestamp(desc['published']))
if "title" in desc and desc['title'] is not None:
print_field("Title", desc["title"])
if "subtitle" in desc and desc['subtitle'] is not None:
print_field("Subtitle", desc["subtitle"])
if 'summary' in desc and desc['summary'] is not None:
print_field("Summary", desc['summary'])
print_list_field("Categories", desc["categories"])
if 'details' in desc:
print_json_field("Details", desc["details"])
print_field("API version", desc["dxapi"])
# Additionally, print inputs, outputs, stages of the underlying workflow
# from the region of the current workspace
current_project = dxpy.WORKSPACE_ID
if current_project:
region = dxpy.api.project_describe(current_project, input_params={"fields": {"region": True}})["region"]
if region and region in desc['regionalOptions']:
workflow_desc = desc['regionalOptions'][region]['workflowDescribe']
print_field("Workflow region", region)
if 'id' in workflow_desc:
print_field("Workflow ID", workflow_desc['id'])
if workflow_desc.get('inputSpec') is not None and workflow_desc.get('inputs') is None:
print_nofill_field("Input Spec", get_io_spec(workflow_desc['inputSpec'], skip_fields=get_advanced_inputs(workflow_desc, verbose)))
if workflow_desc.get('outputSpec') is not None and workflow_desc.get('outputs') is None:
print_nofill_field("Output Spec", get_io_spec(workflow_desc['outputSpec']))
if workflow_desc.get('inputs') is not None:
is_locked_workflow = True
print_nofill_field("Workflow Inputs", get_io_spec(workflow_desc['inputs']))
if workflow_desc.get('outputs') is not None:
print_nofill_field("Workflow Outputs", get_io_spec(workflow_desc['outputs']))
if 'stages' in workflow_desc:
for i, stage in enumerate(workflow_desc["stages"]):
render_stage("Stage " + str(i), stage)
if 'authorizedUsers' in desc:
print_list_field('AuthorizedUsers', desc["authorizedUsers"])
if is_locked_workflow:
print_locked_workflow_note()
for field in desc:
if field not in recognized_fields:
print_json_field(field, desc[field])
def get_col_str(col_desc):
return col_desc['name'] + DELIMITER(" (") + col_desc['type'] + DELIMITER(")")
def print_data_obj_desc(desc, verbose=False):
    recognized_fields = [
        'id', 'class', 'project', 'folder', 'name', 'properties', 'tags', 'types', 'hidden', 'details',
        'links', 'created', 'modified', 'state', 'title', 'subtitle', 'description', 'inputSpec',
        'outputSpec', 'runSpec', 'summary', 'dxapi', 'access', 'createdBy', 'sponsored', 'developerNotes',
        'stages', 'inputs', 'outputs', 'latestAnalysis', 'editVersion', 'outputFolder', 'initializedFrom',
        'temporary'
    ]
is_locked_workflow = False
print_field("ID", desc["id"])
print_field("Class", desc["class"])
if 'project' in desc:
print_field("Project", desc['project'])
if 'folder' in desc:
print_field("Folder", desc["folder"])
print_field("Name", desc["name"])
if 'state' in desc:
print_field("State", DATA_STATES(desc['state']))
if 'hidden' in desc:
print_field("Visibility", ("hidden" if desc["hidden"] else "visible"))
if 'types' in desc:
print_list_field("Types", desc['types'])
if 'properties' in desc:
print_list_field("Properties", ['='.join([k, v]) for k, v in desc['properties'].items()])
if 'tags' in desc:
print_list_field("Tags", desc['tags'])
if verbose and 'details' in desc:
print_json_field("Details", desc["details"])
if 'links' in desc:
print_list_field("Outgoing links", desc['links'])
print_field("Created", render_timestamp(desc['created']))
if 'createdBy' in desc:
print_field("Created by", desc['createdBy']['user'][5:])
if 'job' in desc["createdBy"]:
print_field(" via the job", desc['createdBy']['job'])
if verbose and 'executable' in desc['createdBy']:
print_field(" running", desc['createdBy']['executable'])
print_field("Last modified", render_timestamp(desc['modified']))
if "editVersion" in desc:
print_field("Edit Version", str(desc['editVersion']))
if "title" in desc:
print_field("Title", desc["title"])
if "subtitle" in desc:
print_field("Subtitle", desc["subtitle"])
if 'summary' in desc:
print_field("Summary", desc['summary'])
if 'description' in desc and verbose:
print_field("Description", desc["description"])
if 'outputFolder' in desc:
print_field("Output Folder", desc["outputFolder"] if desc["outputFolder"] is not None else "-")
if 'access' in desc:
print_json_field("Access", desc["access"])
if 'dxapi' in desc:
print_field("API version", desc["dxapi"])
# In case of a workflow: do not display "Input/Output Specs" that show stages IO
# when the workflow has workflow-level input/output fields defined.
if desc.get('inputSpec') is not None and desc.get('inputs') is None:
print_nofill_field("Input Spec", get_io_spec(desc['inputSpec'], skip_fields=get_advanced_inputs(desc, verbose)))
if desc.get('outputSpec') is not None and desc.get('outputs') is None:
print_nofill_field("Output Spec", get_io_spec(desc['outputSpec']))
if desc.get('inputs') is not None:
is_locked_workflow = True
print_nofill_field("Workflow Inputs", get_io_spec(desc['inputs']))
if desc.get('outputs') is not None:
print_nofill_field("Workflow Outputs", get_io_spec(desc['outputs']))
if 'runSpec' in desc:
print_field("Interpreter", desc["runSpec"]["interpreter"])
if "resources" in desc['runSpec']:
print_json_field("Resources", desc["runSpec"]["resources"])
if "bundledDepends" in desc["runSpec"]:
print_list_field("bundledDepends", render_bundleddepends(desc["runSpec"]["bundledDepends"]))
if "execDepends" in desc["runSpec"]:
print_list_field("execDepends", render_execdepends(desc["runSpec"]["execDepends"]))
if "systemRequirements" in desc['runSpec']:
print_json_field('Sys Requirements', desc['runSpec']['systemRequirements'])
if 'stages' in desc:
for i, stage in enumerate(desc["stages"]):
render_stage("Stage " + str(i), stage)
if 'initializedFrom' in desc:
print_field("initializedFrom", desc["initializedFrom"]["id"])
if 'latestAnalysis' in desc and desc['latestAnalysis'] is not None:
print_field("Last execution", desc["latestAnalysis"]["id"])
print_field(" run at", render_timestamp(desc["latestAnalysis"]["created"]))
print_field(" state", JOB_STATES(desc["latestAnalysis"]["state"]))
for field in desc:
if field in recognized_fields:
continue
else:
if field == "media":
print_field("Media type", desc['media'])
elif field == "size":
if desc["class"] == "file":
sponsored_str = ""
if 'sponsored' in desc and desc['sponsored']:
sponsored_str = DELIMITER(", ") + "sponsored by DNAnexus"
print_field("Size", get_size_str(desc['size']) + sponsored_str)
else:
print_field("Size", str(desc['size']))
elif field == "length":
print_field("Length", str(desc['length']))
elif field == "columns":
if len(desc['columns']) > 0:
coldescs = "Columns" + DELIMITER(" " *(16-len("Columns"))) + get_col_str(desc["columns"][0])
for column in desc["columns"][1:]:
coldescs += '\n' + DELIMITER(" "*16) + get_col_str(column)
print(coldescs)
else:
print_list_field("Columns", desc['columns'])
else: # Unhandled prettifying
print_json_field(field, desc[field])
if is_locked_workflow:
print_locked_workflow_note()
def printable_ssh_host_key(ssh_host_key):
try:
keygen = subprocess.Popen(["ssh-keygen", "-lf", "/dev/stdin"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
if USING_PYTHON2:
(stdout, stderr) = keygen.communicate(ssh_host_key)
else:
(stdout, stderr) = keygen.communicate(ssh_host_key.encode())
except:
return ssh_host_key.strip()
else:
if not USING_PYTHON2:
stdout = stdout.decode()
return stdout.replace(" no comment", "").strip()
def print_execution_desc(desc):
recognized_fields = ['id', 'class', 'project', 'workspace', 'region',
'app', 'applet', 'executable', 'workflow',
'state',
'rootExecution', 'parentAnalysis', 'parentJob', 'originJob', 'analysis', 'stage',
'function', 'runInput', 'originalInput', 'input', 'output', 'folder', 'launchedBy', 'created',
'modified', 'failureReason', 'failureMessage', 'stdout', 'stderr', 'waitingOnChildren',
'dependsOn', 'resources', 'projectCache', 'details', 'tags', 'properties',
'name', 'instanceType', 'systemRequirements', 'executableName', 'failureFrom', 'billTo',
'startedRunning', 'stoppedRunning', 'stateTransitions',
'delayWorkspaceDestruction', 'stages', 'totalPrice', 'isFree', 'invoiceMetadata',
'priority', 'sshHostKey']
print_field("ID", desc["id"])
print_field("Class", desc["class"])
if "name" in desc and desc['name'] is not None:
print_field("Job name", desc['name'])
if "executableName" in desc and desc['executableName'] is not None:
print_field("Executable name", desc['executableName'])
print_field("Project context", desc["project"])
if 'region' in desc:
print_field("Region", desc["region"])
if 'billTo' in desc:
print_field("Billed to", desc['billTo'][5 if desc['billTo'].startswith('user-') else 0:])
if 'workspace' in desc:
print_field("Workspace", desc["workspace"])
if 'projectCache' in desc:
print_field('Cache workspace', desc['projectCache'])
print_field('Resources', desc['resources'])
if "app" in desc:
print_field("App", desc["app"])
elif desc.get("executable", "").startswith("globalworkflow"):
print_field("Workflow", desc["executable"])
elif "applet" in desc:
print_field("Applet", desc["applet"])
elif "workflow" in desc:
print_field("Workflow", desc["workflow"]["id"])
if "instanceType" in desc and desc['instanceType'] is not None:
print_field("Instance Type", desc["instanceType"])
if "priority" in desc:
print_field("Priority", desc["priority"])
print_field("State", JOB_STATES(desc["state"]))
if "rootExecution" in desc:
print_field("Root execution", desc["rootExecution"])
if "originJob" in desc:
if desc["originJob"] is None:
print_field("Origin job", "-")
else:
print_field("Origin job", desc["originJob"])
if desc["parentJob"] is None:
print_field("Parent job", "-")
else:
print_field("Parent job", desc["parentJob"])
if "parentAnalysis" in desc:
if desc["parentAnalysis"] is not None:
print_field("Parent analysis", desc["parentAnalysis"])
if "analysis" in desc and desc["analysis"] is not None:
print_field("Analysis", desc["analysis"])
print_field("Stage", desc["stage"])
if "stages" in desc:
for i, (stage, analysis_stage) in enumerate(zip(desc["workflow"]["stages"], desc["stages"])):
stage['execution'] = analysis_stage['execution']
render_stage("Stage " + str(i), stage, as_stage_of=desc["id"])
if "function" in desc:
print_field("Function", desc["function"])
if 'runInput' in desc:
default_fields = {k: v for k, v in desc["originalInput"].items() if k not in desc["runInput"]}
print_nofill_field("Input", get_io_field(desc["runInput"], defaults=default_fields))
else:
print_nofill_field("Input", get_io_field(desc["originalInput"]))
resolved_jbors = {}
input_with_jbors = desc.get('runInput', desc['originalInput'])
for k in desc["input"]:
if k in input_with_jbors and desc["input"][k] != input_with_jbors[k]:
get_resolved_jbors(desc["input"][k], input_with_jbors[k], resolved_jbors)
if len(resolved_jbors) != 0:
print_nofill_field("Resolved JBORs", get_io_field(resolved_jbors, delim=(GREEN() + '=>' + ENDC())))
print_nofill_field("Output", get_io_field(desc["output"]))
if 'folder' in desc:
print_field('Output folder', desc['folder'])
print_field("Launched by", desc["launchedBy"][5:])
print_field("Created", render_timestamp(desc['created']))
if 'startedRunning' in desc:
if 'stoppedRunning' in desc:
print_field("Started running", render_timestamp(desc['startedRunning']))
else:
print_field("Started running", "{t} (running for {rt})".format(t=render_timestamp(desc['startedRunning']),
rt=datetime.timedelta(seconds=int(time.time())-desc['startedRunning']//1000)))
if 'stoppedRunning' in desc:
print_field("Stopped running", "{t} (Runtime: {rt})".format(
t=render_timestamp(desc['stoppedRunning']),
rt=datetime.timedelta(seconds=(desc['stoppedRunning']-desc['startedRunning'])//1000)))
if desc.get('class') == 'analysis' and 'stateTransitions' in desc and desc['stateTransitions']:
# Display finishing time of the analysis if available
if desc['stateTransitions'][-1]['newState'] in ['done', 'failed', 'terminated']:
print_field("Finished", "{t} (Wall-clock time: {wt})".format(
t=render_timestamp(desc['stateTransitions'][-1]['setAt']),
wt=datetime.timedelta(seconds=(desc['stateTransitions'][-1]['setAt']-desc['created'])//1000)))
print_field("Last modified", render_timestamp(desc['modified']))
if 'waitingOnChildren' in desc:
print_list_field('Pending subjobs', desc['waitingOnChildren'])
if 'dependsOn' in desc:
print_list_field('Depends on', desc['dependsOn'])
if "failureReason" in desc:
print_field("Failure reason", desc["failureReason"])
if "failureMessage" in desc:
print_field("Failure message", desc["failureMessage"])
if "failureFrom" in desc and desc['failureFrom'] is not None and desc['failureFrom']['id'] != desc['id']:
print_field("Failure is from", desc['failureFrom']['id'])
if 'systemRequirements' in desc:
print_json_field("Sys Requirements", desc['systemRequirements'])
if "tags" in desc:
print_list_field("Tags", desc["tags"])
if "properties" in desc:
print_list_field("Properties", [key + '=' + value for key, value in desc["properties"].items()])
if "details" in desc and "clonedFrom" in desc["details"]:
cloned_hash = desc["details"]["clonedFrom"]
if "id" in cloned_hash:
print_field("Re-run of", cloned_hash["id"])
print_field(" named", cloned_hash["name"])
same_executable = cloned_hash["executable"] == desc.get("applet", desc.get("app", ""))
print_field(" using", ("" if same_executable else YELLOW()) + \
cloned_hash["executable"] + \
(" (same)" if same_executable else ENDC()))
same_project = cloned_hash["project"] == desc["project"]
same_folder = cloned_hash["folder"] == desc["folder"] or not same_project
print_field(" output folder", ("" if same_project else YELLOW()) + \
cloned_hash["project"] + \
("" if same_project else ENDC()) + ":" + \
("" if same_folder else YELLOW()) + \
cloned_hash["folder"] + \
(" (same)" if (same_project and same_folder) else "" if same_folder else ENDC()))
different_inputs = []
for item in cloned_hash["runInput"]:
if cloned_hash["runInput"][item] != desc["runInput"][item]:
different_inputs.append(item)
print_nofill_field(" input", get_io_field(cloned_hash["runInput"], highlight_fields=different_inputs))
cloned_sys_reqs = cloned_hash.get("systemRequirements")
if isinstance(cloned_sys_reqs, dict):
if cloned_sys_reqs == desc.get('systemRequirements'):
print_nofill_field(" sys reqs", json.dumps(cloned_sys_reqs) + ' (same)')
else:
print_nofill_field(" sys reqs", YELLOW() + json.dumps(cloned_sys_reqs) + ENDC())
if not desc.get('isFree') and desc.get('totalPrice') is not None:
print_field('Total Price', format_currency(desc['totalPrice'], meta=desc['currency']))
if desc.get('invoiceMetadata'):
print_json_field("Invoice Metadata", desc['invoiceMetadata'])
if desc.get('sshHostKey'):
print_nofill_field("SSH Host Key", printable_ssh_host_key(desc['sshHostKey']))
for field in desc:
if field not in recognized_fields:
print_json_field(field, desc[field])
def locale_from_currency_code(dx_code):
"""
This is a (temporary) hardcoded mapping between currency_list.json in nucleus and standard
locale string useful for further formatting
:param dx_code: An id of nucleus/commons/pricing_models/currency_list.json collection
:return: standardised locale, eg 'en_US'; None when no mapping found
"""
currency_locale_map = {0: 'en_US', 1: 'en_GB'}
return currency_locale_map[dx_code] if dx_code in currency_locale_map else None
def format_currency_from_meta(value, meta):
"""
Formats currency value into properly decorated currency string based on provided currency metadata.
    Please note that this is a very basic solution missing some of the localisation features (such as
    negative symbol position and type).
    A better option is to use the 'locale' module to reflect currency string decorations more accurately.
See 'format_currency'
:param value:
:param meta:
:return:
"""
prefix = '-' if value < 0 else '' # .. TODO: some locales position neg symbol elsewhere, missing meta
prefix += meta['symbol'] if meta['symbolPosition'] == 'left' else ''
suffix = ' %s' % meta["symbol"] if meta['symbolPosition'] == 'right' else ''
# .. TODO: take the group and decimal separators from meta into account (US & UK are the same, so far we're safe)
formatted_value = '{:,.2f}'.format(abs(value))
return prefix + formatted_value + suffix
def format_currency(value, meta, currency_locale=None):
"""
Formats currency value into properly decorated currency string based on either locale (preferred)
or if that is not available then currency metadata. Until locale is provided from the server
a crude mapping between `currency.dxCode` and a locale string is used instead (eg 0: 'en_US')
    :param value: amount
    :param meta: server metadata (`currency`)
    :param currency_locale: optional locale string (e.g. 'en_US'); if None, it is derived from meta['dxCode']
    :return: formatted currency string
"""
try:
if currency_locale is None:
currency_locale = locale_from_currency_code(meta['dxCode'])
if currency_locale is None:
return format_currency_from_meta(value, meta)
else:
locale.setlocale(locale.LC_ALL, currency_locale)
return locale.currency(value, grouping=True)
except locale.Error:
# .. locale is probably not available -> fallback to format manually
return format_currency_from_meta(value, meta)
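# A minimal usage sketch for the two currency helpers above. The metadata dict
# mirrors the fields they read ('dxCode', 'symbol', 'symbolPosition'); the
# concrete values are illustrative rather than taken from the real currency list.
def _example_format_currency():
    usd_meta = {'dxCode': 0, 'symbol': '$', 'symbolPosition': 'left'}
    # format_currency() prefers locale-based formatting ('en_US' for dxCode 0)
    # and falls back to format_currency_from_meta() if that locale is missing.
    formatted = format_currency(1234.5, usd_meta)            # e.g. '$1,234.50'
    negative = format_currency_from_meta(-99.9, usd_meta)    # '-$99.90'
    return formatted, negative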
def print_user_desc(desc):
print_field("ID", desc["id"])
print_field("Name", desc["first"] + " " + ((desc["middle"] + " ") if desc["middle"] != '' else '') + desc["last"])
if "email" in desc:
print_field("Email", desc["email"])
bill_to_label = "Default bill to"
if "billTo" in desc:
print_field(bill_to_label, desc["billTo"])
if "appsInstalled" in desc:
print_list_field("Apps installed", desc["appsInstalled"])
def print_generic_desc(desc):
for field in desc:
print_json_field(field, desc[field])
def print_desc(desc, verbose=False):
'''
:param desc: The describe hash of a DNAnexus entity
:type desc: dict
Depending on the class of the entity, this method will print a
formatted and human-readable string containing the data in *desc*.
'''
if desc['class'] in ['project', 'workspace', 'container']:
print_project_desc(desc, verbose=verbose)
elif desc['class'] == 'app':
print_app_desc(desc, verbose=verbose)
elif desc['class'] == 'globalworkflow':
print_globalworkflow_desc(desc, verbose=verbose)
elif desc['class'] in ['job', 'analysis']:
print_execution_desc(desc)
elif desc['class'] == 'user':
print_user_desc(desc)
elif desc['class'] in ['org', 'team']:
print_generic_desc(desc)
else:
print_data_obj_desc(desc, verbose=verbose)
def get_ls_desc(desc, print_id=False):
addendum = ' : ' + desc['id'] if print_id is True else ''
if desc['class'] in ['applet', 'workflow']:
return BOLD() + GREEN() + desc['name'] + ENDC() + addendum
else:
return desc['name'] + addendum
def print_ls_desc(desc, **kwargs):
print(get_ls_desc(desc, **kwargs))
def get_ls_l_header():
return (BOLD() +
'State' + DELIMITER(' ') +
'Last modified' + DELIMITER(' ') +
'Size' + DELIMITER(' ') +
'Name' + DELIMITER(' (') +
'ID' + DELIMITER(')') +
ENDC())
def print_ls_l_header():
print(get_ls_l_header())
def get_ls_l_desc_fields():
return {
'id': True,
'class': True,
'folder': True,
'length': True,
'modified': True,
'name': True,
'project': True,
'size': True,
'state': True
}
def get_ls_l_desc(desc, include_folder=False, include_project=False):
"""
desc must have at least all the fields given by get_ls_l_desc_fields.
"""
# If you make this method consume an additional field, you must add it to
# get_ls_l_desc_fields above.
if 'state' in desc:
state_len = len(desc['state'])
if desc['state'] != 'closed':
state_str = YELLOW() + desc['state'] + ENDC()
else:
state_str = GREEN() + desc['state'] + ENDC()
else:
state_str = ''
state_len = 0
name_str = ''
if include_folder:
name_str += desc['folder'] + ('/' if desc['folder'] != '/' else '')
name_str += desc['name']
if desc['class'] in ['applet', 'workflow']:
name_str = BOLD() + GREEN() + name_str + ENDC()
size_str = ''
if 'size' in desc and desc['class'] == 'file':
size_str = get_size_str(desc['size'])
elif 'length' in desc:
size_str = str(desc['length']) + ' rows'
size_padding = ' ' * max(0, 9 - len(size_str))
return (state_str +
DELIMITER(' '*(8 - state_len)) + render_short_timestamp(desc['modified']) +
DELIMITER(' ') + size_str +
DELIMITER(size_padding + ' ') + name_str +
DELIMITER(' (') + ((desc['project'] + DELIMITER(':')) if include_project else '') + desc['id'] +
DELIMITER(')'))
def print_ls_l_desc(desc, **kwargs):
print(get_ls_l_desc(desc, **kwargs))
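# A minimal sketch of rendering one "ls -l"-style row with the helpers above.
# The describe hash is hand-built from illustrative values (IDs, name, size);
# real callers would obtain it from a describe API call requesting at least the
# fields returned by get_ls_l_desc_fields().
def _example_ls_l_row():
    desc = {
        'id': 'file-xxxx',
        'class': 'file',
        'project': 'project-xxxx',
        'folder': '/reads',
        'name': 'sample.fastq.gz',
        'state': 'closed',
        'modified': 1500000000000,  # milliseconds since epoch
        'size': 123456789,
    }
    print_ls_l_header()
    print_ls_l_desc(desc, include_folder=True)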
def get_find_executions_string(desc, has_children, single_result=False, show_outputs=True,
is_cached_result=False):
'''
:param desc: hash of execution's describe output
:param has_children: whether the execution has children to be printed
:param single_result: whether the execution is displayed as a single result or as part of an execution tree
:param is_cached_result: whether the execution should be formatted as a cached result
'''
is_not_subjob = desc['parentJob'] is None or desc['class'] == 'analysis' or single_result
result = ("* " if is_not_subjob and get_delimiter() is None else "")
canonical_execution_name = desc['executableName']
if desc['class'] == 'job':
canonical_execution_name += ":" + desc['function']
execution_name = desc.get('name', '<no name>')
# Format the name of the execution
if is_cached_result:
result += BOLD() + "[" + ENDC()
result += BOLD() + BLUE()
if desc['class'] == 'analysis':
result += UNDERLINE()
result += execution_name + ENDC()
if execution_name != canonical_execution_name and execution_name+":main" != canonical_execution_name:
result += ' (' + canonical_execution_name + ')'
if is_cached_result:
result += BOLD() + "]" + ENDC()
# Format state
result += DELIMITER(' (') + JOB_STATES(desc['state']) + DELIMITER(') ') + desc['id']
# Add unicode pipe to child if necessary
result += DELIMITER('\n' + (u'│ ' if is_not_subjob and has_children else (" " if is_not_subjob else "")))
result += desc['launchedBy'][5:] + DELIMITER(' ')
result += render_short_timestamp(desc['created'])
cached_and_runtime_strs = []
if is_cached_result:
cached_and_runtime_strs.append(YELLOW() + "cached" + ENDC())
if desc['class'] == 'job':
# Only print runtime if it ever started running
if desc.get('startedRunning'):
if desc['state'] in ['done', 'failed', 'terminated', 'waiting_on_output']:
runtime = datetime.timedelta(seconds=int(desc['stoppedRunning']-desc['startedRunning'])//1000)
cached_and_runtime_strs.append("runtime " + str(runtime))
elif desc['state'] == 'running':
seconds_running = max(int(time.time()-desc['startedRunning']//1000), 0)
msg = "running for {rt}".format(rt=datetime.timedelta(seconds=seconds_running))
cached_and_runtime_strs.append(msg)
if cached_and_runtime_strs:
result += " (" + ", ".join(cached_and_runtime_strs) + ")"
if show_outputs:
prefix = DELIMITER('\n' + (u'│ ' if is_not_subjob and has_children else (" " if is_not_subjob else "")))
if desc.get("output") != None:
result += job_output_to_str(desc['output'], prefix=prefix)
elif desc['state'] == 'failed' and 'failureReason' in desc:
result += prefix + BOLD() + desc['failureReason'] + ENDC() + ": " + fill(desc.get('failureMessage', ''),
subsequent_indent=prefix.lstrip('\n'))
return result
def print_locked_workflow_note():
print_field('Note',
'This workflow has an explicit input specification (i.e. it is locked), and as such stage inputs cannot be modified at run-time.')
| dnanexus/dx-toolkit | src/python/dxpy/utils/describe.py | Python | apache-2.0 | 52416 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ByteNet tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import bytenet
import tensorflow as tf
class ByteNetTest(tf.test.TestCase):
def testByteNet(self):
vocab_size = 9
x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
hparams = bytenet.bytenet_base()
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
with self.test_session() as session:
features = {
"inputs": tf.constant(x, dtype=tf.int32),
"targets": tf.constant(y, dtype=tf.int32),
}
model = bytenet.ByteNet(
hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
logits, _ = model(features)
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (3, 50, 1, 1, vocab_size))
if __name__ == "__main__":
tf.test.main()
| vthorsteinsson/tensor2tensor | tensor2tensor/models/bytenet_test.py | Python | apache-2.0 | 1719 |
# -*- coding: utf-8 -*-
"""
Linguistic and other taggers.
Tagging each token in a sentence with supplementary information,
such as its part-of-speech (POS) tag, and named entity (NE) tag.
"""
__all__ = [
"PerceptronTagger",
"pos_tag",
"pos_tag_sents",
"tag_provinces",
"chunk_parse",
"NER",
]
from pythainlp.tag.locations import tag_provinces
from pythainlp.tag.pos_tag import pos_tag, pos_tag_sents
from pythainlp.tag._tag_perceptron import PerceptronTagger
from pythainlp.tag.chunk import chunk_parse
from pythainlp.tag.named_entity import NER
| PyThaiNLP/pythainlp | pythainlp/tag/__init__.py | Python | apache-2.0 | 573 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import re
import urllib
import urllib2
from oslo.config import cfg
import six
from kwstandby.openstack.common import fileutils
from kwstandby.openstack.common.gettextutils import _
from kwstandby.openstack.common import jsonutils
from kwstandby.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file containing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule enforced when requested rule is not found')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
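# A short sketch of building an in-memory Rules store from JSON text shaped
# like a policy.json file; the rule names are illustrative only.
def _example_load_rules():
    data = '''{
        "default": "rule:admin_required",
        "admin_required": "role:admin or is_admin:1",
        "always_deny": "!"
    }'''
    rules = Rules.load_json(data, default_rule='default')
    # Unknown rule names fall back to the "default" entry via __missing__.
    return rules['some_unregistered_rule']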
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
"""
def __init__(self, policy_file=None, rules=None, default_rule=None):
self.rules = Rules(rules)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
if overwrite:
self.rules = Rules(rules)
else:
            self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
        :param force_reload: Whether to reload the rules from the policy file even if it has not been modified.
"""
if not self.policy_path:
self.policy_path = self._get_policy_path()
reloaded, data = fileutils.read_cached_file(self.policy_path,
force_reload=force_reload)
if reloaded:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug(_("Rules successfully reloaded"))
def _get_policy_path(self):
"""Locate the policy json data file.
:param policy_file: Custom policy file to locate.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file couldn't
be located.
"""
policy_file = CONF.find_file(self.policy_file)
if policy_file:
return policy_file
raise cfg.ConfigFilesNotFoundError(path=CONF.policy_file)
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# NOTE(flaper87): Not logging target or creds to avoid
# potential security issues.
LOG.debug(_("Rule %s will be now enforced") % rule)
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug(_("Rule [%s] doesn't exist") % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
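# A minimal sketch of the intended Enforcer workflow. It assumes that
# CONF.policy_file resolves to an existing policy.json, since enforce()
# always refreshes rules from disk via load_rules(); the rule name, target
# and creds below are purely illustrative.
def _example_enforce():
    enforcer = Enforcer()
    target = {'project_id': 'p-123'}
    creds = {'project_id': 'p-123', 'roles': ['projectadmin']}
    # Returns a truthy value when the credentials satisfy the named rule and
    # False otherwise; with do_raise=True it would raise PolicyNotAuthorized.
    return enforcer.enforce('example:create_thing', target, creds)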
class BaseCheck(object):
"""Abstract base class for Check classes."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
    def __call__(self, target, cred, enforcer):
        """Triggers if instance of the class is called.
        Performs the check. Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
        """
        pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
    def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
    def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
    def __call__(self, target, cred, enforcer):
        """Check the policy.
        Returns the logical inverse of the wrapped check.
        """
        return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
    def __call__(self, target, cred, enforcer):
        """Check the policy.
        Requires that all rules accept in order to return True.
        """
        for rule in self.rules:
            if not rule(target, cred, enforcer):
                return False
        return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
    def __call__(self, target, cred, enforcer):
        """Check the policy.
        Requires that at least one rule accept in order to return True.
        """
        for rule in self.rules:
            if rule(target, cred, enforcer):
                return True
        return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
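# A quick sketch of the token stream the generator above yields for a rule
# written in the policy language; the expected pairs in the comments are
# inferred from the code, and the rule string itself is illustrative.
def _example_tokenize():
    rule = "role:admin or (project_id:%(project_id)s and not role:dunce)"
    # Yields, in order: ('check', RoleCheck), ('or', 'or'), ('(', '('),
    # ('check', GenericCheck), ('and', 'and'), ('not', 'not'),
    # ('check', RoleCheck), (')', ')').
    return list(_parse_tokenize(rule))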
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %(rule)r") % locals())
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
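# A small sketch tying the module docstring's two rule representations to
# parse_rule(). The enforcer argument is only consulted by checks such as
# rule: and http:, so None is passed here; the target and creds values are
# illustrative.
def _example_parse_rule():
    text_rule = parse_rule(
        "role:admin or (project_id:%(project_id)s and role:projectadmin)")
    list_rule = parse_rule(
        [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]])
    target = {'project_id': 'p-123'}
    creds = {'project_id': 'p-123', 'roles': ['projectadmin']}
    # Both trees accept the same credentials.
    return text_rule(target, creds, None), list_rule(target, creds, None)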
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
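# A brief sketch of registering a project-specific check kind, following the
# decorator usage described in register()'s docstring. The 'flag:' kind and
# the 'is_admin' credential key are illustrative, not part of this module.
def _example_register_flag_check():
    @register('flag')
    class FlagCheck(Check):
        def __call__(self, target, creds, enforcer):
            """Accept when creds carries a truthy value under the named key,
            e.g. for the rule string "flag:is_admin".
            """
            return bool(creds.get(self.match, False))
    return FlagCheck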
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib.urlencode(data)
f = urllib2.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False
| frossigneux/kwstandby | kwstandby/openstack/common/policy.py | Python | apache-2.0 | 25233 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cellar Release Notes'
copyright = u'2016, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GlanceReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
u'Glance Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
[u'Glance Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
u'Glance Developers', 'GlanceReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| internap/arsenal | releasenotes/source/conf.py | Python | apache-2.0 | 8,940 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import contextlib
import datetime
import functools
import os
import re
import mox
from nova.compute import aggregate_states
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import importutils
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests import fake_utils
from nova.tests.glance import stubs as glance_stubs
from nova.tests.xenapi import stubs
from nova.virt.xenapi import connection as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
    vm_utils.vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached_here(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_stream_disk(*args, **kwargs):
pass
def fake_is_vdi_pv(*args, **kwargs):
return should_return
orig_vdi_attached_here = vm_utils.vdi_attached_here
orig_stream_disk = vm_utils._stream_disk
orig_is_vdi_pv = vm_utils._is_vdi_pv
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
vm_utils._stream_disk = fake_stream_disk
vm_utils._is_vdi_pv = fake_is_vdi_pv
return function(self, *args, **kwargs)
finally:
vm_utils._is_vdi_pv = orig_is_vdi_pv
vm_utils._stream_disk = orig_stream_disk
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
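# Illustrative sketch (not part of the original tests): the decorator above
# is applied directly to a test method, e.g.
#
#   @stub_vm_utils_with_vdi_attached_here
#   def test_something_touching_a_vdi(self):
#       ...  # inside, vm_utils.vdi_attached_here yields 'fakedev',
#            # _stream_disk is a no-op and _is_vdi_pv returns should_return
#
# (test_spawn_raw_glance below uses it for real); the method name here is
# hypothetical.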
class XenAPIVolumeTestCase(test.TestCase):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.reset()
self.instance_values = {'id': 1,
'project_id': self.user_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
@staticmethod
def _make_info():
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260,fake',
'target_lun': None,
'auth_method': 'CHAP',
                'auth_username': 'fake',
                'auth_password': 'fake',
}
}
def test_mountpoint_to_number(self):
cases = {
'sda': 0,
'sdp': 15,
'hda': 0,
'hdp': 15,
'vda': 0,
'xvda': 0,
'0': 0,
'10': 10,
'vdq': -1,
'sdq': -1,
'hdq': -1,
'xvdq': -1,
}
for (input, expected) in cases.iteritems():
func = volume_utils.VolumeHelper.mountpoint_to_number
actual = func(input)
self.assertEqual(actual, expected,
'%s yielded %s, not %s' % (input, actual, expected))
def test_parse_volume_info_raise_exception(self):
"""This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
self._make_info(),
'dev/sd'
)
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(self._make_info(),
instance.name, '/dev/sdc')
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
"""This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{'driver_volume_type': 'nonexist'},
instance.name,
'/dev/sdc')
class XenAPIVMTestCase(test.TestCase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.network = importutils.import_object(FLAGS.network_manager)
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
def test_init_host(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
vm = vm_utils.get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
vbd0 = xenapi_fake.create_vbd(vm, vdi0)
# Instance VDI
vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
other_config={'nova_instance_uuid': 'aaaa'})
vbd1 = xenapi_fake.create_vbd(vm, vdi1)
# Only looks like instance VDI
vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
vbd2 = xenapi_fake.create_vbd(vm, vdi2)
self.conn.init_host(None)
self.assertEquals(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
def test_get_rrd_server(self):
self.flags(xenapi_connection_url='myscheme://myaddress/')
server_info = vm_utils.get_rrd_server()
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
with open('xenapi/vm_rrd.xml') as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, 'get_rrd', fake_get_rrd)
fake_diagnostics = {
'vbd_xvdb_write': '0.0',
'memory_target': '10961792000.0000',
'memory_internal_free': '3612860.6020',
'memory': '10961792000.0000',
'vbd_xvda_write': '0.0',
'cpu0': '0.0110',
'vif_0_tx': '752.4007',
'vbd_xvda_read': '0.0',
'vif_0_rx': '4837.8805'
}
instance = self._create_instance()
expected = self.conn.get_diagnostics(instance)
self.assertDictMatch(fake_diagnostics, expected)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
        # Stubbing out firewall driver, as the previously-set stubs alter
        # xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
name = "MySnapshot"
self.assertRaises(exception.NovaException, self.conn.snapshot,
self.context, instance, name)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
        # Stubbing out firewall driver, as the previously-set stubs alter
        # xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
name = "MySnapshot"
template_vm_ref = self.conn.snapshot(self.context, instance, name)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, [instance.name])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, [instance.name])
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
def create_vm_record(self, conn, os_type, name):
instances = conn.list_instances()
self.assertEquals(instances, [name])
# Get Nova record for VM
vm_info = conn.get_info({'name': name})
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, check_injection=False):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
self.assertEquals(self.vm_info['max_mem'], mem_kib)
self.assertEquals(self.vm_info['mem'], mem_kib)
self.assertEquals(self.vm['memory_static_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEquals(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
self.assertEquals(xenstore_data['vm-data/hostname'], 'test')
key = 'vm-data/networking/DEADBEEF0000'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEquals(tcpip_data,
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], '')
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEquals(self.vm['PV_kernel'], '')
self.assertNotEquals(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
session = xenapi_conn.XenAPISession(url, username, password)
return session.call_xenapi('VDI.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if not vdi_ref in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if vdi_rec['other_config']['image-id'] is None:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
check_injection=False,
create_record=True, empty_dns=False):
if create_record:
instance_values = {'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'root_gb': 20,
'instance_type_id': instance_type_id,
'os_type': os_type,
'hostname': hostname,
'architecture': architecture}
instance = db.instance_create(self.context, instance_values)
else:
instance = db.instance_get(self.context, instance_id)
network_info = [({'bridge': 'fa0', 'id': 0,
'injected': True,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::1/120',
},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
if empty_dns:
network_info[0][1]['dns'] = []
# admin_pass isn't part of the DB model, but it does get set as
# an attribute for spawn to use
instance.admin_pass = 'herp'
image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
'disk_format': 'vhd'}
self.conn.spawn(self.context, instance, image_meta, network_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
self.assertTrue(instance.architecture)
def test_spawn_empty_dns(self):
"""Test spawning with an empty dns list"""
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
stubs.stubout_fetch_image_glance_disk(self.stubs, raise_failure=True)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
It verifies that VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self):
# Change the default host_call_plugin to one that'll return
# a swap disk
orig_func = stubs.FakeSessionForVMTests.host_call_plugin
_host_call_plugin = stubs.FakeSessionForVMTests.host_call_plugin_swap
stubs.FakeSessionForVMTests.host_call_plugin = _host_call_plugin
        # Stubbing out firewall driver, as the assignment above sets a
        # particular stub for async plugin calls
stubs.stubout_firewall_driver(self.stubs, self.conn)
try:
# We'll steal the above glance linux test
self.test_spawn_vhd_glance_linux()
finally:
# Make sure to put this back
stubs.FakeSessionForVMTests.host_call_plugin = orig_func
# We should have 2 VBDs.
self.assertEqual(len(self.vm['VBDs']), 2)
# Now test that we have 1.
self.tearDown()
self.setUp()
self.test_spawn_vhd_glance_linux()
self.assertEqual(len(self.vm['VBDs']), 1)
def test_spawn_vhd_glance_windows(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
stubs.stubout_fetch_image_glance_disk(self.stubs)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
input = kwargs.get('process_input', None)
self.assertNotEqual(input, None)
config = [line.strip() for line in input.split("\n")]
# Find the start of eth0 configuration and check it
index = config.index('auto eth0')
self.assertEquals(config[index + 1:index + 8], [
'iface eth0 inet static',
'address 192.168.0.100',
'netmask 255.255.255.0',
'broadcast 192.168.0.255',
'gateway 192.168.0.1',
'dns-nameservers 192.168.0.1',
''])
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
])
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug(_('Creating files in %s to simulate guest agent'),
self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug(_('Removing simulated guest agent files in %s'),
self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn(1, 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_vlanmanager(self):
self.flags(image_service='nova.image.glance.GlanceImageService',
network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
instance = self._create_instance(2, False)
networks = self.network.db.network_get_all(ctxt)
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=2,
instance_uuid="00000000-0000-0000-0000-000000000000",
host=FLAGS.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
instance_id=2,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 1024))
def test_rescue(self):
instance = self._create_instance()
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
vm = vm_utils.VMHelper.lookup(session, instance.name)
vbd = xenapi_fake.create_vbd(vm, None)
conn = xenapi_conn.get_connection(False)
image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
'disk_format': 'vhd'}
conn.rescue(self.context, instance, [], image_meta)
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
rescue_vm = xenapi_fake.create_vm(instance.name + '-rescue', 'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock():
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, instance):
self.finish_revert_migration_called = True
conn = xenapi_conn.get_connection(False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
instance_values = {
'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, instance_values)
network_info = [({'bridge': 'fa0', 'id': 0,
'injected': False,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::1/120',
},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
'disk_format': 'vhd'}
if spawn:
instance.admin_pass = 'herp'
self.conn.spawn(self.context, instance, image_meta, network_info)
return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = vmops.SimpleDH()
self.bob = vmops.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEquals(alice_shared, bob_shared)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEquals(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
class XenAPIMigrateInstance(test.TestCase):
"""Unit test for verifying migration-related actions."""
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.reset()
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'root_gb': 5,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
def test_resize_xenserver_6(self):
instance = db.instance_create(self.context, self.instance_values)
called = {'resize': False}
def fake_vdi_resize(*args, **kwargs):
called['resize'] = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(6, 0, 0))
conn = xenapi_conn.get_connection(False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
conn._vmops._resize_instance(instance, vdi_uuid)
self.assertEqual(called['resize'], True)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', instance_type, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
conn = xenapi_conn.get_connection(False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', instance_type, None)
def test_revert_migrate(self):
instance = db.instance_create(self.context, self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy=base_uuid, cow=cow_uuid),
network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
conn.finish_revert_migration(instance, network_info)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_finish_migrate(self):
instance = db.instance_create(self.context, self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
def test_finish_migrate_no_local_storage(self):
tiny_type = instance_types.get_instance_type_by_name('m1.tiny')
tiny_type_id = tiny_type['id']
self.instance_values.update({'instance_type_id': tiny_type_id,
'root_gb': 0})
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
# Resize instance would be determined by the compute call
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
"""Can convert from type id to type string."""
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def test_from_string(self):
"""Can convert from string to type id."""
self.assertEquals(
vm_utils.ImageType.from_string(vm_utils.ImageType.KERNEL_STR),
vm_utils.ImageType.KERNEL)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
"""Unit tests for code that detects the ImageType."""
def setUp(self):
super(XenAPIDetermineDiskImageTestCase, self).setUp()
glance_stubs.stubout_glance_client(self.stubs)
class FakeInstance(object):
pass
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, image_meta, expected_disk_type):
actual = vm_utils.VMHelper.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
image_meta = {'id': 'a', 'disk_format': 'ami'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
def test_raw(self):
image_meta = {'id': 'a', 'disk_format': 'raw'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
def test_vhd(self):
image_meta = {'id': 'a', 'disk_format': 'vhd'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
"""Test that cmp_version compares a as less than b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
"""Test that cmp_version compares a as greater than b"""
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
"""Test that cmp_version compares a as equal to b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
"""Test that cmp_version compares non-lexically"""
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
"""Test that cmp_version compares by length as last resort"""
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
class XenAPIHostTestCase(test.TestCase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers."""
def setUp(self):
super(XenAPIHostTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
xenapi_fake.create_local_srs()
self.conn = xenapi_conn.get_connection(False)
def test_host_state(self):
stats = self.conn.get_host_stats()
self.assertEquals(stats['disk_total'], 10000)
self.assertEquals(stats['disk_used'], 20000)
self.assertEquals(stats['host_memory_total'], 10)
self.assertEquals(stats['host_memory_overhead'], 20)
self.assertEquals(stats['host_memory_free'], 30)
self.assertEquals(stats['host_memory_free_computed'], 40)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
if not expected:
expected = action
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'host', 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode,
True, 'on_maintenance')
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode,
False, 'off_maintenance')
def test_set_enable_host_enable(self):
self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
def test_set_enable_host_disable(self):
self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
class XenAPIAutoDiskConfigTestCase(test.TestCase):
def setUp(self):
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
        @classmethod
        def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True):
            pass
self.stubs.Set(vm_utils.VMHelper,
"create_vbd",
fake_create_vbd)
def assertIsPartitionCalled(self, called):
marker = {"partition_called": False}
def fake_resize_part_and_fs(dev, start, old, new):
marker["partition_called"] = True
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
instance = db.instance_create(self.context, self.instance_values)
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = "blah"
first_vdi_ref = "blah"
vdis = ["blah"]
self.conn._vmops._attach_disks(
instance, disk_image_type, vm_ref, first_vdi_ref, vdis)
self.assertEqual(marker["partition_called"], called)
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
"""Should not partition unless fail safes pass"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(True)
class XenAPIGenerateLocal(test.TestCase):
"""Test generating of local disks, like swap and ephemeral"""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
xenapi_generate_swap=True,
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
        @classmethod
        def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True):
            pass
self.stubs.Set(vm_utils.VMHelper,
"create_vbd",
fake_create_vbd)
def assertCalled(self, instance):
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = "blah"
first_vdi_ref = "blah"
vdis = ["blah"]
self.called = False
self.conn._vmops._attach_disks(instance, disk_image_type,
vm_ref, first_vdi_ref, vdis)
self.assertTrue(self.called)
def test_generate_swap(self):
"""Test swap disk generation."""
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['id'],
{'instance_type_id': 5})
@classmethod
def fake_generate_swap(cls, *args, **kwargs):
self.called = True
self.stubs.Set(vm_utils.VMHelper, 'generate_swap',
fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
"""Test ephemeral disk generation."""
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['id'],
{'instance_type_id': 4})
@classmethod
def fake_generate_ephemeral(cls, *args):
self.called = True
self.stubs.Set(vm_utils.VMHelper, 'generate_ephemeral',
fake_generate_ephemeral)
self.assertCalled(instance)
class XenAPIBWUsageTestCase(test.TestCase):
def setUp(self):
super(XenAPIBWUsageTestCase, self).setUp()
self.stubs.Set(vm_utils.VMHelper, "compile_metrics",
XenAPIBWUsageTestCase._fake_compile_metrics)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
@classmethod
def _fake_compile_metrics(cls, start_time, stop_time=None):
raise exception.CouldNotFetchMetrics()
def test_get_all_bw_usage_in_failure_case(self):
"""Test that get_all_bw_usage returns an empty list when metrics
compilation failed. c.f. bug #910045.
"""
class testinstance(object):
def __init__(self):
self.name = "instance-0001"
self.uuid = "1-2-3-4-5"
result = self.conn.get_all_bw_usage([testinstance()],
datetime.datetime.utcnow())
self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.test_libvirt.IPTablesFirewallDriverTestCase share a lot of code.
# Consider abstracting common code in a base class for firewall driver testing.
class XenAPIDom0IptablesFirewallTestCase(test.TestCase):
_in_nat_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
]
_in_filter_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(FLAGS.network_manager)
self.conn = xenapi_conn.get_connection(False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_filter_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('-A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self.stubs,
1, spectacular=True)
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
lambda *a, **kw: network_model)
network_info = compute_utils.legacy_network_info(network_model)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('-A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
_get_instance_nw_info = fake_network.fake_get_instance_nw_info
network_info = _get_instance_nw_info(self.stubs,
networks_count,
ipv4_addr_per_network)
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
self.assertEquals(ipv4_network_rules,
ipv4_rules_per_addr * ipv4_addr_per_network * networks_count)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instances[instance_ref['id']] = instance_ref
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
        # validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('-A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly."
"The rule for UDP acceptance is missing")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: as in libvirt tests
# peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(test.TestCase):
"""Unit tests for testing we find the right SR."""
def setUp(self):
super(XenAPISRSelectionTestCase, self).setUp()
xenapi_fake.reset()
def test_safe_find_sr_raise_exception(self):
"""Ensure StorageRepositoryNotFound is raise when wrong filter."""
self.flags(sr_matching_filter='yadayadayada')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = vm_utils.VMHelper
helper.XenAPI = session.get_imported_xenapi()
self.assertRaises(exception.StorageRepositoryNotFound,
helper.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
"""Ensure the default local-storage is found."""
self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = vm_utils.VMHelper
helper.XenAPI = session.get_imported_xenapi()
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(
name_label='Fake Storage',
type='lvm',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
host_ref=host_ref)
expected = helper.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
"""Ensure the SR is found when using a different filter."""
self.flags(sr_matching_filter='other-config:my_fake_sr=true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = vm_utils.VMHelper
helper.XenAPI = session.get_imported_xenapi()
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
other_config={'my_fake_sr': 'true'},
host_ref=host_ref)
expected = helper.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
"""Ensure the default SR is found regardless of other-config."""
self.flags(sr_matching_filter='default-sr:true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = vm_utils.VMHelper
pool_ref = xenapi_fake.create_pool('')
helper.XenAPI = session.get_imported_xenapi()
expected = helper.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
expected)
class XenAPIAggregateTestCase(test.TestCase):
"""Unit tests for aggregate operations."""
def setUp(self):
super(XenAPIAggregateTestCase, self).setUp()
self.flags(xenapi_connection_url='http://test_url',
xenapi_connection_username='test_user',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
xenapi_fake.reset()
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.get_connection(False)
self.fake_metadata = {'main_compute': 'host',
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
def test_add_to_aggregate_called(self):
def fake_add_to_aggregate(context, aggregate, host):
fake_add_to_aggregate.called = True
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
fake_add_to_aggregate)
self.conn.add_to_aggregate(None, None, None)
self.assertTrue(fake_add_to_aggregate.called)
def test_add_to_aggregate_for_first_host_sets_metadata(self):
def fake_init_pool(id, name):
fake_init_pool.called = True
self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate.id)
self.assertTrue(fake_init_pool.called)
self.assertDictMatch(self.fake_metadata, result.metadetails)
self.assertEqual(aggregate_states.ACTIVE, result.operational_state)
def test_join_subordinate(self):
"""Ensure join_subordinate gets called when the request gets to main."""
def fake_join_subordinate(id, compute_uuid, host, url, user, password):
fake_join_subordinate.called = True
self.stubs.Set(self.conn._pool, "_join_subordinate", fake_join_subordinate)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
compute_uuid='fake_uuid',
url='fake_url',
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid')
self.assertTrue(fake_join_subordinate.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
fake_pool_set_name_label.called = True
self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
fake_pool_set_name_label)
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
values = {"name": 'fake_aggregate',
"availability_zone": 'fake_zone'}
result = db.aggregate_create(self.context, values)
db.aggregate_host_add(self.context, result.id, "host")
aggregate = db.aggregate_get(self.context, result.id)
self.assertEqual(["host"], aggregate.hosts)
self.assertEqual({}, aggregate.metadetails)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
def test_remove_from_aggregate_called(self):
def fake_remove_from_aggregate(context, aggregate, host):
fake_remove_from_aggregate.called = True
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
fake_remove_from_aggregate)
self.conn.remove_from_aggregate(None, None, None)
self.assertTrue(fake_remove_from_aggregate.called)
def test_remove_from_empty_aggregate(self):
values = {"name": 'fake_aggregate',
"availability_zone": 'fake_zone'}
result = db.aggregate_create(self.context, values)
self.assertRaises(exception.AggregateError,
self.conn._pool.remove_from_aggregate,
None, result, "test_host")
def test_remove_subordinate(self):
"""Ensure eject subordinate gets called."""
def fake_eject_subordinate(id, compute_uuid, host_uuid):
fake_eject_subordinate.called = True
self.stubs.Set(self.conn._pool, "_eject_subordinate", fake_eject_subordinate)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
self.assertTrue(fake_eject_subordinate.called)
def test_remove_main_solo(self):
"""Ensure metadata are cleared after removal."""
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
aggregate = self._aggregate_setup(aggr_state=aggregate_states.ACTIVE,
metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate.id)
self.assertTrue(fake_clear_pool.called)
self.assertDictMatch({}, result.metadetails)
self.assertEqual(aggregate_states.ACTIVE, result.operational_state)
    def test_remove_main_non_empty_pool(self):
        """Ensure InvalidAggregateAction is raised when removing the main."""
aggregate = self._aggregate_setup(aggr_state=aggregate_states.ACTIVE,
hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.assertRaises(exception.InvalidAggregateAction,
self.conn._pool.remove_from_aggregate,
self.context, aggregate, "host")
def _aggregate_setup(self, aggr_name='fake_aggregate',
aggr_zone='fake_zone',
aggr_state=aggregate_states.CREATED,
hosts=['host'], metadata=None):
values = {"name": aggr_name,
"availability_zone": aggr_zone,
"operational_state": aggr_state, }
result = db.aggregate_create(self.context, values)
for host in hosts:
db.aggregate_host_add(self.context, result.id, host)
if metadata:
db.aggregate_metadata_add(self.context, result.id, metadata)
return db.aggregate_get(self.context, result.id)
| usc-isi/extra-specs | nova/tests/test_xenapi.py | Python | apache-2.0 | 83,206 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import subprocess
from pants.backend.codegen.subsystems.thrift_defaults import ThriftDefaults
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.binaries.thrift_binary import ThriftBinary
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from twitter.common.collections import OrderedSet
from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary
class GoThriftGen(SimpleCodegenTask):
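  """Generate Go source files from .thrift definitions with the Apache Thrift compiler."""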
@classmethod
def register_options(cls, register):
super(GoThriftGen, cls).register_options(register)
register('--strict', default=True, fingerprint=True, type=bool,
help='Run thrift compiler with strict warnings.')
register('--gen-options', advanced=True, fingerprint=True,
help='Use these apache thrift go gen options.')
register('--thrift-import', advanced=True,
help='Use this thrift-import gen option to thrift.')
register('--thrift-import-target', advanced=True,
help='Use this thrift import on symbolic defs.')
@classmethod
def subsystem_dependencies(cls):
return (super(GoThriftGen, cls).subsystem_dependencies() +
(ThriftDefaults, ThriftBinary.Factory.scoped(cls)))
@memoized_property
def _thrift_binary(self):
thrift_binary = ThriftBinary.Factory.scoped_instance(self).create()
return thrift_binary.path
@memoized_property
def _deps(self):
thrift_import_target = self.get_options().thrift_import_target
thrift_imports = self.context.resolve(thrift_import_target)
return thrift_imports
@memoized_property
def _service_deps(self):
service_deps = self.get_options().get('service_deps')
return list(self.resolve_deps(service_deps)) if service_deps else self._deps
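  # Regexes for detecting thrift `service` declarations and the `namespace go`
  # directive in a .thrift source file.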
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE)
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
def _get_go_namespace(self, source):
with open(source) as thrift:
namespace = self.NAMESPACE_PARSER.search(thrift.read())
if not namespace:
        raise TaskError('Thrift file {} must contain "namespace go "'.format(source))
return namespace.group(1)
def synthetic_target_extra_dependencies(self, target, target_workdir):
for source in target.sources_relative_to_buildroot():
if self._declares_service(os.path.join(get_buildroot(), source)):
return self._service_deps
return self._deps
def synthetic_target_type(self, target):
return GoThriftGenLibrary
def is_gentarget(self, target):
return isinstance(target, GoThriftLibrary)
@memoized_property
def _thrift_cmd(self):
cmd = [self._thrift_binary]
thrift_import = 'thrift_import={}'.format(self.get_options().thrift_import)
gen_options = self.get_options().gen_options
if gen_options:
gen_options += ',' + thrift_import
else:
gen_options = thrift_import
cmd.extend(('--gen', 'go:{}'.format(gen_options)))
if self.get_options().strict:
cmd.append('-strict')
if self.get_options().level == 'debug':
cmd.append('-verbose')
return cmd
def _generate_thrift(self, target, target_workdir):
target_cmd = self._thrift_cmd[:]
bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
for base in bases:
target_cmd.extend(('-I', base))
target_cmd.extend(('-o', target_workdir))
all_sources = list(target.sources_relative_to_buildroot())
if len(all_sources) != 1:
      raise TaskError('go_thrift_library only supports a single .thrift source file for {}.'.format(target))
source = all_sources[0]
target_cmd.append(os.path.join(get_buildroot(), source))
with self.context.new_workunit(name=source,
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(target_cmd)) as workunit:
result = subprocess.call(target_cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))
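    # Re-root the thrift compiler's gen-go/ output under src/go/ so the
    # generated code sits in a GOPATH-style source tree.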
gen_dir = os.path.join(target_workdir, 'gen-go')
src_dir = os.path.join(target_workdir, 'src')
safe_mkdir(src_dir)
go_dir = os.path.join(target_workdir, 'src', 'go')
os.rename(gen_dir, go_dir)
@classmethod
def product_types(cls):
return ['go']
def execute_codegen(self, target, target_workdir):
self._generate_thrift(target, target_workdir)
@property
def _copy_target_attributes(self):
"""Override `_copy_target_attributes` to exclude `provides`."""
return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides']
def synthetic_target_dir(self, target, target_workdir):
all_sources = list(target.sources_relative_to_buildroot())
source = all_sources[0]
namespace = self._get_go_namespace(source)
return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))
| cevaris/pants | contrib/go/src/python/pants/contrib/go/tasks/go_thrift_gen.py | Python | apache-2.0 | 5,677 |
import unittest
from rexp.compiler import PatternCompiler
class CompilerTestMethods(unittest.TestCase):
def test_compile_1(self):
compiler = PatternCompiler(pattern_set=dict(
TEST=r'\w+'
))
        # Compiling without argument names should fail.
        with self.assertRaises(Exception):
            compiler.compile('$1{TEST}')
c1 = compiler.compile('$1{TEST}', ['test'])
self.assertEqual(c1, r'(?:(?P<test>(\w+)))')
def test_compile_2(self):
compiler = PatternCompiler(pattern_set=dict(
TEST=r'\w+'
))
        # Compiling without argument names should fail.
        with self.assertRaises(Exception):
            compiler.compile('$1{TEST}')
c1 = compiler.compile('$1{TEST}', ['test'])
self.assertEqual(c1, r'(?:(?P<test>(\w+)))')
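

# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()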
| ebbkrdrcn/rexp | test/compiler.py | Python | apache-2.0 | 790 |
import torch
from deluca.lung.core import Controller, LungEnv
class PIDCorrection(Controller):
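    """Wrap a base controller and add a PID correction term computed from the
    mismatch between an internal simulator's pressure and the observed state."""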
def __init__(self, base_controller: Controller, sim: LungEnv, pid_K=[0.0, 0.0], decay=0.1, **kwargs):
self.base_controller = base_controller
self.sim = sim
self.I = 0.0
self.K = pid_K
self.decay = decay
self.reset()
def reset(self):
self.base_controller.reset()
self.sim.reset()
self.I = 0.0
def compute_action(self, state, t):
u_in_base, u_out = self.base_controller(state, t)
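        # Error between the simulator's pressure and the observed state; an
        # exponentially-decayed running average of it serves as the integral term.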
err = self.sim.pressure - state
self.I = self.I * (1 - self.decay) + err * self.decay
pid_correction = self.K[0] * err + self.K[1] * self.I
u_in = torch.clamp(u_in_base + pid_correction, min=0.0, max=100.0)
self.sim(u_in, u_out, t)
return u_in, u_out
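

# Minimal usage sketch (illustrative only; `base` and `sim` stand in for concrete
# Controller and LungEnv implementations, which are not defined in this module):
#
#   pid = PIDCorrection(base_controller=base, sim=sim, pid_K=[0.1, 0.05], decay=0.1)
#   u_in, u_out = pid.compute_action(observed_pressure, t)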
| google/deluca-lung | deluca/lung/experimental/controllers/_pid_correction.py | Python | apache-2.0 | 874 |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals, print_function, division
| dawncold/expenditure-application | expenditure_application/__init__.py | Python | apache-2.0 | 90 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def strip_region_tags(sample_text):
"""Remove blank lines and region tags from sample text"""
magic_lines = [
line for line in sample_text.split("\n") if len(line) > 0 and "# [" not in line
]
return "\n".join(magic_lines)
| googleapis/python-bigquery | samples/magics/_helpers.py | Python | apache-2.0 | 823 |