repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
stringlengths 5-92 | stringlengths 4-232 | stringclasses 19 values | stringlengths 4-7 | stringlengths 721-1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51-99.9 | int64 15-997 | float64 0.25-0.97 | bool 1 class

afb/0install | zeroinstall/injector/handler.py | 1 | 9160 |
"""
Integrates download callbacks with an external mainloop.
While things are being downloaded, Zero Install returns control to your program.
Your mainloop is responsible for monitoring the state of the downloads and notifying
Zero Install when they are complete.
To do this, you supply a L{Handler} to the L{policy}.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
from zeroinstall import SafeException
from zeroinstall import support
from zeroinstall.support import tasks
from zeroinstall.injector import download
class NoTrustedKeys(SafeException):
"""Thrown by L{Handler.confirm_import_feed} on failure."""
pass
class Handler(object):
"""
A Handler is used to interact with the user (e.g. to confirm keys, display download progress, etc).
@ivar monitored_downloads: set of downloads in progress
@type monitored_downloads: {L{download.Download}}
@ivar n_completed_downloads: number of downloads which have finished for GUIs, etc (can be reset as desired).
@type n_completed_downloads: int
@ivar total_bytes_downloaded: informational counter for GUIs, etc (can be reset as desired). Updated when download finishes.
@type total_bytes_downloaded: int
@ivar dry_run: don't write or execute any files, just print notes about what we would have done to stdout
@type dry_run: bool
"""
__slots__ = ['monitored_downloads', 'dry_run', 'total_bytes_downloaded', 'n_completed_downloads']
def __init__(self, mainloop = None, dry_run = False):
"""@type dry_run: bool"""
self.monitored_downloads = set()
self.dry_run = dry_run
self.n_completed_downloads = 0
self.total_bytes_downloaded = 0
def monitor_download(self, dl):
"""Called when a new L{download} is started.
This is mainly used by the GUI to display the progress bar.
@type dl: L{zeroinstall.injector.download.Download}"""
self.monitored_downloads.add(dl)
self.downloads_changed()
@tasks.async
def download_done_stats():
yield dl.downloaded
# NB: we don't check for exceptions here; someone else should be doing that
try:
self.n_completed_downloads += 1
self.total_bytes_downloaded += dl.get_bytes_downloaded_so_far()
self.monitored_downloads.remove(dl)
self.downloads_changed()
except Exception as ex:
self.report_error(ex)
download_done_stats()
def impl_added_to_store(self, impl):
"""Called by the L{fetch.Fetcher} when adding an implementation.
The GUI uses this to update its display.
@param impl: the implementation which has been added
@type impl: L{model.Implementation}"""
pass
def downloads_changed(self):
"""This is just for the GUI to override to update its display."""
pass
@tasks.async
def confirm_import_feed(self, pending, valid_sigs, retval):
"""Sub-classes should override this method to interact with the user about new feeds.
If multiple feeds need confirmation, L{trust.TrustMgr.confirm_keys} will only invoke one instance of this
method at a time.
@param pending: the new feed to be imported
@type pending: L{PendingFeed}
@param valid_sigs: maps signatures to a list of fetchers collecting information about the key
@type valid_sigs: {L{gpg.ValidSig} : L{fetch.KeyInfoFetcher}}
@since: 0.42"""
from zeroinstall.injector import trust
assert valid_sigs
domain = trust.domain_from_url(pending.url)
# Ask on stderr, because we may be writing XML to stdout
print(_("Feed: %s") % pending.url, file=sys.stderr)
print(_("The feed is correctly signed with the following keys:"), file=sys.stderr)
for x in valid_sigs:
print("-", x, file=sys.stderr)
def text(parent):
text = ""
for node in parent.childNodes:
if node.nodeType == node.TEXT_NODE:
text = text + node.data
return text
shown = set()
key_info_fetchers = valid_sigs.values()
while key_info_fetchers:
old_kfs = key_info_fetchers
key_info_fetchers = []
for kf in old_kfs:
infos = set(kf.info) - shown
if infos:
if len(valid_sigs) > 1:
print("%s: " % kf.fingerprint)
for key_info in infos:
print("-", text(key_info), file=sys.stderr)
shown.add(key_info)
if kf.blocker:
key_info_fetchers.append(kf)
if key_info_fetchers:
for kf in key_info_fetchers: print(kf.status, file=sys.stderr)
stdin = tasks.InputBlocker(0, 'console')
blockers = [kf.blocker for kf in key_info_fetchers] + [stdin]
yield blockers
for b in blockers:
try:
tasks.check(b)
except Exception as ex:
logger.warning(_("Failed to get key info: %s"), ex)
if stdin.happened:
print(_("Skipping remaining key lookups due to input from user"), file=sys.stderr)
break
if not shown:
print(_("Warning: Nothing known about this key!"), file=sys.stderr)
if len(valid_sigs) == 1:
print(_("Do you want to trust this key to sign feeds from '%s'?") % domain, file=sys.stderr)
else:
print(_("Do you want to trust all of these keys to sign feeds from '%s'?") % domain, file=sys.stderr)
while True:
print(_("Trust [Y/N] "), end=' ', file=sys.stderr)
sys.stderr.flush()
i = support.raw_input()
if not i: continue
if i in 'Nn':
raise NoTrustedKeys(_('Not signed with a trusted key'))
if i in 'Yy':
break
trust.trust_db._dry_run = self.dry_run
retval.extend([key.fingerprint for key in valid_sigs])
@tasks.async
def confirm_install(self, msg):
"""We need to check something with the user before continuing with the install.
@raise download.DownloadAborted: if the user cancels"""
yield
print(msg, file=sys.stderr)
while True:
sys.stderr.write(_("Install [Y/N] "))
sys.stderr.flush()
i = support.raw_input()
if not i: continue
if i in 'Nn':
raise download.DownloadAborted()
if i in 'Yy':
break
def report_error(self, exception, tb = None):
"""Report an exception to the user.
@param exception: the exception to report
@type exception: L{SafeException}
@param tb: optional traceback
@since: 0.25"""
import logging
logger.warning("%s", str(exception) or type(exception),
exc_info = (exception, exception, tb) if logger.isEnabledFor(logging.INFO) else None)
class ConsoleHandler(Handler):
"""A Handler that displays progress on stderr (a tty).
(we use stderr because we use stdout to talk to the OCaml process)
@since: 0.44"""
last_msg_len = None
update = None
disable_progress = 0
screen_width = None
# While we are displaying progress, we override builtins.print to clear the display first.
original_print = None
def downloads_changed(self):
if self.monitored_downloads and self.update is None:
if self.screen_width is None:
try:
import curses
curses.setupterm()
self.screen_width = curses.tigetnum('cols') or 80
except Exception as ex:
logger.info("Failed to initialise curses library: %s", ex)
self.screen_width = 80
self.show_progress()
self.original_print = print
builtins.print = self.print
self.update = tasks.get_loop().call_repeatedly(0.2, self.show_progress)
elif len(self.monitored_downloads) == 0:
if self.update:
self.update.cancel()
self.update = None
builtins.print = self.original_print
self.original_print = None
self.clear_display()
def show_progress(self):
if not self.monitored_downloads: return
urls = [(dl.url, dl) for dl in self.monitored_downloads]
if self.disable_progress: return
screen_width = self.screen_width - 2
item_width = max(16, screen_width // len(self.monitored_downloads))
url_width = item_width - 7
msg = ""
for url, dl in sorted(urls):
so_far = dl.get_bytes_downloaded_so_far()
if url.endswith('/latest.xml'):
url = url[:-10] # remove latest.xml from mirror URLs
leaf = url.rsplit('/', 1)[-1]
if len(leaf) >= url_width:
display = leaf[:url_width]
else:
display = url[-url_width:]
if dl.expected_size:
msg += "[%s %d%%] " % (display, int(so_far * 100 / dl.expected_size))
else:
msg += "[%s] " % (display)
msg = msg[:screen_width]
if self.last_msg_len is None:
sys.stderr.write(msg)
else:
sys.stderr.write(chr(13) + msg)
if len(msg) < self.last_msg_len:
sys.stderr.write(" " * (self.last_msg_len - len(msg)))
self.last_msg_len = len(msg)
sys.stderr.flush()
return
def clear_display(self):
if self.last_msg_len != None:
sys.stderr.write(chr(13) + " " * self.last_msg_len + chr(13))
sys.stderr.flush()
self.last_msg_len = None
def report_error(self, exception, tb = None):
self.clear_display()
Handler.report_error(self, exception, tb)
def confirm_import_feed(self, pending, valid_sigs, retval):
self.clear_display()
self.disable_progress += 1
blocker = Handler.confirm_import_feed(self, pending, valid_sigs, retval)
@tasks.async
def enable():
yield blocker
self.disable_progress -= 1
self.show_progress()
enable()
return blocker
def print(self, *args, **kwargs):
self.clear_display()
self.original_print(*args, **kwargs)
| lgpl-2.1 | 4,241,712,712,980,864,500 | 31.253521 | 125 | 0.69083 | false |
OpenDroneMap/WebODM | app/api/common.py | 1 | 1763 |
from django.core.exceptions import ObjectDoesNotExist, SuspiciousFileOperation
from rest_framework import exceptions
import os
from app import models
def get_and_check_project(request, project_pk, perms=('view_project',)):
"""
Django comes with a standard `model level` permission system. You can
check whether users are logged-in and have privileges to act on things
model wise (can a user add a project? can a user view projects?).
Django-guardian adds a `row level` permission system. Now not only can you
decide whether a user can add a project or view projects, you can specify exactly
which projects a user has or has not access to.
    This brings us to the reason for the following function: tasks are part of a project,
and it would add a tremendous headache (and redundancy) to specify row level permissions
for each task. Instead, we check the row level permissions of the project
to which a task belongs to.
Perhaps this could be added as a django-rest filter?
Retrieves a project and raises an exception if the current user
has no access to it.
"""
try:
project = models.Project.objects.get(pk=project_pk, deleting=False)
for perm in perms:
if not request.user.has_perm(perm, project): raise ObjectDoesNotExist()
except ObjectDoesNotExist:
raise exceptions.NotFound()
return project
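# Hedged usage sketch (added; not in the original file): a task view could
# guard access through the parent project like this. The view signature and
# Task lookup below are assumptions for illustration.
#
#   def get_task(request, project_pk, task_pk):
#       project = get_and_check_project(request, project_pk, perms=('view_project',))
#       return models.Task.objects.get(pk=task_pk, project=project)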
def path_traversal_check(unsafe_path, known_safe_path):
known_safe_path = os.path.abspath(known_safe_path)
unsafe_path = os.path.abspath(unsafe_path)
if (os.path.commonprefix([known_safe_path, unsafe_path]) != known_safe_path):
raise SuspiciousFileOperation("{} is not safe".format(unsafe_path))
# Passes the check
    return unsafe_path
| mpl-2.0 | 4,351,260,022,897,503,700 | 40.023256 | 92 | 0.724334 | false |
zlcnup/csmath | hw4_lm/lm.py | 1 | 2784 |
# -*- coding: utf-8 -*-
#!/usr/bin/enzl_v python
from pylab import *
from numpy import *
from math import *
def data_generator(N):
    # Generate the coefficient array (a_i, b_i) for the vector function F: a_i * exp(-b_i * x)
zl_mean = [3.4,4.5]
zl_cozl_v = [[1,0],[0,10]]
zl_coff = np.random.multivariate_normal(zl_mean,zl_cozl_v,N)
    # Generate the observation vector y
x = np.random.uniform(1, N, N)
y = [zl_coff[i][0]*exp(-zl_coff[i][1]*x[i]) for i in range(N)]
    # Generate the initial guess x0
x0 = [x[i]+np.random.normal(0.0,0.3) for i in range(N)]
return zl_coff, y, x0
def jacobian(zl_coff, x0, N):
J=zeros((N,N),float)
    # Compute the partial derivative of the i-th function with respect to the j-th component of X
for i in range(N):
for j in range(N):
#-abexp(-b*xi)
J[i][j] = -(zl_coff[i][0]*zl_coff[i][1])*exp(-(zl_coff[i][1]*x0[j]))
return J
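# Reference note (added): each Levenberg-Marquardt step below solves the
# damped normal equations
#     (J^T J + mu * I) * delta = J^T * (y - f(x))
# where mu is the damping factor zl_miu; a large mu behaves like gradient
# descent, a small mu like Gauss-Newton.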
def normG(g):
absg = abs(g)
Normg = absg.argmax()
num = absg[Normg]
return num
def zl_LM(zl_coff, y, x0, N, maxIter):
zl_numIter = 0
zl_v = 2
    zl_miu = 0.05  # damping factor
x = x0
zl_Threshold = 1e-5
zl_preszl_fx = 100000
while zl_numIter < maxIter:
zl_numIter += 1
        # Compute the Jacobian matrix
J = jacobian(zl_coff, x, N)
        # Compute the approximate Hessian, the residual Ep, and the gradient g
A = dot(J.T,J)
        zl_fx = [zl_coff[i][0]*exp(-zl_coff[i][1]*x[i]) for i in range(N)]
szl_fx = sum(array(zl_fx)*array(zl_fx))
Ep = array(y) - array(zl_fx)
g = array(dot(J.T,Ep))
H = A + zl_miu*np.eye(N)
DTp = solve(H, g)
x = x + DTp
zl_fx2 = zeros(N,float)
for j in range(N):
zl_fx2[j] = zl_coff[j][0]*exp(-zl_coff[j][1])
szl_fx2 = sum(array(zl_fx2)*array(zl_fx2))
if abs(szl_fx - zl_preszl_fx) < zl_Threshold:
print("The zl_vector x is: ")
print(x)
print("The sum is: ")
print(szl_fx2)
break
        if szl_fx2 < (szl_fx+0.5*sum(array(g)*array(Ep))):
            zl_miu /= zl_v
        else:
            zl_miu *= 2
        zl_preszl_fx = szl_fx  # remember this iteration's objective value for the convergence test
if zl_numIter == maxIter:
print("The zl_vector x0 is: ")
print(x0)
print("The zl_vector x is: ")
print(x)
print("The sum is: ")
print(szl_fx2)
if __name__ == "__main__":
    # Read the dimension N of the vector space (here we assume m = n)
print("Please Input the dimension N of zl_vector space and the maxIter (the product of N and maxIter not be too large)")
N = input("Input N (not be too large): ")
N = int(N)
maxIter = input("Input the max number of interation (larger than half of the N): ")
maxIter = int(maxIter)
zl_coff, y, x0 = data_generator(N)
    # Run the LM algorithm
zl_LM(zl_coff, y, x0, N, maxIter)
| mit | -685,786,909,710,917,200 | 28.573034 | 124 | 0.518237 | false |
atizo/pygobject | ltihooks.py | 1 | 2327 |
# -*- Mode: Python; py-indent-offset: 4 -*-
# ltihooks.py: python import hooks that understand libtool libraries.
# Copyright (C) 2000 James Henstridge.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os, ihooks
class LibtoolHooks(ihooks.Hooks):
def get_suffixes(self):
"""Like normal get_suffixes, but adds .la suffixes to list"""
ret = ihooks.Hooks.get_suffixes(self)
ret.insert(0, ('module.la', 'rb', 3))
ret.insert(0, ('.la', 'rb', 3))
return ret
def load_dynamic(self, name, filename, file=None):
"""Like normal load_dynamic, but treat .la files specially"""
if len(filename) > 3 and filename[-3:] == '.la':
fp = open(filename, 'r')
dlname = ''
installed = 1
line = fp.readline()
while line:
if len(line) > 7 and line[:7] == 'dlname=':
dlname = line[8:-2]
elif len(line) > 10 and line[:10] == 'installed=':
installed = line[10:-1] == 'yes'
line = fp.readline()
fp.close()
if dlname:
if installed:
filename = os.path.join(os.path.dirname(filename),
dlname)
else:
filename = os.path.join(os.path.dirname(filename),
'.libs', dlname)
return ihooks.Hooks.load_dynamic(self, name, filename, file)
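# Illustration (added note): an installed libtool .la file typically contains
# lines such as
#   dlname='foomodule.so'
#   installed=yes
# in which case load_dynamic() above redirects the import to
# <directory of the .la file>/foomodule.so.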
importer = ihooks.ModuleImporter()
importer.set_hooks(LibtoolHooks())
def install():
importer.install()
def uninstall():
importer.uninstall()
install()
| lgpl-2.1 | -552,883,266,822,510,400 | 37.783333 | 75 | 0.593468 | false |
eBay/cronus-agent | agent/agent/lib/agent_thread/deactivate_manifest.py | 1 | 5040 |
#pylint: disable=W0703,R0912,R0915,R0904,W0105
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
""" Thread to perform creation of a service """
import os
import shutil
import traceback
from agent.lib.utils import islink
from agent.lib.utils import readlink
from agent.lib.errors import Errors
from agent.lib.errors import AgentException
from agent.controllers.service import ServiceController
import logging
from agent.lib.agent_thread.manifest_control import ManifestControl
from agent.lib import manifestutil
class DeactivateManifest(ManifestControl):
""" This thread will attempt to activate a manifest
This means going throuh each package
call the stop
call the deactivate
delete the active link
call the activate
create the active link
call start
"""
THREAD_NAME = 'deactivate_manifest'
def __init__(self, threadMgr, service):
""" Constructor """
ManifestControl.__init__(self, threadMgr, service, manifest = None, name = 'deactivate_manifest')
self.setName(DeactivateManifest.THREAD_NAME)
self.__LOG = manifestutil.getServiceLogger(self, logging.getLogger(__name__))
def doRun(self):
""" Main body of the thread """
errorMsg = ""
errorCode = None
failed = False
try:
activePath = os.path.join(ServiceController.manifestPath(self._service), 'active')
oldManifest = None
# make sure that if the active path exists, it's a link
# if not log that and delete the link
if (os.path.exists(activePath) and not os.name == 'nt' and not islink(activePath)):
self.__LOG.error('%s is not a link. Attempted to delete' % activePath)
shutil.rmtree(activePath)
if (os.path.exists(activePath)):
oldManifest = os.path.basename(readlink(activePath))
else:
raise AgentException(error = Errors.ACTIVEMANIFEST_MANIFEST_MISSING, errorMsg = 'No active manifest - cannot deactivate service')
self.__deactivateManifest(self._service, oldManifest)
self.__removeSymlink(self._service)
except SystemExit as exc:
failed = True
if (len(exc.args) == 2):
# ok we got {err code, err msg}
errorCode = exc.args[0]
errorMsg = exc.args[1]
raise exc
except AgentException as exc:
failed = True
errorMsg = 'Deactivate Manifest - Agent Exception - %s' % exc.getMsg()
errorCode = exc.getCode()
except Exception as exc:
failed = True
errorMsg = 'Deactivate Manifest - Unknown error - (%s) - %s - %s' \
% (self._service, str(exc), traceback.format_exc(5))
errorCode = Errors.UNKNOWN_ERROR
finally:
if failed:
self.__LOG.warning(errorMsg)
self._updateStatus(httpStatus = 500, error = errorCode,
errorMsg = errorMsg)
        self.__LOG.debug('Done: deactivate manifest for (%s)' % (self._service))
self._updateProgress(100)
def __deactivateManifest(self, service, manifest):
""" deactive a manifest. This means calling stop then deactive on the manifest
@param service - service of manifest to deactivate
@param manifest - manifest to deactivate
@param stack - stack for recovery
"""
self.__LOG.debug("Deactivate Manifest %s-%s" % (service, manifest))
if (manifest == None):
return
self._execPackages('shutdown', service, manifest, 11, 25, activateFlow = False)
self._execPackages('deactivate', service, manifest, 26, 50, activateFlow = False)
manifestutil.processControllerInPackage(service, manifest, activateFlow = False)
def __removeSymlink(self, service):
""" remove symlink """
#remove symlink
activePath = self.__getSymlinkPath(service)
if os.path.exists(activePath):
if (os.path.islink(activePath)): # *nix
os.remove(activePath)
else:
raise AgentException('Running platform seems to be neither win32 nor *nix with any (sym)link support. Can\'t proceed with link deletion')
def __getSymlinkPath(self, service):
""" return symlink path for a service """
return os.path.join(ServiceController.manifestPath(service), 'active')
| apache-2.0 | 1,921,334,820,953,624,800 | 38.069767 | 153 | 0.639286 | false |
cizixs/tftp | tftp/tftp_client.py | 1 | 7600 |
import sys
import struct
import binascii
import argparse
import tftp
from tftp import SocketBase
from tftp import get_opcode
from tftp import default_port
from tftp import make_data_packet
from tftp import make_ack_packet
class State(object):
START, DATA = range(2)
# Make packet functions.
def make_request_packet(opcode, filename, mode='octet'):
values = (opcode, filename, 0, mode, 0)
s = struct.Struct('! H {}s B {}s B'.format(len(filename),len(mode)) )
return s.pack(*values)
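# Example (added note): per RFC 1350 the RRQ opcode is 1, so
# make_request_packet(tftp.RRQ, 'hello.txt') packs to
#   b'\x00\x01hello.txt\x00octet\x00'
# (2-byte opcode, filename, NUL, mode, NUL).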
def make_rrq_packet(filename):
return make_request_packet(tftp.RRQ, filename)
def make_wrq_packet(filename):
return make_request_packet(tftp.WRQ, filename)
class TftpClient(SocketBase):
def __init__(self, host='127.0.0.1', port='', filename=None, **argv):
self.host = host
self.orig_port = self.port = port or default_port()
self.block_num = 1
self.is_done = False
self.status = State.START
self.action = argv.get('action', 'get')
self.debug = argv.get('debug', False)
self.block_size = argv.get('block_size', tftp.DEFAULT_BLOCK_SIZE)
self.filename = filename
self.setup_file()
self.setup_connect()
def reset(self):
self.block_num = 1
self.is_done = False
self.status = State.START
self.port = self.orig_port or 69
self.setup_file()
self.setup_connect()
@property
def server_addr(self):
return (self.host, self.port)
def setup_file(self):
if self.filename:
if self.action == 'get':
self.fd = open(self.filename, 'wb')
elif self.action == 'put':
self.fd = open(self.filename, 'rb')
else:
                raise Exception('unsupported action %s' % self.action)
def handle_packet(self, packet, addr):
"""Handle pakcet from remote.
If it's a wrong packet, not from expected host/port, discard it;
If it's a data packet, send ACK packet back;
If it's a error packet, print error and exit;
If it's a ack packet, send Data packet back.
"""
host, port = addr
if host != self.host:
# ignore packet from wrong address.
return
packet_len = len(packet)
opcode = get_opcode(packet)
if opcode == tftp.ERROR:
err_code = struct.unpack('!H', packet[2:4])[0]
err_msg = packet[4:packet_len-1]
print "Error %s: %s" % (err_code, err_msg)
sys.exit(err_code)
elif opcode == tftp.DATA:
# This is a data packet received from server, save data to file.
# update port
if self.port != port:
self.port = port
block_num = struct.unpack('!H', packet[2:4])[0]
if block_num != self.block_num:
# skip unexpected #block data packet
print 'unexpected block num %d' % block_num
return
data = packet[4:]
self.fd.write(data)
if len(packet) < self.block_size + 2:
self.is_done = True
self.fd.close()
file_len = self.block_size * (self.block_num -1) + len(data)
print '%d bytes received.' % file_len
self.block_num += 1
elif opcode == tftp.ACK:
# This is a write request ACK
# Send next block_size data to server
if self.port != port:
self.port = port
block_num = struct.unpack('!H', packet[2:4])[0]
self.verbose('received ack for %d' % block_num)
self.block_num += 1
else:
raise Exception('unrecognized packet: %s', str(opcode))
def get_next_packet(self):
if self.status == State.START:
opcode = tftp.RRQ if self.action == 'get' else tftp.WRQ
self.verbose('about to send packet %d' % opcode)
packet = make_request_packet(opcode, self.filename)
self.status = State.DATA
elif self.status == State.DATA:
if self.action == 'get':
self.verbose('about to send ack for %d' % (self.block_num - 1))
packet = make_ack_packet(self.block_num-1)
elif self.action == 'put':
self.verbose('about to send data for %d' % (self.block_num - 1))
data = self.fd.read(self.block_size)
if len(data) < self.block_size:
self.is_done = True
packet = make_data_packet(self.block_num-1, data)
return packet
def handle(self):
"""Main loop function for tftp.
The main loop works like the following:
1. get next-to-send packet
2. send the packet to server
3. receive packet from server
4. handle packet received, back to step 1.
"""
while not self.is_done:
packet = self.get_next_packet()
if packet:
self.send_packet(packet)
(packet, addr) = self.recv_packet()
self.handle_packet(packet, addr)
def main():
menu = """Tftp client help menu:
Supported commands:
connect connect to a server
get get file
put put file
quit exit
? print this menu
"""
def command_parse(line):
if not line:
return (None, None)
line = line.split()
command = line[0]
options = line[1:]
return command, options
tftp_client = TftpClient()
def connect(*args):
tftp_client.host = args[0]
if len(args) > 1:
tftp_client.port = int(args[1])
def get(*args):
print args[0]
tftp_client.action = 'get'
tftp_client.filename = args[0]
tftp_client.reset()
tftp_client.handle()
def put(*args):
tftp_client.filename = args[0]
tftp_client.action = 'put'
tftp_client.reset()
tftp_client.handle()
def quit(*args):
print 'Bye!'
def print_help(*args):
print menu
command_map = {
'connect': connect,
'get': get,
'put': put,
'quit': quit,
}
print 'Welcome to python tftpclient.'
while True:
line = raw_input('tftp> ').strip().lower()
command, options = command_parse(line)
command_map.get(command, print_help)(*options)
if command == 'quit':
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tftp client in pure python.')
parser.add_argument('--host', '-s', action='store', dest='host',
default='127.0.0.1', help='Server hostname')
parser.add_argument('--port', '-p', action='store', dest='port', type=int,
default=69, help='Server port')
parser.add_argument('--file', '-f', action='store', dest='filename',
help='File to get from server')
parser.add_argument('--debug', '-d', action='store_true',
default=False, help='Debug mode: print more information(debug: False)')
parser.add_argument('action', metavar='action', nargs='*',
help='Action to conduct: put or get(default: get)')
args = parser.parse_args()
print args
if not args.filename or not args.action:
main()
else:
tftp_client = TftpClient(args.host, args.port, args.filename,
action=args.action[0], debug=args.debug)
tftp_client.handle()
| mit | -6,937,391,200,844,491,000 | 31.340426 | 83 | 0.550526 | false |
mduggan/toumeika | shikin/review.py | 1 | 4611 |
# -*- coding: utf-8 -*-
"""
Shikin review page and associated API
"""
from sqlalchemy import func
import datetime
import random
from flask import render_template, abort, request, jsonify, session
from . import app, ocrfix
from .model import DocSegment, DocSegmentReview, User
from .util import dologin
def get_user_or_abort():
# if request.remote_addr == '127.0.0.1':
# user = 'admin'
# else:
user = session.get('username')
if not user:
abort(403)
u = User.query.filter(User.name == user).first()
if not u:
abort(403)
return u
@app.route('/api/reviewcount/<user>')
def review_count(user):
u = User.query.filter(User.name == user).first()
if not u:
return abort(404)
return jsonify({'user': user, 'count': len(u.reviews)})
@app.route('/api/unreview/<int:segmentid>')
def unreview(segmentid):
user = get_user_or_abort()
revid = request.args.get('revid')
ds = DocSegment.query.filter(DocSegment.id == segmentid).first()
if not ds:
abort(404)
ds.viewcount = max(0, ds.viewcount-1)
app.dbobj.session.add(ds)
if not revid or not revid.isdigit():
app.dbobj.session.commit()
        return jsonify({'status': 'ok'})
revid = int(revid)
old = DocSegmentReview.query.filter(DocSegmentReview.id == revid, DocSegmentReview.user_id == user.id).first()
if not old:
abort(404)
app.dbobj.session.delete(old)
app.dbobj.session.commit()
return jsonify({'status': 'ok', 'id': revid})
@app.route('/api/review/<int:segmentid>')
def review_submit(segmentid):
user = get_user_or_abort()
ds = DocSegment.query.filter(DocSegment.id == segmentid).first()
if not ds:
abort(404)
text = request.args.get('text')
skip = request.args.get('skip')
if text is None and not skip:
abort(404)
timestamp = datetime.datetime.now()
ds.viewcount += 1
app.dbobj.session.add(ds)
if skip:
app.dbobj.session.commit()
return jsonify({'status': 'ok'})
old = DocSegmentReview.query\
.filter(DocSegmentReview.segment_id == ds.id)\
.order_by(DocSegmentReview.rev.desc())\
.first()
if old is not None:
rev = old.rev + 1
else:
rev = 1
newrev = DocSegmentReview(segment=ds, rev=rev, timestamp=timestamp, user=user, text=text)
app.dbobj.session.add(newrev)
app.dbobj.session.commit()
return jsonify({'status': 'ok', 'id': newrev.id})
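# Illustrative call (added note; the values are hypothetical):
#   GET /api/review/42?text=corrected%20text
# bumps segment 42's view count, stores the text as the next revision for the
# logged-in user, and returns {"status": "ok", "id": <new revision id>}.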
@app.route('/api/reviewdata', methods=['GET'])
def reviewdata():
# Find a random early page with lots of unreviewed items. This way even
# with multiple simulteanous users they should get different pages.
minviewcount = app.dbobj.session.query(func.min(DocSegment.viewcount)).one()[0]
q = app.dbobj.session.query(DocSegment.doc_id, DocSegment.page)\
.filter(DocSegment.ocrtext != None)\
.filter(DocSegment.viewcount <= minviewcount)\
.distinct()
pages = list(q.all())
app.logger.debug("%d pages with segments of only %d views" % (len(pages), minviewcount))
# FIXME: this kinda works, but as all the pages get reviewed it will tend
# toward giving all users the same page. not really a problem until I have
# more than 1 user.
docid, page = random.choice(pages)
q = DocSegment.query.filter(DocSegment.doc_id == docid)\
.filter(DocSegment.page == page)\
.filter(DocSegment.viewcount <= minviewcount)
segments = q.all()
if not segments:
abort(404)
segdata = []
for d in segments:
if d.usertext is None:
txt = ocrfix.guess_fix(d.ocrtext)
suggests = ocrfix.suggestions(d)
else:
txt = d.usertext.text
suggests = []
lines = max(len(d.ocrtext.splitlines()), len(txt.splitlines()))
segdata.append(dict(ocrtext=d.ocrtext, text=txt, segment_id=d.id,
x1=d.x1, x2=d.x2, y1=d.y1, y2=d.y2,
textlines=lines, docid=docid, page=page+1, suggests=suggests))
return jsonify(dict(segments=segdata, docid=docid, page=page+1))
@app.route('/review', methods=['GET', 'POST'])
def review():
""" Review page """
error = None
user = None
if request.method == 'POST':
user, error = dologin()
if 'username' in session:
u = get_user_or_abort()
uname = u.name
else:
uname = None
return render_template('review.html', user=uname, error=error)
| bsd-2-clause | 2,451,348,942,650,668,500 | 27.81875 | 114 | 0.605942 | false |
falkTX/Cadence | src/systray.py | 1 | 23718 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# KDE, App-Indicator or Qt Systray
# Copyright (C) 2011-2018 Filipe Coelho <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the COPYING file
# Imports (Global)
import os, sys
if True:
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction, QMainWindow, QMenu, QSystemTrayIcon
else:
from PyQt4.QtCore import QTimer
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QAction, QMainWindow, QMenu, QSystemTrayIcon
try:
if False and os.getenv("DESKTOP_SESSION") in ("ubuntu", "ubuntu-2d") and not os.path.exists("/var/cadence/no_app_indicators"):
from gi import require_version
require_version('Gtk', '3.0')
from gi.repository import Gtk
require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
TrayEngine = "AppIndicator"
#elif os.getenv("KDE_SESSION_VERSION") >= 5:
#TrayEngine = "Qt"
#elif os.getenv("KDE_FULL_SESSION") or os.getenv("DESKTOP_SESSION") == "kde-plasma":
#from PyKDE5.kdeui import KAction, KIcon, KMenu, KStatusNotifierItem
#TrayEngine = "KDE"
else:
TrayEngine = "Qt"
except:
TrayEngine = "Qt"
print("Using Tray Engine '%s'" % TrayEngine)
iActNameId = 0
iActWidget = 1
iActParentMenuId = 2
iActFunc = 3
iSepNameId = 0
iSepWidget = 1
iSepParentMenuId = 2
iMenuNameId = 0
iMenuWidget = 1
iMenuParentMenuId = 2
# Get Icon from user theme, using our own as backup (Oxygen)
def getIcon(icon, size=16):
return QIcon.fromTheme(icon, QIcon(":/%ix%i/%s.png" % (size, size, icon)))
# Global Systray class
class GlobalSysTray(object):
def __init__(self, parent, name, icon):
object.__init__(self)
self._app = None
self._parent = parent
self._gtk_running = False
self._quit_added = False
self.act_indexes = []
self.sep_indexes = []
self.menu_indexes = []
if TrayEngine == "KDE":
self.menu = KMenu(parent)
self.menu.setTitle(name)
self.tray = KStatusNotifierItem()
self.tray.setAssociatedWidget(parent)
self.tray.setCategory(KStatusNotifierItem.ApplicationStatus)
self.tray.setContextMenu(self.menu)
self.tray.setIconByPixmap(getIcon(icon))
self.tray.setTitle(name)
self.tray.setToolTipTitle(" ")
self.tray.setToolTipIconByPixmap(getIcon(icon))
# Double-click is managed by KDE
elif TrayEngine == "AppIndicator":
self.menu = Gtk.Menu()
self.tray = AppIndicator.Indicator.new(name, icon, AppIndicator.IndicatorCategory.APPLICATION_STATUS)
self.tray.set_menu(self.menu)
# Double-click is not possible with App-Indicators
elif TrayEngine == "Qt":
self.menu = QMenu(parent)
self.tray = QSystemTrayIcon(getIcon(icon))
self.tray.setContextMenu(self.menu)
self.tray.setParent(parent)
self.tray.activated.connect(self.qt_systray_clicked)
# -------------------------------------------------------------------------------------------
def addAction(self, act_name_id, act_name_string, is_check=False):
if TrayEngine == "KDE":
act_widget = KAction(act_name_string, self.menu)
act_widget.setCheckable(is_check)
self.menu.addAction(act_widget)
elif TrayEngine == "AppIndicator":
if is_check:
act_widget = Gtk.CheckMenuItem(act_name_string)
else:
act_widget = Gtk.ImageMenuItem(act_name_string)
act_widget.set_image(None)
act_widget.show()
self.menu.append(act_widget)
elif TrayEngine == "Qt":
act_widget = QAction(act_name_string, self.menu)
act_widget.setCheckable(is_check)
self.menu.addAction(act_widget)
else:
act_widget = None
act_obj = [None, None, None, None]
act_obj[iActNameId] = act_name_id
act_obj[iActWidget] = act_widget
self.act_indexes.append(act_obj)
def addSeparator(self, sep_name_id):
if TrayEngine == "KDE":
sep_widget = self.menu.addSeparator()
elif TrayEngine == "AppIndicator":
sep_widget = Gtk.SeparatorMenuItem()
sep_widget.show()
self.menu.append(sep_widget)
elif TrayEngine == "Qt":
sep_widget = self.menu.addSeparator()
else:
sep_widget = None
sep_obj = [None, None, None]
sep_obj[iSepNameId] = sep_name_id
sep_obj[iSepWidget] = sep_widget
self.sep_indexes.append(sep_obj)
def addMenu(self, menu_name_id, menu_name_string):
if TrayEngine == "KDE":
menu_widget = KMenu(menu_name_string, self.menu)
self.menu.addMenu(menu_widget)
elif TrayEngine == "AppIndicator":
menu_widget = Gtk.MenuItem(menu_name_string)
menu_parent = Gtk.Menu()
menu_widget.set_submenu(menu_parent)
menu_widget.show()
self.menu.append(menu_widget)
elif TrayEngine == "Qt":
menu_widget = QMenu(menu_name_string, self.menu)
self.menu.addMenu(menu_widget)
else:
menu_widget = None
menu_obj = [None, None, None]
menu_obj[iMenuNameId] = menu_name_id
menu_obj[iMenuWidget] = menu_widget
self.menu_indexes.append(menu_obj)
# -------------------------------------------------------------------------------------------
def addMenuAction(self, menu_name_id, act_name_id, act_name_string, is_check=False):
i = self.get_menu_index(menu_name_id)
if i < 0: return
menu_widget = self.menu_indexes[i][iMenuWidget]
if TrayEngine == "KDE":
act_widget = KAction(act_name_string, menu_widget)
act_widget.setCheckable(is_check)
menu_widget.addAction(act_widget)
elif TrayEngine == "AppIndicator":
menu_widget = menu_widget.get_submenu()
if is_check:
act_widget = Gtk.CheckMenuItem(act_name_string)
else:
act_widget = Gtk.ImageMenuItem(act_name_string)
act_widget.set_image(None)
act_widget.show()
menu_widget.append(act_widget)
elif TrayEngine == "Qt":
act_widget = QAction(act_name_string, menu_widget)
act_widget.setCheckable(is_check)
menu_widget.addAction(act_widget)
else:
act_widget = None
act_obj = [None, None, None, None]
act_obj[iActNameId] = act_name_id
act_obj[iActWidget] = act_widget
act_obj[iActParentMenuId] = menu_name_id
self.act_indexes.append(act_obj)
def addMenuSeparator(self, menu_name_id, sep_name_id):
i = self.get_menu_index(menu_name_id)
if i < 0: return
menu_widget = self.menu_indexes[i][iMenuWidget]
if TrayEngine == "KDE":
sep_widget = menu_widget.addSeparator()
elif TrayEngine == "AppIndicator":
menu_widget = menu_widget.get_submenu()
sep_widget = Gtk.SeparatorMenuItem()
sep_widget.show()
menu_widget.append(sep_widget)
elif TrayEngine == "Qt":
sep_widget = menu_widget.addSeparator()
else:
sep_widget = None
sep_obj = [None, None, None]
sep_obj[iSepNameId] = sep_name_id
sep_obj[iSepWidget] = sep_widget
sep_obj[iSepParentMenuId] = menu_name_id
self.sep_indexes.append(sep_obj)
#def addSubMenu(self, menu_name_id, new_menu_name_id, new_menu_name_string):
#menu_index = self.get_menu_index(menu_name_id)
#if menu_index < 0: return
#menu_widget = self.menu_indexes[menu_index][1]
##if TrayEngine == "KDE":
##new_menu_widget = KMenu(new_menu_name_string, self.menu)
##menu_widget.addMenu(new_menu_widget)
##elif TrayEngine == "AppIndicator":
##new_menu_widget = Gtk.MenuItem(new_menu_name_string)
##new_menu_widget.show()
##menu_widget.get_submenu().append(new_menu_widget)
##parent_menu_widget = Gtk.Menu()
##new_menu_widget.set_submenu(parent_menu_widget)
##else:
#if (1):
#new_menu_widget = QMenu(new_menu_name_string, self.menu)
#menu_widget.addMenu(new_menu_widget)
#self.menu_indexes.append([new_menu_name_id, new_menu_widget, menu_name_id])
# -------------------------------------------------------------------------------------------
def connect(self, act_name_id, act_func):
i = self.get_act_index(act_name_id)
if i < 0: return
act_widget = self.act_indexes[i][iActWidget]
if TrayEngine == "AppIndicator":
act_widget.connect("activate", self.gtk_call_func, act_name_id)
elif TrayEngine in ("KDE", "Qt"):
act_widget.triggered.connect(act_func)
self.act_indexes[i][iActFunc] = act_func
# -------------------------------------------------------------------------------------------
#def setActionChecked(self, act_name_id, yesno):
#index = self.get_act_index(act_name_id)
#if index < 0: return
#act_widget = self.act_indexes[index][1]
##if TrayEngine == "KDE":
##act_widget.setChecked(yesno)
##elif TrayEngine == "AppIndicator":
##if type(act_widget) != Gtk.CheckMenuItem:
##return # Cannot continue
##act_widget.set_active(yesno)
##else:
#if (1):
#act_widget.setChecked(yesno)
def setActionEnabled(self, act_name_id, yesno):
i = self.get_act_index(act_name_id)
if i < 0: return
act_widget = self.act_indexes[i][iActWidget]
if TrayEngine == "KDE":
act_widget.setEnabled(yesno)
elif TrayEngine == "AppIndicator":
act_widget.set_sensitive(yesno)
elif TrayEngine == "Qt":
act_widget.setEnabled(yesno)
def setActionIcon(self, act_name_id, icon):
i = self.get_act_index(act_name_id)
if i < 0: return
act_widget = self.act_indexes[i][iActWidget]
if TrayEngine == "KDE":
act_widget.setIcon(KIcon(icon))
elif TrayEngine == "AppIndicator":
if not isinstance(act_widget, Gtk.ImageMenuItem):
# Cannot use icons here
return
act_widget.set_image(Gtk.Image.new_from_icon_name(icon, Gtk.IconSize.MENU))
#act_widget.set_always_show_image(True)
elif TrayEngine == "Qt":
act_widget.setIcon(getIcon(icon))
def setActionText(self, act_name_id, text):
i = self.get_act_index(act_name_id)
if i < 0: return
act_widget = self.act_indexes[i][iActWidget]
if TrayEngine == "KDE":
act_widget.setText(text)
elif TrayEngine == "AppIndicator":
if isinstance(act_widget, Gtk.ImageMenuItem):
# Fix icon reset
last_icon = act_widget.get_image()
act_widget.set_label(text)
act_widget.set_image(last_icon)
else:
act_widget.set_label(text)
elif TrayEngine == "Qt":
act_widget.setText(text)
def setIcon(self, icon):
if TrayEngine == "KDE":
self.tray.setIconByPixmap(getIcon(icon))
#self.tray.setToolTipIconByPixmap(getIcon(icon))
elif TrayEngine == "AppIndicator":
self.tray.set_icon(icon)
elif TrayEngine == "Qt":
self.tray.setIcon(getIcon(icon))
def setToolTip(self, text):
if TrayEngine == "KDE":
self.tray.setToolTipSubTitle(text)
elif TrayEngine == "AppIndicator":
# ToolTips are disabled in App-Indicators by design
pass
elif TrayEngine == "Qt":
self.tray.setToolTip(text)
# -------------------------------------------------------------------------------------------
#def removeAction(self, act_name_id):
#index = self.get_act_index(act_name_id)
#if index < 0: return
#act_widget = self.act_indexes[index][1]
#parent_menu_widget = self.get_parent_menu_widget(self.act_indexes[index][2])
##if TrayEngine == "KDE":
##parent_menu_widget.removeAction(act_widget)
##elif TrayEngine == "AppIndicator":
##act_widget.hide()
##parent_menu_widget.remove(act_widget)
##else:
#if (1):
#parent_menu_widget.removeAction(act_widget)
#self.act_indexes.pop(index)
#def removeSeparator(self, sep_name_id):
#index = self.get_sep_index(sep_name_id)
#if index < 0: return
#sep_widget = self.sep_indexes[index][1]
#parent_menu_widget = self.get_parent_menu_widget(self.sep_indexes[index][2])
##if TrayEngine == "KDE":
##parent_menu_widget.removeAction(sep_widget)
##elif TrayEngine == "AppIndicator":
##sep_widget.hide()
##parent_menu_widget.remove(sep_widget)
##else:
#if (1):
#parent_menu_widget.removeAction(sep_widget)
#self.sep_indexes.pop(index)
#def removeMenu(self, menu_name_id):
#index = self.get_menu_index(menu_name_id)
#if index < 0: return
#menu_widget = self.menu_indexes[index][1]
#parent_menu_widget = self.get_parent_menu_widget(self.menu_indexes[index][2])
##if TrayEngine == "KDE":
##parent_menu_widget.removeAction(menu_widget.menuAction())
##elif TrayEngine == "AppIndicator":
##menu_widget.hide()
##parent_menu_widget.remove(menu_widget.get_submenu())
##else:
#if (1):
#parent_menu_widget.removeAction(menu_widget.menuAction())
#self.remove_actions_by_menu_name_id(menu_name_id)
#self.remove_separators_by_menu_name_id(menu_name_id)
#self.remove_submenus_by_menu_name_id(menu_name_id)
# -------------------------------------------------------------------------------------------
#def clearAll(self):
##if TrayEngine == "KDE":
##self.menu.clear()
##elif TrayEngine == "AppIndicator":
##for child in self.menu.get_children():
##self.menu.remove(child)
##else:
#if (1):
#self.menu.clear()
#self.act_indexes = []
#self.sep_indexes = []
#self.menu_indexes = []
#def clearMenu(self, menu_name_id):
#menu_index = self.get_menu_index(menu_name_id)
#if menu_index < 0: return
#menu_widget = self.menu_indexes[menu_index][1]
##if TrayEngine == "KDE":
##menu_widget.clear()
##elif TrayEngine == "AppIndicator":
##for child in menu_widget.get_submenu().get_children():
##menu_widget.get_submenu().remove(child)
##else:
#if (1):
#menu_widget.clear()
#list_of_submenus = [menu_name_id]
#for x in range(0, 10): # 10x level deep, should cover all cases...
#for this_menu_name_id, menu_widget, parent_menu_id in self.menu_indexes:
#if parent_menu_id in list_of_submenus and this_menu_name_id not in list_of_submenus:
#list_of_submenus.append(this_menu_name_id)
#for this_menu_name_id in list_of_submenus:
#self.remove_actions_by_menu_name_id(this_menu_name_id)
#self.remove_separators_by_menu_name_id(this_menu_name_id)
#self.remove_submenus_by_menu_name_id(this_menu_name_id)
# -------------------------------------------------------------------------------------------
def getTrayEngine(self):
return TrayEngine
def isTrayAvailable(self):
if TrayEngine in ("KDE", "Qt"):
# Ask Qt
return QSystemTrayIcon.isSystemTrayAvailable()
if TrayEngine == "AppIndicator":
# Ubuntu/Unity always has a systray
return True
return False
def handleQtCloseEvent(self, event):
if self.isTrayAvailable() and self._parent.isVisible():
event.accept()
self.__hideShowCall()
return
self.close()
QMainWindow.closeEvent(self._parent, event)
# -------------------------------------------------------------------------------------------
def show(self):
if not self._quit_added:
self._quit_added = True
if TrayEngine != "KDE":
self.addSeparator("_quit")
self.addAction("show", self._parent.tr("Minimize"))
self.addAction("quit", self._parent.tr("Quit"))
self.setActionIcon("quit", "application-exit")
self.connect("show", self.__hideShowCall)
self.connect("quit", self.__quitCall)
if TrayEngine == "KDE":
self.tray.setStatus(KStatusNotifierItem.Active)
elif TrayEngine == "AppIndicator":
self.tray.set_status(AppIndicator.IndicatorStatus.ACTIVE)
elif TrayEngine == "Qt":
self.tray.show()
def hide(self):
if TrayEngine == "KDE":
self.tray.setStatus(KStatusNotifierItem.Passive)
elif TrayEngine == "AppIndicator":
self.tray.set_status(AppIndicator.IndicatorStatus.PASSIVE)
elif TrayEngine == "Qt":
self.tray.hide()
def close(self):
if TrayEngine == "KDE":
self.menu.close()
elif TrayEngine == "AppIndicator":
if self._gtk_running:
self._gtk_running = False
Gtk.main_quit()
elif TrayEngine == "Qt":
self.menu.close()
def exec_(self, app):
self._app = app
if TrayEngine == "AppIndicator":
self._gtk_running = True
return Gtk.main()
else:
return app.exec_()
# -------------------------------------------------------------------------------------------
def get_act_index(self, act_name_id):
for i in range(len(self.act_indexes)):
if self.act_indexes[i][iActNameId] == act_name_id:
return i
else:
print("systray.py - Failed to get action index for %s" % act_name_id)
return -1
def get_sep_index(self, sep_name_id):
for i in range(len(self.sep_indexes)):
if self.sep_indexes[i][iSepNameId] == sep_name_id:
return i
else:
print("systray.py - Failed to get separator index for %s" % sep_name_id)
return -1
def get_menu_index(self, menu_name_id):
for i in range(len(self.menu_indexes)):
if self.menu_indexes[i][iMenuNameId] == menu_name_id:
return i
else:
print("systray.py - Failed to get menu index for %s" % menu_name_id)
return -1
#def get_parent_menu_widget(self, parent_menu_id):
#if parent_menu_id != None:
#menu_index = self.get_menu_index(parent_menu_id)
#if menu_index >= 0:
#return self.menu_indexes[menu_index][1]
#else:
#print("systray.py::Failed to get parent Menu widget for", parent_menu_id)
#return None
#else:
#return self.menu
#def remove_actions_by_menu_name_id(self, menu_name_id):
#h = 0
#for i in range(len(self.act_indexes)):
#act_name_id, act_widget, parent_menu_id, act_func = self.act_indexes[i - h]
#if parent_menu_id == menu_name_id:
#self.act_indexes.pop(i - h)
#h += 1
#def remove_separators_by_menu_name_id(self, menu_name_id):
#h = 0
#for i in range(len(self.sep_indexes)):
#sep_name_id, sep_widget, parent_menu_id = self.sep_indexes[i - h]
#if parent_menu_id == menu_name_id:
#self.sep_indexes.pop(i - h)
#h += 1
#def remove_submenus_by_menu_name_id(self, submenu_name_id):
#h = 0
#for i in range(len(self.menu_indexes)):
#menu_name_id, menu_widget, parent_menu_id = self.menu_indexes[i - h]
#if parent_menu_id == submenu_name_id:
#self.menu_indexes.pop(i - h)
#h += 1
# -------------------------------------------------------------------------------------------
def gtk_call_func(self, gtkmenu, act_name_id):
i = self.get_act_index(act_name_id)
if i < 0: return None
return self.act_indexes[i][iActFunc]
def qt_systray_clicked(self, reason):
if reason in (QSystemTrayIcon.DoubleClick, QSystemTrayIcon.Trigger):
self.__hideShowCall()
# -------------------------------------------------------------------------------------------
def __hideShowCall(self):
if self._parent.isVisible():
self.setActionText("show", self._parent.tr("Restore"))
self._parent.hide()
if self._app:
self._app.setQuitOnLastWindowClosed(False)
else:
self.setActionText("show", self._parent.tr("Minimize"))
if self._parent.isMaximized():
self._parent.showMaximized()
else:
self._parent.showNormal()
if self._app:
self._app.setQuitOnLastWindowClosed(True)
QTimer.singleShot(500, self.__raiseWindow)
def __quitCall(self):
if self._app:
self._app.setQuitOnLastWindowClosed(True)
self._parent.hide()
self._parent.close()
if self._app:
self._app.quit()
def __raiseWindow(self):
self._parent.activateWindow()
self._parent.raise_()
#--------------- main ------------------
if __name__ == '__main__':
from PyQt5.QtWidgets import QApplication, QDialog, QMessageBox
class ExampleGUI(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.setWindowIcon(getIcon("audacity"))
self.systray = GlobalSysTray(self, "Claudia", "claudia")
self.systray.addAction("about", self.tr("About"))
self.systray.setIcon("audacity")
self.systray.setToolTip("Demo systray app")
self.systray.connect("about", self.about)
self.systray.show()
def about(self):
QMessageBox.about(self, self.tr("About"), self.tr("Systray Demo"))
def done(self, r):
QDialog.done(self, r)
self.close()
def closeEvent(self, event):
self.systray.close()
QDialog.closeEvent(self, event)
app = QApplication(sys.argv)
gui = ExampleGUI()
gui.show()
sys.exit(gui.systray.exec_(app))
| gpl-2.0 | 3,553,723,061,951,008,000 | 33.624818 | 130 | 0.548571 | false |
Griffiths117/TG-s-IRC | client/IRClient.py | 1 | 4985 |
import socket, _thread, tkinter as tk, tkinter.ttk as ttk
from time import strftime, sleep
from tkinter import messagebox, simpledialog
#===========================================================================#
class BasicInputDialog:
def __init__(self,question,title=None,hideWindow=True):
if title == None:
title = PROGRAM_TITLE
self.master = tk.Tk()
self.string = ''
self.master.title(title)
self.frame = tk.Frame(self.master)
self.frame.pack()
self.acceptInput(question)
self.waitForInput()
try:
self.inputted = self.getText()
except Exception:
quit()
def acceptInput(self,question):
r = self.frame
k = ttk.Label(r,text=question)
k.grid(row=0,column=0)
self.e = ttk.Entry(r,width=30)
self.e.grid(row=1,columnspan=2)
self.e.focus_set()
b = ttk.Button(r,text='Enter',command=self.getText)
self.master.bind("<Return>", self.getText)
b.grid(row=0,column=1,padx=5,pady=5)
def getText(self,event=None):
self.string = self.e.get()
self.master.quit()
return self.string
def get(self):
self.master.destroy()
return self.inputted
def getString(self):
return self.string
def waitForInput(self):
self.master.mainloop()
#Main window application
class MainWindow(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.title(PROGRAM_TITLE)
self.resizable(0,0)
self.displayBox = tk.Text(self, width=100, font=THEME.font, bg=THEME.colors[3], fg=THEME.colors[0])
self.displayBox.pack()
self.displayBox.configure(state='disabled')
self.msgEntry = tk.Entry(self,width=100, font=THEME.font, bg=THEME.colors[3], fg=THEME.colors[1], insertbackground = THEME.colors[2])
self.msgEntry.pack()
self.bind("<Return>", self.sendText)
def sendText(self,event=None):
send(newMessage(self.msgEntry.get()).toString())
self.msgEntry.delete(0, 'end')
class Theme:
def __init__(self, font, colors):
self.colors = colors #Message,input,cursor,background
self.font = font
class Message:
#Static variables for formatting
sep = "§"
pref = "msg="
SUDO_PREF = "server="
#Initiate, if timestamp is not entered it will be current time
def __init__(self, sender, plainText, timestamp = None):
if timestamp == None:
timestamp = strftime("%d-%m-%Y %H:%M:%S")
self.plainText = plainText
self.sender = sender
self.timestamp = timestamp
#Sends to string object to be sent through socket
def toString(self):
return self.pref + self.sender + self.sep + self.timestamp + self.sep + self.plainText
#Turns recieved strings into messages: returns None if invalid.
def fromString(text):
if not text.startswith(Message.pref):
return Message("SERVER",text[len(Message.SUDO_PREF):]) if text.startswith(Message.SUDO_PREF) else None
data = text[len(Message.pref):].split(Message.sep,2)
return Message(data[0],data[2],data[1])
#Converts into display string
def toFormattedString(self):
return "["+self.timestamp + "] <" + self.sender + ">: "+self.plainText
#===========================================================================#
def send(msg):
try:
SEND_SOCKET.send(bytes(msg,'UTF-8'))
except:
print("Unable to send message")
def newMessage(msg):
return Message(NICKNAME, msg)
def waitForMessages(s,window):
    #This should be run in a separate thread: constantly receives new messages
sleep(0.5)
while True:
        #Receive message and convert to string
msg = s.recv(1024)
msg = str(msg, "UTF-8")
#Checking if message follows Message class format
m = Message.fromString(msg)
if m == None: continue
msg = m.toFormattedString()
#Show in window
writeTo(window.displayBox,msg)
def writeTo(textBox,msg):
textBox.configure(state='normal')
textBox.insert('end',msg)
textBox.configure(state='disabled')
textBox.see(tk.END)
def shutdownHook():
send("!DISCONNECT")
root.destroy()
quit()
#===========================================================================#
PROGRAM_TITLE = 'TG\'s IRC'
SERVER_IP = BasicInputDialog("Enter IP:").get()
NICKNAME = BasicInputDialog("Enter Nickname:").get()
THEME = Theme(("Consolas", 10), ['aqua', 'cyan', 'white', 'black'])
RECV_SOCKET = socket.socket()
RECV_SOCKET.connect((SERVER_IP, 20075))
SEND_SOCKET = socket.socket()
SEND_SOCKET.connect((SERVER_IP, 20074))
send("!nickname="+NICKNAME)
root = MainWindow()
_thread.start_new_thread(waitForMessages, (RECV_SOCKET,root,))
root.protocol("WM_DELETE_WINDOW", shutdownHook)
root.mainloop()
| mit | -6,780,789,818,688,822,000 | 29.956522 | 141 | 0.595907 | false |
xunilrj/sandbox | courses/MITx/MITx 6.86x Machine Learning with Python-From Linear Models to Deep Learning/project3/mnist/part2-twodigit/mlp.py | 1 | 2432 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from train_utils import batchify_data, run_epoch, train_model, Flatten
import utils_multiMNIST as U
path_to_data_dir = '../Datasets/'
use_mini_dataset = True
batch_size = 64
nb_classes = 10
nb_epoch = 30
num_classes = 10
img_rows, img_cols = 42, 28 # input image dimensions
class MLP(nn.Module):
def __init__(self, input_dimension):
super(MLP, self).__init__()
self.flatten = Flatten()
self.l1 = nn.Linear(input_dimension, 64)
self.o1 = nn.Linear(64, 10)
self.o2 = nn.Linear(64, 10)
self.model = nn.Sequential(
self.flatten,
self.l1,
)
def forward(self, x):
out = self.model(x)
out_first_digit = self.o1(out)
out_second_digit = self.o2(out)
return out_first_digit, out_second_digit
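# Shape sketch (added note): with batch_size 64 and 42x28 pixel inputs,
#   x: (64, 1, 42, 28) -> Flatten -> (64, 1176) -> l1 -> (64, 64)
#   -> two linear heads -> ((64, 10), (64, 10)), one output per digit.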
def main():
X_train, y_train, X_test, y_test = U.get_data(path_to_data_dir, use_mini_dataset)
# Split into train and dev
dev_split_index = int(9 * len(X_train) / 10)
X_dev = X_train[dev_split_index:]
y_dev = [y_train[0][dev_split_index:], y_train[1][dev_split_index:]]
X_train = X_train[:dev_split_index]
y_train = [y_train[0][:dev_split_index], y_train[1][:dev_split_index]]
permutation = np.array([i for i in range(len(X_train))])
np.random.shuffle(permutation)
X_train = [X_train[i] for i in permutation]
y_train = [[y_train[0][i] for i in permutation], [y_train[1][i] for i in permutation]]
# Split dataset into batches
train_batches = batchify_data(X_train, y_train, batch_size)
dev_batches = batchify_data(X_dev, y_dev, batch_size)
test_batches = batchify_data(X_test, y_test, batch_size)
# Load model
input_dimension = img_rows * img_cols
model = MLP(input_dimension) # TODO add proper layers to MLP class above
# Train
train_model(train_batches, dev_batches, model)
## Evaluate the model on test data
loss, acc = run_epoch(test_batches, model.eval(), None)
print('Test loss1: {:.6f} accuracy1: {:.6f} loss2: {:.6f} accuracy2: {:.6f}'.format(loss[0], acc[0], loss[1], acc[1]))
if __name__ == '__main__':
# Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx
np.random.seed(12321) # for reproducibility
torch.manual_seed(12321) # for reproducibility
main()
| apache-2.0 | 6,854,202,822,458,918,000 | 33.253521 | 126 | 0.636924 | false |
TraceContext/tracecontext-spec | test/server.py | 1 | 3239 |
from aiohttp import ClientSession, ClientTimeout, ContentTypeError, web
from multidict import MultiDict
class AsyncTestServer(object):
scopes = {}
def __init__(self, host, port, timeout = 5):
self.host = host
self.port = port
self.timeout = ClientTimeout(total = timeout)
self.app = web.Application()
self.app.add_routes([
web.post('/{scope}', self.scope_handler),
])
async def start(self):
self.runner = web.AppRunner(self.app)
await self.runner.setup()
self.site = web.TCPSite(self.runner, self.host, self.port)
await self.site.start()
print('harness listening on http://%s:%s'%(self.host, self.port))
async def stop(self):
await self.runner.cleanup()
async def scope_handler(self, request):
scope_id = request.match_info['scope'].split('.', maxsplit = 1)
callback_id = None if len(scope_id) == 1 else scope_id[1]
scope_id = scope_id[0]
arguments = await request.json()
scope = None
if callback_id:
scope = self.scopes[scope_id]
scope[callback_id] = {
'headers': list(request.headers.items()),
'arguments': arguments,
}
else:
scope = {
'headers': list(request.headers.items()),
'arguments': arguments,
'results': [],
}
self.scopes[scope_id] = scope
if not arguments:
return web.json_response(None)
if not isinstance(arguments, list):
arguments = [arguments]
for action in arguments:
headers = [['Accept', 'application/json']]
if 'headers' in action:
headers += action['headers']
async with ClientSession(headers = headers, timeout = self.timeout) as session:
arguments = []
if 'arguments' in action:
arguments = action['arguments'] or []
result = {}
result['url'] = action['url']
scope['results'].append(result)
try:
async with session.post(action['url'], json = arguments) as response:
result['status'] = response.status
result['headers'] = list(response.headers.items())
result['body'] = await response.json(content_type = 'application/json')
except ContentTypeError as err:
result['body'] = await response.text()
except Exception as err:
result['exception'] = type(err).__name__
result['msg'] = str(err)
if not callback_id:
del self.scopes[scope_id]
return web.json_response(scope)
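# Illustrative action payload for scope_handler (keys match those read above;
# the URL and traceparent values are made up for the example):
#   [{"url": "http://127.0.0.1:7777/other_scope.cb1",
#     "headers": [["traceparent", "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"]],
#     "arguments": []}]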
class TestServer(object):
def __init__(self, host, port, timeout = 5):
import asyncio
from threading import Thread
self.loop = asyncio.get_event_loop()
self.server = AsyncTestServer(host, port, timeout)
self.thread = Thread(target = self.monitor)
self.run = True
def monitor(self):
import asyncio
while self.run:
self.loop.run_until_complete(asyncio.sleep(0.2))
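    # monitor() keeps the event loop ticking in 200 ms slices from a worker
    # thread so the asyncio server stays responsive while the main thread
    # blocks (e.g. on input() below); it assumes only one thread calls
    # run_until_complete at a time.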
def start(self):
self.loop.run_until_complete(self.server.start())
self.thread.start()
def stop(self):
self.run = False
self.thread.join()
self.loop.run_until_complete(self.server.stop())
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
if __name__ == '__main__':
import sys
host = '127.0.0.1'
port = 7777
if len(sys.argv) >= 2:
host = sys.argv[1]
if len(sys.argv) >= 3:
port = int(sys.argv[2])
with TestServer(host = host, port = port) as server:
input('Press Enter to quit...')
| apache-2.0 | -567,431,452,355,279,040 | 27.663717 | 82 | 0.661624 | false |
kichkasch/pisi | pisiconstants.py | 1 | 4235 | """
Module for definition of shared constants between the modules.
This file is part of Pisi.
Pisi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pisi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pisi. If not, see <http://www.gnu.org/licenses/>
"""
PISI_NAME = 'PISI'
"""'About'-information for user - program name"""
PISI_COMMENTS = "PISI is synchronizing information"
"""'About'-information for user - comments / explainations"""
PISI_VERSION = '0.5.3' #'-svn-' #
"""'About'-information for user - current version"""
FILEPATH_COPYING = "/opt/pisi/COPYING"
"""'About'-information for user - where to find the 'licence' file"""
PISI_AUTHORS = ["Esben Damgaard","Michael Pilgermann"]
"""'About'-information for user - list of programmers"""
PISI_HOMEPAGE = "http://freshmeat.net/projects/pisiom"
"""'About'-information for user - program home page"""
PISI_TRANSLATOR_CREDITS = None
"""'About'-information for user - list of translators"""
PISI_DOCUMENTERS = ['Michael Pilgermann']
"""'About'-information for user - list of documenters"""
CONSOLE_PROGRESSBAR_WIDTH = 80
"""Length of progress bar in CLI mode"""
MODE_CALENDAR = 0
"""Type of sources to deal with are calendars"""
MODE_CONTACTS = 1
"""Type of sources to deal with are contacts"""
MODE_STRINGS = ['calendar', 'contacts']
"""Names for the types of sources in order"""
MERGEMODE_SKIP = 0
"""Resolve conflicts between two entries from two sources by skipping the entry"""
MERGEMODE_FLUSH_A = 1
"""Resolve conflicts between two entries from two sources by flushing the entire data repository for the first data source"""
MERGEMODE_FLUSH_B = 2
"""Resolve conflicts between two entries from two sources by flushing the entire data repository for the second data source"""
MERGEMODE_OVERWRITE_A = 3
"""Resolve conflicts between two entries from two sources by overwriting the single entry on the first data source"""
MERGEMODE_OVERWRITE_B = 4
"""Resolve conflicts between two entries from two sources by overwriting the single entry on the second data source"""
MERGEMODE_MANUALCONFIRM = 5
"""Resolve conflicts between two entries from two sources by asking the user for decision for every single entry"""
MERGEMODE_STRINGS = ["Skip", "Flush source 1", "Flush source 2", "Overwrite entry in source 1", "Overwrite entry in source 2", "Manual confirmation"]
"""Names of merge modes in order"""
ACTIONID_ADD = 0
"""Entry in the history of activities for synchronization modules - here for ADD"""
ACTIONID_DELETE = 1
"""Entry in the history of activities for synchronization modules - here for DELETE"""
ACTIONID_MODIFY = 2
"""Entry in the history of activities for synchronization modules - here for MODIFY"""
GOOGLE_CONTACTS_APPNAME = "pisi" + PISI_VERSION
"""application name to use for connecting against google contacts services"""
GOOGLE_CONTACTS_MAXRESULTS = 1000
"""upper limit of result set when querying google contacts api"""
GOOGLE_CALENDAR_APPNAME = "pisi" + PISI_VERSION
"""application name to use for connecting against google calendar services"""
GOOGLE_CALENDAR_MAXRESULTS = GOOGLE_CONTACTS_MAXRESULTS
"""upper limit of result set when querying google calendar api"""
FILEDOWNLOAD_TIMEOUT = 10
"""Timeout for socket opeations (e.g. http download) in seconds - None for disable"""
FILEDOWNLOAD_TMPFILE = "/tmp/pisi-remotebuffer.data"
"""Temporary file for buffering information from remote file sources"""
VCF_BYTES_PER_ENTRY = 200
"""For guessing the number of entries inside a VCF file by evaluating its size we need an estimation of the size for a single entry - for the purpose of showing some progress"""
ICS_BYTES_PER_ENTRY = 200
"""For guessing the number of entries inside an ICS file by evaluating its size we need an estimation of the size for a single entry - for the purpose of showing some progress"""
| gpl-3.0 | -7,278,563,369,972,107,000 | 48.244186 | 178 | 0.756789 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_scaler_adder_example.py | 1 | 5419 | """ Tests the scaler/adder example in our docs. This test was inconvenient to test
in its place in the docs. """
# pylint: disable-msg=C0111,C0103
import unittest
from openmdao.lib.datatypes.api import Float
from openmdao.lib.drivers.api import SLSQPdriver
from openmdao.main.api import Assembly,Component
from openmdao.main.test.simpledriver import SimpleDriver
from openmdao.util.testutil import assert_rel_error
class Paraboloid_scale(Component):
""" Evaluates the equation f(x,y) = (1000*x-3)^2 + (1000*x)*(0.01*y) + (0.01*y+4)^2 - 3 """
# set up interface to the framework
# pylint: disable-msg=E1101
x = Float(0.0, iotype='in', desc='The variable x')
y = Float(0.0, iotype='in', desc='The variable y')
f_xy = Float(iotype='out', desc='F(x,y)')
def execute(self):
"""f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
Optimal solution (minimum): x = 0.0066666666666666671; y = -733.33333333333337
"""
x = self.x
y = self.y
self.f_xy = (1000.*x-3.)**2 + (1000.*x)*(0.01*y) + (0.01*y+4.)**2 - 3.
#print "Executing, %.33f, %.33f, %.33f" % (x, y, self.f_xy)
class OptimizationUnconstrainedScale(Assembly):
"""Unconstrained optimization of the unscaled Paraboloid Component."""
def configure(self):
""" Creates a new Assembly containing an unscaled Paraboloid and an optimizer"""
# Create Optimizer instance
self.add('driver', SLSQPdriver())
# Create Paraboloid component instances
self.add('paraboloid', Paraboloid_scale())
# Driver process definition
self.driver.workflow.add('paraboloid')
        # SLSQP Flags
self.driver.iprint = 0
# Objective
self.driver.add_objective('paraboloid.f_xy')
# Design Variables
self.driver.add_parameter('paraboloid.x', low=-1000., high=1000., scaler=0.001)
self.driver.add_parameter('paraboloid.y', low=-1000., high=1000., scaler=1000.0)
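        # Assuming the classic-OpenMDAO mapping model_value = scaler *
        # driver_value (consistent with the optima quoted in the docstrings),
        # the optimizer works on well-conditioned values: at the solution
        # x = 0.006667, y = -733.33 it sees about 6.67 and -0.73, both O(1).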
class Paraboloid_shift(Component):
""" Evaluates the equation f(x,y) = (1000*x-3)^2 + (1000*x)*(0.01*(y+1000)) + (0.01*(y+1000)+4)^2 - 3 """
# set up interface to the framework
# pylint: disable-msg=E1101
x = Float(0.0, iotype='in', desc='The variable x')
y = Float(0.0, iotype='in', desc='The variable y')
f_xy = Float(iotype='out', desc='F(x,y)')
def execute(self):
"""f(x,y) = (1000*x-3)^2 + (1000*x)*(0.01*(y+1000)) + (0.01*(y+1000)+4)^2 - 3
Optimal solution (minimum): x = 0.0066666666666666671; y = -1733.33333333333337
"""
x = self.x
y = self.y
self.f_xy = (1000*x-3)**2 + (1000*x)*(0.01*(y+1000)) + (0.01*(y+1000)+4)**2 - 3
class OptimizationUnconstrainedScaleShift(Assembly):
"""Unconstrained optimization of the Paraboloid Component."""
def configure(self):
""" Creates a new Assembly containing a Paraboloid and an optimizer"""
# pylint: disable-msg=E1101
# Create Optimizer instance
self.add('driver', SLSQPdriver())
# Create Paraboloid component instances
self.add('paraboloid', Paraboloid_shift())
# Driver process definition
self.driver.workflow.add('paraboloid')
        # SLSQP Flags
self.driver.iprint = 0
# Objective
self.driver.add_objective('paraboloid.f_xy')
# Design Variables
self.driver.add_parameter('paraboloid.x', low=-1000000., high=1000000.,
scaler=0.001)
self.driver.add_parameter('paraboloid.y', low=-1000000., high=1000000.,
scaler=1000.0, adder=-1000.0)
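        # With an adder the assumed mapping becomes model_value = scaler *
        # driver_value + adder, so the shifted optimum y = -1733.33 appears to
        # the optimizer as (-1733.33 - (-1000.0)) / 1000.0 = -0.73, matching
        # the unshifted problem above.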
class ScalerAdderExampleTestCase(unittest.TestCase):
def test_scale(self):
opt_problem = OptimizationUnconstrainedScale()
opt_problem.run()
assert_rel_error(self, opt_problem.paraboloid.x, 0.006667, 0.001)
assert_rel_error(self, opt_problem.paraboloid.y, -733.333313, 0.001)
J = opt_problem.driver.calc_gradient()
Jdict = opt_problem.driver.calc_gradient(return_format='dict')
def test_scale_gradients(self):
opt_problem = OptimizationUnconstrainedScale()
opt_problem.replace('driver', SimpleDriver())
opt_problem.run()
J = opt_problem.driver.calc_gradient()
Jdict = opt_problem.driver.calc_gradient(return_format='dict')
# Make sure untransforming works for dicts too
self.assertTrue(J[0][0] == Jdict['_pseudo_0.out0']['paraboloid.x'])
self.assertTrue(J[0][1] == Jdict['_pseudo_0.out0']['paraboloid.y'])
Jfddict = opt_problem.driver.calc_gradient(mode='fd', return_format='dict')
opt_problem.driver.run_iteration()
Jfd = opt_problem.driver.calc_gradient(mode='fd')
# Make sure untransforming works for dicts too
self.assertTrue(Jfd[0][0] == Jfddict['_pseudo_0.out0']['paraboloid.x'])
self.assertTrue(Jfd[0][1] == Jfddict['_pseudo_0.out0']['paraboloid.y'])
def test_scale_adder(self):
opt_problem = OptimizationUnconstrainedScaleShift()
opt_problem.run()
assert_rel_error(self, opt_problem.paraboloid.x, 0.006667, 0.001)
assert_rel_error(self, opt_problem.paraboloid.y, -1733.333313, 0.001)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -6,789,788,535,626,708,000 | 33.737179 | 114 | 0.60823 | false |
ruuk/service.xbmc.tts | enabler.py | 1 | 3477 | # -*- coding: utf-8 -*-
import os, sys, xbmc, xbmcaddon
DISABLE_PATH = os.path.join(xbmc.translatePath('special://profile').decode('utf-8'), 'addon_data', 'service.xbmc.tts', 'DISABLED')
ENABLE_PATH = os.path.join(xbmc.translatePath('special://profile').decode('utf-8'), 'addon_data', 'service.xbmc.tts', 'ENABLED')
def getXBMCVersion():
import json
resp = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "Application.GetProperties", "params": {"properties": ["version", "name"]}, "id": 1 }')
data = json.loads(resp)
if not 'result' in data: return None
if not 'version' in data['result']: return None
return data['result']['version']
BASE = '{ "jsonrpc": "2.0", "method": "Addons.SetAddonEnabled", "params": { "addonid": "service.xbmc.tts","enabled":%s}, "id": 1 }'
def enableAddon():
if os.path.exists(DISABLE_PATH):
os.remove(DISABLE_PATH)
markPreOrPost(enable=True)
if isPostInstalled():
if addonIsEnabled():
xbmc.executebuiltin('RunScript(service.xbmc.tts)')
else:
xbmc.executeJSONRPC(BASE % 'true') #So enable it instead
else:
xbmc.executebuiltin('RunScript(service.xbmc.tts)')
def disableAddon():
if os.path.exists(ENABLE_PATH):
os.remove(ENABLE_PATH)
markPreOrPost(disable=True)
if isPostInstalled():
version = getXBMCVersion()
if not version or version['major'] < 13: return #Disabling in this manner crashes on Frodo
xbmc.executeJSONRPC(BASE % 'false') #Try to disable it
#if res and 'error' in res: #If we have an error, it's already disabled
#print res
def markPreOrPost(enable=False, disable=False):
if os.path.exists(ENABLE_PATH) or enable:
with open(ENABLE_PATH, 'w') as f:
f.write(isPostInstalled() and 'POST' or 'PRE')
if os.path.exists(DISABLE_PATH) or disable:
with open(DISABLE_PATH, 'w') as f:
f.write(isPostInstalled() and 'POST' or 'PRE')
def addonIsEnabled():
if os.path.exists(DISABLE_PATH):
return False
if isPostInstalled():
import json
resp = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 1, "method": "Addons.GetAddonDetails", "params": {"addonid":"service.xbmc.tts","properties": ["name","version","enabled"]}}')
data = json.loads(resp)
if not 'result' in data: return False
if not 'addon' in data['result']: return False
if not 'enabled' in data['result']['addon']: return False
return data['result']['addon']['enabled']
else:
return True
def toggleEnabled():
try:
if not addonIsEnabled(): raise Exception('Addon Disabled')
xbmcaddon.Addon('service.xbmc.tts')
xbmc.log('service.xbmc.tts: DISABLING')
xbmc.executebuiltin('XBMC.RunScript(service.xbmc.tts,key.SHUTDOWN)')
except:
xbmc.log('service.xbmc.tts: ENABLING')
enableAddon()
def reset():
if not addonIsEnabled(): return
disableAddon()
ct=0
while addonIsEnabled() and ct < 11:
xbmc.sleep(500)
ct+=1
enableAddon()
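# reset() above polls addonIsEnabled() for up to ~5.5 s (11 iterations x
# 500 ms) waiting for XBMC to report the add-on disabled before re-enabling.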
def isPostInstalled():
homePath = xbmc.translatePath('special://home').decode('utf-8')
postInstalledPath = os.path.join(homePath, 'addons', 'service.xbmc.tts')
return os.path.exists(postInstalledPath)
if __name__ == '__main__':
arg = None
if len(sys.argv) > 1: arg = sys.argv[1]
if arg == 'RESET':
reset()
else:
toggleEnabled() | gpl-2.0 | 9,017,643,453,152,437,000 | 32.12381 | 187 | 0.625252 | false |
uclouvain/osis | base/models/session_exam_deadline.py | 1 | 4136 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from django.db import models
from base.models.enums import number_session
from base.signals.publisher import compute_student_score_encoding_deadline
from osis_common.models.osis_model_admin import OsisModelAdmin
class SessionExamDeadlineAdmin(OsisModelAdmin):
list_display = ('offer_enrollment', 'deadline', 'deadline_tutor', 'number_session', 'changed')
list_filter = ('number_session',)
raw_id_fields = ('offer_enrollment',)
search_fields = ['offer_enrollment__student__person__first_name', 'offer_enrollment__student__person__last_name',
'offer_enrollment__student__registration_id', 'offer_enrollment__education_group_year__acronym']
class SessionExamDeadline(models.Model):
external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
changed = models.DateTimeField(null=True, auto_now=True)
deadline = models.DateField()
deliberation_date = models.DateField(blank=True, null=True)
deadline_tutor = models.IntegerField(null=True, blank=True) # Delta day(s)
number_session = models.IntegerField(choices=number_session.NUMBERS_SESSION)
offer_enrollment = models.ForeignKey('OfferEnrollment', on_delete=models.CASCADE)
__original_deliberation_date = None
def __init__(self, *args, **kwargs):
super(SessionExamDeadline, self).__init__(*args, **kwargs)
self.__original_deliberation_date = self.deliberation_date
def save(self, *args, **kwargs):
super(SessionExamDeadline, self).save(*args, **kwargs)
if self.deliberation_date != self.__original_deliberation_date:
compute_student_score_encoding_deadline.send(sender=self.__class__, session_exam_deadline=self)
@property
def deadline_tutor_computed(self):
return compute_deadline_tutor(self.deadline, self.deadline_tutor)
@property
def is_deadline_reached(self):
return self.deadline < datetime.date.today()
@property
def is_deadline_tutor_reached(self):
if self.deadline_tutor_computed:
return self.deadline_tutor_computed < datetime.date.today()
return self.is_deadline_reached
def __str__(self):
return u"%s-%s" % (self.offer_enrollment, self.number_session)
def compute_deadline_tutor(deadline, deadline_tutor):
if deadline_tutor is not None:
return deadline - datetime.timedelta(days=deadline_tutor)
return None
def filter_by_nb_session(nb_session):
return SessionExamDeadline.objects.filter(number_session=nb_session)
def get_by_offer_enrollment_nb_session(offer_enrollment, nb_session):
try:
return SessionExamDeadline.objects.get(offer_enrollment=offer_enrollment.id,
number_session=nb_session)
except SessionExamDeadline.DoesNotExist:
return None
| agpl-3.0 | -3,591,662,348,334,433,300 | 42.072917 | 117 | 0.685127 | false |
eharney/cinder | cinder/tests/unit/volume/drivers/dell_emc/sc/test_fc.py | 1 | 43968 | # Copyright (c) 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.volume.drivers.dell_emc.sc import storagecenter_api
from cinder.volume.drivers.dell_emc.sc import storagecenter_fc
# We patch these here as they are used by every test, to keep the tests
# from trying to contact a Dell Storage Center.
@mock.patch.object(storagecenter_api.HttpClient,
'__init__',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'open_connection')
@mock.patch.object(storagecenter_api.SCApi,
'close_connection')
class DellSCSanFCDriverTestCase(test.TestCase):
VOLUME = {u'instanceId': u'64702.4829',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 4831,
u'objectType': u'ScVolume',
u'index': 4829,
u'volumeFolderPath': u'dopnstktst/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'statusMessage': u'',
u'status': u'Down',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'opnstktst',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe0000000000000012df',
u'active': False,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-000012df',
u'replayAllowed': False,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'opnstktst/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'opnstktst',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell EMC Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
MAPPING = {u'instanceId': u'64702.2183',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'lunUsed': [1],
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.4829',
u'instanceName':
u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'objectType': u'ScVolume'},
u'connectivity': u'Up',
u'readOnly': False,
u'objectType': u'ScMappingProfile',
u'hostCache': False,
u'mappedVia': u'Server',
u'mapCount': 2,
u'instanceName': u'4829-47',
u'lunRequested': u'N/A'
}
def setUp(self):
super(DellSCSanFCDriverTestCase, self).setUp()
        # configuration is a mock. A mock is pretty much a blank
        # slate. I believe mocks done in setUp are not happy-time
        # mocks. So we just do a few things like driver config here.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "pwd"
self.configuration.dell_sc_ssn = 64702
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
self.configuration.dell_sc_api_port = 3033
self._context = context.get_admin_context()
self.driver = storagecenter_fc.SCFCDriver(
configuration=self.configuration)
self.driver.do_setup(None)
self.driver._stats = {'QoS_support': False,
'volume_backend_name': 'dell-1',
'free_capacity_gb': 12123,
'driver_version': '1.0.1',
'total_capacity_gb': 12388,
'reserved_percentage': 0,
'vendor_name': 'Dell',
'storage_protocol': 'FC'}
# Start with none. Add in the specific tests later.
# Mock tests bozo this.
self.driver.backends = None
self.driver.replication_enabled = False
self.volid = '5729f1db-4c45-416c-bc15-c8ea13a4465d'
self.volume_name = "volume" + self.volid
self.connector = {'ip': '192.168.0.77',
'host': 'cinderfc-vm',
'wwnns': ['20000024ff30441c', '20000024ff30441d'],
'initiator': 'iqn.1993-08.org.debian:01:e1b1312f9e1',
'wwpns': ['21000024ff30441c', '21000024ff30441d']}
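        # This connector mirrors what Nova hands to the driver: 'wwnns' are
        # the host's FC node WWNs and 'wwpns' its port WWNs (one per HBA);
        # the iSCSI-style 'initiator' key is carried along but presumably
        # ignored by the FC code paths under test.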
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'create_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
def test_initialize_connection(self,
mock_find_wwns,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_discovered': True,
'target_lun': 1,
'target_wwn':
[u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
        # verify find_volume and get_volume were each called exactly once
mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False)
mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_fc.SCFCDriver,
'_is_live_vol')
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns')
@mock.patch.object(storagecenter_fc.SCFCDriver,
'initialize_secondary')
@mock.patch.object(storagecenter_api.SCApi,
'get_live_volume')
def test_initialize_connection_live_vol(self,
mock_get_live_volume,
mock_initialize_secondary,
mock_find_wwns,
mock_is_live_volume,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102,
'secondaryRole': 'Secondary'}
mock_is_live_volume.return_value = True
mock_find_wwns.return_value = (
1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_initialize_secondary.return_value = (
1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'],
{u'21000024FF30441E': [u'5000D31000FCBE36'],
u'21000024FF30441F': [u'5000D31000FCBE3E']})
mock_get_live_volume.return_value = sclivevol
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D'],
u'21000024FF30441E': [u'5000D31000FCBE36'],
u'21000024FF30441F': [u'5000D31000FCBE3E']},
'target_discovered': True,
'target_lun': 1,
'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35',
u'5000D31000FCBE3E', u'5000D31000FCBE36']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
        # verify find_volume and get_volume were each called exactly once
mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, True)
mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume')
@mock.patch.object(storagecenter_api.SCApi,
'get_volume')
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_fc.SCFCDriver,
'_is_live_vol')
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns')
@mock.patch.object(storagecenter_fc.SCFCDriver,
'initialize_secondary')
@mock.patch.object(storagecenter_api.SCApi,
'get_live_volume')
def test_initialize_connection_live_vol_afo(self,
mock_get_live_volume,
mock_initialize_secondary,
mock_find_wwns,
mock_is_live_volume,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'}
scvol = {'instanceId': '102.101'}
mock_find_volume.return_value = scvol
mock_get_volume.return_value = scvol
connector = self.connector
sclivevol = {'instanceId': '101.10001',
'primaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'primaryScSerialNumber': 102,
'secondaryVolume': {'instanceId': '101.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 101,
'secondaryRole': 'Activated'}
mock_is_live_volume.return_value = True
mock_find_wwns.return_value = (
1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_get_live_volume.return_value = sclivevol
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_discovered': True,
'target_lun': 1,
'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
        # verify find_volume and get_volume were each called once; since the
        # live volume is already activated, initialize_secondary is skipped
self.assertFalse(mock_initialize_secondary.called)
mock_find_volume.assert_called_once_with(
fake.VOLUME_ID, '101.101', True)
mock_get_volume.assert_called_once_with('102.101')
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_no_wwns(self,
mock_find_wwns,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'create_server',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_no_server(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_vol_not_found(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_map_vol_fail(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where map_volume returns None (no mappings)
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
def test_initialize_secondary(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret)
mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
self.assertEqual(find_wwns_ret, ret)
def test_initialize_secondary_create_server(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=None)
mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret)
mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
self.assertEqual(find_wwns_ret, ret)
def test_initialize_secondary_no_server(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=None)
mock_api.create_server = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
def test_initialize_secondary_map_fail(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
def test_initialize_secondary_vol_not_found(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
mock_api.get_volume = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'unmap_volume',
return_value=True)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(storagecenter_api.SCApi,
'get_volume_count',
return_value=1)
def test_terminate_connection(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
expected = {'driver_volume_type': 'fibre_channel',
'data': {}}
self.assertEqual(expected, res, 'Unexpected return data')
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'unmap_volume',
return_value=True)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(storagecenter_api.SCApi,
'get_volume_count',
return_value=1)
@mock.patch.object(storagecenter_fc.SCFCDriver,
'_is_live_vol')
@mock.patch.object(storagecenter_fc.SCFCDriver,
'terminate_secondary')
def test_terminate_connection_live_vol(self,
mock_terminate_secondary,
mock_is_live_vol,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
mock_terminate_secondary.return_value = (None, [], {})
mock_is_live_vol.return_value = True
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
expected = {'driver_volume_type': 'fibre_channel',
'data': {}}
self.assertEqual(expected, res, 'Unexpected return data')
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'unmap_volume',
return_value=True)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(storagecenter_api.SCApi,
'get_volume_count',
return_value=1)
def test_terminate_connection_no_server(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'unmap_volume',
return_value=True)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(storagecenter_api.SCApi,
'get_volume_count',
return_value=1)
def test_terminate_connection_no_volume(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'unmap_volume',
return_value=True)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None,
[],
{}))
@mock.patch.object(storagecenter_api.SCApi,
'get_volume_count',
return_value=1)
def test_terminate_connection_no_wwns(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.terminate_connection(volume, connector)
expected = {'driver_volume_type': 'fibre_channel',
'data': {}}
self.assertEqual(expected, res, 'Unexpected return data')
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'unmap_volume',
return_value=False)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(storagecenter_api.SCApi,
'get_volume_count',
return_value=1)
def test_terminate_connection_failure(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'unmap_volume',
return_value=True)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(storagecenter_api.SCApi,
'get_volume_count',
return_value=0)
def test_terminate_connection_vol_count_zero(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get_volume_count is zero
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
expected = {'data':
{'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_wwn':
[u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
def test_terminate_secondary(self,
mock_close_connection,
mock_open_connection,
mock_init):
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME)
mock_api.find_wwns = mock.MagicMock(return_value=(None, [], {}))
mock_api.unmap_volume = mock.MagicMock(return_value=True)
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
ret = self.driver.terminate_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
@mock.patch.object(storagecenter_api.SCApi,
'get_storage_usage',
return_value={'availableSpace': 100, 'freeSpace': 50})
def test_update_volume_stats_with_refresh(self,
mock_get_storage_usage,
mock_close_connection,
mock_open_connection,
mock_init):
stats = self.driver.get_volume_stats(True)
self.assertEqual('FC', stats['storage_protocol'])
mock_get_storage_usage.assert_called_once_with()
@mock.patch.object(storagecenter_api.SCApi,
'get_storage_usage',
return_value={'availableSpace': 100, 'freeSpace': 50})
def test_get_volume_stats_no_refresh(self,
mock_get_storage_usage,
mock_close_connection,
mock_open_connection,
mock_init):
stats = self.driver.get_volume_stats(False)
self.assertEqual('FC', stats['storage_protocol'])
mock_get_storage_usage.assert_not_called()
| apache-2.0 | -1,594,430,307,689,691,000 | 48.513514 | 79 | 0.455854 | false |
mabhub/Geotrek | geotrek/settings/base.py | 1 | 17906 | import os
import sys
from django.contrib.messages import constants as messages
from geotrek import __version__
from . import PROJECT_ROOT_PATH
def gettext_noop(s):
return s
DEBUG = False
TEMPLATE_DEBUG = DEBUG
TEST = 'test' in sys.argv
VERSION = __version__
ADMINS = (
('Makina Corpus', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'OPTIONS': {},
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
#
# PostgreSQL Schemas for apps and models.
#
# Caution: editing this setting might not be enough.
# Indeed, it won't apply to apps that are not managed by South, nor to database views and functions.
# See all sql/*-schemas.sql files in each Geotrek app.
#
DATABASE_SCHEMAS = {
'default': 'geotrek',
'auth': 'django',
'django': 'django',
'easy_thumbnails': 'django',
'south': 'django',
'feedback': 'gestion',
'infrastructure': 'gestion',
'maintenance': 'gestion',
'tourism': 'tourisme',
'trekking': 'rando',
'zoning': 'zonage',
'land': 'foncier',
}
DATABASES['default']['OPTIONS'] = {
'options': '-c search_path=public,%s' % ','.join(set(DATABASE_SCHEMAS.values()))
}
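# For illustration, the generated libpq option is a single search_path across
# every schema above; set ordering is arbitrary, but it will resemble:
#   '-c search_path=public,geotrek,django,gestion,tourisme,rando,zonage,foncier'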
#
# Authentication
#
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
AUTH_PROFILE_MODULE = 'authent.UserProfile'
# Settings required for geotrek.authent.backend.DatabaseBackend :
AUTHENT_DATABASE = None
AUTHENT_TABLENAME = None
AUTHENT_GROUPS_MAPPING = {
'PATH_MANAGER': 1,
'TREKKING_MANAGER': 2,
'EDITOR': 3,
'READER': 4,
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'fr'
MODELTRANSLATION_DEFAULT_LANGUAGE = LANGUAGE_CODE
LANGUAGES = (
('en', gettext_noop('English')),
('fr', gettext_noop('French')),
('it', gettext_noop('Italian')),
('es', gettext_noop('Spanish')),
)
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT_PATH, 'locale'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DATE_INPUT_FORMATS = ('%d/%m/%Y',)
ROOT_URL = ''
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT_PATH, 'media')
UPLOAD_DIR = 'upload' # media root subdir
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
MEDIA_URL_SECURE = '/media_secure/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT_PATH, 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
COMPRESSOR_ENABLED = False
COMPRESS_PARSER = 'compressor.parser.HtmlParser'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'public_key'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'geotrek.authent.middleware.LocaleForcedMiddleware',
'django.middleware.locale.LocaleMiddleware',
'geotrek.common.middleware.APILocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'geotrek.authent.middleware.CorsMiddleware',
'mapentity.middleware.AutoLoginMiddleware'
)
ROOT_URLCONF = 'geotrek.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geotrek.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'mapentity.context_processors.settings',
)
#
# /!\ Application names (last levels) must be unique
# (c.f. auth/authent)
# https://code.djangoproject.com/ticket/12288
#
PROJECT_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.gis',
)
# Do not migrate translated fields, they differ per instance, and
# can be added/removed using `update_translation_fields`
if 'schemamigration' not in sys.argv:
PROJECT_APPS += ('modeltranslation',)
PROJECT_APPS += (
'south',
'leaflet',
'floppyforms',
'crispy_forms',
'compressor',
'djgeojson',
'tinymce',
'easy_thumbnails',
'shapes',
'paperclip',
'mapentity',
'rest_framework',
'embed_video',
'djcelery',
)
INSTALLED_APPS = PROJECT_APPS + (
'geotrek.cirkwi',
'geotrek.authent',
'geotrek.common',
'geotrek.altimetry',
'geotrek.core',
'geotrek.infrastructure',
'geotrek.maintenance',
'geotrek.zoning',
'geotrek.land',
'geotrek.trekking',
'geotrek.tourism',
'geotrek.flatpages',
'geotrek.feedback',
)
SERIALIZATION_MODULES = {
'geojson': 'djgeojson.serializers'
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
# The fat backend is used to store big chunk of data (>1 Mo)
'fat': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'simple': {
'format': '%(levelname)s %(asctime)s %(name)s %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'logging.NullHandler'
},
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.request': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'south': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'geotrek': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
'propagate': False,
},
'mapentity': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
'propagate': False,
},
'': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
'propagate': False,
},
}
}
THUMBNAIL_ALIASES = {
'': {
'thumbnail': {'size': (150, 150)},
# Thumbnails for public trek website
'small-square': {'size': (120, 120), 'crop': True},
'medium': {'size': (800, 800)},
# Header image for trek export (keep ratio of TREK_EXPORT_HEADER_IMAGE_SIZE)
'print': {'size': (1000, 500), 'crop': 'smart'},
},
}
PAPERCLIP_CONFIG = {
'ENABLE_VIDEO': True,
'FILETYPE_MODEL': 'common.FileType',
'ATTACHMENT_TABLE_NAME': 'fl_t_fichier',
}
# Data projection
SRID = 3857
# API projection (client-side), can differ from SRID (database). Leaflet requires 4326.
API_SRID = 4326
# Extent in native projection (Toulouse area)
SPATIAL_EXTENT = (144968, 5415668, 175412, 5388753)
MAPENTITY_CONFIG = {
'TITLE': gettext_noop("Geotrek"),
'TEMP_DIR': '/tmp',
'HISTORY_ITEMS_MAX': 7,
'CONVERSION_SERVER': 'http://127.0.0.1:6543',
'CAPTURE_SERVER': 'http://127.0.0.1:8001',
'ROOT_URL': ROOT_URL,
'MAP_BACKGROUND_FOGGED': True,
'GEOJSON_LAYERS_CACHE_BACKEND': 'fat',
'SENDFILE_HTTP_HEADER': 'X-Accel-Redirect',
'DRF_API_URL_PREFIX': r'^api/(?P<lang>\w+)/',
}
DEFAULT_STRUCTURE_NAME = gettext_noop('Default')
VIEWPORT_MARGIN = 0.1 # On list page, around spatial extent from settings.ini
PATHS_LINE_MARKER = 'dotL'
PATH_SNAPPING_DISTANCE = 1 # Distance of path snapping in meters
SNAP_DISTANCE = 30 # Distance of snapping in pixels
ALTIMETRIC_PROFILE_PRECISION = 25 # Sampling precision in meters
ALTIMETRIC_PROFILE_BACKGROUND = 'white'
ALTIMETRIC_PROFILE_COLOR = '#F77E00'
ALTIMETRIC_PROFILE_HEIGHT = 400
ALTIMETRIC_PROFILE_WIDTH = 800
ALTIMETRIC_PROFILE_FONTSIZE = 25
ALTIMETRIC_PROFILE_FONT = 'ubuntu'
ALTIMETRIC_PROFILE_MIN_YSCALE = 1200 # Minimum y scale (in meters)
ALTIMETRIC_AREA_MAX_RESOLUTION = 150 # Maximum number of points (by width/height)
ALTIMETRIC_AREA_MARGIN = 0.15
# Let this be defined at instance-level
LEAFLET_CONFIG = {
'SRID': SRID,
'TILES': [
('Scan', 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',),
('Ortho', 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.jpg'),
],
'TILES_EXTENT': SPATIAL_EXTENT,
# Extent in API projection (Leaflet view default extent)
'SPATIAL_EXTENT': (1.3, 43.7, 1.5, 43.5),
'NO_GLOBALS': False,
'PLUGINS': {
'geotrek': {'js': ['core/leaflet.lineextremities.js',
'core/leaflet.textpath.js',
'trekking/points_reference.js',
'trekking/parking_location.js']},
'topofields': {'js': ['core/geotrek.forms.snap.js',
'core/geotrek.forms.topology.js',
'core/dijkstra.js',
'core/multipath.js',
'core/topology_helper.js']}
}
}
""" This *pool* of colors is used to colorized lands records.
"""
COLORS_POOL = {'land': ['#f37e79', '#7998f3', '#bbf379', '#f379df', '#f3bf79', '#9c79f3', '#7af379'],
'physical': ['#f3799d', '#79c1f3', '#e4f379', '#de79f3', '#79f3ba', '#f39779', '#797ff3'],
'competence': ['#a2f379', '#f379c6', '#79e9f3', '#f3d979', '#b579f3', '#79f392', '#f37984'],
'signagemanagement': ['#79a8f3', '#cbf379', '#f379ee', '#79f3e3', '#79f3d3'],
'workmanagement': ['#79a8f3', '#cbf379', '#f379ee', '#79f3e3', '#79f3d3'],
'restrictedarea': ['plum', 'violet', 'deeppink', 'orchid',
'darkviolet', 'lightcoral', 'palevioletred',
'MediumVioletRed', 'MediumOrchid', 'Magenta',
'LightSalmon', 'HotPink', 'Fuchsia']}
MAP_STYLES = {
'path': {'weight': 2, 'opacity': 1.0, 'color': '#FF4800'},
'city': {'weight': 4, 'color': 'orange', 'opacity': 0.3, 'fillOpacity': 0.0},
'district': {'weight': 6, 'color': 'orange', 'opacity': 0.3, 'fillOpacity': 0.0, 'dashArray': '12, 12'},
'restrictedarea': {'weight': 2, 'color': 'red', 'opacity': 0.5, 'fillOpacity': 0.5},
'land': {'weight': 4, 'color': 'red', 'opacity': 1.0},
'physical': {'weight': 6, 'color': 'red', 'opacity': 1.0},
'competence': {'weight': 4, 'color': 'red', 'opacity': 1.0},
'workmanagement': {'weight': 4, 'color': 'red', 'opacity': 1.0},
'signagemanagement': {'weight': 5, 'color': 'red', 'opacity': 1.0},
'print': {
'path': {'weight': 1},
'trek': {'color': '#FF3300', 'weight': 7, 'opacity': 0.5,
'arrowColor': 'black', 'arrowSize': 10},
}
}
LAYER_PRECISION_LAND = 4 # Number of fraction digit
LAYER_SIMPLIFY_LAND = 10 # Simplification tolerance
LAND_BBOX_CITIES_ENABLED = True
LAND_BBOX_DISTRICTS_ENABLED = True
LAND_BBOX_AREAS_ENABLED = False
PUBLISHED_BY_LANG = True
EXPORT_MAP_IMAGE_SIZE = {
'trek': (14.1, 11),
'poi': (14.1, 11),
'touristiccontent': (14.1, 11),
'touristicevent': (14.1, 11),
}
EXPORT_HEADER_IMAGE_SIZE = {
'trek': (10.7, 5.35), # Keep ratio of THUMBNAIL_ALIASES['print']
'poi': (10.7, 5.35), # Keep ratio of THUMBNAIL_ALIASES['print']
'touristiccontent': (10.7, 5.35), # Keep ratio of THUMBNAIL_ALIASES['print']
'touristicevent': (10.7, 5.35), # Keep ratio of THUMBNAIL_ALIASES['print']
}
COMPLETENESS_FIELDS = {
'trek': ['departure', 'duration', 'difficulty', 'description_teaser']
}
TRAIL_MODEL_ENABLED = True
TREKKING_TOPOLOGY_ENABLED = True
FLATPAGES_ENABLED = False # False because still experimental
TOURISM_ENABLED = False # False because still experimental
TREK_POI_INTERSECTION_MARGIN = 500 # meters (used only if TREKKING_TOPOLOGY_ENABLED = False)
TOURISM_INTERSECTION_MARGIN = 500 # meters (always used)
SIGNAGE_LINE_ENABLED = False
TREK_POINTS_OF_REFERENCE_ENABLED = True
TREK_EXPORT_POI_LIST_LIMIT = 14
TREK_EXPORT_INFORMATION_DESK_LIST_LIMIT = 2
TREK_DAY_DURATION = 10 # Max duration to be done in one day
TREK_ICON_SIZE_POI = 18
TREK_ICON_SIZE_PARKING = 18
TREK_ICON_SIZE_INFORMATION_DESK = 18
# Static offsets in projection units
TOPOLOGY_STATIC_OFFSETS = {'land': -5,
'physical': 0,
'competence': 5,
'signagemanagement': -10,
'workmanagement': 10}
MESSAGE_TAGS = {
messages.SUCCESS: 'alert-success',
messages.INFO: 'alert-info',
messages.DEBUG: 'alert-info',
messages.WARNING: 'alert-error',
messages.ERROR: 'alert-error',
}
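# With the mapping above, Django templates can use the message's tag directly
# as a CSS class, e.g.:
#   <div class="{{ message.tags }}">{{ message }}</div>
# renders class="alert-success" for a messages.success(...) call.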
CACHE_TIMEOUT_LAND_LAYERS = 60 * 60 * 24
CACHE_TIMEOUT_TOURISM_DATASOURCES = 60 * 60 * 24
TREK_CATEGORY_ORDER = None
TOURISTIC_EVENT_CATEGORY_ORDER = None
SPLIT_TREKS_CATEGORIES_BY_PRACTICE = False
SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY = False
HIDE_PUBLISHED_TREKS_IN_TOPOLOGIES = False
ZIP_TOURISTIC_CONTENTS_AS_POI = False
CRISPY_ALLOWED_TEMPLATE_PACKS = ('bootstrap', 'bootstrap3')
CRISPY_TEMPLATE_PACK = 'bootstrap'
# Mobile app_directories
MOBILE_TILES_URL = 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'
MOBILE_TILES_RADIUS_LARGE = 0.01 # ~1 km
MOBILE_TILES_RADIUS_SMALL = 0.005 # ~500 m
MOBILE_TILES_GLOBAL_ZOOMS = range(13)
MOBILE_TILES_LOW_ZOOMS = range(13, 15)
MOBILE_TILES_HIGH_ZOOMS = range(15, 17)
import djcelery
djcelery.setup_loader()
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
BROKER_URL = 'redis://127.0.0.1:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_RESULT_EXPIRES = 5
TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner'
| bsd-2-clause | -7,784,692,578,393,617,000 | 30.414035 | 108 | 0.631744 | false |
fiji-flo/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/environment.py | 1 | 8310 | import json
import os
import multiprocessing
import signal
import socket
import sys
import time
from mozlog import get_default_logger, handlers, proxy
from wptlogging import LogLevelRewriter
from wptserve.handlers import StringHandler
here = os.path.split(__file__)[0]
repo_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir, os.pardir))
serve = None
sslutils = None
def do_delayed_imports(logger, test_paths):
global serve, sslutils
serve_root = serve_path(test_paths)
sys.path.insert(0, serve_root)
failed = []
try:
from tools.serve import serve
except ImportError:
failed.append("serve")
try:
import sslutils
except ImportError:
failed.append("sslutils")
if failed:
logger.critical(
"Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
(", ".join(failed), serve_root))
sys.exit(1)
def serve_path(test_paths):
return test_paths["/"]["tests_path"]
def get_ssl_kwargs(**kwargs):
if kwargs["ssl_type"] == "openssl":
args = {"openssl_binary": kwargs["openssl_binary"]}
elif kwargs["ssl_type"] == "pregenerated":
args = {"host_key_path": kwargs["host_key_path"],
"host_cert_path": kwargs["host_cert_path"],
"ca_cert_path": kwargs["ca_cert_path"]}
else:
args = {}
return args
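# Example (illustrative): the returned kwargs depend on the selected backend,
# e.g. get_ssl_kwargs(ssl_type="openssl", openssl_binary="/usr/bin/openssl")
# -> {"openssl_binary": "/usr/bin/openssl"}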
def ssl_env(logger, **kwargs):
ssl_env_cls = sslutils.environments[kwargs["ssl_type"]]
return ssl_env_cls(logger, **get_ssl_kwargs(**kwargs))
class TestEnvironmentError(Exception):
pass
class TestEnvironment(object):
def __init__(self, test_paths, ssl_env, pause_after_test, debug_info, options, env_extras):
"""Context manager that owns the test environment i.e. the http and
websockets servers"""
self.test_paths = test_paths
self.ssl_env = ssl_env
self.server = None
self.config = None
self.pause_after_test = pause_after_test
self.test_server_port = options.pop("test_server_port", True)
self.debug_info = debug_info
self.options = options if options is not None else {}
self.cache_manager = multiprocessing.Manager()
self.stash = serve.stash.StashServer()
self.env_extras = env_extras
self.env_extras_cms = None
def __enter__(self):
self.stash.__enter__()
self.ssl_env.__enter__()
self.cache_manager.__enter__()
self.config = self.load_config()
self.setup_server_logging()
ports = serve.get_ports(self.config, self.ssl_env)
self.config = serve.normalise_config(self.config, ports)
assert self.env_extras_cms is None, (
"A TestEnvironment object cannot be nested")
self.env_extras_cms = []
for env in self.env_extras:
cm = env(self.options, self.config)
cm.__enter__()
self.env_extras_cms.append(cm)
self.servers = serve.start(self.config, self.ssl_env,
self.get_routes())
if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
self.ignore_interrupts()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.process_interrupts()
for scheme, servers in self.servers.iteritems():
for port, server in servers:
server.kill()
for cm in self.env_extras_cms:
cm.__exit__(exc_type, exc_val, exc_tb)
self.env_extras_cms = None
self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
self.ssl_env.__exit__(exc_type, exc_val, exc_tb)
self.stash.__exit__()
def ignore_interrupts(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def process_interrupts(self):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def load_config(self):
default_config_path = os.path.join(serve_path(self.test_paths), "config.default.json")
local_config = {
"ports": {
"http": [8000, 8001],
"https": [8443],
"ws": [8888]
},
"check_subdomains": False,
"bind_hostname": self.options["bind_hostname"],
"ssl": {}
}
if "host" in self.options:
local_config["host"] = self.options["host"]
with open(default_config_path) as f:
default_config = json.load(f)
#TODO: allow non-default configuration for ssl
local_config["external_host"] = self.options.get("external_host", None)
local_config["ssl"]["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
config = serve.merge_json(default_config, local_config)
config["doc_root"] = serve_path(self.test_paths)
if not self.ssl_env.ssl_enabled:
config["ports"]["https"] = [None]
host = self.options.get("certificate_domain", config["host"])
hosts = [host]
hosts.extend("%s.%s" % (item[0], host) for item in serve.get_subdomains(host).values())
key_file, certificate = self.ssl_env.host_cert_path(hosts)
config["key_file"] = key_file
config["certificate"] = certificate
serve.set_computed_defaults(config)
return config
def setup_server_logging(self):
server_logger = get_default_logger(component="wptserve")
assert server_logger is not None
        log_filter = handlers.LogLevelFilter(lambda x: x, "info")
# Downgrade errors to warnings for the server
log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
server_logger.component_filter = log_filter
server_logger = proxy.QueuedProxyLogger(server_logger)
try:
#Set as the default logger for wptserve
serve.set_logger(server_logger)
serve.logger = server_logger
except Exception:
# This happens if logging has already been set up for wptserve
pass
def get_routes(self):
route_builder = serve.RoutesBuilder()
for path, format_args, content_type, route in [
("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
(self.options.get("testharnessreport", "testharnessreport.js"),
{"output": self.pause_after_test}, "text/javascript",
"/resources/testharnessreport.js")]:
path = os.path.normpath(os.path.join(here, path))
route_builder.add_static(path, format_args, content_type, route)
data = b""
with open(os.path.join(repo_root, "resources", "testdriver.js"), "rb") as fp:
data += fp.read()
with open(os.path.join(here, "testdriver-extra.js"), "rb") as fp:
data += fp.read()
route_builder.add_handler(b"GET", b"/resources/testdriver.js",
StringHandler(data, "text/javascript"))
for url_base, paths in self.test_paths.iteritems():
if url_base == "/":
continue
route_builder.add_mount_point(url_base, paths["tests_path"])
if "/" not in self.test_paths:
del route_builder.mountpoint_routes["/"]
return route_builder.get_routes()
def ensure_started(self):
# Pause for a while to ensure that the server has a chance to start
for _ in xrange(20):
failed = self.test_servers()
if not failed:
return
time.sleep(0.5)
        raise EnvironmentError("Servers failed to start (scheme:port): %s" %
                               ", ".join("%s:%s" % item for item in failed))
def test_servers(self):
failed = []
for scheme, servers in self.servers.iteritems():
for port, server in servers:
if self.test_server_port:
s = socket.socket()
try:
s.connect((self.config["host"], port))
except socket.error:
failed.append((scheme, port))
finally:
s.close()
if not server.is_alive():
failed.append((scheme, port))
| mpl-2.0 | 7,157,560,631,007,877,000 | 32.373494 | 106 | 0.58231 | false |
jelly/calibre | manual/conf.py | 2 | 7503 | # -*- coding: utf-8 -*-
#
# calibre documentation build configuration file, created by
# sphinx-quickstart.py on Sun Mar 23 01:23:55 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, errno
from datetime import date
# If your extensions are in another directory, add it here.
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(base)
sys.path.insert(0, os.path.dirname(base))
from setup import __appname__, __version__
import calibre.utils.localization as l # Ensure calibre translations are installed
import custom
del sys.path[0]
del l
custom
# General configuration
# ---------------------
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'custom', 'sidebar_toc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index' if tags.has('online') else 'simple_index' # noqa
# kill the warning about index/simple_index not being in a toctree
exclude_patterns = ['simple_index.rst'] if master_doc == 'index' else ['index.rst']
exclude_patterns.append('cli-options-header.rst')
if tags.has('gettext'): # noqa
# Do not exclude anything as the strings must be translated. This will
# generate a warning about the documents not being in a toctree, just ignore
# it.
exclude_patterns = []
# The language
language = os.environ.get('CALIBRE_OVERRIDE_LANG', 'en')
def generated_langs():
try:
return os.listdir(os.path.join(base, 'generated'))
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
return ()
# ignore generated files in languages other than the language we are building for
ge = {'generated/' + x for x in generated_langs()} | {
'generated/' + x for x in os.environ.get('ALL_USER_MANUAL_LANGUAGES', '').split()}
ge.discard('generated/' + language)
exclude_patterns += list(ge)
del ge
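# e.g. with CALIBRE_OVERRIDE_LANG=fr and ALL_USER_MANUAL_LANGUAGES="fr de",
# everything under generated/de is excluded while generated/fr stays buildable.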
# General substitutions.
project = __appname__
copyright = 'Kovid Goyal'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['global', 'cli/global']
locale_dirs = ['locale/']
title = '%s User Manual' % __appname__
if language not in {'en', 'eng'}:
import gettext
try:
t = gettext.translation('simple_index', locale_dirs[0], [language])
except IOError:
pass
else:
title = t.ugettext(title)
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html',
'searchbox.html',
'localtoc.html',
'relations.html',
]
}
html_theme_options = {
'logo': 'logo.png',
'show_powered_by': False,
'fixed_sidebar': True,
'sidebar_collapse': True,
'analytics_id': 'UA-20736318-1',
'github_button': False,
}
# The favicon
html_favicon = '../icons/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the built-in static files,
# so a file named "default.css" will overwrite the built-in "default.css".
html_static_path = ['resources', '../icons/favicon.ico']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Overall title of the documentation
# html_title = title
html_short_title = _('Start')
from calibre.utils.localization import get_language
html_context = {}
html_context['other_languages'] = [
(lc, get_language(lc)) for lc in os.environ.get('ALL_USER_MANUAL_LANGUAGES', '').split() if lc != language]
def sort_languages(x):
from calibre.utils.icu import sort_key
lc, name = x
if lc == language:
return ''
return sort_key(unicode(name))
html_context['other_languages'].sort(key=sort_languages)
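# Orders the language switcher by the ICU-collated display name; the
# lc == language guard in sort_languages is defensive, since the list above
# already excludes the current language.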
html_context['support_text'] = _('Support calibre')
html_context['support_tooltip'] = _('Contribute to support calibre development')
del sort_languages, get_language
epub_author = u'Kovid Goyal'
epub_publisher = u'Kovid Goyal'
epub_copyright = u'© {} Kovid Goyal'.format(date.today().year)
epub_description = u'Comprehensive documentation for calibre'
epub_identifier = u'https://manual.calibre-ebook.com'
epub_scheme = u'url'
epub_uid = u'S54a88f8e9d42455e9c6db000e989225f'
epub_tocdepth = 4
epub_tocdup = True
epub_cover = ('epub_cover.jpg', 'epub_cover_template.html')
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
html_use_modindex = False
html_use_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'calibredoc'
html_use_opensearch = 'https://manual.calibre-ebook.com'
html_show_sphinx = False
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [(master_doc, 'calibre.tex', title, 'Kovid Goyal', 'manual', False)]
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
latex_logo = 'resources/logo.png'
latex_show_pagerefs = True
latex_show_urls = 'footnote'
latex_elements = {
'papersize':'letterpaper',
'fontenc':r'\usepackage[T2A,T1]{fontenc}',
'preamble': r'\renewcommand{\pageautorefname}{%s}' % _('page'),
}
| gpl-3.0 | 4,534,027,237,254,408,000 | 30.389121 | 111 | 0.692082 | false |
nmarcetic/weblines.io | weblines/apps/cms/tests/test_models.py | 1 | 2950 | """
Domain objects test suites
"""
from django.db import IntegrityError
from django.test import TestCase
from weblines.apps.cms import models
class PageTestCase(TestCase):
"""
Contains page-related test scenarios
"""
def test_creation(self):
"""
Tests various creation scenarios
Each object is assigned unique slug, generated from its title. Once
created, slug retains its value even when page title is updated.
"""
# object creation
new_page = models.Page.objects.create(title='First page')
self.assertEqual(new_page.slug, 'first-page')
self.assertEqual(new_page.id, 1)
# title update
new_page.title = 'Different title'
new_page.save(update_fields=['title'])
self.assertEqual(new_page.slug, 'first-page')
# slug duplication
with self.assertRaises(IntegrityError):
models.Page.objects.create(title='First page')
class GalleryTestCase(TestCase):
"""
Contains gallery-related test scenarios
"""
def setUp(self):
"""
Creates dummy gallery
"""
# create owner page
page = models.Page.objects.create(title='first page')
# create new display type
display_type = models.DisplayType.objects.create(name='gallery',
code='G')
# instantiate gallery and bind it to a created display type
self.gallery = models.Gallery.objects.create(name='slider',
page=page,
display_type=display_type)
def test_item_addition(self):
"""
Tests item addition
"""
self.gallery.add_item('gallery-item')
self.assertEqual(1, self.gallery.items.count())
self.assertEqual('gallery-item', self.gallery.items.first().caption)
# in case of same captions, slugs must remain unique
self.gallery.add_item('gallery-item')
first = self.gallery.items.first()
last = self.gallery.items.last()
self.assertNotEqual(first.slug, last.slug)
def test_item_removal(self):
"""
Tests item removal
Gallery item is removed if its name is found in added items.
"""
self.test_item_addition()
self.assertEqual(2, self.gallery.items.count())
# failed search should result with False
return_value = self.gallery.remove_item('invalid-item')
self.assertEqual(False, return_value)
# successful search should remove item and return True
return_value = self.gallery.remove_item('gallery-item-1')
self.assertEqual(True, return_value)
self.assertEqual(1, self.gallery.items.count())
# slide should be completely removed from system
self.assertEqual(1, models.GalleryItem.objects.count())
| gpl-2.0 | 6,279,059,706,440,562,000 | 32.146067 | 79 | 0.606102 | false |
sojournexx/python | Assignments/TanAndrew_assign6.py | 1 | 5318 | #Andrew Tan, 3/25, Section 010
import myfunctions
import random
#Ask user for inputs and check validity
while True:
qns = int(input("How many problems would you like to attempt? "))
if qns <= 0:
print("Invalid number, try again\n")
continue
else:
break
while True:
width = int(input("How wide do you want your digits to be? 5-10: "))
if width < 5 or width > 10:
print("Invalid width, try again\n")
continue
else:
break
while True:
drill = str.lower(input("Would you like to activate 'drill' mode? yes or no: "))
if drill != "yes" and drill != "no":
print("Invalid response, try again\n")
continue
else:
break
print("\nHere we go!")
#Define variables to track score and statistics
tscore = 0
addition = 0
subtraction = 0
multiplication = 0
division = 0
addition_score = 0
subtraction_score = 0
multiplication_score = 0
division_score = 0
#Set number of questions
for i in range(qns):
print("\nWhat is .....\n")
#Define parameters
x = random.randint(0, 9)
op = random.randint(1, 4)
y = random.randint(0, 9)
#Check for valid division equation
if op == 4:
if y == 0:
y = random.randint(1, 9)
while x % y != 0:
x = random.randint(0, 9)
y = random.randint(1, 9)
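        # e.g. x=8, y=4 is kept (8 % 4 == 0); x=7, y=3 forces a redraw
        # until the quotient is a whole number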
#Display first number
    getattr(myfunctions, "number_%d" % x)(width)  # myfunctions.number_0 .. number_9
#Display operator
if op == 1:
op = "+"
myfunctions.plus(width)
addition += 1
elif op == 2:
op = "-"
myfunctions.minus(width)
subtraction += 1
elif op == 3:
op = "*"
myfunctions.multiply(width)
multiplication += 1
elif op == 4:
op = "/"
myfunctions.divide(width)
division += 1
#Display second number
    getattr(myfunctions, "number_%d" % y)(width)  # myfunctions.number_0 .. number_9
#Ask user for answer and check answer
if drill == "no":
z = int(input("= "))
        if myfunctions.check_answer(x, y, z, op):
print("Correct!")
tscore += 1
if op == "+":
addition_score += 1
if op == "-":
subtraction_score += 1
if op == "*":
multiplication_score += 1
if op == "/":
division_score += 1
else:
print("Sorry, that's not correct.")
elif drill == "yes":
while True:
z = int(input("= "))
            if not myfunctions.check_answer(x, y, z, op):
print("Sorry, that's not correct.")
if op == "+":
addition_score += 1
if op == "-":
subtraction_score += 1
if op == "*":
multiplication_score += 1
if op == "/":
division_score += 1
continue
else:
print("Correct!")
break
#Display score
if drill == "no":
print("\nYou got %d out of %d correct!" %(tscore, qns))
for operator, count, score in zip(["addition", "subtraction", "multiplication", "division"], [addition, subtraction, multiplication, division], [addition_score, subtraction_score, multiplication_score, division_score]):
if count == 0:
print("\nNo %s problems presented" %(operator))
else:
print("\nTotal %s problems presented: %d" %(operator, count))
print("Correct %s problems: %d (%s)" %(operator, score, format(score/count, ".1%")))
elif drill == "yes":
for operator, count, score in zip(["addition", "subtraction", "multiplication", "division"], [addition, subtraction, multiplication, division], [addition_score, subtraction_score, multiplication_score, division_score]):
if score == 0:
praise = "(perfect!)"
else:
praise = ""
if count == 0:
print("\nNo %s problems presented" %(operator))
else:
print("\nTotal %s problems presented: %d" %(operator, count))
print("# of extra attempts needed: %d %s" %(score, praise))
| mit | -6,892,856,213,811,609,000 | 28.045198 | 223 | 0.511847 | false |
DBHeise/fileid | runtests.py | 1 | 2380 | import requests, json, argparse, os, subprocess, xmltodict
envDict = {}
ignoreFiles = [".gitignore",".gitattributes","LICENSE"]
ignoreExt = [".md",".txt",".json",".ps1",".py"]
def findBinaries(basefolder):
testBinaries = []
fileidBinaries = []
for root, dirs, files in os.walk(basefolder):
for file in files:
if file.endswith("_test.exe") or file.endswith("_test"):
testBinaries.append(os.path.join(root,file))
elif file == "fileid.exe" or file == "fid" or file == "fileid":
fileidBinaries.append(os.path.join(root,file))
return fileidBinaries,testBinaries
def runTestBinary(file):
print("Running Test: " + file)
process = subprocess.Popen(file, shell=True, env=envDict)
process.wait()
return process.returncode == 0
def testfiles(fidbin, folder):
allgood = True
for root, dirs, files in os.walk(folder):
if ".git" in dirs:
dirs.remove('.git')
for file in files:
fullpath = os.path.join(root, file)
file_name, file_ext = os.path.splitext(file)
if not file_name in ignoreFiles and not file_ext in ignoreExt:
allgood &= testfile(fidbin, fullpath)
return allgood
def testfile(fid, file, format = "json"):
outputStr = subprocess.check_output([fid, file, format])
output = {}
if format == "json":
output = json.loads(outputStr)
elif format == "xml":
output = xmltodict.parse(outputStr)
actual_ext = [extension["extension"].lower() for extension in output["extensions"]]
filename, expected_ext = os.path.splitext(file)
if "_" in expected_ext:
expected_ext = expected_ext.replace(".","").replace("_",".").lower()
else:
expected_ext = expected_ext.replace(".","").lower()
if expected_ext not in actual_ext:
print("FAIL! (" + expected_ext + "!=" + ",".join(actual_ext) +") " + file)
return False
return True
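# Expected-extension convention (derived from the code above): the test file's
# suffix encodes the expected type, with '_' standing in for a dot in compound
# extensions -- e.g. 'sample.tar_gz' must be identified as 'tar.gz'.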
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run FILEID tests")
parser.add_argument("--build", help="path to build output")
parser.add_argument("--files", help="path to directory containing test files")
args = parser.parse_args()
fileids, testbins = findBinaries(args.build)
envDict = dict(os.environ)
envDict["TESTFOLDER"] = args.files
allgood = True
## Run Unit Tests
for testBin in testbins:
allgood &= runTestBinary(testBin)
## Run Test Files
for fid in fileids:
allgood &= testfiles(fid, args.files)
if not allgood:
exit(-10) | mit | -585,351,895,886,946,000 | 27.011765 | 84 | 0.678992 | false |
tradej/pykickstart-old | tests/commands/zerombr.py | 1 | 1629 | #
# Martin Gracik <[email protected]>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import *
class FC3_TestCase(CommandTest):
command = "zerombr"
def runTest(self):
# pass
self.assert_parse("zerombr", "zerombr\n")
# ignoring arguments
self.assert_parse("zerombr arg", "zerombr\n")
class F9_TestCase(FC3_TestCase):
command = "zerombr"
def runTest(self):
# pass
self.assert_parse("zerombr", "zerombr\n")
# fail
# zerombr does not take any arguments
self.assert_parse_error("zerombr arg", KickstartParseError)
self.assert_parse_error("zerombr --bad-flag", KickstartParseError)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -8,884,366,830,419,637,000 | 33.659574 | 79 | 0.710866 | false |
FEniCS/dolfin | demo/undocumented/adaptive-poisson/python/demo_adaptive-poisson.py | 1 | 2889 | """This demo program solves Poisson's equation
- div grad u(x, y) = f(x, y)
on the unit square with source f given by
f(x, y) = exp(-100(x^2 + y^2))
and homogeneous Dirichlet boundary conditions.
Note that we use a simplified error indicator, ignoring
edge (jump) terms and the size of the interpolation constant.
"""
# Copyright (C) 2008 Rolv Erlend Bredesen
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg 2008-2011
from __future__ import print_function
from dolfin import *
from numpy import array, sqrt
from math import pow
from six.moves import xrange as range
TOL = 5e-4 # Error tolerance
REFINE_RATIO = 0.50 # Refine 50 % of the cells in each iteration
MAX_ITER = 20 # Maximal number of iterations
# Create initial mesh
mesh = UnitSquareMesh(4, 4)
source_str = "exp(-100.0*(pow(x[0], 2) + pow(x[1], 2)))"
source = eval("lambda x: " + source_str)
# Adaptive algorithm
for level in range(MAX_ITER):
# Define variational problem
V = FunctionSpace(mesh, "CG", 1)
v = TestFunction(V)
u = TrialFunction(V)
f = Expression(source_str, degree=2)
a = dot(grad(v), grad(u))*dx
L = v*f*dx
# Define boundary condition
u0 = Constant(0.0)
bc = DirichletBC(V, u0, DomainBoundary())
# Compute solution
u = Function(V)
solve(a == L, u, bc)
# Compute error indicators
h = array([c.h() for c in cells(mesh)])
K = array([c.volume() for c in cells(mesh)])
R = array([abs(source([c.midpoint().x(), c.midpoint().y()])) for c in cells(mesh)])
gamma = h*R*sqrt(K)
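    # Simplified residual indicator per cell T: gamma_T = h_T * |f(x_T)| * sqrt(|T|),
    # i.e. edge (jump) terms and the interpolation constant are ignored,
    # as noted in the module docstring.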
# Compute error estimate
E = sum([g*g for g in gamma])
E = sqrt(MPI.sum(mesh.mpi_comm(), E))
print("Level %d: E = %g (TOL = %g)" % (level, E, TOL))
# Check convergence
if E < TOL:
info("Success, solution converged after %d iterations" % level)
break
# Mark cells for refinement
cell_markers = MeshFunction("bool", mesh, mesh.topology().dim())
gamma_0 = sorted(gamma, reverse=True)[int(len(gamma)*REFINE_RATIO)]
gamma_0 = MPI.max(mesh.mpi_comm(), gamma_0)
for c in cells(mesh):
cell_markers[c] = gamma[c.index()] > gamma_0
# Refine mesh
mesh = refine(mesh, cell_markers)
# Plot mesh
plot(mesh)
| lgpl-3.0 | -1,866,448,215,433,288,200 | 29.410526 | 87 | 0.661821 | false |
theintencity/flash-videoio | examples/django-apps/project/experts/models.py | 1 | 3541 | import datetime
from google.appengine.api import users
from google.appengine.ext import db
class User(db.Model):
name = db.StringProperty('Full Name')
account = db.UserProperty()
phone_number = db.PhoneNumberProperty('Phone Number')
address = db.PostalAddressProperty('Postal Address')
website = db.StringProperty('Homepage URL')
description = db.TextProperty('Brief Biography')
rating = db.FloatProperty(default=0.0)
rating_count = db.IntegerProperty(default=0)
tags = db.StringListProperty('Expertise, one per line', default=None)
availability = db.TextProperty('Availability', default='Available by appointment on weekdays in PST timezone')
has_chat = db.BooleanProperty('Use Google Chat', default=False)
def email(self):
result = self.account.nickname() if self.account else ''
return (result + '@gmail.com') if result and '@' not in result else result
def get_current_user():
account = users.get_current_user()
if account:
user = db.GqlQuery('SELECT * FROM User WHERE account = :1', account).get()
if not user:
user = User(name='', account=account)
user.put()
user.is_active = True
user.is_staff = users.is_current_user_admin()
else:
user = User()
user.is_active = False
return user
class Tag(db.Model):
name = db.StringProperty(required=True)
count = db.IntegerProperty(default=1)
class Event(db.Model):
subject = db.StringProperty()
description = db.TextProperty()
owner = db.StringProperty()
visitor = db.StringProperty()
start_time = db.DateTimeProperty()
end_time = db.DateTimeProperty()
created_on = db.DateTimeProperty(auto_now_add=True)
class Review(db.Model):
event = db.ReferenceProperty(Event, collection_name='event_set') # TODO make required=True
for_user = db.ReferenceProperty(User, required=True, collection_name='for_user_set')
by_user = db.ReferenceProperty(User, required=True, collection_name='by_user_set')
rating = db.IntegerProperty(default=3)
description = db.TextProperty()
modified_on = db.DateTimeProperty(auto_now=True)
class ClientStream(db.Model):
clientId = db.StringProperty(required=True)
visitor = db.StringProperty()
name = db.StringProperty(default='Anonymous')
publish = db.StringProperty(required=True)
play = db.StringProperty()
is_owner = db.BooleanProperty(default=False)
owner = db.StringProperty(required=True)
modified_on = db.DateTimeProperty(auto_now=True)
created_on = db.DateTimeProperty(auto_now_add=True)
def __repr__(self):
return '<ClientStream clientId=%r visitor=%r name=%r is_owner=%r owner=%r />'%(self.clientId, self.visitor, self.name, self.is_owner, self.owner)
def get_object(self, full=True):
if full:
return {'clientId': self.clientId, 'name': self.name, 'url': self.publish}
else:
return {'clientId': self.clientId}
class OfflineMessage(db.Model):
sender = db.StringProperty()
senderName = db.StringProperty()
receiver = db.StringProperty()
text = db.StringProperty(multiline=True)
created_on = db.DateTimeProperty(auto_now_add=True)
def __repr__(self):
return '<OfflineMessage sender=%r senderName=%r receiver=%r text=%r />'%(self.sender, self.senderName, self.receiver, self.text)
def get_object(self):
return {'senderName': self.senderName, 'text': self.text}
| lgpl-3.0 | -634,891,817,617,899,600 | 36.680851 | 153 | 0.672409 | false |
thesilencelies/SonnetConvs | InceptionModule.py | 1 | 1790 | #implimentation of the standard inceptionnet v3 inception module in sonnet
import tensorflow as tf
import sonnet as snt
class InceptionModule(snt.AbstractModule):
def __init__(self, output_channels, name="inception_module"):
super(InceptionModule, self).__init__(name=name)
self._output_channels = output_channels
def _build(self, inputs):
reshapeFlat = lambda x : tf.contrib.layers.flatten(x)
conv1d5 = snt.Conv2D(output_channels=self._output_channels, kernel_shape=1,
stride=1,name="inception5input")
conv1d3 = snt.Conv2D(output_channels=self._output_channels, kernel_shape=1,
stride=1,name="inception3input")
conv1dm = snt.Conv2D(output_channels=self._output_channels, kernel_shape=1,
stride=1,name="inceptionpoolinput")
conv1d1 = snt.Conv2D(output_channels=self._output_channels, kernel_shape=1,
stride=1,name="inception1channel")
conv3d5a = snt.Conv2D(output_channels=self._output_channels, kernel_shape=3,
stride=1,name="inception5stage1")
conv3d5b = snt.Conv2D(output_channels=self._output_channels, kernel_shape=3,
stride=1,name="inception5stage2")
conv3d3 = snt.Conv2D(output_channels=self._output_channels, kernel_shape=3,
stride=1,name="inception3channel")
maxpool = lambda x : tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
return tf.concat([reshapeFlat(conv3d5b(conv3d5a(conv1d5(inputs)))),
reshapeFlat(conv3d3(conv1d3(inputs))),
reshapeFlat(maxpool(conv1dm(inputs))),
reshapeFlat(conv1d1(inputs))],1) # then connect it.
| apache-2.0 | -6,797,985,732,095,887,000 | 43.75 | 81 | 0.632961 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations/_hub_virtual_network_connections_operations.py | 1 | 8982 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HubVirtualNetworkConnectionsOperations:
"""HubVirtualNetworkConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
virtual_hub_name: str,
connection_name: str,
**kwargs
) -> "_models.HubVirtualNetworkConnection":
"""Retrieves the details of a HubVirtualNetworkConnection.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HubVirtualNetworkConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.HubVirtualNetworkConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HubVirtualNetworkConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HubVirtualNetworkConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'} # type: ignore
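    # Illustrative client-side usage (client/variable names are assumptions,
    # not defined in this file):
    #   connection = await client.hub_virtual_network_connections.get(
    #       "my-resource-group", "my-hub", "my-connection")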
def list(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> AsyncIterable["_models.ListHubVirtualNetworkConnectionsResult"]:
"""Retrieves the details of all HubVirtualNetworkConnections.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListHubVirtualNetworkConnectionsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.ListHubVirtualNetworkConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHubVirtualNetworkConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListHubVirtualNetworkConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections'} # type: ignore
| mit | 4,938,271,912,548,516,000 | 48.9 | 215 | 0.656647 | false |
YutingZhang/lmdis-rep | exp-ae-aflw-30.py | 1 | 2231 | import tensorflow as tf
import os
import sys
from copy import copy
from model.pipeline import Pipeline
from tensorflow.python import debug as tf_debug
if __name__ == "__main__":
num_keypoints = 30
patch_feature_dim = 8
decoding_levels = 5
kp_transform_loss = 1e4
recon_weight = 0.001
learning_rate=0.01
keypoint_separation_bandwidth=0.04
keypoint_separation_loss_weight = 10.0
opt = {
"optimizer": "Adam",
"data_name": "aflw_80x80",
"recon_name": "gaussian_fixedvar_in_01",
"encoder_name": "general_80x80",
"decoder_name": "general_80x80",
"latent_dim": num_keypoints*2+(num_keypoints+1)*patch_feature_dim,
"train_color_jittering": True,
"train_random_mirroring": False,
"train_batch_size": 8,
"train_shuffle_capacity": 1000,
"learning_rate": learning_rate,
"max_epochs": 2000,
"weight_decay": 1e-6,
"test_steps": 5000,
"test_limit": 200,
"recon_weight": recon_weight,
}
opt["encoder_options"] = {
"keypoint_num": num_keypoints,
"patch_feature_dim": patch_feature_dim,
"ae_recon_type": opt["recon_name"],
"keypoint_concentration_loss_weight": 100.,
"keypoint_axis_balancing_loss_weight": 200.,
"keypoint_separation_loss_weight": keypoint_separation_loss_weight,
"keypoint_separation_bandwidth": keypoint_separation_bandwidth,
"keypoint_transform_loss_weight": kp_transform_loss,
"keypoint_decoding_heatmap_levels": decoding_levels,
"keypoint_decoding_heatmap_level_base": 0.5**(1/2),
"image_channels": 3,
}
opt["decoder_options"] = copy(opt["encoder_options"])
# -------------------------------------
model_dir = os.path.join("results/aflw_30")
checkpoint_dir = 'pretrained_results'
checkpoint_filename = 'celeba_30/model/snapshot_step_205317'
vp = Pipeline(None, opt, model_dir=model_dir)
print(vp.opt)
with vp.graph.as_default():
sess = vp.create_session()
        vp.run_full_train_from_checkpoint(sess, checkpoint_dir=checkpoint_dir, checkpoint_filename=checkpoint_filename)
vp.run_full_test(sess)
| apache-2.0 | -6,170,820,400,219,724,000 | 32.298507 | 121 | 0.618557 | false |
q14035/pimouse_ros | scripts/motors2.py | 1 | 2178 | #!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
class Motor():
def __init__(self):
if not self.set_power(False): sys.exit(1)
rospy.on_shutdown(self.set_power)
self.sub_raw = rospy.Subscriber('motor_raw', MotorFreqs, self.callback_raw_freq)
self.sub_cmd_vel = rospy.Subscriber('cmd_vel', Twist, self.callback_cmd_vel)
self.srv_on = rospy.Service('motor_on', Trigger, self.callback_on)
self.srv_off = rospy.Service('motor_off', Trigger, self.callback_off)
self.last_time = rospy.Time.now()
self.using_cmd_vel = False
def set_power(self, onoff = False):
en = "/dev/rtmotoren0"
try:
with open(en, 'w') as f:
f.write("1\n" if onoff else "0\n")
self.is_on = onoff
return True
except:
rospy.logerr("cannot write to " + en)
return False
def set_raw_freq(self, left_hz, right_hz):
if not self.is_on:
rospy.logerr("not enpowered")
return
try:
with open("/dev/rtmotor_raw_l0", 'w') as lf, open("/dev/rtmotor_raw_r0", 'w') as rf:
lf.write(str(int(round(left_hz))) + "\n")
rf.write(str(int(round(right_hz))) + "\n")
except:
rospy.logerr("cannot write to rtmotor_raw_*")
def callback_raw_freq(self, message):
self.set_raw_freq(message.left_hz, message.right_hz)
def callback_cmd_vel(self, message):
forward_hz = 80000.0*message.linear.x/(9*math.pi)
rot_hz = 400.0*message.angular.z/math.pi
self.set_raw_freq(forward_hz-rot_hz, forward_hz+rot_hz)
self.using_cmd_vel = True
self.last_time = rospy.Time.now()
def onoff_response(self, onoff):
d = TriggerResponse()
d.success = self.set_power(onoff)
d.message = "ON" if self.is_on else "OFF"
return d
def callback_on(self, message): return self.onoff_response(True)
def callback_off(self, message): return self.onoff_response(False)
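    # From a shell these services can be exercised with (illustrative):
    #   rosservice call /motor_on  "{}"   -> success: True, message: "ON"
    #   rosservice call /motor_off "{}"   -> success: True, message: "OFF"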
if __name__ == '__main__':
rospy.init_node('motors')
m = Motor()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
if m.using_cmd_vel and rospy.Time.now().to_sec() - m.last_time.to_sec() >= 1.0:
m.set_raw_freq(0, 0)
m.using_cmd_vel = False
rate.sleep()
| gpl-3.0 | 1,233,239,839,957,850,400 | 29.676056 | 87 | 0.674472 | false |
antoinecarme/pyaf | tests/perf/test_ozone_debug_perf.py | 1 | 1566 | import pandas as pd
import numpy as np
# from memory_profiler import profile
# from memprof import *
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')
# @memprof
def test_ozone_debug_perf():
b1 = tsds.load_ozone()
df = b1.mPastData
# df.tail(10)
# df[:-10].tail()
# df[:-10:-1]
# df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mEnableCycles = False;
lEngine.mOptions.mEnableTimeBasedTrends = False;
lEngine.mOptions.mEnableARModels = False;
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots("outputs/my_ozone");
dfapp_in = df.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
test_ozone_debug_perf();
| bsd-3-clause | 6,925,707,315,643,373,000 | 25.1 | 87 | 0.664112 | false |
assamite/TwatBot | tweets/reasoning.py | 1 | 4674 | '''
.. py:module:: reasoning
:platform: Unix
Reasoning object for the tweets.
'''
import logging
import traceback
logger = logging.getLogger('tweets.default')
class Reasoning():
'''Reasoning for the tweets.
    Class is used to hold information about the tweet's construction, and contains
    a few utility functions for convenience.
After the tweet has been constructed, the class should hold at least
following attributes:
* color_code (str or unicode): color of the tweet in html-format.
    * color_name (str or unicode): name constructed for the color code
* tweet (str or unicode): text of the tweet
* tweeted (bool): Was the constructed tweet send to twitter
* retweet (bool): is the tweet a retweet
* retweet_url (str or unicode): URL for the retweet (if any)
* original_tweet (str or unicode): Original tweet if this is a retweet
* muse: class instance of the used Muse
* context: class instance of the used Context
* color_semantics: class instance of the used ColorSemantics.
* values (dict): dictionary of the appreciation values generated during the tweet's construction.
'''
def __init__(self, **kwargs):
self.color_code = ""
self.color_name = ""
self.tweet = ""
self.tweeted = False
self.retweet = False
self.retweet_url = ""
self.original_tweet = ""
self.muse = None
self.context = None
self.color_semantics = None
self.muse_classname = ""
self.color_semantics_classname = ""
self.context_classname = ""
self.values = {}
self.media = None
self.appreciation = 0.0
for k, v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
ret = ""
for k, v in self.__dict__.items():
ret = ret + k + ": " + str(v) + "\n"
return ret
def set_attr(self, name, value):
'''Define new or change old attribute value.
Caller should take care of the possible conflicts when changing existing
attribute values.
:param name: Name of the attribute
:type name: str
:param value: New attribute value
:type value: Object
'''
setattr(self, name, value)
if name == 'muse':
setattr(self, 'muse_classname', value.__class__.__name__)
if name == 'context':
setattr(self, 'context_classname', value.__class__.__name__)
if name == 'color_semantics':
setattr(self, 'color_semantics_classname', value.__class__.__name__)
def set_attrs(self, mappings):
'''Define new or change old attribute values in a patch.
Caller should take care of the possible conflicts when changing existing
attribute values.
:param mappings: Attribute mappings
:type mappings: dict
'''
for k, v in mappings.items():
self.set_attr(k, v)
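        # Example (illustrative values):
        #   r = Reasoning()
        #   r.set_attrs({'color_code': '#ff0000', 'color_name': 'fire red'})
        #   # unknown keys simply become new attributes on the instance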
def save(self):
'''Save tweet to database.
        :returns: bool -- True if the save was made, False if the tweet was empty or an exception happened during the save
'''
from models import EveryColorBotTweet, Tweet, ReTweet
if self.tweet == "":
logger.info("Saving called for empty tweet. Skipping.")
return False
try:
twinst = Tweet(message = self.tweet, value = self.appreciation,\
muse = self.muse_classname,\
context = self.context_classname,\
color_code = self.color_code,\
color_name = self.color_name)
twinst.save()
if self.retweet:
screen_name = self.screen_name
if screen_name == 'everycolorbot':
inst = EveryColorBotTweet.objects.get_or_none(url = self.retweet_url)
if inst:
inst.tweeted = True
inst.save()
reinst = ReTweet(tweet_url = self.retweet_url,\
screen_name = screen_name, tweet = twinst)
reinst.save()
logger.info("Tweet saved to database: {}".format(self.tweet))
except Exception:
e = traceback.format_exc()
logger.error("Could not save tweet to database, because of error: {}".format(e))
return False
return True
| mit | 2,346,785,769,061,129,700 | 34.409091 | 104 | 0.545999 | false |
forman/dectree | dectree/codegen.py | 1 | 21170 | import ast
from collections import OrderedDict
from io import StringIO
from typing import List, Dict, Any, Tuple, Optional
import dectree.propfuncs as propfuncs
from dectree.config import CONFIG_NAME_INPUTS_NAME, CONFIG_NAME_OUTPUTS_NAME, CONFIG_NAME_PARAMS_NAME
from .config import get_config_value, \
CONFIG_NAME_VECTORIZE, CONFIG_NAME_PARAMETERIZE, CONFIG_NAME_FUNCTION_NAME, CONFIG_NAME_TYPES, VECTORIZE_FUNC, \
VECTORIZE_PROP, CONFIG_NAME_OR_PATTERN, CONFIG_NAME_NOT_PATTERN, \
CONFIG_NAME_NO_JIT, VECTORIZE_NONE, CONFIG_NAME_AND_PATTERN
from .types import VarName, PropName, TypeName, PropDef, TypeDefs, VarDefs, PropFuncParamName
def gen_code(type_defs,
input_defs,
output_defs,
rules,
**options):
text_io = StringIO()
code_gen = CodeGen(type_defs, input_defs, output_defs, rules, text_io, options)
code_gen.gen_code()
return text_io.getvalue()
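
# Minimal call sketch -- argument shapes follow CodeGen below; the option
# keyword name here is an assumption about the config keys, not taken from
# this file:
#
#   src = gen_code(type_defs, input_defs, output_defs, rules, no_jit=True)
#   print(src)  # generated Python source as a string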
class CodeGen:
def __init__(self,
type_defs,
input_defs,
output_defs,
rules,
out_file,
options):
assert type_defs
assert input_defs
assert output_defs
assert rules
assert out_file
self.type_defs = type_defs
self.input_defs = input_defs
self.output_defs = output_defs
self.rules = rules
self.out_file = out_file
self.output_assignments = None
options = dict(options or {})
self.no_jit = get_config_value(options, CONFIG_NAME_NO_JIT)
self.vectorize = get_config_value(options, CONFIG_NAME_VECTORIZE)
self.parameterize = get_config_value(options, CONFIG_NAME_PARAMETERIZE)
self.function_name = get_config_value(options, CONFIG_NAME_FUNCTION_NAME)
self.inputs_name = get_config_value(options, CONFIG_NAME_INPUTS_NAME)
self.outputs_name = get_config_value(options, CONFIG_NAME_OUTPUTS_NAME)
self.params_name = get_config_value(options, CONFIG_NAME_PARAMS_NAME)
self.use_py_types = get_config_value(options, CONFIG_NAME_TYPES)
self.and_pattern = _get_config_op_pattern(options, CONFIG_NAME_AND_PATTERN)
self.or_pattern = _get_config_op_pattern(options, CONFIG_NAME_OR_PATTERN)
self.not_pattern = _get_config_op_pattern(options, CONFIG_NAME_NOT_PATTERN)
self.expr_gen = ExprGen(type_defs, input_defs,
parameterize=self.parameterize,
vectorize=self.vectorize,
no_jit=self.no_jit,
not_pattern=self.not_pattern,
and_pattern=self.and_pattern,
or_pattern=self.or_pattern)
def gen_code(self):
self.output_assignments = {}
self._write_imports()
self._write_type_prop_functions()
self._write_inputs_class()
self._write_outputs_class()
self._write_params()
self._write_apply_rules_function()
def _write_imports(self):
numba_import = 'from numba import jit, jitclass, float64'
numpy_import = 'import numpy as np'
if self.no_jit:
if self.vectorize == VECTORIZE_FUNC:
self._write_lines('', numpy_import)
else:
if self.vectorize == VECTORIZE_PROP:
self._write_lines('', numba_import + ', vectorize', numpy_import)
elif self.vectorize == VECTORIZE_FUNC:
self._write_lines('', numba_import, numpy_import)
else:
self._write_lines('', numba_import)
def _write_type_prop_functions(self):
numba_decorator = self._get_numba_decorator(prop_func=True)
for type_name, type_def in self.type_defs.items():
for prop_name, prop_def in type_def.items():
prop_value, func_params, func_body_pattern = prop_def
if self.parameterize and func_params:
func_header = 'def _{}_{}(x{}):'.format(type_name, prop_name, ', ' + ', '.join(func_params.keys()))
func_body = func_body_pattern.format(**{key: key for key in func_params.keys()})
else:
func_header = 'def _{}_{}(x):'.format(type_name, prop_name)
func_body = func_body_pattern.format(**func_params)
func_body_lines = map(lambda line: ' ' + str(line), func_body.split('\n'))
self._write_lines('', '',
numba_decorator,
func_header,
' # {}.{}: {}'.format(type_name, prop_name, prop_value),
*func_body_lines)
def _write_apply_rules_function(self):
if self.parameterize:
function_params = [('inputs', self.inputs_name),
('outputs', self.outputs_name),
('params', self.params_name)]
else:
function_params = [('inputs', self.inputs_name),
('outputs', self.outputs_name)]
if self.use_py_types:
function_args = ', '.join(['{}: {}'.format(param_name, param_type)
for param_name, param_type in function_params])
else:
function_args = ', '.join(['{}'.format(param_name)
for param_name, _ in function_params])
numba_decorator = self._get_numba_decorator()
self._write_lines('', '',
numba_decorator,
'def {}({}):'.format(self.function_name, function_args))
if self.vectorize == VECTORIZE_FUNC:
output_var = list(self.output_defs.keys())[0]
self._write_lines(' for i in range(len(outputs.{output_var})):'.format(output_var=output_var))
self._write_lines(' t0 = 1.0')
else:
self._write_lines(' t0 = 1.0')
for rule in self.rules:
self._write_rule(rule, 1, 1)
def _get_numba_decorator(self, prop_func=False):
if self.vectorize == VECTORIZE_PROP and prop_func:
numba_decorator = '@vectorize([float64(float64)])'
else:
numba_decorator = '@jit(nopython=True)'
if self.no_jit:
numba_decorator = '# ' + numba_decorator
return numba_decorator
def _write_inputs_class(self):
self._write_io_class(self.inputs_name, self.input_defs)
def _write_outputs_class(self):
self._write_io_class(self.outputs_name, self.output_defs)
def _write_io_class(self, class_name, var_defs):
self._write_class(class_name, var_defs.keys())
def _write_params(self):
if not self.parameterize:
return
param_names = []
param_values = {}
for type_name, type_def in self.type_defs.items():
for prop_name, prop_def in type_def.items():
prop_value, func_params, func_body = prop_def
for param_name, param_value in func_params.items():
qualified_param_name = _get_qualified_param_name(type_name, prop_name, param_name)
param_names.append(qualified_param_name)
param_values[qualified_param_name] = param_value
self._write_class(self.params_name, param_names, param_values)
# See http://numba.pydata.org/numba-doc/dev/user/jitclass.html
def _write_class(self, class_name, var_names, param_values: Optional[Dict[str, Any]] = None):
is_io = param_values is None
spec_name = '_{}Spec'.format(class_name)
spec_lines = ['{} = ['.format(spec_name)]
for var_name in var_names:
if param_values:
spec_lines.append(' ("{}", float64),'.format(var_name))
elif not self.no_jit and self.vectorize != VECTORIZE_NONE:
spec_lines.append(' ("{}", float64[:]),'.format(var_name))
else:
spec_lines.append(' ("{}", float64),'.format(var_name))
spec_lines.append(']')
if self.no_jit:
spec_lines = map(lambda line: '# ' + line, spec_lines)
self._write_lines('', '', *spec_lines)
numba_line = '@jitclass({})'.format(spec_name)
if self.no_jit:
numba_line = '# ' + numba_line
if is_io and self.vectorize == VECTORIZE_FUNC:
if self.use_py_types:
init_head = ' def __init__(self, size: int):'
else:
init_head = ' def __init__(self, size):'
else:
init_head = ' def __init__(self):'
self._write_lines('', '',
numba_line,
'class {}:'.format(class_name),
init_head)
for var_name in var_names:
if param_values:
self._write_lines(' self.{} = {}'.format(var_name, param_values[var_name]))
elif is_io and self.vectorize == VECTORIZE_FUNC:
self._write_lines(' self.{} = np.zeros(size, dtype=np.float64)'.format(var_name))
elif self.vectorize != VECTORIZE_NONE:
self._write_lines(' self.{} = np.zeros(1, dtype=np.float64)'.format(var_name))
else:
self._write_lines(' self.{} = 0.0'.format(var_name))
def _write_rule(self, rule: List, source_level: int, target_level: int):
sub_target_level = target_level
for stmt in rule:
keyword = stmt[0]
if keyword == 'if':
sub_target_level = target_level
self._write_stmt(keyword, stmt[1], stmt[2], source_level, sub_target_level)
elif keyword == 'elif':
sub_target_level += 1
self._write_stmt(keyword, stmt[1], stmt[2], source_level, sub_target_level)
elif keyword == 'else':
self._write_stmt(keyword, None, stmt[1], source_level, sub_target_level)
elif keyword == '=':
self._write_assignment(stmt[1], stmt[2], source_level, sub_target_level)
else:
                raise NotImplementedError('unsupported statement keyword: {}'.format(keyword))
def _write_stmt(self,
keyword: str,
condition_expr: Optional[str],
body: List,
source_level: int,
target_level: int):
not_pattern = '1.0 - {x}' # note, not using self.not_pattern here!
source_indent = (4 * source_level) * ' '
if self.vectorize == VECTORIZE_FUNC:
target_indent = 8 * ' '
else:
target_indent = 4 * ' '
t0 = 't' + str(target_level - 1)
t1 = 't' + str(target_level - 0)
if keyword == 'if' or keyword == 'elif':
condition = self.expr_gen.gen_expr(condition_expr)
if keyword == 'if':
self._write_lines('{tind}#{sind}{key} {expr}:'.format(tind=target_indent, sind=source_indent,
key=keyword, expr=condition_expr))
target_value = self.and_pattern.format(x=t0, y=condition)
else:
tp = 't' + str(target_level - 2)
self._write_lines('{tind}#{sind}{key} {expr}:'.format(tind=target_indent, sind=source_indent,
key=keyword, expr=condition_expr))
target_value = self.and_pattern.format(x=tp, y=not_pattern.format(x=t0))
self._write_lines('{tind}{tvar} = {tval}'.format(tind=target_indent, tvar=t0, tval=target_value))
target_value = self.and_pattern.format(x=t0, y=condition)
else:
self._write_lines('{tind}#{sind}else:'.format(tind=target_indent, sind=source_indent))
target_value = self.and_pattern.format(x=t0, y=not_pattern.format(x=t1))
self._write_lines('{tind}{tvar} = {tval}'.format(tind=target_indent, tvar=t1, tval=target_value))
self._write_rule(body, source_level + 1, target_level + 1)
def _write_assignment(self, var_name: str, var_value: str, source_level: int, target_level: int):
source_indent = (source_level * 4) * ' '
if self.vectorize == VECTORIZE_FUNC:
target_indent = 8 * ' '
else:
target_indent = 4 * ' '
t0 = 't' + str(target_level - 1)
_, prop_def = self._get_output_def(var_name, var_value)
prop_value, _, _ = prop_def
if prop_value == 'true()':
assignment_value = t0
elif prop_value == 'false()':
assignment_value = self.not_pattern.format(x=t0)
else:
            raise ValueError('Currently you can only assign properties'
                             ' whose values are "true()" or "false()"')
output_assignments = self.output_assignments.get(var_name)
if output_assignments is None:
output_assignments = [assignment_value]
self.output_assignments[var_name] = output_assignments
else:
output_assignments.append(assignment_value)
out_pattern = '{tval}'
if len(output_assignments) > 1:
if self.vectorize == VECTORIZE_FUNC:
out_pattern = self.or_pattern.format(x='outputs.{name}[i]', y=out_pattern)
else:
out_pattern = self.or_pattern.format(x='outputs.{name}', y=out_pattern)
if self.vectorize == VECTORIZE_FUNC:
line_pattern = '{tind}outputs.{name}[i] = ' + out_pattern
else:
line_pattern = '{tind}outputs.{name} = ' + out_pattern
self._write_lines('{tind}#{sind}{name} = {sval}'.format(tind=target_indent, sind=source_indent,
name=var_name, sval=var_value))
self._write_lines(line_pattern.format(tind=target_indent, name=var_name,
tval=assignment_value))
def _get_output_def(self, var_name: VarName, prop_name: PropName) -> Tuple[TypeName, PropDef]:
return _get_type_name_and_prop_def(var_name, prop_name, self.type_defs, self.output_defs)
def _write_lines(self, *lines):
for line in lines:
self.out_file.write('%s\n' % line)
class ExprGen:
def __init__(self,
type_defs: TypeDefs,
var_defs: VarDefs,
parameterize=False,
vectorize=VECTORIZE_NONE,
no_jit=False,
not_pattern='1.0 - ({x})',
and_pattern='min({x}, {y})',
or_pattern='max({x}, {y})'):
assert type_defs
assert var_defs
assert vectorize
assert not_pattern
assert and_pattern
assert or_pattern
self.type_defs = type_defs
self.var_defs = var_defs
self.parameterize = parameterize
self.vectorize = vectorize
self.no_jit = no_jit
self.not_pattern = not_pattern
self.and_pattern = and_pattern
self.or_pattern = or_pattern
def gen_expr(self, rule_condition: str) -> str:
mod = ast.parse(rule_condition)
body = mod.body
if len(body) != 1 or not isinstance(body[0], ast.Expr):
raise ValueError('Invalid condition expression: [{}]'.format(rule_condition))
expr = body[0].value
return self._transpile_expression(expr)
def _transpile_expression(self, expr) -> str:
if isinstance(expr, ast.Compare):
left = expr.left
if not isinstance(left, ast.Name):
raise ValueError('Left side of comparison must be the name of an input')
var_name = expr.left.id
prop_name = expr.comparators[0].id
compare_op = expr.ops[0]
if isinstance(compare_op, ast.Eq) or isinstance(compare_op, ast.Is):
if self.vectorize == VECTORIZE_FUNC:
op_pattern = '_{t}_{r}(inputs.{l}{p}[i])'
else:
op_pattern = '_{t}_{r}(inputs.{l}{p})'
elif isinstance(compare_op, ast.NotEq) or isinstance(compare_op, ast.IsNot):
if self.vectorize == VECTORIZE_FUNC:
op_pattern = self.not_pattern.format(x='_{t}_{r}(inputs.{l}{p}[i])')
else:
op_pattern = self.not_pattern.format(x='_{t}_{r}(inputs.{l}{p})')
else:
raise ValueError('"==", "!=", "is", and "is not" are the only supported comparison operators')
type_name, prop_def = _get_type_name_and_prop_def(var_name, prop_name, self.type_defs, self.var_defs)
_, func_params, _ = prop_def
if self.parameterize and func_params:
params = ', ' + ', '.join(['{p}=params.{qp}'.format(p=param_name,
qp=_get_qualified_param_name(type_name,
prop_name,
param_name))
for param_name in func_params.keys()])
else:
params = ''
return op_pattern.format(t=type_name, r=prop_name, l=var_name, p=params)
if isinstance(expr, ast.UnaryOp):
op = expr.op
if isinstance(op, ast.Not):
op_pattern = self.not_pattern
else:
raise ValueError('"not" is the only supported unary operator')
v = expr.operand
t = self._transpile_expression(v)
return op_pattern.format(x=t)
if isinstance(expr, ast.BoolOp):
op = expr.op
if isinstance(op, ast.And):
op_pattern = self.and_pattern
elif isinstance(op, ast.Or):
op_pattern = self.or_pattern
else:
raise ValueError('"and" and "or" are the only supported binary operators')
t1 = None
for v in expr.values:
if t1 is None:
t1 = self._transpile_expression(v)
else:
t2 = self._transpile_expression(v)
t1 = op_pattern.format(x=t1, y=t2)
return t1
raise ValueError('Unsupported expression')
def _types_to_type_defs(types: Dict[str, Dict[str, str]]) -> TypeDefs:
type_defs = OrderedDict()
for type_name, type_properties in types.items():
type_def = {}
type_defs[type_name] = type_def
for prop_name, prop_value in type_properties.items():
try:
prop_result = eval(prop_value, vars(propfuncs), {})
except Exception:
raise ValueError('Illegal value for property "{}" of type "{}": {}'.format(prop_name,
type_name,
prop_value))
func_params, func_body = prop_result
type_def[prop_name] = prop_value, func_params, func_body
return type_defs
def _get_type_name_and_prop_def(var_name: VarName,
prop_name: PropName,
type_defs: TypeDefs,
var_defs: VarDefs) -> Tuple[TypeName, PropDef]:
type_name = var_defs.get(var_name)
if type_name is None:
raise ValueError('Variable "{}" is undefined'.format(var_name))
type_def = type_defs.get(type_name)
if type_def is None:
raise ValueError('Type "{}" of variable "{}" is undefined'.format(type_name, var_name))
if prop_name not in type_def:
raise ValueError('"{}" is not a property of type "{}" of variable "{}"'.format(prop_name, type_name, var_name))
prop_def = type_def[prop_name]
return type_name, prop_def
def _get_qualified_param_name(type_name: TypeName,
prop_name: PropName,
param_name: PropFuncParamName) -> str:
return '{t}_{p}_{k}'.format(t=type_name, p=prop_name, k=param_name)
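# Example (a sketch, not from the original module):
#     _get_qualified_param_name('Radiance', 'high', 'x0')  ->  'Radiance_high_x0'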
def _get_config_op_pattern(options, op_pattern_name):
op_pattern = get_config_value(options, op_pattern_name)
no_jit = get_config_value(options, CONFIG_NAME_NO_JIT)
vectorize = get_config_value(options, CONFIG_NAME_VECTORIZE)
return _get_effective_op_pattern(op_pattern, no_jit=no_jit, vectorize=vectorize)
def _get_effective_op_pattern(op_pattern, no_jit=False, vectorize=VECTORIZE_NONE):
if not no_jit and vectorize == VECTORIZE_PROP:
# TODO: improve following naive replacements, e.g. use regex-based approach
return op_pattern.replace('min(', 'np.minimum(').replace('max(', 'np.maximum(')
else:
return op_pattern
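# A possible regex-based variant of the naive replacement above (a sketch
# responding to the TODO, not part of the original module): it only rewrites
# 'min('/'max(' call sites and leaves longer identifiers like 'arg_min(' alone.
def _get_effective_op_pattern_regex(op_pattern, no_jit=False, vectorize=VECTORIZE_NONE):
    import re
    if not no_jit and vectorize == VECTORIZE_PROP:
        # 'min(' -> 'np.minimum(', 'max(' -> 'np.maximum('
        return re.sub(r'\b(min|max)\(', r'np.\g<1>imum(', op_pattern)
    return op_pattern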
| mit | -4,495,577,015,104,126,000 | 43.288703 | 119 | 0.531885 | false |
swarna-k/MyDiary | app/models.py | 1 | 1566 | from app import db
from werkzeug.security import generate_password_hash, check_password_hash
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
firstname = db.Column(db.String(100))
lastname = db.Column(db.String(100))
email = db.Column(db.String(120), unique=True)
    pwdhash = db.Column(db.String(255))  # hashes from generate_password_hash exceed 54 characters
entries = db.relationship('Entry', backref='author', lazy='dynamic')
reminders = db.relationship('Reminder', backref='author', lazy='dynamic')
def __init__(self, firstname, lastname, email, password):
self.firstname = firstname.title()
self.lastname = lastname.title()
self.email = email.lower()
self.set_password(password)
def set_password(self, password):
self.pwdhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.pwdhash, password)
def __repr__(self):
return '<User %r>' % (self.firstname)
class Entry(db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(100))
body = db.Column(db.Text)
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Entry %r>' % (self.body)
class Reminder(db.Model):
id = db.Column(db.Integer, primary_key = True)
when = db.Column(db.DateTime)
body = db.Column(db.Text)
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Reminder %r>' % (self.body)
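# Usage sketch (hypothetical; assumes an application context and a created
# schema, and is not part of the original module):
#
#     user = User('Luke', 'Skywalker', '[email protected]', 'secret')
#     db.session.add(user)
#     db.session.commit()
#     user.check_password('secret')   # -> True
#     user.check_password('wrong')    # -> False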
| bsd-3-clause | 1,779,423,689,113,203,200 | 29.705882 | 75 | 0.65645 | false |
omarkadry/rsa_algorithm | RSA.py | 1 | 5283 | #!/usr/bin/python
#Algorithms Project Part 1a
#Omar Kadry
#CMSC 441
#Dr. Marron
#IMPLEMENTATION NOTES
#Python's built-in pow function uses binary exponentiation with reduction modulo n to compute modular
#exponentiation. This is the same algorithm as MODULAR-EXPONENTIATION(a,b,n) as used in the text.
#For large number multiplication Python uses Karatsuba's method as discussed in class
#Encrypted using modulus of 2048 bits
#Message Encrypted with Private Key =
#549335432742725778252187541104443188156944438806863457411666058499398272260706426139538267238120336092084632198514701950566203930065985324580534295693425367212921830205866755643739579288731322322946366466576799796974416100601383412159359169170613839877922173796152893918170136479717941167924064476336789776106984955596378941959676443995574307557232184168653454435294749983774161045180981596162964832360087083009219442813368249004389009182055455524458934480504555947413171214222377987666294266525295763559510397442092718659910879958017424466509571661222667744582625838716048450963735149873220637697801126262181088272
#n = 2372112898706524098783243835606671423055801883554227254030743710505202283932667011668956139382911768876035660572032080308562219037288900124052316286309512108625859836958747947762092799677854295671866288119481685786760570903533545560435541052326183788082279075073373227880942687435505490994525413101260845901748238215480998501123816262694263026377952163660645333809073068011604416987281948409408692393376191358516220341631487894075618891499412550098438456600441042870219500840853342452184082591601805986792948794525871595912715813197678328912976549353915846570322821639411967156886422360861220109970600152445030560129
#public key e = 1977623957817836883919633554596704012915783900570809149483856078010145425692545878452812725561415102822918517227924598205956910940350062144643427460974258169951841328548095289498955467345087157904399185646775059360160689508306113707875539862799501027047474838298216312008836598256088581250099042957573530717659415412893768343977899980494510094815770699761034869232518446869348437561961594909995056962983992121384916099020899755884457999313029602625570516932900789485878260172195900227111449085645227576679740196755445527867666825244974372425673866849078226602801561771006724501838806746943672716086807419555183315337s
import sys
import os
import random
import math
import argparse
s = 5 #s for miller-rabin test
#Constants to make code more readable
PRIME = 1
COMPOSITE = 2
#Generates Random Psuedoprimes of size bits
#validates with miller rabin test
def generate_rand(size):
n = random.SystemRandom().getrandbits(size)
while(n == 1 or n == 0):
n = random.SystemRandom().getrandbits(size)
while True:
if n % 2 == 0:
n = n + 1
if(miller_rabin(n,s) == PRIME):
return n
n = n + 2
#Miller-Rabin test
def miller_rabin(n,s):
for j in range(0,s):
a = random.SystemRandom().randint(1, n-1)
if(witness(a,n)):
return COMPOSITE
return PRIME
#Witness function for miller-rabin
def witness(a,n):
t,u = calc_t_u(n-1)
x = []
x.append(modular_exponentiation(a,u,n))
for i in range(1,t+1):
x.append(modular_exponentiation(x[i-1],2,n))
if (x[i] == 1) and (x[i-1] != 1) and (x[i-1] != n-1):
return True
if x[t] != 1:
return True
return False
def modular_exponentiation(a,b,n):
a = int(a)
b = int(b)
n = int(n)
return pow(a,b,n)
#Calculates t and u for the witness function
def calc_t_u(n):
t = 0
u = n
while (u % 2 == 0):
        u = u // 2  # integer division keeps u an int under Python 3 as well
        t += 1
return t,u
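#Worked example (illustration only): for n - 1 = 12 = 2**2 * 3,
#calc_t_u(12) returns (2, 3), i.e. t = 2 and odd u = 3 -- exactly the
#decomposition the witness test needs.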
#Gets a value for e
#Generates a random value and checks it's relatively prime to phi_n
def get_e(phi_n):
e = random.SystemRandom().randint(3, phi_n)
while euclid(phi_n,e) != 1:
e = random.SystemRandom().randint(3,phi_n)
return e
#Euclid and extended euclid are iterative due to recursion depth errors
#being found when the modulus size was >= 1024 bits
#Euclids algorithm
def euclid(a,b):
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
#Extended Euclid's Algorithm
def extend_euclid(a,b):
x,y, u,v = 0,1, 1,0
while a != 0:
q, r = b//a, b%a
m, n = x-u*q, y-v*q
b,a, x,y, u,v = a,r, u,v, m,n
gcd = b
return gcd, x, y
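#Worked example (illustration only): extend_euclid(3, 7) returns (1, -2, 1)
#since gcd(3, 7) = 1 and 3*(-2) + 7*1 = 1; reducing -2 modulo 7 gives 5,
#the multiplicative inverse of 3 (see get_mult_inverse below).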
def get_mult_inverse(e, phi_n):
a,b,n = e,1,phi_n
d,_x,_y = extend_euclid(a,n)
if(d % b == 0):
return _x % n
else:
return -1
def msg_to_int(m):
x = 0
for c in m:
x = x << 8
x = x ^ ord(c)
return x
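#Example (illustration only): msg_to_int("AB") packs the bytes 0x41 and 0x42
#into the single integer 0x4142 == 16706.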
if __name__ == '__main__':
private_key = 0
public_key = 0
parser = argparse.ArgumentParser(description="Generates an RSA public and private key")
parser.add_argument("-s", "--size", type=int,
help="Size in bits of RSA Key to Generate", required=True)
parser.add_argument("-m", "--message", type=str, help="A Message to Encrypt")
args = parser.parse_args()
modulus_size = args.size
p = generate_rand(modulus_size//2)
q = generate_rand(modulus_size//2)
while(p == q):
q = generate_rand(modulus_size//2)
n = p * q
phi_n = (p - 1) * (q - 1)
e = get_e(phi_n)
d = int(get_mult_inverse(e, phi_n))
print "N = \n", n, '\n'
print "Private Key d = \n", int(d), '\n'
print "Public Key e = \n", int(e), '\n'
if(args.message):
m = args.message
"Encrypting: %s" % m
print "\"",m,"\" encrypted with the private key is\n",
m = msg_to_int(m)
p = modular_exponentiation(m,d,n)
print p
| mit | 2,305,337,433,324,493,600 | 33.305195 | 633 | 0.769071 | false |
mprat/learningjulia | nbconvert_config.py | 1 | 7416 | from nbconvert.preprocessors import ExecutePreprocessor, Preprocessor
import numpy as np
def jekyllurl(path):
"""
Take the filepath of an image output by the ExportOutputProcessor
and convert it into a URL we can use with Jekyll
"""
return path.replace("../..", "")
def svg_filter(svg_xml):
"""
Remove the DOCTYPE and XML version lines from
the inline XML SVG
"""
svgstr = "".join(svg_xml)
start_index = svgstr.index("<svg")
end_index = svgstr.index("</svg>")
return svgstr[start_index:end_index + 6]
def var_def_to_var_list(var_def):
if 'linspace' in var_def:
v = var_def.replace("linspace(", "")
v = v.replace(")", "")
start, stop, num = v.split(",")
        return np.linspace(
            float(start.strip()),
            float(stop.strip()),
            int(float(num.strip())))  # np.linspace expects an integer sample count
elif '[' in var_def and ']' in var_def:
v = var_def.replace("[", "")
v = v.replace("]", "")
v = v.split(",")
return [x.strip() for x in v]
else:
raise TypeError("not implemented for {}".format(var_def))
class ExecuteWithInteractPreprocessor(ExecutePreprocessor):
def preprocess_cell(self, cell, resources, cell_index):
if cell.cell_type != 'code':
return cell, resources
if "@manipulate" in cell.source:
original_source = cell.source
cell_manipulate = cell.copy()
cell_source = original_source.split("\n")
cell_manipulate.source = "\n".join([cell_source[0], cell_source[-1]])
manipulate_output = self.run_cell(cell_manipulate)
outs = []
outs.extend(manipulate_output)
main_source = "\n".join(cell_source[1:-1])
var_def = cell_source[0].replace("@manipulate", "")
var_def = var_def.replace("for", "").strip().split("=")
var_name, var_list = var_def
# currently this only works for a single for loop
# turn all the variables into a loop
all_vars = var_def_to_var_list(var_list)
for next_var in all_vars:
var_defs = "{}={}".format(var_name, next_var)
cell_copy = cell.copy()
cell_copy.source = "\n".join([var_defs, main_source.strip()])
outputs = self.run_cell(cell_copy)
outs.extend(outputs)
cell.source = original_source
cell.outputs = outs
# fix the outputs
# probably better done at the postprocessing step
# import ipdb; ipdb.set_trace()
# raise TypeError("stopping")
else:
outputs = self.run_cell(cell)
cell.outputs = outputs
return cell, resources
# if 'Interact' in cell.outputs[0]['data']['text/plain']:
# there should be a widget here
class RemoveInteractJsShimPreprocessor(Preprocessor):
def preprocess(self, nb, resources):
"""
make sure the widgets resources get put into the resources
"""
if 'widgets' in nb['metadata'].keys():
resources['metadata']['widgets'] = nb['metadata']['widgets']
return super(RemoveInteractJsShimPreprocessor, self).preprocess(nb, resources)
def preprocess_cell(self, cell, resources, cell_index):
"""
remove any outputs that have interact-js-shim
"""
if 'outputs' in cell:
outputs = cell['outputs']
new_outputs = []
for output in outputs:
new_output = output.copy()
if "data" in output.keys():
data_output = output["data"]
new_data_output = data_output.copy()
if 'text/html' in data_output.keys():
text_html = data_output['text/html']
if text_html.startswith('<div id=\"interact-js-shim\">'):
start_index = text_html.find('<div id=\"interact-js-shim\">')
end_index = text_html.find('</div>')
new_html = ""
if start_index > 0:
new_html += text_html[0:start_index]
if end_index + 6 < len(text_html):
new_html += text_html[end_index+6:]
new_html = new_html.strip()
if len(new_html) > 0:
new_data_output['text/html'] = new_html
else:
del new_data_output['text/html']
else:
new_data_output['text/html'] = text_html
if len(new_data_output.keys()) > 0:
new_output['data'] = new_data_output
else:
del new_output['data']
if 'data' in new_output:
new_outputs.append(new_output)
else:
new_outputs.append(new_output)
cell['outputs'] = new_outputs
return cell, resources
class InsertWidgetsPreprocessor(Preprocessor):
def preprocess_cell(self, cell, resources, cell_index):
"""
if the cell is a cell with @manipulate, add the appropriate
widget script into the output
"""
if cell.cell_type != 'code':
return cell, resources
if "@manipulate" in cell.source:
widget_state = resources['metadata']['widgets']['application/vnd.jupyter.widget-state+json']['state']
interact_options = cell.outputs[0]['data']['text/plain']
start_index = interact_options.find('"')
model_name = interact_options[start_index + 1:]
next_index = model_name.find('"')
model_name = model_name[:next_index]
# match the widget based on the descriptions
matched_model_id = None
for model_id in widget_state.keys():
if widget_state[model_id]['state']['description'] == model_name:
matched_model_id = model_id
break
# construct the script tag
script_tag = '<script type="application/vnd.jupyter.widget-view+json">{"model_id": "' + matched_model_id + '"}</script>'
cell.outputs[0]['data']['text/html'] = script_tag
return cell, resources
c = get_config()
c.NbConvertApp.export_format = 'html'
c.NbConvertApp.output_files_dir = '../../assets/imgs/{notebook_name}'
c.HTMLExporter.preprocessors = [
'nbconvert.preprocessors.ExecutePreprocessor',
# ExecuteWithInteractPreprocessor,
'nbconvert.preprocessors.coalesce_streams',
'nbconvert.preprocessors.ExtractOutputPreprocessor',
RemoveInteractJsShimPreprocessor,
InsertWidgetsPreprocessor]
c.HTMLExporter.template_file = 'notebooks/jekyll.tpl'
c.HTMLExporter.filters = {"jekyllimgurl": jekyllurl, "svg_filter": svg_filter}
# if there's an error in one of the cells let the execution keep going
c.ExecutePreprocessor.allow_errors = True
# disable the timeout
c.ExecutePreprocessor.timeout = -1
c.ExecutePreprocessor.iopub_timeout = 10
# write the final HTML files into the _include/notebooks directory
c.FilesWriter.build_directory = "_includes/notebooks/"
| mit | -4,977,311,403,519,195,000 | 38.238095 | 132 | 0.551106 | false |
KmolYuan/pyslvs | test/test_core.py | 1 | 3764 | # -*- coding: utf-8 -*-
"""Pyslvs core module test."""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2021"
__license__ = "AGPL"
__email__ = "[email protected]"
from math import sqrt, radians
from pyslvs import (
Coord, SolverSystem, pxy, ppp, plap, pllp, plpp, palp, expr_solving,
t_config, parse_vpoints, example_list,
)
from . import TestBase
class CoreTest(TestBase):
def test_pxy(self):
"""Test for pxy function."""
coord = pxy(Coord(80, 90), 40, -20)
self.assertAlmostEqual(120, coord.x)
self.assertAlmostEqual(70, coord.y)
def test_ppp(self):
"""Test for ppp function."""
coord = ppp(Coord(0, 0), Coord(0, 90), Coord(90, 0))
self.assertAlmostEqual(90, coord.x)
self.assertAlmostEqual(90, coord.y)
def test_plap(self):
"""Test for plap function."""
coord = plap(Coord(0, 0), 50 * sqrt(2), radians(45), Coord(50, 0))
self.assertAlmostEqual(50, coord.x)
self.assertAlmostEqual(50, coord.y)
def test_pllp(self):
"""Test for pllp function."""
c1 = Coord(-30, 0)
c2 = Coord(30, 0)
coord = pllp(c1, 50, 50, c2)
self.assertAlmostEqual(0, coord.x)
self.assertAlmostEqual(40, coord.y)
coord = pllp(c1, 30, 30, c2)
self.assertAlmostEqual(coord.x, 0)
self.assertAlmostEqual(coord.y, 0)
coord = pllp(c1, 90, 30, c2)
self.assertAlmostEqual(60, coord.x)
self.assertAlmostEqual(0, coord.y)
def test_plpp(self):
"""Test for plpp function."""
coord = plpp(Coord(0, 0), sqrt(5), Coord(0, -3), Coord(3 / 2, 0))
self.assertAlmostEqual(2, coord.x)
self.assertAlmostEqual(1, coord.y)
def test_palp(self):
"""Test for palp function."""
coord = palp(Coord(0, 0), radians(15), 20, Coord(60, 10))
self.assertAlmostEqual(42.253221, coord.x, 6)
self.assertAlmostEqual(19.222356, coord.y, 6)
def test_solving(self):
"""Test triangular formula solving.
+ Test for PMKS parser.
+ Test data collecting function.
+ Test expression solving function.
"""
def test_case(name: str):
expr, inputs = example_list(name)
vpoints = parse_vpoints(expr)
exprs = t_config(vpoints, inputs)
result = expr_solving(exprs, vpoints, {pair: 0. for pair in inputs})
return result[-1]
x, y = test_case("Jansen's linkage (Single)")
self.assertAlmostEqual(-43.170055, x, 6)
self.assertAlmostEqual(-91.753226, y, 6)
x, y = test_case("Crank slider (RP joint)")
self.assertAlmostEqual(103.801126, x, 6)
self.assertAlmostEqual(78.393173, y, 6)
x, y = test_case("Parallel Linkage")
self.assertAlmostEqual(200, x, 6)
self.assertAlmostEqual(0, y, 6)
# TODO: New test case for Inverted slider
def test_solving_bfgs(self):
"""Test Sketch Solve kernel."""
expr, _ = example_list("Jansen's linkage (Single)")
system = SolverSystem(parse_vpoints(expr), {(0, 1): 0.})
result = system.solve()
x, y = result[7]
self.assertAlmostEqual(-43.170055, x, 6)
self.assertAlmostEqual(-91.753226, y, 6)
# Test if angle value changed
system.set_inputs({(0, 1): 45.})
result = system.solve()
x, y = result[7]
self.assertAlmostEqual(-24.406394, x, 6)
self.assertAlmostEqual(-91.789596, y, 6)
# Test if link length changed
system.set_data({(0, 1): 16.})
result = system.solve()
x, y = result[7]
self.assertAlmostEqual(-24.117994, x, 6)
self.assertAlmostEqual(-91.198072, y, 6)
| agpl-3.0 | -3,383,056,336,825,432,600 | 33.53211 | 80 | 0.582359 | false |
bubbleboy14/cantools | cantools/scripts/index.py | 1 | 8655 | """
### Usage: ctindex [--mode=MODE] [--domain=DOMAIN] [--port=PORT] [--skip=SKIP]
### Options:
-h, --help show this help message and exit
-m MODE, --mode=MODE may be: 'refcount' (default - count up all foreignkey
references for sort orders and such); 'index' (assign
each record a sequential integer index); 'urlsafekeys'
(update all key/keylist properties to use urlsafe keys
introduced in ct 0.8); 'cleanup' (delete zero-count
reference counters). Note regarding 'index' mode: it
_must_ happen remotely; it's generally unnecessary
unless you're trying to migrate an unindexed database
away from gae and need an index/key per record; it
should be invoked from _outside_ -- that's right,
outside -- of your project's directory (to avoid
loading up a bunch of google network tools that may be
crappy or cause issues outside of their normal
'dev_appserver' environment)
-d DOMAIN, --domain=DOMAIN
('index' mode only) what's the domain of the target
server? (default: localhost)
-p PORT, --port=PORT ('index' mode only) what's the port of the target
server? (default: 8080)
-s SKIP, --skip=SKIP skip these tables ('index' mode only) - use '|' as
separator, such as 'table1|table2|table3' (default:
none)
-i INDEX, --index=INDEX
start with this index ('index' mode only) (default: 0)
As you can see, this script's behavior changes according to the backend of the target project.
### dez
Run this if your CTRefCount records get messed up for
some reason. It will go through and recount everything
(in the default 'refcount' mode -- the other modes,
'urlsafekeys' and 'cleanup', are for migrating a CT-mediated
database from an older deployment to CT 0.8 or newer).
### gae
Run this in 'index' mode on a database with lots of missing index values.
"""
from getpass import getpass
from optparse import OptionParser
from cantools.util import error, log, batch
from cantools.db import get_schema, get_model, put_multi, delete_multi, unpad_key
from cantools.web import fetch
from cantools import config
if config.web.server == "dez":
from cantools.db import session, func, refresh_counter
try:
input = raw_input # py2/3 compatibility
except NameError:
pass
counts = { "_counters": 0 }
RETRIES = 5
#
# dez
#
def get_keys(kind, reference):
log("acquiring %s (%s) keys"%(kind, reference), 1)
mod = get_model(kind)
q = session.query(getattr(mod, "key"))
qcount = q.count()
log("found %s"%(qcount,), 2)
fname, fkey = reference.split(".")
fmod = get_model(fname)
fprop = getattr(fmod, fkey)
sub = session.query(fprop, func.count("*").label("sub_count")).group_by(fprop).subquery()
q = q.join(sub, mod.key==getattr(sub.c, fkey))
newcount = q.count()
log("filtering out %s untargetted entities"%(qcount - newcount), 2)
qcount = newcount
log("returning %s keys"%(qcount,), 2)
return q.all()
def refmap():
log("compiling back reference map")
rmap = {}
for tname, schema in list(get_schema().items()):
for pname, kinds in list(schema["_kinds"].items()):
reference = "%s.%s"%(tname, pname)
counts[reference] = 0
for kind in [k for k in kinds if k != "*"]: # skip wildcard for now
if kind not in rmap:
rmap[kind] = {}
rmap[kind][reference] = get_keys(kind, reference)
return rmap
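# For a hypothetical schema where 'post.author' references kind 'user',
# refmap() returns something shaped like:
#   {'user': {'post.author': [(<key1>,), (<key2>,), ...]}}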
def do_batch(chunk, reference):
log("refreshing %s %s keys"%(len(chunk), reference), 1)
i = 0
rc = []
for item in chunk: # item is single-member tuple
rc.append(refresh_counter(item[0], reference))
i += 1
if not i % 100:
log("processed %s"%(i,), 3)
counts[reference] += len(chunk)
counts["_counters"] += len(rc)
log("refreshed %s total"%(counts[reference],), 2)
log("updated %s counters"%(counts["_counters"],), 2)
put_multi(rc)
log("saved", 2)
def refcount():
log("indexing foreignkey references throughout database", important=True)
import model # load schema
for kind, references in list(refmap().items()):
log("processing table: %s"%(kind,), important=True)
for reference, keys in list(references.items()):
batch(keys, do_batch, reference)
tcount = sum(counts.values()) - counts["_counters"]
log("refreshed %s rows and updated %s counters"%(tcount, counts["_counters"]), important=True)
#
# gae
#
def _log_fetch(host, url, port):
res = fetch(host, url, port)
log(res)
return res
def _index_kind(kind, host, port, pw, index):
log("indexing %s"%(kind,), important=True)
retry = 0
while "Error" in _log_fetch(host, "/_db?action=index&pw=%s&kind=%s&index=%s"%(pw, kind, index), port):
log("error indexing %s"%(kind,), important=True)
if retry == RETRIES:
error("tried %s times! sorry."%(retry,))
retry += 1
log("trying again (retry: %s)"%(retry,))
def index(host, port, skips, index):
pw = getpass("what's the admin password? ")
log("indexing db at %s:%s"%(host, port), important=True)
# log(fetch(host, "/_db?action=index&pw=%s"%(pw,), port))
log("acquiring schema")
schema = fetch(host, "/_db?action=schema", port, ctjson=True)
for kind in schema:
if kind in skips:
log("skipping %s"%(kind,), important=True)
else:
_index_kind(kind, host, port, pw, index)
#
# url safety
#
def urlsafe():
log("updating key/keylist properties with urlsafe keys", important=True)
import model
schema = get_schema()
puts = []
for mod in schema:
mods = get_model(mod).query().all()
log("%s (%s)"%(mod, len(mods)), 1)
for m in mods:
if m.polytype != mod:
log("skipping! (%s != %s)"%(m.polytype, mod), 2)
continue
m.key = unpad_key(m.key.urlsafe())
for prop in schema[mod]["_kinds"]:
if schema[mod][prop] == "key":
setattr(m, prop, unpad_key(getattr(m, prop).urlsafe()))
else: # keylist
setattr(m, prop, [unpad_key(k.urlsafe()) for k in getattr(m, prop)])
puts.append(m)
log("saving records")
put_multi(puts)
log("updated %s keys"%(len(puts),), important=True)
if input("want to prune zero-count reference counters? (y/N)").lower().startswith("y"):
cleanup()
def cleanup():
log("cleaning up zero-count reference counters", important=True)
from cantools.db import lookup
ctrz = lookup.CTRefCount.query(lookup.CTRefCount.count == 0).all()
log("deleting %s zero-count reference counters"%(len(ctrz),))
delete_multi(ctrz)
log("all gone!")
def go():
parser = OptionParser("ctindex [--mode=MODE] [--domain=DOMAIN] [--port=PORT] [--skip=SKIP]")
parser.add_option("-m", "--mode", dest="mode", default="refcount",
help="may be: 'refcount' (default - count up all foreignkey references for sort "
"orders and such); 'index' (assign each record a sequential integer index); "
"'urlsafekeys' (update all key/keylist properties to use urlsafe keys "
"introduced in ct 0.8); 'cleanup' (delete zero-count reference counters). "
"Note regarding 'index' mode: it _must_ happen remotely; it's generally "
"unnecessary unless you're trying to migrate an unindexed database away from "
"gae and need an index/key per record; it should be invoked from _outside_ "
"-- that's right, outside -- of your project's directory (to avoid loading "
"up a bunch of google network tools that may be crappy or cause issues outside "
"of their normal 'dev_appserver' environment)")
parser.add_option("-d", "--domain", dest="domain", default="localhost",
help="('index' mode only) what's the domain of the target server? (default: localhost)")
parser.add_option("-p", "--port", dest="port", default="8080",
help="('index' mode only) what's the port of the target server? (default: 8080)")
parser.add_option("-s", "--skip", dest="skip", default="",
help="skip these tables ('index' mode only) - use '|' as separator, such as 'table1|table2|table3' (default: none)")
parser.add_option("-i", "--index", dest="index", default=0,
help="start with this index ('index' mode only) (default: 0)")
options, args = parser.parse_args()
log("mode: %s"%(options.mode,), important=True)
if options.mode == "refcount":
refcount()
elif options.mode == "index":
index(options.domain, int(options.port),
options.skip and options.skip.split("|") or [], options.index)
elif options.mode == "urlsafekeys":
urlsafe()
elif options.mode == "cleanup":
cleanup()
else:
error("unknown mode: %s"%(options.mode,))
log("goodbye")
if __name__ == "__main__":
go() | mit | -251,470,952,256,595,680 | 37.300885 | 118 | 0.645407 | false |
mpeuster/son-emu | src/emuvim/api/openstack/openstack_dummies/keystone_dummy_api.py | 1 | 16828 | # Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
from flask_restful import Resource
from flask import request, Response
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
from emuvim.api.openstack.helper import get_host
import logging
import json
LOG = logging.getLogger("api.openstack.keystone")
class KeystoneDummyApi(BaseOpenstackDummy):
def __init__(self, in_ip, in_port):
super(KeystoneDummyApi, self).__init__(in_ip, in_port)
self.api.add_resource(KeystoneListVersions, "/",
resource_class_kwargs={'api': self})
self.api.add_resource(KeystoneShowAPIv2, "/v2.0",
resource_class_kwargs={'api': self})
self.api.add_resource(KeystoneGetToken, "/v2.0/tokens",
resource_class_kwargs={'api': self})
self.api.add_resource(KeystoneShowAPIv3, "/v3.0",
resource_class_kwargs={'api': self})
self.api.add_resource(
KeystoneGetTokenv3, "/v3.0/auth/tokens", resource_class_kwargs={'api': self})
class KeystoneListVersions(Resource):
"""
List all known keystone versions.
Hardcoded for our version!
"""
def __init__(self, api):
self.api = api
def get(self):
"""
List API versions.
:return: Returns the api versions.
:rtype: :class:`flask.response` containing a static json encoded dict.
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
resp = dict()
resp['versions'] = dict()
version = [{
"id": "v2.0",
"links": [
{
"href": "http://%s:%d/v2.0" % (get_host(request), self.api.port),
"rel": "self"
}
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.identity-v2.0+json"
}
],
"status": "stable",
"updated": "2014-04-17T00:00:00Z"
}]
resp['versions']['values'] = version
return Response(json.dumps(resp), status=200,
mimetype='application/json')
class KeystoneShowAPIv2(Resource):
"""
Entrypoint for all openstack clients.
This returns all current entrypoints running on son-emu.
"""
def __init__(self, api):
self.api = api
def get(self):
"""
List API entrypoints.
:return: Returns an openstack style response for all entrypoints.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
# neutron_port = self.api.port + 4696
# heat_port = self.api.port + 3004
resp = dict()
resp['version'] = {
"status": "stable",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.identity-v2.0+json"
}
],
"id": "v2.0",
"links": [
{
"href": "http://%s:%d/v2.0" % (get_host(request), self.api.port),
"rel": "self"
}
]
}
LOG.debug(json.dumps(resp))
return Response(json.dumps(resp), status=200,
mimetype='application/json')
class KeystoneShowAPIv3(Resource):
"""
Entrypoint for all openstack clients.
This returns all current entrypoints running on son-emu.
"""
def __init__(self, api):
self.api = api
def get(self):
"""
List API entrypoints.
:return: Returns an openstack style response for all entrypoints.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
# neutron_port = self.api.port + 4696
# heat_port = self.api.port + 3004
resp = dict()
resp['version'] = {
"status": "stable",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.identity-v3.0+json"
}
],
"id": "v3.0",
"links": [
{
"href": "http://%s:%d/v3.0" % (get_host(request), self.api.port),
"rel": "self"
}
]
}
return Response(json.dumps(resp), status=200,
mimetype='application/json')
class KeystoneGetToken(Resource):
"""
Returns a static keystone token.
We don't do any validation so we don't care.
"""
def __init__(self, api):
self.api = api
def post(self):
"""
List API entrypoints.
This is hardcoded. For a working "authentication" use these ENVVARS:
* OS_AUTH_URL=http://<ip>:<port>/v2.0
* OS_IDENTITY_API_VERSION=2.0
* OS_TENANT_ID=fc394f2ab2df4114bde39905f800dc57
* OS_REGION_NAME=RegionOne
* OS_USERNAME=bla
* OS_PASSWORD=bla
        :return: Returns an openstack style token response.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
ret = dict()
req = json.loads(request.data)
ret['access'] = dict()
ret['access']['token'] = dict()
token = ret['access']['token']
token['issued_at'] = "2014-01-30T15:30:58.819Z"
token['expires'] = "2999-01-30T15:30:58.819Z"
token['id'] = req['auth'].get(
'token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
token['tenant'] = dict()
token['tenant']['description'] = None
token['tenant']['enabled'] = True
token['tenant']['id'] = req['auth'].get(
'tenantId', 'fc394f2ab2df4114bde39905f800dc57')
token['tenant']['name'] = "tenantName"
ret['access']['user'] = dict()
user = ret['access']['user']
user['username'] = req.get('username', "username")
user['name'] = "tenantName"
user['roles_links'] = list()
user['id'] = token['tenant'].get(
'id', "fc394f2ab2df4114bde39905f800dc57")
user['roles'] = [{'name': 'Member'}]
ret['access']['region_name'] = "RegionOne"
ret['access']['serviceCatalog'] = [{
"endpoints": [
{
"adminURL": "http://%s:%s/v2.1/%s" % (get_host(request), self.api.port + 3774, user['id']),
"region": "RegionOne",
"internalURL": "http://%s:%s/v2.1/%s" % (get_host(request), self.api.port + 3774, user['id']),
"id": "2dad48f09e2a447a9bf852bcd93548ef",
"publicURL": "http://%s:%s/v2.1/%s" % (get_host(request), self.api.port + 3774, user['id'])
}
],
"endpoints_links": [],
"type": "compute",
"name": "nova"
},
{
"endpoints": [
{
"adminURL": "http://%s:%s/v2.0" % (get_host(request), self.api.port),
"region": "RegionOne",
"internalURL": "http://%s:%s/v2.0" % (get_host(request), self.api.port),
"id": "2dad48f09e2a447a9bf852bcd93543fc",
"publicURL": "http://%s:%s/v2" % (get_host(request), self.api.port)
}
],
"endpoints_links": [],
"type": "identity",
"name": "keystone"
},
{
"endpoints": [
{
"adminURL": "http://%s:%s" % (get_host(request), self.api.port + 4696),
"region": "RegionOne",
"internalURL": "http://%s:%s" % (get_host(request), self.api.port + 4696),
"id": "2dad48f09e2a447a9bf852bcd93548cf",
"publicURL": "http://%s:%s" % (get_host(request), self.api.port + 4696)
}
],
"endpoints_links": [],
"type": "network",
"name": "neutron"
},
{
"endpoints": [
{
"adminURL": "http://%s:%s" % (get_host(request), self.api.port + 4242),
"region": "RegionOne",
"internalURL": "http://%s:%s" % (get_host(request), self.api.port + 4242),
"id": "2dad48f09e2a447a9bf852bcd93548cf",
"publicURL": "http://%s:%s" % (get_host(request), self.api.port + 4242)
}
],
"endpoints_links": [],
"type": "image",
"name": "glance"
},
{
"endpoints": [
{
"adminURL": "http://%s:%s/v1/%s" % (get_host(request), self.api.port + 3004, user['id']),
"region": "RegionOne",
"internalURL": "http://%s:%s/v1/%s" % (get_host(request), self.api.port + 3004, user['id']),
"id": "2dad48f09e2a447a9bf852bcd93548bf",
"publicURL": "http://%s:%s/v1/%s" % (get_host(request), self.api.port + 3004, user['id'])
}
],
"endpoints_links": [],
"type": "orchestration",
"name": "heat"
}
]
ret['access']["metadata"] = {
"is_admin": 0,
"roles": [
"7598ac3c634d4c3da4b9126a5f67ca2b"
]
            }
ret['access']['trust'] = {
"id": "394998fa61f14736b1f0c1f322882949",
"trustee_user_id": "269348fdd9374b8885da1418e0730af1",
"trustor_user_id": "3ec3164f750146be97f21559ee4d9c51",
"impersonation": False
}
return Response(json.dumps(ret), status=200,
mimetype='application/json')
except Exception as ex:
logging.exception("Keystone: Get token failed.")
            return str(ex), 500
class KeystoneGetTokenv3(Resource):
"""
Returns a static keystone token.
We don't do any validation so we don't care.
"""
def __init__(self, api):
self.api = api
def post(self):
"""
        Issue a (static) authentication token.
        This is hardcoded. For a working "authentication" use these ENVVARS:
        * OS_AUTH_URL=http://<ip>:<port>/v3.0
        * OS_IDENTITY_API_VERSION=3
* OS_TENANT_ID=fc394f2ab2df4114bde39905f800dc57
* OS_REGION_NAME=RegionOne
* OS_USERNAME=bla
* OS_PASSWORD=bla
        :return: Returns an openstack style token response.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
ret = dict()
req = json.loads(request.data)
ret['token'] = dict()
token = ret['token']
token['issued_at'] = "2014-01-30T15:30:58.819Z"
token['expires_at'] = "2999-01-30T15:30:58.819Z"
token['methods'] = ["password"]
token['extras'] = dict()
token['user'] = dict()
user = token['user']
user['id'] = req['auth'].get(
'token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
user['name'] = "tenantName"
user['password_expires_at'] = None
user['domain'] = {"id": "default", "name": "Default"}
token['audit_ids'] = ["ZzZwkUflQfygX7pdYDBCQQ"]
# project
token['project'] = {
"domain": {
"id": "default",
"name": "Default"
},
"id": "8538a3f13f9541b28c2620eb19065e45",
"name": "tenantName"
}
# catalog
token['catalog'] = [{
"endpoints": [
{
"url": "http://%s:%s/v2.1/%s" % (get_host(request), self.api.port + 3774, user['id']),
"region": "RegionOne",
"interface": "public",
"id": "2dad48f09e2a447a9bf852bcd93548ef"
}
],
"id": "2dad48f09e2a447a9bf852bcd93548ef",
"type": "compute",
"name": "nova"
},
{
"endpoints": [
{
"url": "http://%s:%s/v2.0" % (get_host(request), self.api.port),
"region": "RegionOne",
"interface": "public",
"id": "2dad48f09e2a447a9bf852bcd93543fc"
}
],
"id": "2dad48f09e2a447a9bf852bcd93543fc",
"type": "identity",
"name": "keystone"
},
{
"endpoints": [
{
"url": "http://%s:%s" % (get_host(request), self.api.port + 4696),
"region": "RegionOne",
"interface": "public",
"id": "2dad48f09e2a447a9bf852bcd93548cf"
}
],
"id": "2dad48f09e2a447a9bf852bcd93548cf",
"type": "network",
"name": "neutron"
},
{
"endpoints": [
{
"url": "http://%s:%s" % (get_host(request), self.api.port + 4242),
"region": "RegionOne",
"interface": "public",
"id": "2dad48f09e2a447a9bf852bcd93548cf"
}
],
"id": "2dad48f09e2a447a9bf852bcd93548cf",
"type": "image",
"name": "glance"
},
{
"endpoints": [
{
"url": "http://%s:%s/v1/%s" % (get_host(request), self.api.port + 3004, user['id']),
"region": "RegionOne",
"interface": "public",
"id": "2dad48f09e2a447a9bf852bcd93548bf"
}
],
"id": "2dad48f09e2a447a9bf852bcd93548bf",
"type": "orchestration",
"name": "heat"
}
]
return Response(json.dumps(ret), status=201,
mimetype='application/json')
except Exception as ex:
logging.exception("Keystone: Get token failed.")
            return str(ex), 500
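# Example request against a running emulator (host and port are assumptions):
#
#   curl -X POST http://127.0.0.1:5000/v2.0/tokens \
#        -H "Content-Type: application/json" \
#        -d '{"auth": {"tenantId": "fc394f2ab2df4114bde39905f800dc57",
#                      "passwordCredentials": {"username": "bla", "password": "bla"}}}'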
| apache-2.0 | -1,443,519,102,164,560,100 | 35.822757 | 120 | 0.455253 | false |
wuher/diablo | test/test_resource.py | 1 | 23625 | # -*- coding: utf-8 -*-
# test_resource.py ---
#
import json
import base64
from twisted.internet import defer, reactor
from twisted.web import server
from twisted.web.test.test_web import DummyRequest
from twisted.trial import unittest
from twisted.internet.defer import succeed
from twisted.python import log
from twisted.web.http import OK, NOT_FOUND, INTERNAL_SERVER_ERROR, CONFLICT
from diablo.resource import Resource
from diablo.api import RESTApi
from diablo.auth import HttpBasic, register_authenticator
from diablo.mappers.xmlmapper import XmlMapper
from diablo.mappers.jsonmapper import JsonMapper
from diablo.mappers.yamlmapper import YamlMapper
from diablo.http import NotFound, Response, Conflict
class DiabloDummyRequest(DummyRequest):
code = OK
data = ''
def __init__(self, *args, **kw):
DummyRequest.__init__(self, *args, **kw)
self.content = self
def read(self):
return self.data
class UnaccessibleResource(Resource):
allow_anonymous = False
def get(self, request):
return 'you will never see this'
class AuthenticatedResource(Resource):
allow_anonymous = False
authentication = HttpBasic()
def get(self, request):
return 'hello %s' % (request.user,)
class DiabloTestResource(Resource):
collection = {}
def get(self, request, *args, **kw):
if 'key' in kw:
key = kw.get('key')
if key in self.collection:
return self.collection[key]
else:
raise NotFound()
else:
return self.collection
def put(self, data, request, *args, **kw):
for k in data:
self.collection[k] = data[k]
def post(self, data, request, *args, **kw):
for k in data:
self.collection[k] = data[k]
def delete(self, request, *args, **kw):
if 'key' in kw:
key = kw.get('key')
if key in self.collection:
removed = self.collection.pop(kw['key'])
log.msg('removed', removed)
else:
raise NotFound()
else:
log.msg('removing entire collection')
self.collection = {}
class RouteTestResource1(Resource):
def get(self, request, *args, **kw):
return {'nothing': 'something'}
class RouteTestResource2(Resource):
def get(self, request, *args, **kw):
return {'something': 'nothing'}
regular_result = {'name': 'luke skywalker', 'occupation': 'jedi'}
class RegularTestResource(Resource):
def get(self, request, *args, **kw):
return regular_result
deferred_result = [1, 2, 3, 4, 5]
class DeferredTestResource(Resource):
def get(self, request, *args, **kw):
d = defer.Deferred()
reactor.callLater(0, d.callback, deferred_result)
return d
class ErrorResource(Resource):
""" Resource to test error scenarios. """
def get(self, request, id, *args, **kw):
self.error_id = int(id)
        if self.error_id == 1:
            raise NotFound("Can't find it, sorry.")
        elif self.error_id == 2:
raise Exception('Unknown exception.')
else:
return {'id': 9}
def _processResponse(self, request, response):
        if self.error_id == 3:
            raise Conflict("There's a conflict my friend.")
        elif self.error_id == 4:
raise TypeError('Bad type.')
else:
return Response(code=OK, content='hiihoo')
def _render(resource, request):
result = resource.render(request)
if isinstance(result, str):
request.write(result)
request.finish()
return succeed(None)
elif result is server.NOT_DONE_YET:
if request.finished:
return succeed(None)
else:
return request.notifyFinish()
else:
raise ValueError("Unexpected return value: %r" % (result,))
routes = [
('/auth/unaccessible', 'test_resource.UnaccessibleResource'),
('/auth/normal', 'test_resource.AuthenticatedResource'),
('/testregular(?P<format>\.?\w{1,8})?$', 'test_resource.RegularTestResource'),
('/testdeferred(?P<format>\.?\w{1,8})?$', 'test_resource.DeferredTestResource'),
('/a/useless/path$', 'test_resource.RouteTestResource1'),
('/a/useful/path(/)?(?P<tendigit>\d{10})?$', 'test_resource.RouteTestResource2'),
('/a/test/resource(/)?(?P<key>\w{1,10})?$', 'test_resource.DiabloTestResource'),
('/error/(?P<err_id>\d{1,2})', 'test_resource.ErrorResource'),
]
params = {
'indent': 4,
'ensure_ascii': False,
'encoding': 'utf-8',
}
xmlMapper = XmlMapper()
jsonMapper = JsonMapper()
yamlMapper = YamlMapper()
class AuthResourceTest(unittest.TestCase):
passwd = {
'jedi': 'jedixx',
'sith': 'sithxx'
}
def check_creds(self, username, password):
passwd = self.passwd.get(username, None)
return passwd == password
def get_auth_header(self, username, password):
credsz = base64.b64encode('%s:%s' % (username, password))
return {
'authorization': 'Basic %s' % (credsz,)
}
def setUp(self):
self.api = RESTApi(routes)
register_authenticator(self.check_creds)
def test_get_unaccessible(self):
request = DiabloDummyRequest([''])
request.method = 'GET'
request.path = '/auth/unaccessible'
resource = self.api.getChild('/', request)
d = _render(resource, request)
def renderer(ignored):
self.assertEquals(request.responseCode, 403)
        d.addCallback(renderer)
        return d
def test_get_jedi_fail(self):
request = DiabloDummyRequest([''])
request.method = 'GET'
request.path = '/auth/normal'
request.headers = self.get_auth_header('jedi', 'jedi')
resource = self.api.getChild('/', request)
d = _render(resource, request)
def renderer(ignored):
self.assertEquals(403, request.responseCode)
        d.addCallback(renderer)
        return d
def test_get_jedi_ok(self):
request = DiabloDummyRequest([''])
request.method = 'GET'
request.path = '/auth/normal'
request.headers = self.get_auth_header('jedi', 'jedixx')
resource = self.api.getChild('/', request)
d = _render(resource, request)
def renderer(ignored):
self.assertEquals(200, request.responseCode)
        d.addCallback(renderer)
        return d
def test_get_anonymous(self):
request = DiabloDummyRequest([''])
request.method = 'GET'
request.path = '/auth/normal'
# request.headers = self.get_auth_header('jedi', 'jedixx')
resource = self.api.getChild('/', request)
d = _render(resource, request)
def renderer(ignored):
self.assertEquals(401, request.responseCode)
        d.addCallback(renderer)
        return d
class PutResourceTest(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def test_put_resource(self):
request = DiabloDummyRequest([''])
request.method = 'PUT'
request.path = '/a/test/resource'
request.headers = {'content-type': 'application/json'}
request.data = json.dumps({'key1': 'value1'})
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
self.assertEquals(request.responseCode, OK)
d.addCallback(rendered)
request2 = DiabloDummyRequest([''])
request2.path = '/a/test/resource/key1'
request2.headers = {'content-type': 'application/json'}
resource2 = self.api.getChild('/ignored', request2)
def doGet(ignored):
d2 = _render(resource2, request2)
def get_rendered(ignored):
response = ''.join(request2.written)
response_obj = json.loads(response)
self.assertEquals(response_obj, 'value1')
d2.addCallback(get_rendered)
return d2
d.addCallback(doGet)
return d
class PostResourceTest(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def test_post_resource(self):
request = DiabloDummyRequest([''])
request.method = 'POST'
request.path = '/a/test/resource'
request.headers = {'content-type': 'application/json'}
request.data = json.dumps({'key2': 'value2'})
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
self.assertEquals(request.responseCode, OK)
self.assertEquals({'key2': 'value2'}, resource.collection)
d.addCallback(rendered)
request2 = DiabloDummyRequest([''])
request2.path = '/a/test/resource/key2'
request2.headers = {'content-type': 'application/json'}
resource2 = self.api.getChild('/ignored', request2)
def doGet(ignored):
d2 = _render(resource2, request2)
def get_rendered(ignored):
response = ''.join(request2.written)
response_obj = json.loads(response)
self.assertEquals(response_obj, 'value2')
d2.addCallback(get_rendered)
return d2
d.addCallback(doGet)
return d
class DeleteResourceTest(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def _put_something(self, key, val):
request = DiabloDummyRequest([''])
request.method = 'PUT'
request.path = '/a/test/resource'
request.headers = {'content-type': 'application/json'}
request.data = json.dumps({key: val})
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
self.assertEquals(request.responseCode, OK)
d.addCallback(rendered)
return d
def _delete_it(self, ignored, key):
request = DiabloDummyRequest([''])
request.method = 'DELETE'
request.path = '/a/test/resource/key3'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
self.assertEquals(request.responseCode, OK)
d.addCallback(rendered)
return d
def _get_it(self, ignored, key):
request = DiabloDummyRequest([''])
request.path = '/a/test/resource/key3'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
self.assertEquals(request.responseCode, NotFound().code)
        d.addCallback(rendered)
        return d
def test_delete_resource(self):
key, val = 'key3', 'val3'
d = self._put_something(key, val)
d.addCallback(self._delete_it, key)
d.addCallback(self._get_it, key)
return d
class ResourceRoutingTest(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def test_basic_route(self):
request = DiabloDummyRequest([''])
request.path = '/a/useless/path'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = json.loads(response)
self.assertEquals(response_obj, {'nothing': 'something'})
d.addCallback(rendered)
return d
def test_re_group_route_wo_group(self):
request = DiabloDummyRequest([''])
request.path = '/a/useful/path'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = json.loads(response)
self.assertEquals(response_obj, {'something': 'nothing'})
d.addCallback(rendered)
return d
def test_re_group_route_w_group(self):
request = DiabloDummyRequest([''])
request.path = '/a/useful/path/1234567890'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = json.loads(response)
self.assertEquals(response_obj, {'something': 'nothing'})
d.addCallback(rendered)
return d
def test_re_group_route_w_invalid_group(self):
request = DiabloDummyRequest([''])
request.path = '/a/useful/path/1invalid01'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/ignored', request)
d = _render(resource, request)
def rendered(ignored):
log.msg('ignored', ignored)
self.assertEquals(request.responseCode, NotFound().code)
d.addCallback(rendered)
return d
class ResourceTestCase(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def test_regular_response(self):
request = DiabloDummyRequest([''])
request.path = '/testregular'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = json.loads(response)
self.assertEquals(response_obj, regular_result)
d.addCallback(rendered)
return d
def test_deferred_response(self):
request = DiabloDummyRequest([''])
request.path = '/testdeferred'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/testdeferred', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = json.loads(response)
self.assertEquals(response_obj, deferred_result)
d.addCallback(rendered)
return d
class ContentTypeFormatterTestCase(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def test_json_formatter(self):
request = DiabloDummyRequest([''])
request.path = '/testregular'
request.headers = {'content-type': 'application/json'}
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = jsonMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'application/json')
d.addCallback(rendered)
return d
def test_xml_formatter(self):
request = DiabloDummyRequest([''])
request.path = '/testregular'
request.headers = {'content-type': 'text/xml'}
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = xmlMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'text/xml')
d.addCallback(rendered)
return d
def test_yaml_formatter(self):
request = DiabloDummyRequest([''])
request.path = '/testregular'
request.headers = {'content-type': 'application/yaml'}
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = yamlMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'application/yaml')
d.addCallback(rendered)
return d
class FormatArgTestCase(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def test_json_arg(self):
request = DiabloDummyRequest([])
request.path = '/testregular'
request.args = {'format': ['json']}
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = jsonMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'application/json')
d.addCallback(rendered)
return d
def test_xml_arg(self):
request = DiabloDummyRequest([])
request.path = '/testregular'
request.args = {'format': ['xml']}
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = xmlMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'text/xml')
d.addCallback(rendered)
return d
def test_yaml_arg(self):
request = DiabloDummyRequest([])
request.path = '/testregular'
request.args = {'format': ['yaml']}
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
response_obj = yamlMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'application/yaml')
d.addCallback(rendered)
return d
class UrlFormatTestCase(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def test_json_url(self):
request = DiabloDummyRequest([])
request.path = '/testregular.json'
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
log.msg('Response', response)
response_obj = jsonMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'application/json')
d.addCallback(rendered)
return d
def test_xml_url(self):
request = DiabloDummyRequest([])
request.path = '/testregular.xml'
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
log.msg('Response', response)
response_obj = xmlMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'text/xml')
d.addCallback(rendered)
return d
def test_yaml_url(self):
request = DiabloDummyRequest([])
request.method = 'GET'
request.path = '/testregular.yaml'
resource = self.api.getChild('/testregular', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
log.msg('Response', response)
response_obj = yamlMapper._parse_data(response, 'utf-8')
self.assertEquals(response_obj, regular_result)
content_header = request.outgoingHeaders.get('content-type', None)
content_type = content_header.split(';')[0] if content_header else None
self.assertEquals(content_type, 'application/yaml')
d.addCallback(rendered)
return d
class TestErrors(unittest.TestCase):
def setUp(self):
self.api = RESTApi(routes)
def test_exec_handler_fails_http(self):
request = DiabloDummyRequest([])
request.method = 'GET'
request.path = '/error/1'
resource = self.api.getChild('/error/1', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
self.assertEquals(NOT_FOUND, request.responseCode)
self.assertEquals("Can't find it, sorry.", response)
d.addCallback(rendered)
return d
def test_exec_handler_fails_unknown(self):
request = DiabloDummyRequest([])
request.method = 'GET'
request.path = '/error/2'
resource = self.api.getChild('/error/2', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
self.assertEquals(INTERNAL_SERVER_ERROR, request.responseCode)
self.assertEquals("Unknown exception.", response)
d.addCallback(rendered)
return d
def test_processing_fails_http(self):
request = DiabloDummyRequest([])
request.method = 'GET'
request.path = '/error/3'
resource = self.api.getChild('/error/3', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
self.assertEquals(CONFLICT, request.responseCode)
self.assertEquals("There's a conflict my friend.", response)
d.addCallback(rendered)
return d
def test_processing_fails_unknown(self):
request = DiabloDummyRequest([])
request.method = 'GET'
request.path = '/error/4'
resource = self.api.getChild('/error/4', request)
d = _render(resource, request)
def rendered(ignored):
response = ''.join(request.written)
self.assertEquals(INTERNAL_SERVER_ERROR, request.responseCode)
self.assertEquals("Bad type.", response)
d.addCallback(rendered)
return d
#
# test_resource.py ends here
| mit | 5,369,552,455,020,991,000 | 32.274648 | 85 | 0.607619 | false |
arpadpe/plover | plover/machine/keyboard.py | 1 | 4230 | # Copyright (c) 2010 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"For use with a computer keyboard (preferably NKRO) as a steno machine."
from plover.machine.base import StenotypeBase
from plover.oslayer.keyboardcontrol import KeyboardCapture
class Keyboard(StenotypeBase):
"""Standard stenotype interface for a computer keyboard.
This class implements the three methods necessary for a standard
stenotype interface: start_capture, stop_capture, and
add_callback.
"""
KEYS_LAYOUT = KeyboardCapture.SUPPORTED_KEYS_LAYOUT
ACTIONS = StenotypeBase.ACTIONS + ('arpeggiate',)
def __init__(self, params):
"""Monitor the keyboard's events."""
super(Keyboard, self).__init__()
self.arpeggiate = params['arpeggiate']
self._bindings = {}
self._down_keys = set()
self._released_keys = set()
self._keyboard_capture = None
self._last_stroke_key_down_count = 0
self._update_bindings()
def _update_bindings(self):
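        """Rebuild the key -> steno action map from the current keymap,
        mapping 'no-op' keys to None (still suppressed, but ignored) and
        wiring up the optional arpeggiate key."""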
self._bindings = dict(self.keymap.get_bindings())
for key, mapping in list(self._bindings.items()):
if 'no-op' == mapping:
self._bindings[key] = None
elif 'arpeggiate' == mapping:
if self.arpeggiate:
self._bindings[key] = None
self._arpeggiate_key = key
else:
# Don't suppress arpeggiate key if it's not used.
del self._bindings[key]
def set_mappings(self, mappings):
super(Keyboard, self).set_mappings(mappings)
self._update_bindings()
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
self._released_keys.clear()
self._last_stroke_key_down_count = 0
self._initializing()
try:
self._keyboard_capture = KeyboardCapture()
self._keyboard_capture.key_down = self._key_down
self._keyboard_capture.key_up = self._key_up
self._keyboard_capture.start()
except:
self._error()
raise
self._ready()
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
if self._keyboard_capture is not None:
self._keyboard_capture.cancel()
self._keyboard_capture = None
self._stopped()
def set_suppression(self, enabled):
suppressed_keys = self._bindings.keys() if enabled else ()
self._keyboard_capture.suppress_keyboard(suppressed_keys)
def suppress_last_stroke(self, send_backspaces):
send_backspaces(self._last_stroke_key_down_count)
def _key_down(self, key):
"""Called when a key is pressed."""
assert key is not None
if key in self._bindings:
self._last_stroke_key_down_count += 1
steno_key = self._bindings.get(key)
if steno_key is not None:
self._down_keys.add(steno_key)
def _key_up(self, key):
"""Called when a key is released."""
assert key is not None
steno_key = self._bindings.get(key)
if steno_key is not None:
# Process the newly released key.
self._released_keys.add(steno_key)
# Remove invalid released keys.
self._released_keys = self._released_keys.intersection(self._down_keys)
# A stroke is complete if all pressed keys have been released.
# If we are in arpeggiate mode then only send stroke when spacebar is pressed.
send_strokes = bool(self._down_keys and
self._down_keys == self._released_keys)
if self.arpeggiate:
send_strokes &= key == self._arpeggiate_key
if send_strokes:
steno_keys = list(self._down_keys)
if steno_keys:
self._down_keys.clear()
self._released_keys.clear()
self._notify(steno_keys)
self._last_stroke_key_down_count = 0
@classmethod
def get_option_info(cls):
bool_converter = lambda s: s == 'True'
return {
'arpeggiate': (False, bool_converter),
}
| gpl-2.0 | -4,720,774,508,469,126,000 | 35.153846 | 86 | 0.591253 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/nose-0.10.1-py2.5.egg/nose/plugins/isolate.py | 1 | 3674 | """Use the isolation plugin with --with-isolation or the
NOSE_WITH_ISOLATION environment variable to clean sys.modules after
each test module is loaded and executed.
The isolation module is in effect similar to wrapping the following
functions around the import and execution of each test module::
def setup(module):
module._mods = sys.modules.copy()
def teardown(module):
to_del = [ m for m in sys.modules.keys() if m not in
module._mods ]
for mod in to_del:
del sys.modules[mod]
sys.modules.update(module._mods)
Isolation works only during lazy loading. In normal use, this is only
during discovery of modules within a directory, where the process of
importing, loading tests and running tests from each module is
encapsulated in a single loadTestsFromName call. This plugin
implements loadTestsFromNames to force the same lazy-loading there,
which allows isolation to work in directed mode as well as discovery,
at the cost of some efficiency: lazy-loading names forces full context
setup and teardown to run for each name, defeating the grouping that
is normally used to ensure that context setup and teardown are run the
fewest possible times for a given set of names.
PLEASE NOTE that this plugin should not be used in conjunction with
other plugins that assume that modules once imported will stay
imported; for instance, it may cause very odd results when used with
the coverage plugin.
"""
import logging
import sys
from nose.plugins import Plugin
log = logging.getLogger('nose.plugins.isolation')
class IsolationPlugin(Plugin):
"""
Activate the isolation plugin to isolate changes to external
modules to a single test module or package. The isolation plugin
resets the contents of sys.modules after each test module or
package runs to its state before the test. PLEASE NOTE that this
    plugin should not be used with the coverage plugin, or in any other case
where module reloading may produce undesirable side-effects.
"""
score = 10 # I want to be last
name = 'isolation'
def configure(self, options, conf):
Plugin.configure(self, options, conf)
self._mod_stack = []
def beforeContext(self):
"""Copy sys.modules onto my mod stack
"""
mods = sys.modules.copy()
self._mod_stack.append(mods)
def afterContext(self):
"""Pop my mod stack and restore sys.modules to the state
it was in when mod stack was pushed.
"""
mods = self._mod_stack.pop()
to_del = [ m for m in sys.modules.keys() if m not in mods ]
if to_del:
log.debug('removing sys modules entries: %s', to_del)
for mod in to_del:
del sys.modules[mod]
sys.modules.update(mods)
def loadTestsFromNames(self, names, module=None):
"""Create a lazy suite that calls beforeContext and afterContext
around each name. The side-effect of this is that full context
fixtures will be set up and torn down around each test named.
"""
# Fast path for when we don't care
if not names or len(names) == 1:
return
loader = self.loader
plugins = self.conf.plugins
def lazy():
for name in names:
plugins.beforeContext()
yield loader.loadTestsFromName(name, module=module)
plugins.afterContext()
return (loader.suiteClass(lazy), [])
def prepareTestLoader(self, loader):
"""Get handle on test loader so we can use it in loadTestsFromNames.
"""
self.loader = loader
| bsd-3-clause | -7,653,814,076,835,601,000 | 36.489796 | 76 | 0.67828 | false |
graik/labhamster | labhamster/admin.py | 1 | 12409 | ## Copyright 2016 - 2018 Raik Gruenberg
## This file is part of the LabHamster project (https://github.com/graik/labhamster).
## LabHamster is released under the MIT open source license, which you can find
## along with this project (LICENSE) or at <https://opensource.org/licenses/MIT>.
from __future__ import unicode_literals
from labhamster.models import *
from django.contrib import admin
import django.forms
from django.http import HttpResponse
import django.utils.html as html
import customforms
def export_csv(request, queryset, fields):
"""
    Helper method for Admin make_csv actions. Exports the selected objects
    as a CSV file.
    fields - OrderedDict of name / field pairs; see Product.make_csv for an example
"""
import csv
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=orders.csv'
writer = csv.writer(response)
writer.writerow(fields.keys())
for o in queryset:
columns = []
        for name, value in fields.items():
            try:
                columns.append(eval('o.%s' % value))
            except Exception:
                columns.append("")  ## capture 'None' fields
        columns = [c.encode('utf-8') if isinstance(c, unicode) else c
                   for c in columns]
writer.writerow( columns )
return response
class RequestFormAdmin(admin.ModelAdmin):
"""
    ModelAdmin that attaches the current 'request' to the form class
    generated by the Admin. This makes it possible, for example, to extract
    the user ID while the form is being created.
"""
def get_form(self, request, obj=None, **kwargs):
"""
Assign request variable to form
http://stackoverflow.com/questions/1057252/how-do-i-access-the-request-object-or-any-other-variable-in-a-forms-clean-met
(last answer, much simpler than Django 1.6 version)
"""
form = super(RequestFormAdmin, self).get_form(request, obj=obj, **kwargs)
form.request = request
return form
class GrantAdmin(admin.ModelAdmin):
ordering = ('name',)
admin.site.register(Grant, GrantAdmin)
class CategoryAdmin(admin.ModelAdmin):
ordering = ('name',)
admin.site.register(Category, CategoryAdmin)
class VendorAdmin(admin.ModelAdmin):
fieldsets = ((None, {'fields': (('name',),
('link', 'login', 'password'),)}),
('Contact', {'fields' : (('contact',),
('email','phone'),)})
)
list_display = ('name', 'link', 'login', 'password')
ordering = ('name',)
search_fields = ('name', 'contact')
admin.site.register(Vendor, VendorAdmin)
class ProductAdmin(admin.ModelAdmin):
fieldsets = ((None, {'fields': (('name', 'category'),
('vendor', 'catalog'),
('manufacturer', 'manufacturer_catalog'),
'link',
('status', 'shelflife'),
'comment',
'location')}),)
list_display = ('name', 'show_vendor', 'category', 'show_catalog',
'status')
list_filter = ('status', 'category', 'vendor')
ordering = ('name',)
search_fields = ('name', 'comment', 'catalog', 'location', 'vendor__name',
'manufacturer__name', 'manufacturer_catalog')
save_as = True
actions = ['make_ok',
'make_low',
'make_out',
'make_deprecated',
'make_csv']
## reduce size of Description text field.
formfield_overrides = {
models.TextField: {'widget': django.forms.Textarea(
attrs={'rows': 4,
'cols': 80})},
}
def make_ok(self, request, queryset):
n = queryset.update(status='ok')
self.message_user(request, '%i products were updated' % n)
make_ok.short_description = 'Mark selected entries as in stock'
def make_low(self, request, queryset):
n = queryset.update(status='low')
self.message_user(request, '%i products were updated' % n)
make_low.short_description = 'Mark selected entries as running low'
def make_out(self, request, queryset):
n = queryset.update(status='out')
self.message_user(request, '%i products were updated' % n)
make_out.short_description = 'Mark selected entries as out of stock'
def make_deprecated(self, request, queryset):
n = queryset.update(status='deprecated')
self.message_user(request, '%i products were updated' % n)
make_deprecated.short_description = 'Mark selected entries as deprecated'
def make_csv(self, request, queryset):
from collections import OrderedDict
fields = OrderedDict( [('Name', 'name'),
('Vendor', 'vendor.name'),
('Vendor Catalog','catalog'),
('Manufacturer', 'manufacturer.name'),
('Manufacturer Catalog', 'manufacturer_catalog'),
('Category','category.name'),
('Shelf_life','shelflife'),
('Status','status'),
('Location','location'),
('Link','link'),
('Comment','comment')])
return export_csv( request, queryset, fields)
make_csv.short_description = 'Export products as CSV'
## note: this currently breaks the selection of products from the
    ## order form "lens" button
def show_name(self, o):
"""truncate product name to less than 40 char"""
from django.utils.safestring import SafeUnicode
return html.format_html(
'<a href="{url}" title="{comment}">{name}</a>',
url=o.get_absolute_url(),
name=T.truncate(o.name, 40),
comment=SafeUnicode(o.comment))
show_name.short_description = 'Name'
show_name.admin_order_field = 'name'
def show_vendor(self, o):
"""Display in table: Vendor (Manufacturer)"""
r = o.vendor.name
if o.manufacturer:
r += '<br>(%s)' % o.manufacturer.name
return html.format_html(r)
show_vendor.admin_order_field = 'vendor'
show_vendor.short_description = 'Vendor'
def show_catalog(self, o):
return T.truncate(o.catalog, 15)
show_catalog.short_description = 'Catalog'
show_catalog.admin_order_field = 'catalog'
admin.site.register(Product, ProductAdmin)
class OrderAdmin(RequestFormAdmin):
form = customforms.OrderForm
raw_id_fields = ('product',)
fieldsets = ((None,
{'fields': (('status', 'is_urgent', 'product',),
('created_by', 'ordered_by', 'date_ordered',
'date_received'))}),
('Details', {'fields': (('unit_size', 'quantity'),
('price', 'po_number'),
('grant', 'grant_category'),
'comment')}))
radio_fields = {'grant': admin.VERTICAL,
'grant_category': admin.VERTICAL}
list_display = ('show_title', 'Status', 'show_urgent',
'show_quantity', 'show_price',
'requested', 'show_requestedby', 'ordered',
'received', 'show_comment',)
list_filter = ('status',
'product__category__name', 'grant', 'created_by', 'product__vendor__name',)
ordering = ('-date_created', 'product', '-date_ordered') #, 'price')
search_fields = ('comment', 'grant__name', 'grant__grant_id', 'product__name',
'product__vendor__name')
save_as = True
date_hierarchy = 'date_created'
actions = ['make_ordered', 'make_received', 'make_cancelled', 'make_csv']
def show_title(self, o):
"""truncate product name + supplier to less than 40 char"""
n = T.truncate(o.product.name, 40)
v = o.product.vendor.name
r = html.format_html('<a href="{}">{}', o.get_absolute_url(), n)
r += '<br>' if len(n) + len(v) > 37 else ' '
r += html.format_html('[{}]</a>',v)
return html.mark_safe(r)
show_title.short_description = 'Product'
def show_comment(self, obj):
"""
@return: str; truncated comment with full comment mouse-over
"""
if not obj.comment:
return ''
if len(obj.comment) < 30:
return obj.comment
        return '<a title="%s">%s</a>' % (obj.comment, T.truncate(obj.comment, 30))
show_comment.short_description = 'comment'
show_comment.allow_tags = True
def show_price(self, o):
"""Workaround for bug in djmoney -- MoneyField confuses Admin formatting"""
if not o.price:
return ''
return o.price
show_price.admin_order_field = 'price'
show_price.short_description = 'Unit price'
def show_urgent(self, o):
"""Show exclamation mark if order is urgent"""
if not o.is_urgent:
return ''
return html.format_html(
'<big>❗</big>')
show_urgent.admin_order_field = 'is_urgent'
show_urgent.short_description = '!'
def show_requestedby(self,o):
return o.created_by
show_requestedby.admin_order_field = 'created_by'
show_requestedby.short_description = 'By'
def show_quantity(self, o):
return o.quantity
show_quantity.short_description = 'Q'
def make_ordered(self, request, queryset):
"""
        Mark several orders as 'ordered'.
        See: https://docs.djangoproject.com/en/1.4/ref/contrib/admin/actions/
"""
import datetime
n = queryset.update(status='ordered', ordered_by=request.user,
date_ordered=datetime.datetime.now())
self.message_user(request, '%i orders were updated' % n)
make_ordered.short_description = 'Mark selected entries as ordered'
def make_received(self, request, queryset):
import datetime
n = queryset.update(date_received=datetime.datetime.now(),
status='received')
i = 0
for order in queryset:
order.product.status = 'ok'
order.product.save()
i += 1
self.message_user(request,
'%i orders were updated and %i products set to "in stock"'\
% (n, i))
make_received.short_description= 'Mark as received (and update product status)'
def make_cancelled(self, request, queryset):
import datetime
n = queryset.update(date_received=None, date_ordered=None,
status='cancelled')
self.message_user(request, '%i orders were set to cancelled' % n)
make_cancelled.short_description = 'Mark selected entries as cancelled'
def make_csv(self, request, queryset):
"""
Export selected orders as CSV file
"""
from collections import OrderedDict
fields = OrderedDict( [('Product', 'product.name'),
('Quantity', 'quantity'),
('Price','price'),
('Vendor','product.vendor.name'),
('Catalog','product.catalog'),
('PO Number', 'po_number'),
('Requested','date_created'),
('Requested by','created_by.username'),
('Ordered','date_ordered'),
('Ordered by','ordered_by.username'),
('Received','date_received'),
('Status','status'),
('Urgent','is_urgent'),
('Comment','comment')])
return export_csv(request, queryset, fields)
make_csv.short_description = 'Export orders as CSV'
admin.site.register(Order, OrderAdmin)
| mit | 5,380,798,477,713,165,000 | 34.864162 | 128 | 0.539608 | false |
fdudatamining/framework | tests/draw/test_simple.py | 1 | 1233 | import numpy as np
import pandas as pd
from unittest import TestCase
from framework import draw
X = np.array([1, 2, 3, 4, 5])
class TestSimplePlots(TestCase):
def test_kinds(self):
self.assertIsNotNone(draw.draw_kinds)
def test_line(self):
draw.draw(clear=True, kind='line', x=X, y=X)
draw.draw(clear=True, kind='line', y=X)
def test_scatter(self):
draw.draw(clear=True, kind='scatter', x=X, y=X)
draw.draw(clear=True, kind='scatter', y=X)
def test_stem(self):
draw.draw(clear=True, kind='stem', x=X, y=X)
draw.draw(clear=True, kind='stem', y=X)
def test_errorbar(self):
draw.draw(clear=True, kind='errorbar', x=X, y=X, xerr=X, yerr=X)
draw.draw(clear=True, kind='errorbar', y=X, yerr=X)
def test_boxplot(self):
draw.draw(clear=True, kind='boxplot', x=X)
def test_barplot(self):
draw.draw(clear=True, kind='barplot', x=X, y=X, width=1)
draw.draw(clear=True, kind='barplot', x=X, y=X)
draw.draw(clear=True, kind='barplot', y=X)
def test_contour(self):
draw.draw(clear=True, kind='contour', z=[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
def test_hist(self):
draw.draw(clear=True, kind='hist', x=X, bins=2)
draw.draw(clear=True, kind='hist', x=X)
| gpl-2.0 | -2,851,734,426,032,515,600 | 29.073171 | 78 | 0.636659 | false |
wa3l/mailr | email_model.py | 1 | 1590 | from flask.ext.sqlalchemy import SQLAlchemy
import html2text as convert
import time
db = SQLAlchemy()
class Email(db.Model):
"""
Email model
Store emails going through the app in a database.
"""
id = db.Column(db.Integer, primary_key=True)
to_email = db.Column(db.String(254))
to_name = db.Column(db.String(256))
from_email = db.Column(db.String(254))
from_name = db.Column(db.String(256))
subject = db.Column(db.String(78))
html = db.Column(db.UnicodeText)
text = db.Column(db.UnicodeText)
service = db.Column(db.String(10))
deliverytime = db.Column(db.BigInteger)
def __init__(self, data):
self.to_email = data['to']
self.to_name = data['to_name']
self.from_email = data['from']
self.from_name = data['from_name']
self.subject = data['subject']
self.html = data['body']
self.text = convert.html2text(data['body'])
self.service = data['service'] if data.has_key('service') else None
if data.has_key('deliverytime'):
self.deliverytime = int(data['deliverytime'])
else:
self.deliverytime = int(time.time())
def __str__(self):
return str({
'to': self.to_email,
'from': self.from_email,
'to_name': self.to_name,
'from_name': self.from_name,
'subject': self.subject,
'text': self.text,
'html': self.html,
'service': self.service,
'deliverytime': str(self.deliverytime)
})
def __repr__(self):
return str(self)
| mit | -104,383,001,852,401,000 | 28.444444 | 74 | 0.583648 | false |
ronaldsantos63/Gera_SPED_SysPDV | resources_rc.py | 1 | 125636 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: ter 29. set 21:52:13 2015
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x01\x57\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\
\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\
\x30\x00\x00\xea\x60\x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\
\x46\x00\x00\x00\xdd\x49\x44\x41\x54\x78\xda\x5c\x8e\xb1\x4e\x84\
\x40\x18\x84\x67\xef\x4c\x2c\xc8\xd9\x2c\x0d\x58\x50\x1b\x0b\xc3\
\xfa\x24\x77\xbd\x0d\x85\x4f\x40\x0b\xbb\xcb\x3b\xd0\x68\x41\x72\
\xc5\xd2\x28\x4f\x02\xcf\xb1\x97\x40\x61\xd4\xc2\xc4\x62\x2c\xbc\
\x4d\xd0\x49\xfe\xbf\xf8\x32\xff\x3f\x23\x48\xc2\x5a\x3b\x00\x80\
\xd6\xfa\x80\xb3\xac\xb5\x03\x49\x18\x63\x0e\x5b\x21\xc4\x90\xe7\
\xf9\x3e\x49\x92\x9b\xbe\xef\xef\xca\xb2\x7c\xf5\xde\xbf\x04\xe6\
\x9c\xbb\xbd\x20\xf9\x19\xae\x95\x52\xfb\x2c\xcb\xbe\xa5\x94\x01\
\x81\xe4\x9b\x38\xbf\x3c\x2a\xa5\x1e\xf0\x4f\xe3\x38\x3e\x37\x4d\
\xf3\x28\x48\x02\x00\xba\xae\x7b\x97\x52\xee\x82\x61\x59\x96\x8f\
\xa2\x28\xae\x00\x60\x03\x00\xc6\x98\xe3\xda\x00\x00\x71\x1c\xef\
\xb4\xd6\x4f\x00\xb0\x05\xf0\x27\x6a\x9e\x67\x44\x51\x04\x00\x48\
\xd3\xf4\xde\x39\x77\xbd\x21\xf9\xb5\xea\x70\x6a\xdb\xf6\x72\x9a\
\xa6\xd3\xaa\xf8\xef\xaa\xeb\xda\x57\x55\xe5\x49\x22\xcc\x9a\xfd\
\x0c\x00\x24\xab\x6e\xfa\x96\x21\xfc\xb8\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xf0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x07\x00\x00\x00\x05\x08\x04\x00\x00\x00\x23\x93\x3e\x53\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x03\x18\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\x63\x60\x60\x9e\xe0\xe8\xe2\xe4\xca\x24\
\xc0\xc0\x50\x50\x54\x52\xe4\x1e\xe4\x18\x19\x11\x19\xa5\xc0\x7e\
\x9e\x81\x8d\x81\x99\x81\x81\x81\x81\x81\x21\x31\xb9\xb8\xc0\x31\
\x20\xc0\x87\x81\x81\x81\x21\x2f\x3f\x2f\x95\x01\x15\x30\x32\x30\
\x7c\xbb\xc6\xc0\xc8\xc0\xc0\xc0\x70\x59\xd7\xd1\xc5\xc9\x95\x81\
\x34\xc0\x9a\x5c\x50\x54\xc2\xc0\xc0\x70\x80\x81\x81\xc1\x28\x25\
\xb5\x38\x99\x81\x81\xe1\x0b\x03\x03\x43\x7a\x79\x49\x41\x09\x03\
\x03\x63\x0c\x03\x03\x83\x48\x52\x76\x41\x09\x03\x03\x63\x01\x03\
\x03\x83\x48\x76\x48\x90\x33\x03\x03\x63\x0b\x03\x03\x13\x4f\x49\
\x6a\x45\x09\x03\x03\x03\x83\x73\x7e\x41\x65\x51\x66\x7a\x46\x89\
\x82\xa1\xa5\xa5\xa5\x82\x63\x4a\x7e\x52\xaa\x42\x70\x65\x71\x49\
\x6a\x6e\xb1\x82\x67\x5e\x72\x7e\x51\x41\x7e\x51\x62\x49\x6a\x0a\
\x03\x03\x03\xd4\x0e\x06\x06\x06\x06\x5e\x97\xfc\x12\x05\xf7\xc4\
\xcc\x3c\x05\x23\x03\x55\x06\x2a\x83\x88\xc8\x28\x05\x08\x0b\x11\
\x3e\x08\x31\x04\x48\x2e\x2d\x2a\x83\x07\x25\x03\x83\x00\x83\x02\
\x83\x01\x83\x03\x43\x00\x43\x22\x43\x3d\xc3\x02\x86\xa3\x0c\x6f\
\x18\xc5\x19\x5d\x18\x4b\x19\x57\x30\xde\x63\x12\x63\x0a\x62\x9a\
\xc0\x74\x81\x59\x98\x39\x92\x79\x21\xf3\x1b\x16\x4b\x96\x0e\x96\
\x5b\xac\x7a\xac\xad\xac\xf7\xd8\x2c\xd9\xa6\xb1\x7d\x63\x0f\x67\
\xdf\xcd\xa1\xc4\xd1\xc5\xf1\x85\x33\x91\xf3\x02\x97\x23\xd7\x16\
\x6e\x4d\xee\x05\x3c\x52\x3c\x53\x79\x85\x78\x27\xf1\x09\xf3\x4d\
\xe3\x97\xe1\x5f\x2c\xa0\x23\xb0\x43\xd0\x55\xf0\x8a\x50\xaa\xd0\
\x0f\xe1\x5e\x11\x15\x91\xbd\xa2\xe1\xa2\x5f\xc4\x26\x89\x1b\x89\
\x5f\x91\xa8\x90\x94\x93\x3c\x26\x95\x2f\x2d\x2d\x7d\x42\xa6\x4c\
\x56\x5d\xf6\x96\x5c\x9f\xbc\x8b\xfc\x1f\x85\xad\x8a\x85\x4a\x7a\
\x4a\x6f\x95\xd7\xaa\x14\xa8\x9a\xa8\xfe\x54\x3b\xa8\xde\xa5\x11\
\xaa\xa9\xa4\xf9\x41\xeb\x80\xf6\x24\x9d\x54\x5d\x2b\x3d\x41\xbd\
\x57\xfa\x47\x0c\x16\x18\xd6\x1a\xc5\x18\xdb\x9a\xc8\x9b\x32\x9b\
\xbe\x34\xbb\x60\xbe\xd3\x62\x89\xe5\x04\xab\x3a\xeb\x5c\x9b\x38\
\xdb\x40\x3b\x57\x7b\x6b\x07\x63\x47\x1d\x27\x35\x67\x25\x17\x05\
\x57\x79\x37\x05\x77\x65\x0f\x75\x4f\x5d\x2f\x13\x6f\x1b\x1f\x77\
\xdf\x60\xbf\x04\xff\xfc\x80\xfa\xc0\x89\x41\x4b\x83\x77\x85\x5c\
\x0c\x7d\x19\xce\x14\x21\x17\x69\x15\x15\x11\x5d\x11\x33\x33\x76\
\x4f\xdc\x83\x04\xb6\x44\xdd\xa4\xb0\xe4\x86\x94\x35\xa9\x37\xd3\
\x39\x32\x2c\x32\x33\xb3\xe6\x66\x5f\xcc\x65\xcf\xb3\xcf\xaf\x28\
\xd8\x54\xf8\xae\x58\xbb\x24\xab\x74\x55\xd9\x9b\x0a\xfd\xca\x92\
\xaa\x5d\x35\x8c\xb5\x5e\x75\x53\xeb\x1f\x36\xea\x35\xd5\x34\x9f\
\x6d\x95\x6b\x2b\x6c\x3f\xda\x29\xdd\x55\xd4\x7d\xba\x57\xb5\xaf\
\xb1\xff\xee\x44\x9b\x49\xb3\x27\xff\x9d\x1a\x3f\xed\xf0\x0c\x8d\
\x99\xfd\xb3\xbe\xcf\x49\x98\x7b\x7a\xbe\xf9\x82\xa5\x8b\x44\x16\
\xb7\x2e\xf9\xb6\x2c\x73\xf9\xbd\x95\x21\xab\x4e\xaf\x71\x59\xbb\
\x6f\xbd\xe5\x86\x6d\x9b\x4c\x36\x6f\xd9\x6a\xb2\x6d\xfb\x0e\xab\
\x9d\xfb\x77\xbb\xee\x39\xbb\x2f\x6c\xff\x83\x83\x39\x87\x7e\x1e\
\x69\x3f\x26\x7e\x7c\xc5\x49\xeb\x53\xe7\xce\x24\x9f\xfd\x75\x7e\
\xd2\x45\xed\x4b\x47\xaf\x24\x5e\xfd\x77\x7d\xce\x4d\x9b\x5b\x77\
\xef\xd4\xdf\x53\xbe\x7f\xe2\x61\xde\x63\xb1\x27\xfb\x9f\x65\xbe\
\x10\x79\x79\xf0\x75\xfe\x5b\xf9\x77\x17\x3e\x34\x7d\x32\xfd\xfc\
\xea\xeb\x82\xef\xe1\x3f\x05\x7e\x9d\xfa\xd3\xfa\xcf\xf1\xff\x7f\
\x00\x0d\x00\x0f\x34\xfa\x96\xf1\x5d\x00\x00\x00\x20\x63\x48\x52\
\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\
\xe9\x00\x00\x75\x30\x00\x00\xea\x60\x00\x00\x3a\x98\x00\x00\x17\
\x6f\x92\x5f\xc5\x46\x00\x00\x00\x52\x49\x44\x41\x54\x78\xda\x62\
\x58\xf5\xe9\xca\x3f\x18\x5c\xfe\x9e\x21\xd3\xff\xc4\x8f\xab\xbf\
\xaf\xfe\xbe\xfa\xfb\xd0\x97\x68\x63\x86\xff\x0c\x85\x6b\xf7\x7e\
\xdc\xfb\x71\xf3\x87\xcc\xbc\xff\x0c\x0c\xff\x19\x18\x98\x73\xce\
\xce\xbd\x1f\x39\xff\x3f\xc3\x7f\x06\x86\xff\x0c\xff\x19\x14\xdd\
\x2c\xb6\xfe\x67\xf8\xcf\xf0\x9f\x01\x30\x00\x6a\x5f\x2c\x67\x74\
\xda\xec\xfb\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\x15\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x06\x08\x06\x00\x00\x00\xe0\xcc\xef\x48\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x0a\x4f\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\x9d\x53\x67\x54\x53\xe9\x16\x3d\xf7\xde\
\xf4\x42\x4b\x88\x80\x94\x4b\x6f\x52\x15\x08\x20\x52\x42\x8b\x80\
\x14\x91\x26\x2a\x21\x09\x10\x4a\x88\x21\xa1\xd9\x15\x51\xc1\x11\
\x45\x45\x04\x1b\xc8\xa0\x88\x03\x8e\x8e\x80\x8c\x15\x51\x2c\x0c\
\x8a\x0a\xd8\x07\xe4\x21\xa2\x8e\x83\xa3\x88\x8a\xca\xfb\xe1\x7b\
\xa3\x6b\xd6\xbc\xf7\xe6\xcd\xfe\xb5\xd7\x3e\xe7\xac\xf3\x9d\xb3\
\xcf\x07\xc0\x08\x0c\x96\x48\x33\x51\x35\x80\x0c\xa9\x42\x1e\x11\
\xe0\x83\xc7\xc4\xc6\xe1\xe4\x2e\x40\x81\x0a\x24\x70\x00\x10\x08\
\xb3\x64\x21\x73\xfd\x23\x01\x00\xf8\x7e\x3c\x3c\x2b\x22\xc0\x07\
\xbe\x00\x01\x78\xd3\x0b\x08\x00\xc0\x4d\x9b\xc0\x30\x1c\x87\xff\
\x0f\xea\x42\x99\x5c\x01\x80\x84\x01\xc0\x74\x91\x38\x4b\x08\x80\
\x14\x00\x40\x7a\x8e\x42\xa6\x00\x40\x46\x01\x80\x9d\x98\x26\x53\
\x00\xa0\x04\x00\x60\xcb\x63\x62\xe3\x00\x50\x2d\x00\x60\x27\x7f\
\xe6\xd3\x00\x80\x9d\xf8\x99\x7b\x01\x00\x5b\x94\x21\x15\x01\xa0\
\x91\x00\x20\x13\x65\x88\x44\x00\x68\x3b\x00\xac\xcf\x56\x8a\x45\
\x00\x58\x30\x00\x14\x66\x4b\xc4\x39\x00\xd8\x2d\x00\x30\x49\x57\
\x66\x48\x00\xb0\xb7\x00\xc0\xce\x10\x0b\xb2\x00\x08\x0c\x00\x30\
\x51\x88\x85\x29\x00\x04\x7b\x00\x60\xc8\x23\x23\x78\x00\x84\x99\
\x00\x14\x46\xf2\x57\x3c\xf1\x2b\xae\x10\xe7\x2a\x00\x00\x78\x99\
\xb2\x3c\xb9\x24\x39\x45\x81\x5b\x08\x2d\x71\x07\x57\x57\x2e\x1e\
\x28\xce\x49\x17\x2b\x14\x36\x61\x02\x61\x9a\x40\x2e\xc2\x79\x99\
\x19\x32\x81\x34\x0f\xe0\xf3\xcc\x00\x00\xa0\x91\x15\x11\xe0\x83\
\xf3\xfd\x78\xce\x0e\xae\xce\xce\x36\x8e\xb6\x0e\x5f\x2d\xea\xbf\
\x06\xff\x22\x62\x62\xe3\xfe\xe5\xcf\xab\x70\x40\x00\x00\xe1\x74\
\x7e\xd1\xfe\x2c\x2f\xb3\x1a\x80\x3b\x06\x80\x6d\xfe\xa2\x25\xee\
\x04\x68\x5e\x0b\xa0\x75\xf7\x8b\x66\xb2\x0f\x40\xb5\x00\xa0\xe9\
\xda\x57\xf3\x70\xf8\x7e\x3c\x3c\x45\xa1\x90\xb9\xd9\xd9\xe5\xe4\
\xe4\xd8\x4a\xc4\x42\x5b\x61\xca\x57\x7d\xfe\x67\xc2\x5f\xc0\x57\
\xfd\x6c\xf9\x7e\x3c\xfc\xf7\xf5\xe0\xbe\xe2\x24\x81\x32\x5d\x81\
\x47\x04\xf8\xe0\xc2\xcc\xf4\x4c\xa5\x1c\xcf\x92\x09\x84\x62\xdc\
\xe6\x8f\x47\xfc\xb7\x0b\xff\xfc\x1d\xd3\x22\xc4\x49\x62\xb9\x58\
\x2a\x14\xe3\x51\x12\x71\x8e\x44\x9a\x8c\xf3\x32\xa5\x22\x89\x42\
\x92\x29\xc5\x25\xd2\xff\x64\xe2\xdf\x2c\xfb\x03\x3e\xdf\x35\x00\
\xb0\x6a\x3e\x01\x7b\x91\x2d\xa8\x5d\x63\x03\xf6\x4b\x27\x10\x58\
\x74\xc0\xe2\xf7\x00\x00\xf2\xbb\x6f\xc1\xd4\x28\x08\x03\x80\x68\
\x83\xe1\xcf\x77\xff\xef\x3f\xfd\x47\xa0\x25\x00\x80\x66\x49\x92\
\x71\x00\x00\x5e\x44\x24\x2e\x54\xca\xb3\x3f\xc7\x08\x00\x00\x44\
\xa0\x81\x2a\xb0\x41\x1b\xf4\xc1\x18\x2c\xc0\x06\x1c\xc1\x05\xdc\
\xc1\x0b\xfc\x60\x36\x84\x42\x24\xc4\xc2\x42\x10\x42\x0a\x64\x80\
\x1c\x72\x60\x29\xac\x82\x42\x28\x86\xcd\xb0\x1d\x2a\x60\x2f\xd4\
\x40\x1d\x34\xc0\x51\x68\x86\x93\x70\x0e\x2e\xc2\x55\xb8\x0e\x3d\
\x70\x0f\xfa\x61\x08\x9e\xc1\x28\xbc\x81\x09\x04\x41\xc8\x08\x13\
\x61\x21\xda\x88\x01\x62\x8a\x58\x23\x8e\x08\x17\x99\x85\xf8\x21\
\xc1\x48\x04\x12\x8b\x24\x20\xc9\x88\x14\x51\x22\x4b\x91\x35\x48\
\x31\x52\x8a\x54\x20\x55\x48\x1d\xf2\x3d\x72\x02\x39\x87\x5c\x46\
\xba\x91\x3b\xc8\x00\x32\x82\xfc\x86\xbc\x47\x31\x94\x81\xb2\x51\
\x3d\xd4\x0c\xb5\x43\xb9\xa8\x37\x1a\x84\x46\xa2\x0b\xd0\x64\x74\
\x31\x9a\x8f\x16\xa0\x9b\xd0\x72\xb4\x1a\x3d\x8c\x36\xa1\xe7\xd0\
\xab\x68\x0f\xda\x8f\x3e\x43\xc7\x30\xc0\xe8\x18\x07\x33\xc4\x6c\
\x30\x2e\xc6\xc3\x42\xb1\x38\x2c\x09\x93\x63\xcb\xb1\x22\xac\x0c\
\xab\xc6\x1a\xb0\x56\xac\x03\xbb\x89\xf5\x63\xcf\xb1\x77\x04\x12\
\x81\x45\xc0\x09\x36\x04\x77\x42\x20\x61\x1e\x41\x48\x58\x4c\x58\
\x4e\xd8\x48\xa8\x20\x1c\x24\x34\x11\xda\x09\x37\x09\x03\x84\x51\
\xc2\x27\x22\x93\xa8\x4b\xb4\x26\xba\x11\xf9\xc4\x18\x62\x32\x31\
\x87\x58\x48\x2c\x23\xd6\x12\x8f\x13\x2f\x10\x7b\x88\x43\xc4\x37\
\x24\x12\x89\x43\x32\x27\xb9\x90\x02\x49\xb1\xa4\x54\xd2\x12\xd2\
\x46\xd2\x6e\x52\x23\xe9\x2c\xa9\x9b\x34\x48\x1a\x23\x93\xc9\xda\
\x64\x6b\xb2\x07\x39\x94\x2c\x20\x2b\xc8\x85\xe4\x9d\xe4\xc3\xe4\
\x33\xe4\x1b\xe4\x21\xf2\x5b\x0a\x9d\x62\x40\x71\xa4\xf8\x53\xe2\
\x28\x52\xca\x6a\x4a\x19\xe5\x10\xe5\x34\xe5\x06\x65\x98\x32\x41\
\x55\xa3\x9a\x52\xdd\xa8\xa1\x54\x11\x35\x8f\x5a\x42\xad\xa1\xb6\
\x52\xaf\x51\x87\xa8\x13\x34\x75\x9a\x39\xcd\x83\x16\x49\x4b\xa5\
\xad\xa2\x95\xd3\x1a\x68\x17\x68\xf7\x69\xaf\xe8\x74\xba\x11\xdd\
\x95\x1e\x4e\x97\xd0\x57\xd2\xcb\xe9\x47\xe8\x97\xe8\x03\xf4\x77\
\x0c\x0d\x86\x15\x83\xc7\x88\x67\x28\x19\x9b\x18\x07\x18\x67\x19\
\x77\x18\xaf\x98\x4c\xa6\x19\xd3\x8b\x19\xc7\x54\x30\x37\x31\xeb\
\x98\xe7\x99\x0f\x99\x6f\x55\x58\x2a\xb6\x2a\x7c\x15\x91\xca\x0a\
\x95\x4a\x95\x26\x95\x1b\x2a\x2f\x54\xa9\xaa\xa6\xaa\xde\xaa\x0b\
\x55\xf3\x55\xcb\x54\x8f\xa9\x5e\x53\x7d\xae\x46\x55\x33\x53\xe3\
\xa9\x09\xd4\x96\xab\x55\xaa\x9d\x50\xeb\x53\x1b\x53\x67\xa9\x3b\
\xa8\x87\xaa\x67\xa8\x6f\x54\x3f\xa4\x7e\x59\xfd\x89\x06\x59\xc3\
\x4c\xc3\x4f\x43\xa4\x51\xa0\xb1\x5f\xe3\xbc\xc6\x20\x0b\x63\x19\
\xb3\x78\x2c\x21\x6b\x0d\xab\x86\x75\x81\x35\xc4\x26\xb1\xcd\xd9\
\x7c\x76\x2a\xbb\x98\xfd\x1d\xbb\x8b\x3d\xaa\xa9\xa1\x39\x43\x33\
\x4a\x33\x57\xb3\x52\xf3\x94\x66\x3f\x07\xe3\x98\x71\xf8\x9c\x74\
\x4e\x09\xe7\x28\xa7\x97\xf3\x7e\x8a\xde\x14\xef\x29\xe2\x29\x1b\
\xa6\x34\x4c\xb9\x31\x65\x5c\x6b\xaa\x96\x97\x96\x58\xab\x48\xab\
\x51\xab\x47\xeb\xbd\x36\xae\xed\xa7\x9d\xa6\xbd\x45\xbb\x59\xfb\
\x81\x0e\x41\xc7\x4a\x27\x5c\x27\x47\x67\x8f\xce\x05\x9d\xe7\x53\
\xd9\x53\xdd\xa7\x0a\xa7\x16\x4d\x3d\x3a\xf5\xae\x2e\xaa\x6b\xa5\
\x1b\xa1\xbb\x44\x77\xbf\x6e\xa7\xee\x98\x9e\xbe\x5e\x80\x9e\x4c\
\x6f\xa7\xde\x79\xbd\xe7\xfa\x1c\x7d\x2f\xfd\x54\xfd\x6d\xfa\xa7\
\xf5\x47\x0c\x58\x06\xb3\x0c\x24\x06\xdb\x0c\xce\x18\x3c\xc5\x35\
\x71\x6f\x3c\x1d\x2f\xc7\xdb\xf1\x51\x43\x5d\xc3\x40\x43\xa5\x61\
\x95\x61\x97\xe1\x84\x91\xb9\xd1\x3c\xa3\xd5\x46\x8d\x46\x0f\x8c\
\x69\xc6\x5c\xe3\x24\xe3\x6d\xc6\x6d\xc6\xa3\x26\x06\x26\x21\x26\
\x4b\x4d\xea\x4d\xee\x9a\x52\x4d\xb9\xa6\x29\xa6\x3b\x4c\x3b\x4c\
\xc7\xcd\xcc\xcd\xa2\xcd\xd6\x99\x35\x9b\x3d\x31\xd7\x32\xe7\x9b\
\xe7\x9b\xd7\x9b\xdf\xb7\x60\x5a\x78\x5a\x2c\xb6\xa8\xb6\xb8\x65\
\x49\xb2\xe4\x5a\xa6\x59\xee\xb6\xbc\x6e\x85\x5a\x39\x59\xa5\x58\
\x55\x5a\x5d\xb3\x46\xad\x9d\xad\x25\xd6\xbb\xad\xbb\xa7\x11\xa7\
\xb9\x4e\x93\x4e\xab\x9e\xd6\x67\xc3\xb0\xf1\xb6\xc9\xb6\xa9\xb7\
\x19\xb0\xe5\xd8\x06\xdb\xae\xb6\x6d\xb6\x7d\x61\x67\x62\x17\x67\
\xb7\xc5\xae\xc3\xee\x93\xbd\x93\x7d\xba\x7d\x8d\xfd\x3d\x07\x0d\
\x87\xd9\x0e\xab\x1d\x5a\x1d\x7e\x73\xb4\x72\x14\x3a\x56\x3a\xde\
\x9a\xce\x9c\xee\x3f\x7d\xc5\xf4\x96\xe9\x2f\x67\x58\xcf\x10\xcf\
\xd8\x33\xe3\xb6\x13\xcb\x29\xc4\x69\x9d\x53\x9b\xd3\x47\x67\x17\
\x67\xb9\x73\x83\xf3\x88\x8b\x89\x4b\x82\xcb\x2e\x97\x3e\x2e\x9b\
\x1b\xc6\xdd\xc8\xbd\xe4\x4a\x74\xf5\x71\x5d\xe1\x7a\xd2\xf5\x9d\
\x9b\xb3\x9b\xc2\xed\xa8\xdb\xaf\xee\x36\xee\x69\xee\x87\xdc\x9f\
\xcc\x34\x9f\x29\x9e\x59\x33\x73\xd0\xc3\xc8\x43\xe0\x51\xe5\xd1\
\x3f\x0b\x9f\x95\x30\x6b\xdf\xac\x7e\x4f\x43\x4f\x81\x67\xb5\xe7\
\x23\x2f\x63\x2f\x91\x57\xad\xd7\xb0\xb7\xa5\x77\xaa\xf7\x61\xef\
\x17\x3e\xf6\x3e\x72\x9f\xe3\x3e\xe3\x3c\x37\xde\x32\xde\x59\x5f\
\xcc\x37\xc0\xb7\xc8\xb7\xcb\x4f\xc3\x6f\x9e\x5f\x85\xdf\x43\x7f\
\x23\xff\x64\xff\x7a\xff\xd1\x00\xa7\x80\x25\x01\x67\x03\x89\x81\
\x41\x81\x5b\x02\xfb\xf8\x7a\x7c\x21\xbf\x8e\x3f\x3a\xdb\x65\xf6\
\xb2\xd9\xed\x41\x8c\xa0\xb9\x41\x15\x41\x8f\x82\xad\x82\xe5\xc1\
\xad\x21\x68\xc8\xec\x90\xad\x21\xf7\xe7\x98\xce\x91\xce\x69\x0e\
\x85\x50\x7e\xe8\xd6\xd0\x07\x61\xe6\x61\x8b\xc3\x7e\x0c\x27\x85\
\x87\x85\x57\x86\x3f\x8e\x70\x88\x58\x1a\xd1\x31\x97\x35\x77\xd1\
\xdc\x43\x73\xdf\x44\xfa\x44\x96\x44\xde\x9b\x67\x31\x4f\x39\xaf\
\x2d\x4a\x35\x2a\x3e\xaa\x2e\x6a\x3c\xda\x37\xba\x34\xba\x3f\xc6\
\x2e\x66\x59\xcc\xd5\x58\x9d\x58\x49\x6c\x4b\x1c\x39\x2e\x2a\xae\
\x36\x6e\x6c\xbe\xdf\xfc\xed\xf3\x87\xe2\x9d\xe2\x0b\xe3\x7b\x17\
\x98\x2f\xc8\x5d\x70\x79\xa1\xce\xc2\xf4\x85\xa7\x16\xa9\x2e\x12\
\x2c\x3a\x96\x40\x4c\x88\x4e\x38\x94\xf0\x41\x10\x2a\xa8\x16\x8c\
\x25\xf2\x13\x77\x25\x8e\x0a\x79\xc2\x1d\xc2\x67\x22\x2f\xd1\x36\
\xd1\x88\xd8\x43\x5c\x2a\x1e\x4e\xf2\x48\x2a\x4d\x7a\x92\xec\x91\
\xbc\x35\x79\x24\xc5\x33\xa5\x2c\xe5\xb9\x84\x27\xa9\x90\xbc\x4c\
\x0d\x4c\xdd\x9b\x3a\x9e\x16\x9a\x76\x20\x6d\x32\x3d\x3a\xbd\x31\
\x83\x92\x91\x90\x71\x42\xaa\x21\x4d\x93\xb6\x67\xea\x67\xe6\x66\
\x76\xcb\xac\x65\x85\xb2\xfe\xc5\x6e\x8b\xb7\x2f\x1e\x95\x07\xc9\
\x6b\xb3\x90\xac\x05\x59\x2d\x0a\xb6\x42\xa6\xe8\x54\x5a\x28\xd7\
\x2a\x07\xb2\x67\x65\x57\x66\xbf\xcd\x89\xca\x39\x96\xab\x9e\x2b\
\xcd\xed\xcc\xb3\xca\xdb\x90\x37\x9c\xef\x9f\xff\xed\x12\xc2\x12\
\xe1\x92\xb6\xa5\x86\x4b\x57\x2d\x1d\x58\xe6\xbd\xac\x6a\x39\xb2\
\x3c\x71\x79\xdb\x0a\xe3\x15\x05\x2b\x86\x56\x06\xac\x3c\xb8\x8a\
\xb6\x2a\x6d\xd5\x4f\xab\xed\x57\x97\xae\x7e\xbd\x26\x7a\x4d\x6b\
\x81\x5e\xc1\xca\x82\xc1\xb5\x01\x6b\xeb\x0b\x55\x0a\xe5\x85\x7d\
\xeb\xdc\xd7\xed\x5d\x4f\x58\x2f\x59\xdf\xb5\x61\xfa\x86\x9d\x1b\
\x3e\x15\x89\x8a\xae\x14\xdb\x17\x97\x15\x7f\xd8\x28\xdc\x78\xe5\
\x1b\x87\x6f\xca\xbf\x99\xdc\x94\xb4\xa9\xab\xc4\xb9\x64\xcf\x66\
\xd2\x66\xe9\xe6\xde\x2d\x9e\x5b\x0e\x96\xaa\x97\xe6\x97\x0e\x6e\
\x0d\xd9\xda\xb4\x0d\xdf\x56\xb4\xed\xf5\xf6\x45\xdb\x2f\x97\xcd\
\x28\xdb\xbb\x83\xb6\x43\xb9\xa3\xbf\x3c\xb8\xbc\x65\xa7\xc9\xce\
\xcd\x3b\x3f\x54\xa4\x54\xf4\x54\xfa\x54\x36\xee\xd2\xdd\xb5\x61\
\xd7\xf8\x6e\xd1\xee\x1b\x7b\xbc\xf6\x34\xec\xd5\xdb\x5b\xbc\xf7\
\xfd\x3e\xc9\xbe\xdb\x55\x01\x55\x4d\xd5\x66\xd5\x65\xfb\x49\xfb\
\xb3\xf7\x3f\xae\x89\xaa\xe9\xf8\x96\xfb\x6d\x5d\xad\x4e\x6d\x71\
\xed\xc7\x03\xd2\x03\xfd\x07\x23\x0e\xb6\xd7\xb9\xd4\xd5\x1d\xd2\
\x3d\x54\x52\x8f\xd6\x2b\xeb\x47\x0e\xc7\x1f\xbe\xfe\x9d\xef\x77\
\x2d\x0d\x36\x0d\x55\x8d\x9c\xc6\xe2\x23\x70\x44\x79\xe4\xe9\xf7\
\x09\xdf\xf7\x1e\x0d\x3a\xda\x76\x8c\x7b\xac\xe1\x07\xd3\x1f\x76\
\x1d\x67\x1d\x2f\x6a\x42\x9a\xf2\x9a\x46\x9b\x53\x9a\xfb\x5b\x62\
\x5b\xba\x4f\xcc\x3e\xd1\xd6\xea\xde\x7a\xfc\x47\xdb\x1f\x0f\x9c\
\x34\x3c\x59\x79\x4a\xf3\x54\xc9\x69\xda\xe9\x82\xd3\x93\x67\xf2\
\xcf\x8c\x9d\x95\x9d\x7d\x7e\x2e\xf9\xdc\x60\xdb\xa2\xb6\x7b\xe7\
\x63\xce\xdf\x6a\x0f\x6f\xef\xba\x10\x74\xe1\xd2\x45\xff\x8b\xe7\
\x3b\xbc\x3b\xce\x5c\xf2\xb8\x74\xf2\xb2\xdb\xe5\x13\x57\xb8\x57\
\x9a\xaf\x3a\x5f\x6d\xea\x74\xea\x3c\xfe\x93\xd3\x4f\xc7\xbb\x9c\
\xbb\x9a\xae\xb9\x5c\x6b\xb9\xee\x7a\xbd\xb5\x7b\x66\xf7\xe9\x1b\
\x9e\x37\xce\xdd\xf4\xbd\x79\xf1\x16\xff\xd6\xd5\x9e\x39\x3d\xdd\
\xbd\xf3\x7a\x6f\xf7\xc5\xf7\xf5\xdf\x16\xdd\x7e\x72\x27\xfd\xce\
\xcb\xbb\xd9\x77\x27\xee\xad\xbc\x4f\xbc\x5f\xf4\x40\xed\x41\xd9\
\x43\xdd\x87\xd5\x3f\x5b\xfe\xdc\xd8\xef\xdc\x7f\x6a\xc0\x77\xa0\
\xf3\xd1\xdc\x47\xf7\x06\x85\x83\xcf\xfe\x91\xf5\x8f\x0f\x43\x05\
\x8f\x99\x8f\xcb\x86\x0d\x86\xeb\x9e\x38\x3e\x39\x39\xe2\x3f\x72\
\xfd\xe9\xfc\xa7\x43\xcf\x64\xcf\x26\x9e\x17\xfe\xa2\xfe\xcb\xae\
\x17\x16\x2f\x7e\xf8\xd5\xeb\xd7\xce\xd1\x98\xd1\xa1\x97\xf2\x97\
\x93\xbf\x6d\x7c\xa5\xfd\xea\xc0\xeb\x19\xaf\xdb\xc6\xc2\xc6\x1e\
\xbe\xc9\x78\x33\x31\x5e\xf4\x56\xfb\xed\xc1\x77\xdc\x77\x1d\xef\
\xa3\xdf\x0f\x4f\xe4\x7c\x20\x7f\x28\xff\x68\xf9\xb1\xf5\x53\xd0\
\xa7\xfb\x93\x19\x93\x93\xff\x04\x03\x98\xf3\xfc\x63\x33\x2d\xdb\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\
\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\x46\x00\x00\x00\x40\
\x49\x44\x41\x54\x78\xda\x5c\x8c\x31\x11\x00\x30\x08\xc4\x42\x2d\
\x20\x03\xfc\x2b\x61\x45\x02\x1a\xe8\x54\xae\x6d\xc6\xcf\x7d\xc4\
\xcc\x1a\x20\x22\x84\x8b\x05\x90\x99\xa8\x6a\xdf\x42\xba\x7b\xc6\
\xaa\x92\x47\x1c\xdc\x7d\xb2\x8b\x8f\x93\x7d\x1e\xc0\x64\xf7\x00\
\xf5\x9f\x1d\xd3\x02\x88\xef\xaf\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x03\xac\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\x29\x49\x44\
\x41\x54\x58\x85\xed\x95\x4f\x68\x5c\x55\x14\xc6\x7f\xe7\x65\x88\
\x64\xda\xc6\xbd\xa9\x94\x48\x57\xb6\x91\x3a\x28\xae\xd3\x4d\xc5\
\x0a\x4d\x40\x66\x63\xda\x37\x2f\x25\xcd\x46\x07\xd1\x24\x8e\xae\
\xb2\x50\xa8\x49\xdd\x64\x99\xc2\xbc\x19\xd3\x6e\x9e\x20\x53\xc1\
\xe2\x9f\x85\x75\x1b\xfc\xd3\xa4\x15\x91\x52\x4a\x70\x4a\xd7\x25\
\x33\x24\xcd\xe0\xfb\x5c\xbc\x37\x4d\x90\xbc\x37\x1d\xe9\xce\xf9\
\x56\xf7\xcf\x77\xce\xfd\xee\x39\xe7\x9e\x0b\x3d\xf4\xf0\x7f\x87\
\x75\x43\x0e\x82\xa0\x7f\xab\xd1\x18\x97\xd9\x98\x41\x0e\x18\x8a\
\xb7\xea\x98\xfd\x2a\xa8\x65\xb3\xd9\x5a\x3e\x9f\xdf\x79\xea\x02\
\xaa\xe5\xf2\x5b\x98\x2d\x00\xc3\x06\xb7\x04\x37\x64\x56\x07\x70\
\xc2\x70\x08\xb3\x51\xc1\x08\x70\xd7\x60\xee\x9c\xe7\x7d\xf5\x54\
\x04\x04\x41\xd0\xb7\xd5\x6c\x2e\x00\xef\x1b\x7c\x6b\x61\x58\x3a\
\x7b\xfe\xfc\xda\x7e\x5c\xdf\xf7\x4f\x38\x70\x11\x38\x05\x2c\xde\
\xdb\xd8\x28\xcd\xcf\xcf\x87\x69\xfe\x33\x9d\x04\xc4\x87\xbf\x27\
\x69\xd6\x9d\x9c\xbc\x94\xc6\xf5\x3c\xef\x26\xf0\x7a\xd5\xf7\x67\
\x81\x8b\xc3\x47\x8e\x00\xcc\xa5\xd9\xa4\x46\x20\x0e\xfb\x97\x66\
\x36\x73\xae\x50\xf8\x1c\x60\x69\x69\xe9\x99\xc1\xc1\xc1\x69\x93\
\xde\x26\x0a\x39\x26\xad\xcb\xec\xea\xc3\xcd\xcd\xe5\x62\xb1\xf8\
\x08\xa0\x52\xa9\xcc\x99\xf4\x99\x03\xe3\x67\x3d\xaf\xd6\xb5\x80\
\x20\x08\xfa\xb7\x9b\xcd\x3f\x24\xfd\xe9\x4e\x4e\xbe\x01\x70\xe5\
\xf2\xe5\xc3\x61\x26\x73\x3d\xce\x75\x08\x38\x31\x3d\x1a\x9b\xad\
\xf7\xb5\x5a\xa7\x27\xa6\xa6\xea\x00\x15\xdf\xff\xde\xcc\x86\x07\
\xb2\xd9\x63\x49\x85\xe9\xec\xb7\x08\xb0\xd5\x68\x8c\x0b\x5e\x70\
\xa4\x8f\xda\x37\x0f\x33\x99\xeb\x32\x3b\xbe\x8f\x6d\x7b\x3c\xf2\
\x77\x26\xf3\x4d\x10\x04\xfd\x00\xe6\x38\x1f\x22\x1d\xdd\x6e\x36\
\xcf\x24\x9d\x93\x28\x40\x66\x63\xc0\x5a\xbb\xe0\x9e\x3d\x74\xe8\
\x82\x60\x04\x29\x39\x6d\xd1\xde\x4b\x5b\x8d\xc6\x05\x00\xd7\x75\
\x7f\xc3\xec\x36\xd0\xbd\x00\x83\x9c\x49\x3f\xed\x59\x9a\x20\x0a\
\x75\x3a\xa4\xd0\x22\x6e\x7b\xfe\xa3\xe0\x95\xae\x05\x60\xf6\x5c\
\xfb\x9d\xc7\x38\x96\xca\xdf\xb5\x73\x14\x71\xdb\xb8\x8f\xd9\x50\
\x12\x3d\xd5\xa1\xcc\xba\xea\x94\xfb\xea\x01\x43\x4a\x8c\x5c\xb2\
\x00\xe9\x81\x49\x87\xf7\xac\xfc\xce\x13\xa6\x40\x70\xfb\xf1\x34\
\xba\xfd\x83\xee\x05\x98\xfd\x8c\xd9\xe8\x9e\x95\x2b\xa9\xfc\x5d\
\x3b\xc7\xe0\xea\xae\x1e\x9d\x04\x56\xbb\x16\x20\xa8\x21\x1d\xf7\
\x7d\xff\x04\xc0\xc3\xcd\xcd\x65\xcc\xd6\x31\x53\xca\xe1\x02\x6e\
\x0e\x1c\x3c\xb8\x0c\xb0\x52\x2e\xe7\x0c\x5e\x44\xfa\xba\x6b\x01\
\xd9\x6c\xb6\x06\xdc\x8d\x7b\x3b\xc5\x62\xf1\x51\x5f\xab\x75\x1a\
\xb8\x15\x53\x76\xd3\xd1\xce\xb1\xb4\x86\xe3\xbc\x99\xcf\xe7\x77\
\x24\x59\x18\x7d\x5e\x77\xb6\x5b\xad\x6b\x5d\x0b\xc8\xe7\xf3\x3b\
\x38\xce\x2c\x70\x2a\xee\xed\x4c\x4c\x4d\xd5\x07\xb2\xd9\x57\x91\
\xde\x95\xb4\x0a\x34\x81\xa6\x60\xd5\xcc\xde\x19\x38\x70\xe0\x35\
\xd7\x75\xef\x03\x54\x7d\xbf\x04\x9c\x94\xd9\xcc\xf4\xf4\x74\x2b\
\xe9\x9c\x8e\x55\x5e\xf5\xfd\x05\xe0\x03\xa0\xe4\x7a\xde\x62\x27\
\xbe\x24\xab\xfa\x7e\xc9\xcc\x3e\x01\x16\x5d\xcf\x2b\xa5\xf1\x3b\
\x16\xd5\xbd\x8d\x8d\x92\xa4\x4b\xc0\x42\xd5\xf7\xbf\xab\x56\xab\
\x2f\x27\x71\x57\xca\xe5\xdc\x17\x95\xca\x0f\x66\xf6\x29\xd1\x77\
\xfc\x71\x27\xff\x4f\xfc\xce\x57\x7c\x7f\x2c\x34\x5b\x44\x3a\x1a\
\xb7\xd7\x1b\x82\xbf\x62\x27\xcf\x23\x8d\x12\x35\xa0\x3b\x32\x9b\
\x29\x14\x0a\x89\x85\xf7\x9f\x04\xc0\xe3\x1f\xf2\x8c\x60\x0c\xc8\
\x61\x16\xf5\x09\xa9\x6e\xf0\x8b\xa4\xda\x76\xab\x75\x2d\x2d\xe7\
\x3d\xf4\xd0\xc3\xbf\xf1\x0f\x78\xe5\x4e\xf2\x11\xe4\x69\x42\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x4a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x19\x10\x14\x1a\x38\xc7\x37\xd0\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x01\xae\x49\x44\x41\x54\x78\xda\xed\x9b\x49\
\x92\xc3\x20\x0c\x45\x23\x5d\xdc\xf6\xc9\xd3\xbb\xae\x54\x06\x26\
\xe9\x7f\x09\x8c\xd6\x5d\x32\xef\x21\x68\x20\xf0\x78\xec\xd8\xb1\
\xe3\xce\x21\xcc\x8f\x9d\xe7\xf9\x6c\xfc\x3b\x59\x42\x40\x2b\x70\
\xa4\x10\xc9\x0a\xcd\x92\x21\xb3\x80\xa3\x44\xc8\x8c\xf0\x9e\x12\
\x64\x46\x70\x4f\x11\x32\x3b\xbc\x55\x82\xcc\x0e\x6e\x15\x21\x2b\
\xc1\x8f\x48\x90\xd5\xe0\x7b\x25\xe8\x5e\x0a\x2f\xd8\xfb\x3d\x55\
\x20\x56\xf8\xe3\x38\xfe\x73\x5c\xd7\x45\x11\xf5\xfa\xcd\xda\x77\
\x6b\x12\xd4\xbb\x61\xef\x8d\x43\xc3\x5b\x43\x11\xa5\x8f\x92\x30\
\x92\xb7\xc6\xa0\xa8\x71\xef\x2d\xc1\x92\xaf\xc4\x62\x1e\x02\xa5\
\xf1\xe7\x25\xa1\x94\xc7\x3a\xef\x88\x57\xef\xa3\x1a\xe9\x99\xf7\
\xdb\x84\xe8\x36\x09\x22\x2a\x01\xd9\xf3\x90\xff\x02\x9e\x12\x18\
\xf0\x5f\x87\x80\xc7\xa2\xc7\xda\x78\x24\xfc\xfb\x30\x80\x2c\x85\
\x2d\x95\xc0\xea\x79\xf8\x5e\x60\x44\x02\x1b\x1e\xbe\x19\xea\x91\
\x10\x01\xff\x31\x07\xa0\x36\x3d\x35\x38\x36\xfc\xeb\x3c\x40\xd9\
\x0e\x8f\xce\x09\x8c\xcd\x15\xed\x3c\xa0\x17\x86\xb5\xb3\xa4\x1e\
\x88\xb4\x42\xb1\xe0\xe9\x02\x5a\xe0\x98\xf0\x21\x02\x2c\xeb\x80\
\xe9\x05\xb4\xc2\x31\x25\x68\x36\x78\xb6\x04\x8d\x86\x67\x9c\x27\
\x84\x0a\x68\x81\x8f\x94\x00\xd9\x0d\x8e\xf6\x3c\x63\x51\x44\xd9\
\x0d\x8e\xc2\x44\x54\x82\x66\x1a\xf3\x11\x12\x34\x13\x7c\x84\x04\
\xb7\x43\x51\xc4\x18\xf6\xce\x07\x3d\x14\x45\x4c\x60\x8c\x4a\xd0\
\xac\xf0\x2c\x09\x52\x28\x97\x67\x34\xbc\xe7\x77\x7e\xfd\x48\x1a\
\x72\x26\x98\x21\x5f\x55\x80\xe5\xe6\x15\xaa\xb1\xa3\x79\x4b\x2c\
\x9a\xbd\xe7\xd1\xf9\xcd\x17\x24\xb2\x47\xad\x92\xf7\x15\x99\x8e\
\x64\xfb\x96\xd8\x8a\xb1\x2f\x4a\x0e\x24\xbf\xef\x55\xd9\xcc\x22\
\x68\x97\xa5\x33\x4a\x08\xb9\x2e\x9f\x45\x82\xf5\xd1\xc4\x7e\x32\
\x03\x68\xd8\x3d\x1f\x4d\x21\x65\x4c\xf5\x6c\xce\x43\x08\xf3\xe1\
\xe4\x8e\xbb\xc7\x1f\xfe\x88\x5a\xe2\xcd\xef\x1c\x49\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xac\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x07\x00\x00\x00\x3f\x08\x06\x00\x00\x00\x2c\x7b\xd2\x13\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xb3\x00\x79\x00\x79\xdc\xdd\
\x53\xfc\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x19\x10\x2e\x14\xfa\xd6\xc4\xae\x00\x00\x00\x39\x49\x44\
\x41\x54\x38\xcb\x63\x60\x20\x06\xc4\xc7\xc7\x33\xc4\xc7\xc7\xa3\
\x88\x31\x61\x53\x84\x53\x12\xaf\xce\x91\x28\xc9\x82\xc4\xfe\x8f\
\xc4\x66\x1c\x0d\xa1\x51\xc9\x51\xc9\x51\x49\x7c\x05\x06\xe3\x68\
\x08\x91\x2a\x49\x3e\x00\x00\x88\x4b\x04\xd3\x39\x2e\x90\x3f\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xb6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x11\x08\x06\x00\x00\x00\xc7\x78\x6c\x30\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\
[Binary resource data omitted: a run of small embedded PNG icons (dimensions from 6x9 up to 81x58 px, several tagged "Created with GIMP" or "www.inkscape.org" in their tEXt/iTXt chunks), each stored as a length-prefixed, backslash-escaped byte string, followed by one zlib-compressed blob (deflate header \x78\x9c).]
\x76\xd5\x62\xb1\x93\x46\xb1\x4a\x8c\x86\x0d\x34\xd5\xa2\x89\x95\
\x81\x8f\x4b\xa3\xcd\x04\xc1\xfe\xda\xb6\x95\xca\x3a\x45\xdd\x01\
\xcb\xf1\x5e\xdb\x88\xf7\xc4\x13\xab\x91\x52\xb0\x46\xbb\x6e\xe3\
\x1a\xb7\xd0\xf3\x07\x73\x4b\xe6\x48\x36\xb3\x26\x6f\x06\xfb\x56\
\x66\x09\xd4\xaa\xbc\x5a\x34\xcb\x52\xfe\xa2\x5e\x22\xc3\xa9\xa7\
\xe5\x57\xe0\xb7\xb4\x72\x8b\xb4\xcb\x38\x46\x45\xdd\xea\xb0\xbc\
\x51\xf5\xff\x48\xc3\xeb\x55\x1a\xb2\x48\x8c\xa8\x9c\xce\xc3\xc7\
\xc0\xe8\x62\x49\xfa\xc5\x4a\x69\x18\xd0\x8a\x88\x21\x0b\x3c\x11\
\xa4\xd6\x21\x0f\xb8\x34\x14\x74\x92\x40\x98\x97\xe3\xda\x64\x9f\
\xca\x25\x68\x38\x6a\xc5\x49\xeb\xd3\xb5\x86\xa3\xb6\x7b\xe2\x62\
\x62\x75\x3f\xcc\x8d\xa0\x8a\x23\x16\x56\xa9\xa1\x93\x17\x25\xc9\
\x39\x55\x76\x4c\xeb\xd5\xeb\xbb\x25\x58\xdf\x6b\x7b\xa5\x1f\xc4\
\x26\x31\xf9\xd7\x67\x34\xad\x2c\xd5\x5e\x6d\x38\xaf\x16\xff\x6b\
\xf6\xa1\xae\xa8\xae\x76\x7f\x9e\x52\xf3\xb5\x6a\xf3\xbb\x66\xa7\
\x3a\x5e\x77\xa3\xe0\xff\x99\x5e\x85\xdf\xe5\x8c\x64\x7a\xdd\x4c\
\xd7\x51\xe8\x73\xed\xa0\x85\xc7\x2d\x03\xf8\x76\x82\x9f\x92\xf2\
\x69\x4f\xa9\xec\x50\xa9\xfd\xde\xf8\xae\x17\x94\x46\x52\x80\xa5\
\x52\xf4\x4f\xb0\x5f\x60\x17\x9a\x49\x1e\x4c\x6a\x32\xb4\x42\x93\
\x53\x4f\xcf\x4f\x83\xd5\x0c\x67\x3f\x87\x82\x83\x19\xf9\x30\x93\
\xf0\xfe\xc2\x1f\x23\x6f\x55\xa1\x1e\x11\xc3\x90\x7b\x18\x62\x09\
\x80\xbb\x46\x00\xcc\x4b\xcf\x8d\xdb\x2b\x10\xf4\x67\x2f\x00\xcb\
\x92\x0a\xb4\x95\x76\x0f\xcc\x63\x6d\x3a\xb3\x62\x37\xd7\xaa\x80\
\xb9\x4b\x0f\xd3\xaa\xb5\x7f\x1b\x55\x62\x3b\x2f\x8b\xb8\xc3\x23\
\xf8\x1c\x54\x24\xae\x20\x13\x54\x69\x17\x5b\x2c\xa6\xb0\x84\x6c\
\x73\xf2\x32\x8b\xad\x14\x13\x58\xa9\xf4\xdd\xb8\x3d\x8f\xdc\x07\
\x5c\x3d\xfb\xdd\xdf\xdf\x4f\x45\x4f\xc7\xff\xbe\x20\xbf\xef\x96\
\xee\x0a\x7f\xd8\x6a\x6f\xfd\x51\x01\x9e\x12\x16\x09\xa8\x77\x89\
\x4b\xea\x64\xca\x99\xc6\xac\x51\xd2\xc6\x01\x39\x90\x09\xee\xc9\
\x72\x68\x55\x41\x03\x52\x3d\xda\x3b\xd2\xc5\xae\xea\x54\x32\xdc\
\x05\x38\xe3\xd6\x23\xd3\xd7\x34\xf4\xe7\x0a\x32\x4b\x9e\x5c\x61\
\x1a\x5e\x3c\x80\x91\x26\x00\x69\xea\x46\xa2\x18\x68\x80\xe6\x4e\
\xb6\x02\xe0\x5f\xf3\x01\xcb\xb3\xa4\x0e\xd9\x74\x7c\x19\xf0\x75\
\x48\x97\x81\xcb\x7b\xce\x37\xeb\x78\xa9\x9c\xfb\xa8\x7e\xcc\x95\
\x99\x24\x77\x75\x5a\x5c\x27\x4a\xa1\xfd\xfd\xe3\x7d\x35\x3c\xa7\
\xf5\x78\x27\x35\x0f\x4b\x6d\x49\x04\xd8\xda\x16\xa0\xd9\xa0\xed\
\xb1\x69\xa5\xab\xac\x40\x5d\xcc\x1d\x6d\xfb\x59\x15\x8b\x5e\xda\
\x7d\x15\x61\x58\x0a\x81\xaf\x2c\x8c\x12\x85\xbc\xdd\xb7\x16\x81\
\xbc\xdd\x95\xad\x60\x7b\xbf\x47\x3e\x86\x4c\x68\x45\x36\xf3\x08\
\xb9\x6d\xa7\x97\x39\x59\x7a\x16\x2a\x7c\x98\x86\xd2\x51\xe0\xec\
\x53\xd2\x2a\xa4\x82\x10\x27\x53\x8e\x35\x0b\xbc\x66\xa5\x58\xaa\
\xc2\x1d\x8a\x8d\x4e\x49\x2a\x6c\x27\x22\x53\x2c\x96\x29\x20\x92\
\xcc\xfe\xc1\xf1\xc1\xb1\x2a\x1f\x9e\x50\x48\x18\xd8\x13\x51\x6b\
\x5d\x79\x81\xd4\x91\x57\xc2\xf9\x77\x1e\xbd\x09\x40\x72\x48\xa7\
\xc3\xa2\xb1\x16\x1f\x18\x61\xbc\xf9\x5e\x5c\xe2\xb7\xf8\x0d\x85\
\x91\xbc\xe0\xed\xa0\x7b\x70\x74\xb8\x67\xb1\xf8\x3d\xbb\x61\x6f\
\x42\x9a\x3c\xbb\xd9\x77\xc9\x47\x25\x03\x65\xed\xb1\x94\x33\xac\
\xd2\x4b\x78\x69\xa3\x56\x64\xe4\xcc\xa3\x70\xd5\x22\xe9\x05\x9f\
\x17\x66\xca\xc2\x24\x91\x9f\xdf\xc9\xf5\x35\xa5\xb8\x22\x33\x43\
\x64\xc9\x6d\x59\xcf\xc4\x69\x1b\xa3\x95\x2c\x1e\xf7\x91\xfb\x6c\
\x76\xc8\x9a\x67\x44\x79\xa9\xaf\x11\x12\xd3\x4b\xad\xa9\xeb\xb2\
\x75\xd2\x24\x22\x72\xad\x0a\x27\x74\xb2\x6b\x04\x59\xc0\x1d\x72\
\x27\xce\xde\x22\x4e\xfa\xda\x1a\xab\x1e\x62\x35\x92\x5b\xcd\x7d\
\x68\xe7\x6b\x2c\x46\x7d\x04\x9f\x03\x6d\xbb\x25\x75\x01\xcc\x17\
\x2b\x13\x48\xf7\x64\xea\xa9\xcc\x32\x9e\xb1\x66\x4e\x69\xd9\xd4\
\xb2\x79\x4e\xdd\x8b\x41\xe6\x3d\x95\x23\x91\x82\x1d\xa0\x8c\x39\
\x22\xdf\x8f\xb1\x4e\x91\x74\xf3\x8b\xcf\x12\x1b\x21\x54\xc7\x3f\
\x91\x94\xb4\x3c\x6c\xa3\x44\x54\x23\x4c\x9f\xbf\x29\x54\x88\xd3\
\xc3\x39\x7d\xc2\xc5\xa2\x42\xb4\x98\x57\x4e\x2c\xd8\xd2\xf9\x9e\
\xc3\x44\xe5\x33\xb5\x5a\xe1\x8b\x26\x57\x06\xf4\xcd\x8c\xb4\x08\
\x40\x9a\xae\x39\x62\x92\x7d\xc1\x62\xf1\x2a\xce\xa6\xa6\xa8\x74\
\xdf\xf3\x42\xf7\xd2\xb8\xbd\x74\xa7\xd8\xd7\x56\xf0\xb6\x98\xee\
\x72\x9e\xc7\xb7\xe8\xcb\x67\x86\x62\x53\xdf\x59\xb9\x01\xce\xbe\
\xd6\x28\x56\x45\xcd\x3d\xe8\x2b\xbc\xb4\x08\x00\xec\xcd\x11\xca\
\x3f\x57\xb0\x34\x42\xce\xf3\xdc\x9a\x47\xee\xe3\x89\x1b\xa7\x37\
\xdb\x48\xd3\x26\xf2\x27\x47\xe7\x48\x7e\xc6\x2f\x00\xa7\x17\x7b\
\xa7\xcf\xf4\x54\x1e\x3b\x30\x97\x42\xb5\x9e\x27\x68\xe7\xf2\x99\
\x5e\xa1\xf4\xc3\x18\x33\xbf\x80\xaa\x9c\x5f\x22\xe3\x24\xb3\xc8\
\x3d\xcb\x69\x43\x56\xfd\xba\x0f\x1d\x4c\x47\xbd\x08\x2f\x5b\x91\
\xaa\xf2\xda\x62\xe3\x4a\xe2\x06\xc5\x4d\x46\x37\x68\xd2\x3f\xb9\
\xa3\x27\xae\x38\x29\x89\x3b\x75\x48\x3c\xa3\xe4\xa5\xdb\xd3\x0e\
\xf9\x64\x47\x60\x66\x8c\xcd\x97\xbd\x0c\x53\xce\x58\xe9\xe4\xa4\
\x4e\x3e\xea\x69\xc4\x3a\x34\x1a\xca\x08\x0e\x2d\xc1\x50\x57\x35\
\x71\xc1\xa6\xf3\x8b\xfd\x0c\xa7\xca\x70\x99\x48\xd5\x56\x04\x2a\
\x2f\x15\x5b\x5f\x83\x4d\x3d\x64\x34\xd9\x6c\x17\xb2\x99\x5a\x75\
\x76\x12\xb9\x7f\xd4\x3b\x3c\x98\x72\x0b\x39\x19\x4d\x26\xa3\x2b\
\xbb\x91\xa4\xac\xbc\xd8\x4e\xd2\x03\x2d\xaf\x6f\x24\x2c\xdc\xcd\
\xb5\x13\x6b\x48\x6c\x35\x15\xa6\xb7\x9f\x6c\x2d\x56\xd9\x64\x9b\
\x55\xbe\xb5\x58\x99\xb5\x1a\x8c\xce\x6c\x91\xcd\x48\x67\xce\xc0\
\x66\x2e\x07\xe7\x13\xbb\xc5\x10\xf2\x5e\x6c\x2f\xac\xd0\xf2\x63\
\xbc\x8a\xc5\x12\x6a\x99\x0b\x25\xf2\x27\x1b\x4b\x24\xca\xf0\xa5\
\x3c\x8b\xc5\x0e\x6a\x19\x8b\xca\x6a\x05\xf7\x42\x6c\x25\x7d\x2f\
\x85\xd5\x58\xd2\x48\xe2\xa5\xd6\x92\x21\x94\x9f\xb1\x08\x65\x3a\
\x20\x5d\x82\x29\x8d\xff\xdf\xd6\x52\xc6\xb3\x98\x7d\x0a\x38\xad\
\x60\x2c\x0c\x0c\x4a\x0b\xf3\xec\x76\x91\x74\xc4\xd8\xc1\x01\xcd\
\x03\x18\xb0\x5a\x47\x8b\x81\x27\xc0\x84\xac\xa8\x28\x6f\xb5\x31\
\x99\xc7\x9a\xca\xb0\xc4\x73\x9b\x9a\x3c\x19\xb9\x51\x19\xe6\xea\
\xe0\xcc\x39\x9f\x47\x93\xa1\xb3\x70\xf6\x35\x4d\x59\x72\x92\x95\
\xf6\xde\xf9\x1e\x33\x63\x7a\xd6\x76\xea\x46\xad\x34\xac\x25\x08\
\x2b\xc6\xfa\x02\x42\x10\x46\x0f\xae\x5f\x06\x04\xd8\x1d\x90\x29\
\x95\x88\x36\x54\xab\xe1\x79\x13\x29\x4d\x0b\x3f\x74\x13\x9e\x9e\
\x64\xb1\x97\x71\x03\xcc\xe2\x3a\xf2\x92\x85\x2c\x8a\xc4\xc1\x90\
\x2c\xba\x32\xe6\x99\xc3\x5e\x19\xd3\xeb\xd1\x77\xc1\xb0\x5f\x9d\
\x76\x81\x00\xc4\xbb\x07\x72\x10\xaa\x29\x8c\x7c\xb6\x1d\xb5\xc4\
\x2f\xeb\xab\x74\x72\x49\xe2\x15\x72\x40\x7d\xe9\xc5\x89\x5c\x4c\
\xce\xcb\x7f\xb3\xbd\xa6\x0c\xd4\x99\x82\xc0\x67\x4b\x71\x25\x02\
\x90\xe8\x4d\xf6\xb7\xb8\x14\x58\x96\xa4\x47\xc9\xbc\x04\x68\x01\
\xdb\x8d\x5b\xb1\x37\xf5\x41\x50\xb1\xf3\x8b\x3b\xff\x77\xe8\x05\
\x71\x8b\x5c\x33\x53\xcc\xaa\xd4\x81\x9e\x3a\x28\x7f\x0a\xc6\x5f\
\x08\xca\xd9\xd2\xf3\xe7\xd0\x33\xfd\xf6\x73\x09\xc8\xc1\x4f\x6d\
\x9c\xdc\xad\x36\x06\xa5\x2d\xea\x58\x79\x68\x85\x4c\x3d\x85\xf8\
\x25\x85\x98\x47\x68\xb8\x62\x48\xec\xd4\xda\xc8\x34\x87\x28\x44\
\x56\xa7\x92\x00\x7c\x91\x30\x45\x25\xbf\xb2\x48\x15\x97\x55\x59\
\xb0\xad\x30\x90\x56\x83\x3a\xf2\xcd\xa6\x3c\x5f\xca\x8c\xee\x7a\
\xb2\x36\xc9\xe6\x5e\x8e\xdd\x37\xd5\x22\x27\xc9\x0d\x66\x76\xd8\
\xd0\x6f\xeb\x80\xb2\x16\x85\x76\x13\xb1\xff\x18\x35\xf2\xcb\xad\
\xe4\xf0\xf4\x9c\xfe\x34\x2c\xc4\x16\xd1\x5a\x44\x6a\x31\xa5\xb6\
\x1d\x6f\x46\x10\x3b\x7c\x93\xde\x64\x72\x60\x00\xc0\xa8\xfc\x32\
\x38\x5e\x38\x3e\xb2\x2d\x54\x4a\xe4\x2d\xdd\x6c\xe6\x02\xb2\x2d\
\xf1\x32\x4d\x59\x67\x6d\x64\x1c\x74\x6f\xd9\x8d\x3e\xf2\xb7\xc2\
\x91\xd7\xc0\x11\xa5\x3c\xa7\x7f\x9e\xba\xe9\x37\xf8\xd3\x69\xa6\
\xf1\x5a\xfa\xb6\x37\xd4\x7e\xd7\x66\x3b\x44\x4d\xf1\xa4\x8b\xb6\
\xdd\x23\xf2\xe1\x8f\x3a\x90\xb9\x77\xc9\x67\xe7\x7d\x19\x69\xf0\
\xba\xfa\xbe\x51\x57\xd7\x4e\x6c\x39\xa8\xb5\x5f\x5e\x0e\x4c\x37\
\xda\xe9\x9a\x72\xb4\x94\x57\x4c\xdb\x48\x5a\x8a\x14\xa3\x13\x54\
\x53\x2d\xa6\x42\x34\x45\xfd\x44\xb5\xb4\xa9\x62\xf2\xa5\xb0\xc9\
\x0b\x32\xf3\xb2\xdc\xf7\x19\x14\xe6\x9d\xf6\x9b\x77\xf9\xdb\x12\
\x82\xa6\xdf\x57\xe1\x6a\xbd\xba\x0a\xe7\xf8\xc3\x56\x67\xeb\x0f\
\xf4\x1f\xb2\xff\x10\x06\xfe\x33\xbd\xb1\x4f\xaf\x48\xd2\x7e\x37\
\xa4\x1b\x29\x29\xe8\xe7\x70\xe8\xdd\x3e\xba\x69\xe1\x7e\xc5\xe8\
\xd1\x4d\x07\x92\x9d\x0b\x0a\x99\xdf\x59\x26\x23\x8b\xb9\xd2\x58\
\xea\xe5\xd3\xdb\x35\xe8\x1d\x06\xf4\xb5\xa5\x99\xc4\x76\x5e\x91\
\x58\x9a\x89\x49\x49\x9f\x70\xba\x52\x1e\x48\x5f\xc7\x92\x9b\x32\
\x94\x32\x01\xdb\x4d\xd4\x0d\x16\xfe\x0e\x1b\x05\xb5\x78\x9d\x19\
\x62\x3f\x99\x74\xa9\x99\x45\xa9\xe3\x3e\x59\x54\x81\x60\x89\x2c\
\x37\x7b\xc4\x68\x8a\xfd\xf0\x91\xbc\x63\x78\x4d\x50\x50\x3d\xb1\
\x57\xfb\x2a\xaa\x02\x25\x9c\x61\xdf\x7d\xc6\xf3\xf4\xfb\x03\xe8\
\x37\xad\x5f\xe9\x44\xeb\x6f\x62\xa8\x7d\x1a\x9f\x7a\x86\xd6\x21\
\xb1\x87\xb4\x00\xda\xea\x32\xdb\x88\x97\xde\x22\x41\x5e\x82\x5c\
\x34\x85\xdf\xe9\x9e\x9b\xc1\x5a\xcc\x78\x23\x6f\xf3\x35\x98\xd3\
\xe7\x4d\x36\x3f\x99\x79\x6e\xf5\x8a\xe3\x41\x99\x8a\xa3\xe8\x04\
\x0c\x75\xe0\x4b\xea\xed\xd0\xdf\xc8\x79\x24\x3a\x19\xd8\xab\x39\
\x3e\xd0\x89\x4d\x5e\x0b\x1c\xce\xe8\x2b\x9c\xd3\x57\x0d\xb3\x63\
\x75\xdc\x45\x0a\x68\x96\xa3\x87\x06\xab\xf2\xcd\xf5\xda\xe7\x4c\
\xec\x50\x69\x0c\xc9\x40\xf3\x12\x3f\xd7\x6b\x27\x53\xad\x05\x6b\
\x81\x76\xac\x45\xb7\xbf\xec\xbb\xcb\xc5\xc7\x9b\xd8\x4e\x8f\x74\
\xc2\x29\xa5\xf5\x48\xaa\xf1\xf9\xb8\x64\x36\x7e\x1f\x79\x73\x22\
\xfc\xcd\x51\x34\x76\xfc\x3f\x63\x3e\x4b\xa9\xba\x84\x08\xdc\xc7\
\xe6\xb2\x82\xfd\x90\x76\xdb\xa0\x4e\x7d\xa3\x8e\x19\x91\x6e\x1a\
\xb4\x50\x94\x3b\x1d\x5b\x0c\x6a\x39\x8d\xa7\xc4\xa0\x3a\x76\x11\
\xd6\xba\xb3\xc4\xfb\x86\x73\xe2\x5e\xd1\x21\x2b\x70\x4e\x3b\xd4\
\x89\x8c\x1b\x59\xd2\x33\x66\x73\xb9\xc5\x5b\x3f\xf6\x2a\x49\xdf\
\x72\xa7\xc4\xa4\xb8\xe2\xa5\x92\x72\xf7\x49\xb2\x6c\x82\x4e\x3b\
\xcb\x7b\x9e\x33\xa8\x75\x36\x97\xab\x9c\x85\x17\xc5\x89\x62\x7b\
\xd6\x6e\xc4\xb9\x42\x32\xa7\xdd\x46\x10\xbb\x79\x96\x7b\x2f\xb9\
\x00\x2d\x60\x8c\xe2\xa0\x15\x80\x74\x1b\x37\x9f\x74\xb9\xa3\x9d\
\xf8\xcd\xb6\x5b\x59\xea\xcd\x1b\xac\x32\x24\x9d\x01\x3b\x10\x16\
\x2c\x34\xd4\x7d\x20\xe9\xea\x88\xc5\x84\x7a\xfd\xde\x71\xef\x98\
\xd5\xbb\x89\x27\xa5\xef\x9f\xa1\xcb\x61\x18\x25\xd2\x8b\x68\xc8\
\x0a\x27\x23\xdd\x9c\x02\x7a\xa9\xdb\x97\xa1\xf2\x63\x58\x35\x0e\
\x82\xc9\x27\x8a\xa8\x1b\x39\x0d\xa3\x00\x47\xdc\xc3\xf3\xd9\x54\
\x6a\x32\xd5\x38\x8e\x2d\x16\xb0\xf0\x09\xe9\x95\xdc\xc2\x8b\xc1\
\xfa\x5d\x16\xfa\x12\x0d\x77\x9a\xb7\xa1\x57\xe6\x88\x7f\xdd\x53\
\x27\xb6\xdb\x02\xc6\xbe\x56\xf1\x8e\x30\x0f\xf1\x91\xc2\x94\xf4\
\x16\x1d\x62\x70\x1e\x98\xbc\x37\xf3\xfe\xc2\x48\x3c\x87\x4e\x31\
\x5f\xc3\xc9\x25\x1c\x87\x75\xaa\xc0\xb9\x79\x38\x9f\x5d\x2f\x4a\
\xd6\xb1\x78\x5b\x5c\xb9\xec\xd9\xea\xcd\x37\x77\xa8\x7e\x4f\x33\
\xc8\x0f\x5b\x7b\x5b\x7f\x80\xc3\x48\x9f\xd1\x18\x8a\x3e\xca\x09\
\xbd\xd9\xa5\x27\x20\x6b\xe5\x7b\x49\xb2\xc9\xa6\x2d\x74\xd9\x6e\
\xbf\xe9\xc3\x8a\x0f\x1e\x6c\xee\xbd\x15\x84\x46\x16\xd0\x5a\xe1\
\x45\xbe\x3c\x62\x1d\xa1\x55\x04\x94\x7b\x6f\xdf\x1b\xff\x05\x99\
\xf9\xdd\xb0\
\x00\x00\x07\xac\
\x00\
\x00\x2b\x10\x78\x9c\xdd\x5a\x6d\x6f\xdb\x36\x10\xfe\x5e\x20\xff\
\x81\x45\xbe\x34\x45\x9c\x48\x8a\xed\x36\xca\xa7\x75\x1b\xba\x01\
\x1b\xb0\x6c\xc1\xfa\xb1\xa0\x5e\x6c\x6b\x51\x44\x4d\x92\x97\xa4\
\x45\xff\xfb\xee\x48\xf1\x4d\xa2\x64\x3b\x71\x82\x6c\x26\x10\x3b\
\x24\x45\x1e\x9f\xbb\x7b\x78\x3c\xea\xf2\x8a\xb1\xfc\x2a\x2b\x0f\
\x5e\x7d\x3d\x78\x45\xf0\x13\xb1\x2a\x49\xab\x90\xf8\xe5\x1d\xa9\
\x59\x9e\x25\x24\xca\x69\x7c\x7d\x21\x9b\xe1\xf7\xb2\x62\xeb\x22\
\x99\xc4\x2c\x67\xd0\xf1\x70\xb1\xa0\x5e\xb0\x90\x1d\x4a\x9a\x24\
\x59\xb1\xe4\x03\x5c\x58\x63\x4e\x2a\x9a\x64\xeb\x3a\x24\x67\xba\
\x85\x95\x34\xce\x9a\x7b\xe8\xed\x79\x50\xf7\xed\xe0\xd5\xc1\xab\
\xcb\x4f\x59\xb2\x4c\x1b\x25\x92\x9c\x27\xf2\xb1\xb4\x4f\x3a\xe4\
\x38\x0b\xb0\x74\x46\x09\xb3\x26\xbd\x09\x57\xec\x9f\xb4\x52\x03\
\xf6\x9f\xbd\xfc\x25\x2b\x52\x5a\x7d\x44\x09\xd3\xa2\x79\x43\xee\
\xfc\x90\x78\xc7\xe4\x5e\x7c\xdd\x05\xe2\x3f\xf8\xf2\x8f\x49\xdd\
\xb0\x12\xfe\x97\x0b\x97\x15\x3e\x39\x8c\xa9\x37\xf7\xcf\x8f\x2e\
\x6c\xb9\x3d\xfe\x71\xca\x55\xa7\x79\x1a\x37\x69\xf2\xf4\xa2\x25\
\xef\xde\x7b\x3e\x3d\x52\x52\xfc\x9a\x16\xeb\x0f\xb4\x0a\xb9\x1c\
\x8e\xe9\x43\xd2\x54\xb4\xa8\x4b\x5a\xc1\xa4\xee\xa7\xc6\xa4\xef\
\x3e\xee\xb6\x2c\x14\x93\x1a\xd0\xd8\xc3\x97\x55\x5a\xd7\x03\xa3\
\x1f\x4e\xa7\xd3\xe1\x61\x05\xdc\xdb\xe1\xd9\x1a\x22\x7c\x00\x58\
\x81\xab\x77\x6c\x54\x06\xa1\x80\xd7\x37\x2a\x39\xaa\x00\x6a\xe0\
\x63\xe9\x36\x78\x27\x53\x30\xc6\x29\x96\xd3\xb7\xfd\xc6\x40\x36\
\xf6\x9b\x7c\x89\xc8\xdb\x53\xd1\x26\x4d\xe9\x86\x56\xcb\xac\x98\
\x44\xac\x69\xd8\x4d\x38\xd1\xae\xd5\x7a\x9b\x6c\x11\x0d\x1a\x4c\
\x8d\xdd\x10\x48\xba\x6f\xc7\x14\x94\x1f\x07\xf0\x48\xe0\xe1\x9f\
\xf6\x87\xe3\xa9\xbe\x29\x8c\x1b\x7f\x92\xd5\x34\xca\x1d\xdd\xa7\
\x1e\x96\xad\x7d\xfc\xbb\xa8\x06\x3b\x8b\x9b\x9f\x41\x86\x3f\xb3\
\xf4\x76\xef\x7e\x34\x4d\xb0\xa8\x0a\xd4\xd0\x7c\x8a\xc5\x70\xad\
\x59\x82\xe5\xa8\xbb\xc6\x05\x8b\xd7\xb5\x12\xe8\xf4\xad\x54\x41\
\xa0\x54\xb0\x67\xd7\x46\xa3\x11\x12\xe0\xb8\x3f\x26\x59\xf3\xd4\
\x70\x8c\x82\x31\xb0\x19\xb4\x7b\x41\xdd\xdc\xe7\x69\x28\x80\x18\
\x76\x64\x3f\xc5\x62\x3f\x29\x77\x91\x99\x02\xfc\xb7\x75\xbd\xfa\
\xb0\x06\x07\x28\x76\xdd\x33\x1e\x86\xc2\x6c\x8e\xc5\x32\x8a\x59\
\x80\x45\x57\xcd\x00\xab\x14\x8b\xae\x3a\x87\x2a\x8a\xc5\x40\x8b\
\xa3\x37\x3f\xb2\x17\x78\x9b\x25\xcd\xca\x01\x99\x5c\x93\x0b\x94\
\x41\x38\x15\x5c\xf3\xae\x4a\xf4\x2e\xbc\x60\x45\x33\xa9\xb3\x2f\
\x30\x80\x1f\xf4\xa8\x25\x4f\x17\x0d\xa0\xdd\xab\xaf\xb2\xe5\x4a\
\x35\x74\x15\x31\x42\xde\x8f\xc3\x3e\x48\xb0\x58\xd8\x07\x11\x16\
\x0b\xfb\xe0\x1c\x8b\x85\x7d\xf0\x1e\x8b\x81\x3d\xd7\xd8\x4c\xbb\
\xed\xf7\xec\x26\x62\x1f\xd8\x9d\x12\x59\x70\x5a\xc6\x80\x77\x9d\
\x41\x0f\xdd\x61\x97\x79\x11\x86\xb5\x4f\x9f\x93\x68\x89\xd0\xea\
\xd8\x54\x7d\x27\xd8\x7a\x6a\xda\x6b\x25\x32\x65\x32\x88\x40\x5a\
\x2b\x7f\xea\x6c\xc0\xb8\xa7\xda\xd5\x5e\xb0\x99\x6e\x6f\x93\xb6\
\x8a\xc8\xc8\x36\xd9\x53\x4e\x42\x2b\x18\x96\xde\x6f\x31\xdf\x13\
\x45\xa6\x4a\x8b\x61\x52\xb1\x72\x92\xb0\x5b\xad\x4d\x52\xaf\xa3\
\x18\xd8\xaa\x62\xf9\x84\x01\x01\x65\x45\x28\x55\x79\xd1\xef\x51\
\xb2\x3a\x43\xd9\x21\x18\x65\x25\xe1\x7c\x25\x7b\x49\x8e\x15\xec\
\x65\x1f\x53\xd0\x26\x24\x09\x7b\xbd\x43\x0c\x6f\x6d\x11\xe8\x80\
\x65\x75\xb1\x9c\x0d\xb6\x7f\xf2\xd7\xba\x6e\x08\x25\x35\xc8\x9a\
\xa7\x24\x07\xec\x88\x0c\xf3\xe4\x83\x20\xa6\xa0\x55\xeb\xa8\x84\
\x0f\xd7\xf4\x26\x25\xa2\x92\xd0\x9a\x34\xab\x94\x68\xfd\x76\x87\
\x11\xc1\xa0\x63\x24\xe8\xd6\xc3\x18\xe0\x9d\xd0\xaa\x62\xda\x28\
\x48\x76\x43\x97\x20\xfa\xba\xca\xdf\x84\xa7\xb8\x46\x06\xb1\xfc\
\x32\xfd\x9c\x01\xb0\xf5\x29\x6f\xad\x8d\xfa\x53\x1c\xe3\x33\x1f\
\xe3\xa4\x2c\x96\x5a\x93\x1f\xc1\x5c\x4a\x9c\x45\x87\x42\xcf\xc5\
\x06\x97\x57\xe9\x5d\x83\x21\x50\x27\x0c\x7b\xb6\xf9\xff\x88\xc1\
\x02\x73\x3c\xcc\xac\xc0\x50\xbf\x80\x45\xd2\x9c\x0c\x9f\xb1\x0f\
\x03\xfe\xe9\x9f\xb2\x1f\xe6\x66\x27\x20\x22\x3f\x9b\x18\x3b\x47\
\xe0\x20\x97\xe9\x7b\x2c\x92\x5c\xc8\x2a\x15\x7b\xfa\x3b\x6d\xf5\
\xe2\xe4\xc1\x1d\x81\xf8\x73\xf8\xe3\xf1\x2f\xc7\x3a\xc3\x15\x2d\
\x12\x30\x79\xbd\x5e\x6d\x51\xbb\x2e\xc8\x17\x0b\xf2\x06\x11\xe7\
\xf4\x29\x30\x37\x96\x23\xfa\xa8\xe5\x90\x1b\x38\x33\xc9\x35\x05\
\x86\x2b\x77\xb7\xb5\xc0\xbd\x20\x20\x96\x09\xfa\xa9\x4b\x85\xce\
\x9d\x13\x63\xcd\xf3\xf1\x49\x9e\x06\x0e\xcb\x00\x89\xc5\x70\x53\
\x63\x5a\x27\x37\x5a\xbc\xe8\x22\x58\x61\x02\x2e\x80\xa0\xf3\xff\
\x0a\x20\x27\x3e\xc8\xe6\x8e\x0e\x9b\xd1\x11\xfc\xcb\x79\xd1\x00\
\xe8\x98\x98\x7d\xf8\x56\xd1\xed\x62\xfa\xcd\x78\x36\x8e\xf4\xce\
\x0a\xda\x89\xfd\x01\x38\x6f\x57\x70\x62\x1f\x32\xf7\x12\x99\x7f\
\x48\x58\x54\x77\xa7\xc3\x80\x8b\x17\xac\x70\x4d\x01\x71\x61\x93\
\xc5\x7b\x25\x86\x07\x32\x9d\x04\xce\x60\x3a\x45\x75\x06\xcd\x11\
\xef\x62\xc4\x9c\x15\x67\x0f\x51\xe1\xe3\xd7\xbb\x71\xeb\x79\x5e\
\x22\x74\x2d\xe8\x19\xdd\x7c\xb7\x8d\xb8\xe3\x0e\x1b\x89\x50\x84\
\x4d\x8f\x64\xc2\x97\x87\xd0\x46\xe3\xd8\x1e\x21\x18\xe1\x61\xf0\
\xac\xcb\x96\xe4\x24\x3c\x36\xb1\xe8\x20\x74\x3b\xfc\xf6\xcb\x81\
\x03\x2c\xe8\x16\x55\x71\xe0\x06\xdf\xb6\x18\x50\x06\xa3\x23\xd9\
\x90\xc3\x60\x8a\x45\xa7\x52\x72\x9a\x15\x0f\x78\xee\xa7\x94\x02\
\x58\x78\xc8\x03\x59\xc5\xd1\x6d\x87\x1c\x8c\x91\x02\x37\x33\xdf\
\x6d\x7e\x9a\x1c\xce\x7d\x2c\x16\xf7\xcc\x3c\x2c\xba\x6a\x0e\x5c\
\xcb\xf3\xdb\xc7\x2a\x4d\xce\xb3\x1a\xb3\xce\xb5\x88\x54\x01\x56\
\x0d\x9e\xc6\xfb\x9e\x33\x8f\xb1\xe8\xc3\xe2\x2a\x8d\xaf\xf1\x88\
\x61\x65\x94\x55\x36\xd9\xc7\xa2\x3a\xff\xc0\xe2\xeb\x36\x39\x1b\
\x36\x59\x93\xa7\x0a\x99\x06\x80\x9e\xd0\x3c\x5b\x82\x15\xc7\x00\
\x44\x5a\xc9\xf3\x2f\xde\x4f\xc9\x6c\x19\x3f\x88\x89\x0a\x12\xa5\
\xcd\x6d\x9a\x16\x04\xb3\xdf\x35\xc9\x0a\x7e\x24\x6b\x18\xcb\x01\
\xe5\x4a\x9d\xc8\x1e\x8f\xb8\x48\x78\xdb\x59\x03\xae\x71\x0d\xaf\
\xe8\x72\xe4\x5c\x66\x9c\xb3\x3a\x9d\x44\x3c\x31\x03\x86\x6c\x36\
\x2d\x72\x46\x9b\xb6\x69\x07\x20\xfc\xff\x3e\x10\x6d\xce\x6a\x18\
\x8e\xc1\xfb\xc2\xbe\xbf\x0d\xce\xd1\x26\x40\xc7\x66\xe9\xe6\x48\
\xcd\x6c\x39\x99\xa8\x3f\xd6\xa5\x0e\xb0\xc2\xa7\xac\x00\xc2\x44\
\xf7\x2e\x69\x45\x1b\xb6\xcb\xc5\xe6\x26\x94\xb9\x7f\xcf\x2d\x94\
\xfd\x19\x16\xcb\xc1\xdb\x7b\x2f\x0d\x3c\xf7\xf8\xfd\x38\xf8\x34\
\xc6\xf2\x78\xf7\x1b\xc1\xcb\x50\xef\xbe\x70\xb3\x36\x59\x0e\x5b\
\x34\x9b\xc7\xfe\x3b\x05\x91\xbd\xe9\xee\x89\x03\xf7\x02\x11\xbe\
\x01\x60\x04\xad\x46\xae\xed\x71\xdc\x67\xb9\xcd\xd6\xb9\x24\x21\
\x84\x9d\x47\x12\xf7\x8c\x7d\x73\x57\x11\xed\x0e\xe9\xdb\x17\xe6\
\x00\xed\xc5\xae\xa8\xf6\xbd\x6e\xbd\xeb\x8e\xa5\x62\x4b\xe4\x0d\
\x50\xd9\x48\x56\x0b\xba\xdc\x5b\x06\xa4\x53\xf8\x6a\x0e\x27\xd7\
\xf7\x26\x01\x5a\x5b\xad\x8b\xeb\xb1\x10\x44\x18\x7f\x3b\x6a\x1b\
\x8d\x05\x27\xfe\xac\xb3\x1a\x0e\xa7\xb1\x92\x2b\x1a\xf1\xf1\x1b\
\x1a\xc9\xac\x81\xfb\xda\xce\x41\x12\x9d\xdb\x7f\x99\xf2\x6c\x93\
\xae\x6d\xf0\x35\x20\xae\xba\x43\xee\x2b\xc6\xd0\x40\xe7\x9a\xcb\
\xd1\x32\x70\xa5\xd0\x5e\xc5\x1b\x56\x69\x2b\x73\xe2\xdb\x18\xc8\
\xad\xa1\xa4\x45\x4a\xbe\x6e\xb1\x64\x11\xc7\xbb\x81\x0c\x73\x5a\
\xeb\x70\xd1\x9e\xd8\xe3\x4e\x8c\xbe\x8a\x9d\x88\xbc\xb2\x27\x88\
\xff\x8a\xd6\x80\x5a\xb3\x42\xf7\x6e\x18\x41\x7e\xcc\x69\x09\xca\
\x6c\x56\x84\x09\x07\xe7\xa3\xe8\x0d\x7d\x2c\x63\xed\x12\x6c\x91\
\x55\x75\x13\xbe\xb6\x5f\x14\xb0\xcc\xdf\x6b\x69\x66\x0f\x12\x2a\
\x62\xd7\x52\xf2\x4c\xcb\x26\x21\x5f\x0f\xbe\xc7\xe0\xb0\xc8\x8e\
\xd1\x99\xd7\x6a\xed\xba\x3a\x16\xf2\x48\x76\xf2\x3b\xbc\xa3\x5f\
\x33\x39\x72\x2e\xa6\xff\x7a\xce\x28\x1a\x5b\x68\xd5\x58\x9a\xb4\
\x72\x6f\x13\x92\x9d\x58\x4a\xbe\x07\x21\xb0\xd1\x84\x65\x5f\xa8\
\x76\x7d\x09\xa7\x77\x58\xde\xc3\x57\xb1\x67\x55\x18\xaf\xfc\xa8\
\x9a\xa0\x57\xa3\x5e\xef\xd1\xfa\xfa\x1d\x66\x63\xed\xbd\x69\x08\
\x71\x0a\x9c\x28\x31\x40\x89\xf1\x58\xc3\x83\xc7\x81\x0e\xeb\xa2\
\xed\x32\x6a\xa6\x1b\xb8\xcf\xc1\x32\x2e\x33\x57\x6f\x10\x18\xaa\
\x1e\x97\x7b\x64\xb3\xf8\x1b\x47\xa3\xf9\xb2\xff\xfa\x55\x7c\xc7\
\x37\x88\x63\x12\xdf\x8b\x1f\xba\x69\x21\x9b\x16\xbd\x26\x29\x9c\
\x7f\xe2\x75\xdf\xa7\xc2\x4c\xe0\x4c\x42\xee\x68\x3c\x93\x70\x88\
\x26\xe3\x06\x52\x1e\x2a\xf5\xca\x9e\x12\xe6\x76\xc7\x3c\x57\xc6\
\x29\xe3\x9a\xf3\x8d\x80\x77\x5d\x7b\x07\x55\xc9\x33\x90\x63\xb1\
\x03\x37\xf8\xc3\xef\xed\xb9\xc6\xe8\x9a\x82\xb8\x54\xdc\x3a\x0e\
\xe4\x8f\x47\xec\xce\x8e\x04\x5d\x13\xc9\x93\xff\xb0\xb3\xf4\xde\
\x36\x1b\xdc\x5e\xbf\xfd\x0b\x17\x3f\x69\xe4\
\x00\x00\x05\x3d\
\x00\
\x00\x13\x71\x78\x9c\xad\x57\x5b\x6f\xdb\x36\x14\x7e\x4e\x81\xfe\
\x07\x22\x7b\xd9\x82\x28\xce\x75\xe9\x54\xec\xc1\xb1\xb3\x46\x68\
\x62\xaf\xb6\xd3\xa0\x4f\x03\x25\xd1\x32\x11\x99\xd4\x28\x6a\x76\
\x56\xec\xbf\xef\x90\x12\x15\x51\xb7\x38\x41\x63\x14\x85\xc9\xc3\
\x8f\xdf\xb9\xf0\x9c\xcf\x83\x83\xf7\xef\xf6\x46\x3c\x79\x12\x34\
\x5a\x49\x74\x7a\x7c\x72\x86\xae\xd7\x98\x65\x24\x46\xa3\x18\x93\
\x34\xe5\xec\xfd\x3b\xb0\xb9\xa5\x01\x61\x29\x09\x51\xc6\x42\x22\
\x90\x5c\x11\x34\x4c\x70\x00\xff\x15\x3b\x87\xe8\x2b\x11\x29\xe5\
\x0c\x9d\x1e\x1d\xa3\x9f\x95\xc1\x7e\xb1\xb5\xff\xcb\x47\x40\x78\
\xe2\x19\x5a\xe3\x27\xc4\xb8\x44\x59\x4a\x00\x82\xa6\x68\x49\x63\
\x82\xc8\x36\x20\x89\x44\x94\xa1\x80\xaf\x93\x98\x62\x16\x10\xb4\
\xa1\x72\xa5\xaf\x29\x40\x8e\x00\xe2\x5b\x01\xc1\x7d\x89\xc1\x1a\
\x83\x7d\x02\xdf\x96\x55\x3b\x84\xa5\x26\xbc\xb7\x92\x32\x71\x07\
\x83\xcd\x66\x73\x84\x35\xd3\x23\x2e\xa2\x41\x9c\x5b\xa5\x83\x5b\
\x6f\x74\x3d\x99\x5f\x3b\xc0\x56\xdb\xdf\xb3\x18\xbc\x45\x82\xfc\
\x9d\x51\x01\x7e\xfa\x4f\x08\x27\x40\x26\xc0\x3e\x50\x8c\xf1\x06\
\x71\x81\x70\x24\x08\xec\x49\xae\xc8\x6e\x04\x95\x94\x45\x87\x28\
\xe5\x4b\xb9\xc1\x82\x00\x4a\x48\x53\x29\xa8\x9f\x49\x2b\x52\x86\
\x1a\x38\x5c\x35\x80\x58\x61\x86\xf6\x87\x73\xe4\xcd\xf7\xd1\xd5\
\x70\xee\xcd\x0f\x01\xe3\xc1\x5b\xdc\x4c\xef\x17\xe8\x61\x38\x9b\
\x0d\x27\x0b\xef\x7a\x8e\xa6\x33\x34\x9a\x4e\xc6\xde\xc2\x9b\x4e\
\xe0\xdb\x1f\x68\x38\xf9\x86\x3e\x7b\x93\xf1\x21\x22\x10\x27\xb8\
\x86\x6c\x13\xa1\xf8\x03\x49\xaa\x62\x48\x42\x15\xb0\x39\x21\x16\
\x81\x25\xcf\x09\xa5\x09\x09\xe8\x92\x06\xe0\x17\x8b\x32\x1c\x11\
\x14\xf1\x7f\x88\x60\xe0\x0e\x4a\x88\x58\xd3\x54\x65\x32\x05\x7a\
\x21\xa0\xc4\x74\x4d\x25\x96\x7a\xa5\xe1\x14\x5c\x73\x30\x50\x11\
\x1c\xe8\x52\x9a\xde\x4e\x67\x7f\x8d\x87\xb3\xcf\x48\xfd\xfd\x8e\
\x7e\x3a\xf9\x4d\x7d\xca\xad\xbb\xeb\xb1\x77\x7f\x97\x6f\x9d\x5d\
\xa8\x4f\x75\xeb\xd6\xfb\x74\xb3\x50\x5b\x17\x43\xf5\x29\xb7\xf2\
\xf5\xfc\xd4\x58\xff\x95\x5b\xc3\x11\xe4\x71\x51\x00\x8e\x2f\x3f\
\x9c\x7f\x30\x84\x0e\xd0\x77\xb0\xf2\x71\xf0\x18\x09\x0e\xbc\x5d\
\x43\x46\x55\x63\xc0\x63\x2e\x5c\x83\xa6\x56\x7c\x2e\xc0\x35\x17\
\x9d\x24\x5b\xc8\x68\x4c\x43\xc3\x02\x36\xff\x53\x78\x5f\x1e\x68\
\x18\x11\xe9\xba\x54\x92\xb5\x9b\x92\x98\x04\x2a\x8b\x8d\x4b\x72\
\x16\xe5\xa9\xd1\x8a\x04\x8f\x57\x7c\x7b\x88\xbe\xcc\x70\x48\xf9\
\x55\x26\x25\x64\xfe\x7b\xe5\x4a\xc6\x19\x29\xed\x2b\x46\x70\x15\
\x0b\xa1\x02\x25\x17\x70\xda\x00\x55\x56\x35\xca\x86\x86\x72\x05\
\xbc\xcf\x92\xad\xf2\x63\x45\xd4\x53\x2e\xbf\xf7\x81\xba\x6e\xc6\
\x02\x85\x4a\xc2\x76\xfc\x8a\x81\x45\xb8\x2d\x46\x56\x10\x76\xf0\
\xe8\x19\xda\x5d\xa9\xe2\xeb\x60\x50\xb3\xea\xa2\x51\xe6\xb1\xdf\
\xdf\x17\xbc\x7d\x93\xaf\xf5\x32\xe9\xb8\x7b\x17\x67\x5f\xe7\xaa\
\xcd\xa2\x16\x81\x4f\xb0\x9e\x00\xbc\x46\x59\x63\x11\x51\xe6\x48\
\x9e\xb8\xe8\xd7\x4a\x59\x18\x23\xd7\x95\x54\x42\x9b\x53\xb6\xda\
\xc8\xb9\xcc\x8b\x29\x26\x4b\x28\xa5\xcb\xca\x91\x79\x20\x78\x1c\
\x5f\xe1\x4e\x7a\x1d\x41\x2a\x9f\x5e\x0d\x06\x7c\x15\xf4\x5f\xce\
\x24\x8e\x35\x62\x59\xbe\x17\x39\x83\x9c\xba\x8b\x8e\xe1\x06\xf3\
\xef\xec\xb4\x8d\x90\x0b\x31\x93\x10\xc8\xd8\x7a\x15\x35\x18\x75\
\xb4\x8a\xd5\x02\xe3\xae\xa0\xed\x15\xc1\xb0\x1f\xb6\xee\x57\xbb\
\xf5\x8a\x06\x5c\xdd\xcf\x1c\xc2\x29\x68\x2a\x36\x27\x05\xab\x93\
\x3e\x56\xb6\x93\x36\x48\x05\xa0\xdf\xb5\x3a\x97\x35\xd4\x46\x81\
\x71\xda\x7f\xd0\xba\x5d\x1d\x33\xe9\xea\x3a\x87\xc3\xd0\x89\x29\
\x03\x79\x50\x5d\x4d\x33\x5f\xaf\xd6\x63\xbc\x63\x88\xf7\xe0\x7c\
\x00\xf4\x01\xcf\x01\x4f\x74\x62\xf3\x04\xf7\x51\xd0\x97\x25\x3c\
\xa5\x6a\x92\xb9\x08\xfb\x00\x0b\x73\xb8\xef\x48\x3d\x50\xb5\xa2\
\xaa\xd0\x78\xc6\x55\x4f\xe6\xf9\xe9\x14\xa6\xdd\x37\x58\x11\xad\
\x17\x7f\xeb\x05\xf0\x40\x3f\x9a\x77\xda\x05\x6f\x02\xfc\x16\x07\
\x00\xd8\x38\xd1\x8d\xfb\x56\xda\x75\x44\x75\x91\x83\x85\xe0\x1b\
\xbb\x42\xb4\x1e\x6d\xdb\xc8\x92\xb6\xd5\x90\x6f\x58\xbe\xfe\x72\
\x5b\x2a\x62\x50\x1b\x97\x67\xdd\x79\x4a\x40\x20\x35\xeb\x57\xad\
\x36\x7a\x84\x35\xf7\x86\x3e\x68\x3d\x1c\xc8\x62\x16\x54\x9a\x7a\
\x6b\x57\x69\x3d\xa4\x55\x5d\x9b\xca\xa8\xf5\x1b\x73\xcc\x03\x6d\
\xf2\x95\x92\x3c\x0c\xe9\x8a\x6f\x9c\x90\x04\x5c\x68\xf5\xe6\x18\
\xcd\x02\x81\xd1\x69\xd2\x5f\xd5\xc6\x33\xb2\x63\x84\x51\xa9\x62\
\x2a\x66\x4d\xd1\x84\x63\x09\xb2\x11\x4b\xd2\x0a\x61\x7b\x76\x43\
\x30\x24\xa5\x24\xf7\x72\x0f\x7d\x3e\x00\x11\xcf\x29\xf4\x49\xba\
\xde\xa4\x27\x90\x48\x50\xb7\x2e\x3a\xaf\xe4\xb9\xe5\x82\x52\xd7\
\x41\xc2\xdb\xb6\x6d\x89\xd0\x93\xc7\x85\xfa\xdd\x50\x3a\x1b\x09\
\x1a\xaa\x77\x53\xc6\xa6\xe6\x2a\x58\x9b\x91\x5a\xcc\xea\xbc\x7d\
\x54\x07\x5d\x6e\x03\xa3\x1a\xfb\xd5\xe6\x2f\x40\x6e\x64\xa9\x5b\
\x74\xfc\xba\xa3\xe5\xe0\x3b\x6f\x47\xea\xd3\xb1\xb6\x43\x23\xbe\
\xf6\xb9\xd6\x0a\xaf\x79\x6c\x3b\x22\x0a\x9e\x38\x0a\xf6\x87\x01\
\xda\x14\xfb\x9e\xfc\x0e\x85\x68\x1e\xd7\x3c\xa1\xcc\x48\xaa\x22\
\xce\x8e\xb0\x5a\x5f\xab\xbd\x6e\x5a\xbe\x7e\xcf\x50\x55\x8d\x4d\
\xcd\xd5\x6f\xfe\x2a\x78\x85\xfb\x6d\x03\x31\x87\xe9\xe5\x64\x1a\
\x69\x3b\xa5\x1f\x16\xbe\x39\x2c\xd6\xe4\xac\xd5\x27\xf3\x7d\xd7\
\x05\xbf\xa0\x45\x76\x09\xc2\x8b\x46\x3d\x6b\xb1\x73\x5e\x17\x3b\
\x36\x58\x9b\x26\x6c\x51\x96\x15\xb4\x06\x52\x55\x0e\xbe\xb5\x32\
\x6d\xac\x17\xa6\xb2\x21\xe6\x18\x56\x4e\x97\x93\x6d\xaa\xac\x4f\
\x40\x5b\x88\x0d\x34\x33\xeb\x4a\x3c\x35\xf4\x8a\x3d\x33\xf1\x1a\
\x32\xb6\xe7\x97\x6f\xe3\x6c\x0b\x6e\x79\x67\x2f\xae\x1d\xcc\x5b\
\xec\x93\xb8\xbb\x9c\xfe\x14\x3c\x52\x53\xd3\x34\x54\x49\xb6\xa0\
\x24\x62\x1a\x41\x10\x02\xc2\x64\xe5\x51\x54\x4c\x55\x67\xcf\xd8\
\xa3\x95\x8f\xa2\xc0\x7b\x86\x63\x0e\x73\x47\x58\xa6\x46\x44\x82\
\x45\xf9\xab\xbc\xc3\x81\xff\x01\x2c\xd9\xf9\xb9\
"
qt_resource_name = "\
\x00\x06\
\x07\xac\x02\xc3\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\
\x00\x0a\
\x01\x52\xf7\x73\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x5f\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x10\
\x0f\x95\x51\xb3\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x6f\x00\x72\x00\x61\x00\x6e\x00\x67\x00\x65\x00\x5f\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0a\
\x02\x65\x55\xf5\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x6f\x00\x72\x00\x61\x00\x6e\x00\x67\x00\x65\
\x00\x0c\
\x04\x56\x23\x67\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x04\xa2\xfc\xa7\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x0b\x2d\x87\xc7\
\x00\x68\
\x00\x61\x00\x6e\x00\x64\x00\x6c\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x04\
\x00\x06\xa8\x8b\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\
\x00\x11\
\x0a\xe5\x6c\x07\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x09\
\x06\x98\x83\x27\
\x00\x63\
\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x08\x8c\x6a\xa7\
\x00\x48\
\x00\x73\x00\x65\x00\x70\x00\x61\x00\x72\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x1a\
\x01\x21\xeb\x47\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\x00\x68\x00\x65\x00\x65\x00\x74\x00\x2d\x00\x62\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\
\x00\x2d\x00\x6d\x00\x6f\x00\x72\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x05\x95\xde\x27\
\x00\x75\
\x00\x6e\x00\x64\x00\x6f\x00\x63\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x13\
\x08\xc8\x96\xe7\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\x00\x70\
\x00\x6e\x00\x67\
\x00\x15\
\x0f\xf3\xc0\x07\
\x00\x75\
\x00\x70\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1f\
\x0a\xae\x27\x47\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\
\x00\x64\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x0c\xe2\x68\x67\
\x00\x74\
\x00\x72\x00\x61\x00\x6e\x00\x73\x00\x70\x00\x61\x00\x72\x00\x65\x00\x6e\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x01\x75\xcc\x87\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\
\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x0b\xc5\xd7\xc7\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\x00\x68\x00\x65\x00\x65\x00\x74\x00\x2d\x00\x76\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x08\x90\x94\x67\
\x00\x63\
\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x2d\x00\x70\x00\x72\x00\x65\x00\x73\x00\x73\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x14\
\x07\xec\xd1\xc7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x0e\xde\xfa\xc7\
\x00\x6c\
\x00\x65\x00\x66\x00\x74\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x07\x8f\x9d\x27\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x5f\x00\x6f\x00\x70\x00\x65\x00\x6e\x00\x2d\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x0f\
\x02\x9f\x05\x87\
\x00\x72\
\x00\x69\x00\x67\x00\x68\x00\x74\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x08\xc4\x6a\xa7\
\x00\x56\
\x00\x73\x00\x65\x00\x70\x00\x61\x00\x72\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x10\
\x01\x07\x4a\xa7\
\x00\x56\
\x00\x6d\x00\x6f\x00\x76\x00\x65\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x19\
\x08\x3e\xcc\x07\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\x00\x68\x00\x65\x00\x65\x00\x74\x00\x2d\x00\x62\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\
\x00\x2d\x00\x65\x00\x6e\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1c\
\x01\xe0\x4a\x07\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x64\
\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x06\x5e\x2c\x07\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x5f\x00\x63\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x64\x00\x2d\x00\x6f\x00\x6e\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x06\x53\x25\xa7\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x5f\x00\x6f\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x06\x41\x40\x87\
\x00\x73\
\x00\x69\x00\x7a\x00\x65\x00\x67\x00\x72\x00\x69\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x01\x00\xca\xa7\
\x00\x48\
\x00\x6d\x00\x6f\x00\x76\x00\x65\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1c\
\x08\x3f\xda\x67\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\
\x00\x64\x00\x5f\x00\x66\x00\x6f\x00\x63\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x01\xf4\x81\x47\
\x00\x63\
\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x2d\x00\x68\x00\x6f\x00\x76\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x03\x8e\xde\x67\
\x00\x72\
\x00\x69\x00\x67\x00\x68\x00\x74\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\
\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x0e\xbc\xc3\x67\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x64\x00\x69\x00\x73\
\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x0c\xab\x51\x07\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\
\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x0b\xda\x30\xa7\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x5f\x00\x63\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x1a\
\x01\x87\xae\x67\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x69\x00\x6e\x00\x64\x00\x65\x00\x74\x00\x65\x00\x72\x00\x6d\
\x00\x69\x00\x6e\x00\x61\x00\x74\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x0c\x65\xce\x07\
\x00\x6c\
\x00\x65\x00\x66\x00\x74\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\
\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x19\
\x0b\x59\x6e\x87\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x66\
\x00\x6f\x00\x63\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x05\x11\xe0\xe7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\
\x00\x66\x00\x6f\x00\x63\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x0f\x1e\x9b\x47\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x66\x00\x6f\x00\x63\
\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x20\
\x09\xd7\x1f\xa7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x69\x00\x6e\x00\x64\x00\x65\x00\x74\x00\x65\x00\x72\x00\x6d\
\x00\x69\x00\x6e\x00\x61\x00\x74\x00\x65\x00\x5f\x00\x66\x00\x6f\x00\x63\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x06\xe6\xe6\x67\
\x00\x75\
\x00\x70\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1d\
\x09\x07\x81\x07\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\
\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x03\
\x00\x00\x78\xa3\
\x00\x71\
\x00\x73\x00\x73\
\x00\x08\
\x08\x8e\x55\xe3\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x71\x00\x73\x00\x73\
\x00\x0e\
\x05\xf4\x92\x03\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x6f\x00\x72\x00\x61\x00\x6e\x00\x67\x00\x65\x00\x2e\x00\x71\x00\x73\x00\x73\
\x00\x09\
\x08\xe5\x8d\xa3\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x32\x00\x2e\x00\x71\x00\x73\x00\x73\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x01\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x0d\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x09\
\x00\x00\x00\x2c\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x52\x00\x02\x00\x00\x00\x01\x00\x00\x00\x05\
\x00\x00\x00\x64\x00\x02\x00\x00\x00\x03\x00\x00\x00\x06\
\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x01\x5b\
\x00\x00\x00\xbe\x00\x00\x00\x00\x00\x01\x00\x00\x05\x4f\
\x00\x00\x07\xcc\x00\x02\x00\x00\x00\x03\x00\x00\x00\x0a\
\x00\x00\x07\xee\x00\x01\x00\x00\x00\x01\x00\x00\x5e\x01\
\x00\x00\x07\xd8\x00\x01\x00\x00\x00\x01\x00\x00\x4b\xb9\
\x00\x00\x08\x10\x00\x01\x00\x00\x00\x01\x00\x00\x65\xb1\
\x00\x00\x00\x52\x00\x02\x00\x00\x00\x01\x00\x00\x00\x0e\
\x00\x00\x00\xd8\x00\x02\x00\x00\x00\x27\x00\x00\x00\x0f\
\x00\x00\x04\xc0\x00\x00\x00\x00\x00\x01\x00\x00\x31\x1f\
\x00\x00\x03\xb4\x00\x00\x00\x00\x00\x01\x00\x00\x2a\x91\
\x00\x00\x01\x4e\x00\x00\x00\x00\x00\x01\x00\x00\x17\x16\
\x00\x00\x02\x66\x00\x00\x00\x00\x00\x01\x00\x00\x20\x30\
\x00\x00\x06\x14\x00\x00\x00\x00\x00\x01\x00\x00\x3c\x02\
\x00\x00\x04\x12\x00\x00\x00\x00\x00\x01\x00\x00\x2c\x5d\
\x00\x00\x05\x24\x00\x00\x00\x00\x00\x01\x00\x00\x33\xe6\
\x00\x00\x03\x68\x00\x00\x00\x00\x00\x01\x00\x00\x28\x85\
\x00\x00\x05\x48\x00\x00\x00\x00\x00\x01\x00\x00\x36\x40\
\x00\x00\x00\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x29\x29\
\x00\x00\x06\xba\x00\x00\x00\x00\x00\x01\x00\x00\x41\x75\
\x00\x00\x01\x88\x00\x00\x00\x00\x00\x01\x00\x00\x17\xd0\
\x00\x00\x04\xa2\x00\x00\x00\x00\x00\x01\x00\x00\x30\x9a\
\x00\x00\x04\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x2f\xf0\
\x00\x00\x04\x50\x00\x00\x00\x00\x00\x01\x00\x00\x2f\x59\
\x00\x00\x01\x0e\x00\x00\x00\x00\x00\x01\x00\x00\x14\x18\
\x00\x00\x07\x6e\x00\x00\x00\x00\x00\x01\x00\x00\x49\x28\
\x00\x00\x03\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x27\xeb\
\x00\x00\x02\xee\x00\x00\x00\x00\x00\x01\x00\x00\x25\x51\
\x00\x00\x03\xda\x00\x00\x00\x00\x00\x01\x00\x00\x2b\x79\
\x00\x00\x04\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x31\xff\
\x00\x00\x01\x26\x00\x00\x00\x00\x00\x01\x00\x00\x16\x66\
\x00\x00\x02\xc6\x00\x00\x00\x00\x00\x01\x00\x00\x22\xf7\
\x00\x00\x03\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x29\xd2\
\x00\x00\x01\xa2\x00\x00\x00\x00\x00\x01\x00\x00\x1a\x16\
\x00\x00\x07\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x49\xca\
\x00\x00\x07\x28\x00\x00\x00\x00\x00\x01\x00\x00\x47\x22\
\x00\x00\x01\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x1d\x95\
\x00\x00\x00\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x10\x68\
\x00\x00\x06\x82\x00\x00\x00\x00\x00\x01\x00\x00\x3e\x9d\
\x00\x00\x02\x98\x00\x00\x00\x00\x00\x01\x00\x00\x22\x04\
\x00\x00\x05\xec\x00\x00\x00\x00\x00\x01\x00\x00\x3b\x5e\
\x00\x00\x06\x4e\x00\x00\x00\x00\x00\x01\x00\x00\x3d\xf3\
\x00\x00\x05\xb8\x00\x00\x00\x00\x00\x01\x00\x00\x3a\xb4\
\x00\x00\x02\x42\x00\x00\x00\x00\x00\x01\x00\x00\x1f\x69\
\x00\x00\x05\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x36\xe4\
\x00\x00\x03\x1c\x00\x00\x00\x00\x00\x01\x00\x00\x27\x41\
\x00\x00\x06\xf4\x00\x00\x00\x00\x00\x01\x00\x00\x43\x79\
\x00\x00\x01\xce\x00\x00\x00\x00\x00\x01\x00\x00\x1c\xf2\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
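# A note on provenance (an inference, since the file header is not shown
# here): modules carrying qt_resource_data/name/struct blobs like the above
# are generated by Qt's resource compiler (pyrcc), which also emits the
# QtCore import that qRegisterResourceData() relies on; importing the module
# runs qInitResources() to register the embedded resources.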
| mit | 4,097,503,754,306,612,700 | 58.234323 | 129 | 0.724808 | false |
QualiSystems/AWS-Shell | package/tests/test_domain_services/test_instance_waiter.py | 1 | 6035 | from unittest import TestCase
from mock import Mock, patch
from cloudshell.cp.aws.domain.services.waiters.instance import InstanceWaiter
instance = Mock()
instance.state = {'Name': ''}
class helper:
@staticmethod
def change_to_terminate(a):
instance.state['Name'] = InstanceWaiter.TERMINATED
@staticmethod
def change_to_stopped(a):
instance.state['Name'] = InstanceWaiter.STOPPED
@staticmethod
def change_to_running(a):
instance.state['Name'] = InstanceWaiter.RUNNING
@staticmethod
def change_to_stopping(a):
instance.state['Name'] = InstanceWaiter.STOPPING
@staticmethod
def change_to_pending(a):
instance.state['Name'] = InstanceWaiter.PENDING
class TestInstanceWaiter(TestCase):
def setUp(self):
self.cancellation_service = Mock()
self.instance_waiter = InstanceWaiter(self.cancellation_service, 1, 0.02)
self.instance = Mock()
self.logger = Mock()
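    # A reading of the constructor call above (the InstanceWaiter signature is
    # an assumption, as it is not shown in this file): 1 is a timeout in
    # seconds and 0.02 a polling delay, which keeps the timeout test fast.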
@patch('time.sleep', helper.change_to_stopped)
def test_waiter(self):
helper.change_to_running(Mock())
inst = self.instance_waiter.wait(instance, InstanceWaiter.STOPPED)
self.assertEqual(inst, instance)
self.assertEqual(inst.reload.call_count, 1)
def test_waiter_timeout(self):
helper.change_to_running(Mock())
self.assertRaises(Exception, self.instance_waiter.wait, instance, InstanceWaiter.STOPPED)
@patch('time.sleep', helper.change_to_stopped)
def test_waiter_multi(self):
helper.change_to_stopped(Mock())
instance.state['Name'] = InstanceWaiter.RUNNING
inst = Mock()
inst.state = dict()
inst.state['Name'] = InstanceWaiter.STOPPED
res = self.instance_waiter.multi_wait([instance, inst], InstanceWaiter.STOPPED)
self.assertEqual(res, [instance, inst])
        # The shared module-level mock accumulates reload calls across tests,
        # so only assert a lower bound here.
        self.assertTrue(instance.reload.call_count >= 1)
@patch('time.sleep', helper.change_to_stopped)
def test_waiter_multi_with_cancellation(self):
cancellation_context = Mock()
helper.change_to_stopped(Mock())
instance.state['Name'] = InstanceWaiter.RUNNING
inst = Mock()
inst.state = dict()
inst.state['Name'] = InstanceWaiter.STOPPED
instances = [instance, inst]
res = self.instance_waiter.multi_wait(instances, InstanceWaiter.STOPPED, cancellation_context)
self.assertEqual(res, [instance, inst])
        # The shared module-level mock accumulates reload calls across tests,
        # so only assert lower bounds here.
        self.assertTrue(instance.reload.call_count >= 1)
        self.assertTrue(self.cancellation_service.check_if_cancelled.call_count >= 1)
instance_ids = filter(lambda x: str(x.id), instances)
self.cancellation_service.check_if_cancelled.assert_called_with(cancellation_context,
{'instance_ids': instance_ids})
def test_waiter_multi_errors(self):
self.assertRaises(ValueError, self.instance_waiter.multi_wait, [], InstanceWaiter.STOPPED)
self.assertRaises(ValueError, self.instance_waiter.multi_wait, [Mock], 'blalala')
@patch('cloudshell.cp.aws.domain.services.waiters.instance.time')
def test_wait_status_ok(self, time):
# arrange
def describe_instance_status_handler(*args, **kwargs):
result = Mock()
instance_id_mock = kwargs['InstanceIds'][0]
if hasattr(instance_id_mock, "called_already") and instance_id_mock.called_already is True:
result.InstanceStatuses = [{'SystemStatus': {'Status': self.instance_waiter.STATUS_OK},
'InstanceStatus': {'Status': self.instance_waiter.STATUS_OK}}]
else:
instance_id_mock.called_already = True
result.InstanceStatuses = [
{'SystemStatus': {'Status': 'initializing'}, 'InstanceStatus': {'Status': 'initializing'}}]
return result
time.time.return_value = 0
ec2_client = Mock()
ec2_client.describe_instance_status = Mock(side_effect=describe_instance_status_handler)
instance = Mock()
# act
instance_state = self.instance_waiter.wait_status_ok(ec2_client=ec2_client,
instance=instance,
logger=self.logger)
# assert
self.assertEquals(instance_state['SystemStatus']['Status'], self.instance_waiter.STATUS_OK)
self.assertEquals(instance_state['InstanceStatus']['Status'], self.instance_waiter.STATUS_OK)
@patch('cloudshell.cp.aws.domain.services.waiters.instance.time')
def test_wait_status_ok_raises_impaired_status(self, time):
# arrange
def describe_instance_status_handler(*args, **kwargs):
result = Mock()
instance_id_mock = kwargs['InstanceIds'][0]
if hasattr(instance_id_mock, "called_already") and instance_id_mock.called_already is True:
result.InstanceStatuses = [{'SystemStatus': {'Status': self.instance_waiter.STATUS_IMPAIRED},
'InstanceStatus': {'Status': self.instance_waiter.STATUS_IMPAIRED}}]
else:
instance_id_mock.called_already = True
result.InstanceStatuses = [
{'SystemStatus': {'Status': 'initializing'}, 'InstanceStatus': {'Status': 'initializing'}}]
return result
time.time.return_value = 0
ec2_client = Mock()
ec2_client.describe_instance_status = Mock(side_effect=describe_instance_status_handler)
instance = Mock()
# act & assert
with self.assertRaisesRegexp(ValueError, "Instance status check is not OK.*"):
instance_state = self.instance_waiter.wait_status_ok(ec2_client=ec2_client,
instance=instance,
logger=self.logger)
| isc | -1,264,604,737,494,325,500 | 40.335616 | 112 | 0.612262 | false |
juju/juju-gui-charm | hooks/charmhelpers/core/templating.py | 1 | 3186 | # Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from charmhelpers.core import host
from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
The `target` path should be absolute. It can also be `None`, in which
case no file will be written.
The context should be a dict containing the values to be replaced in the
template.
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
The rendered template will be written to the file as well as being returned
as a string.
Note: Using this requires python-jinja2; if it is not installed, calling
this will attempt to use charmhelpers.fetch.apt_install to install it.
"""
try:
from jinja2 import FileSystemLoader, Environment, exceptions
except ImportError:
try:
from charmhelpers.fetch import apt_install
except ImportError:
hookenv.log('Could not import jinja2, and could not import '
'charmhelpers.fetch to install it',
level=hookenv.ERROR)
raise
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if template_loader:
template_env = Environment(loader=template_loader)
else:
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
template_env = Environment(loader=FileSystemLoader(templates_dir))
try:
template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
if target is not None:
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
# This is a terrible default directory permission, as the file
# or its siblings will often contain secrets.
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
host.write_file(target, content.encode(encoding), owner, group, perms)
return content
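# Usage sketch (file names and context values below are hypothetical):
#
#     render('nginx.conf.j2', '/etc/nginx/nginx.conf', {'port': 8080},
#            perms=0o644)
#
# loads templates/nginx.conf.j2 from the charm directory, substitutes the
# context into the Jinja2 template, writes the result to the absolute target
# path and returns it as a string.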
| agpl-3.0 | -3,079,095,182,892,421,000 | 38.333333 | 84 | 0.677652 | false |
nkalodimas/invenio | modules/bibupload/lib/bibupload.py | 1 | 143104 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibUpload: Receive MARC XML file and update the appropriate database
tables according to options.
"""
__revision__ = "$Id$"
import os
import re
import sys
import time
from datetime import datetime
from zlib import compress
import socket
import marshal
import copy
import tempfile
import urlparse
import urllib2
import urllib
from invenio.config import CFG_OAI_ID_FIELD, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG, \
CFG_BIBUPLOAD_STRONG_TAGS, \
CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_DELETE_FORMATS, \
CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_SITE_RECORD, \
CFG_OAI_PROVENANCE_ALTERED_SUBFIELD, \
CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS, \
CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE
from invenio.jsonutils import json, CFG_JSON_AVAILABLE
from invenio.bibupload_config import CFG_BIBUPLOAD_CONTROLFIELD_TAGS, \
CFG_BIBUPLOAD_SPECIAL_TAGS, \
CFG_BIBUPLOAD_DELETE_CODE, \
CFG_BIBUPLOAD_DELETE_VALUE, \
CFG_BIBUPLOAD_OPT_MODES
from invenio.dbquery import run_sql, \
Error
from invenio.bibrecord import create_records, \
record_add_field, \
record_delete_field, \
record_xml_output, \
record_get_field_instances, \
record_get_field_value, \
record_get_field_values, \
field_get_subfield_values, \
field_get_subfield_instances, \
record_modify_subfield, \
record_delete_subfield_from, \
record_delete_fields, \
record_add_subfield_into, \
record_find_field, \
record_extract_oai_id, \
record_extract_dois, \
record_has_field,\
records_identical
from invenio.search_engine import get_record
from invenio.dateutils import convert_datestruct_to_datetext
from invenio.errorlib import register_exception
from invenio.bibcatalog import bibcatalog_system
from invenio.intbitset import intbitset
from invenio.urlutils import make_user_agent_string
from invenio.config import CFG_BIBDOCFILE_FILEDIR
from invenio.bibtask import task_init, write_message, \
task_set_option, task_get_option, task_get_task_param, task_update_status, \
task_update_progress, task_sleep_now_if_required, fix_argv_paths
from invenio.bibdocfile import BibRecDocs, file_strip_ext, normalize_format, \
get_docname_from_url, check_valid_url, download_url, \
KEEP_OLD_VALUE, decompose_bibdocfile_url, InvenioBibDocFileError, \
bibdocfile_url_p, CFG_BIBDOCFILE_AVAILABLE_FLAGS, guess_format_from_url, \
BibRelation, MoreInfo
from invenio.search_engine import search_pattern
from invenio.bibupload_revisionverifier import RevisionVerifier, \
InvenioBibUploadConflictingRevisionsError, \
InvenioBibUploadInvalidRevisionError, \
InvenioBibUploadMissing005Error, \
InvenioBibUploadUnchangedRecordError
# Statistic variables
stat = {}
stat['nb_records_to_upload'] = 0
stat['nb_records_updated'] = 0
stat['nb_records_inserted'] = 0
stat['nb_errors'] = 0
stat['nb_holdingpen'] = 0
stat['exectime'] = time.localtime()
_WRITING_RIGHTS = None
CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS = ('oracle', )
CFG_HAS_BIBCATALOG = "UNKNOWN"
def check_bibcatalog():
"""
Return True if bibcatalog is available.
"""
global CFG_HAS_BIBCATALOG # pylint: disable=W0603
if CFG_HAS_BIBCATALOG != "UNKNOWN":
return CFG_HAS_BIBCATALOG
CFG_HAS_BIBCATALOG = True
if bibcatalog_system is not None:
bibcatalog_response = bibcatalog_system.check_system()
else:
bibcatalog_response = "No ticket system configured"
if bibcatalog_response != "":
write_message("BibCatalog error: %s\n" % (bibcatalog_response,))
CFG_HAS_BIBCATALOG = False
return CFG_HAS_BIBCATALOG
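# Usage sketch: callers typically guard ticket submission, e.g.
#   if check_bibcatalog():
#       ... file a ticket through bibcatalog_system ...
# (the body is left abstract here; no particular bibcatalog call is implied).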
## Let's set a reasonable timeout for URL requests (e.g. FFT)
socket.setdefaulttimeout(40)
def parse_identifier(identifier):
"""Parse the identifier and determine if it is temporary or fixed"""
id_str = str(identifier)
if not id_str.startswith("TMP:"):
return (False, identifier)
else:
return (True, id_str[4:])
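# For example, following the logic above:
#   parse_identifier("TMP:rel1") == (True, "rel1")
#   parse_identifier(42)         == (False, 42)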
def resolve_identifier(tmps, identifier):
"""Resolves an identifier. If the identifier is not temporary, this
function is an identity on the second argument. Otherwise, a resolved
value is returned or an exception raised"""
is_tmp, tmp_id = parse_identifier(identifier)
if is_tmp:
        if tmp_id not in tmps:
raise StandardError("Temporary identifier %s not present in the dictionary" % (tmp_id, ))
if tmps[tmp_id] == -1:
            # the identifier has been declared but never assigned a value -
            # probably an error during processing
            raise StandardError("Temporary identifier %s has been declared, but never assigned a value. Probably an error during processing of an appropriate FFT has happened. Please see the log" % (tmp_id, ))
return int(tmps[tmp_id])
else:
return int(identifier)
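# For example, with tmps = {'rel1': '42'}:
#   resolve_identifier(tmps, 'TMP:rel1') -> 42
#   resolve_identifier(tmps, '7')        -> 7
# whereas an unknown temporary id, or one still holding the sentinel -1,
# raises StandardError.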
_re_find_001 = re.compile('<controlfield\\s+tag=("001"|\'001\')\\s*>\\s*(\\d*)\\s*</controlfield>', re.S)
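# _re_find_001 matches e.g. '<controlfield tag="001">123</controlfield>' and
# captures the record id in its second group; it is used below to grep ids
# out of raw MARCXML without fully parsing it.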
def bibupload_pending_recids():
"""This function embed a bit of A.I. and is more a hack than an elegant
algorithm. It should be updated in case bibupload/bibsched are modified
in incompatible ways.
This function return the intbitset of all the records that are being
(or are scheduled to be) touched by other bibuploads.
"""
options = run_sql("""SELECT arguments FROM schTASK WHERE status<>'DONE' AND
proc='bibupload' AND (status='RUNNING' OR status='CONTINUING' OR
status='WAITING' OR status='SCHEDULED' OR status='ABOUT TO STOP' OR
status='ABOUT TO SLEEP')""")
ret = intbitset()
xmls = []
if options:
for arguments in options:
arguments = marshal.loads(arguments[0])
for argument in arguments[1:]:
if argument.startswith('/'):
                    # XML files are recognizable because they're absolute
                    # paths...
xmls.append(argument)
for xmlfile in xmls:
# Let's grep for the 001
try:
xml = open(xmlfile).read()
ret += [int(group[1]) for group in _re_find_001.findall(xml)]
except:
continue
return ret
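# Usage sketch: callers can postpone records that other bibupload tasks are
# about to touch, e.g.
#   if recid in bibupload_pending_recids():
#       ... skip or reschedule recid ...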
### bibupload engine functions:
def bibupload(record, opt_mode=None, opt_notimechange=0, oai_rec_id="", pretend=False,
tmp_ids=None, tmp_vers=None):
"""Main function: process a record and fit it in the tables
bibfmt, bibrec, bibrec_bibxxx, bibxxx with proper record
metadata.
    Return (error_code, recID, msg) of the processed record.
"""
if tmp_ids is None:
tmp_ids = {}
if tmp_vers is None:
tmp_vers = {}
if opt_mode == 'reference':
## NOTE: reference mode has been deprecated in favour of 'correct'
opt_mode = 'correct'
assert(opt_mode in CFG_BIBUPLOAD_OPT_MODES)
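    # From this point opt_mode is one of the supported modes from
    # CFG_BIBUPLOAD_OPT_MODES (the ones visible in this function include
    # 'insert', 'replace', 'replace_or_insert', 'correct', 'append' and
    # 'delete').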
error = None
affected_tags = {}
original_record = {}
rec_old = {}
now = datetime.now() # will hold record creation/modification date
record_had_altered_bit = False
is_opt_mode_delete = False
# Extraction of the Record Id from 001, SYSNO or OAIID or DOI tags:
rec_id = retrieve_rec_id(record, opt_mode, pretend=pretend)
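    # Sentinel convention used below: retrieve_rec_id returns -1 on hard
    # failure, a positive recid when the record was matched via 001 / SYSNO /
    # OAI id / DOI, and None when no existing record matches (see the
    # rec_id is None check in the insert branch).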
if rec_id == -1:
msg = " Failed: either the record already exists and insert was " \
"requested or the record does not exists and " \
"replace/correct/append has been used"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
elif rec_id > 0:
write_message(" -Retrieve record ID (found %s): DONE." % rec_id, verbose=2)
(unique_p, msg) = check_record_doi_is_unique(rec_id, record)
if not unique_p:
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if not record.has_key('001'):
# Found record ID by means of SYSNO or OAIID or DOI, and the
# input MARCXML buffer does not have this 001 tag, so we
# should add it now:
error = record_add_field(record, '001', controlfield_value=rec_id)
if error is None:
msg = " Failed: Error during adding the 001 controlfield " \
"to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
write_message(" -Added tag 001: DONE.", verbose=2)
write_message(" -Check if the xml marc file is already in the database: DONE" , verbose=2)
record_deleted_p = False
if opt_mode == 'insert' or \
(opt_mode == 'replace_or_insert') and rec_id is None:
insert_mode_p = True
# Insert the record into the bibrec databases to have a recordId
rec_id = create_new_record(pretend=pretend)
write_message(" -Creation of a new record id (%d): DONE" % rec_id, verbose=2)
# we add the record Id control field to the record
error = record_add_field(record, '001', controlfield_value=rec_id)
if error is None:
msg = " Failed: Error during adding the 001 controlfield " \
"to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
error = record_add_field(record, '005', controlfield_value=now.strftime("%Y%m%d%H%M%S.0"))
if error is None:
msg = " Failed: Error during adding to 005 controlfield to record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
elif opt_mode != 'insert':
insert_mode_p = False
# Update Mode
# Retrieve the old record to update
rec_old = get_record(rec_id)
record_had_altered_bit = record_get_field_values(rec_old, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4], CFG_OAI_PROVENANCE_ALTERED_SUBFIELD)
# Also save a copy to restore previous situation in case of errors
original_record = get_record(rec_id)
if rec_old is None:
msg = " Failed during the creation of the old record!"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
write_message(" -Retrieve the old record to update: DONE", verbose=2)
# flag to check whether the revisions have been verified and patch generated.
# If revision verification failed, then we need to manually identify the affected tags
# and process them
revision_verified = False
rev_verifier = RevisionVerifier()
#check for revision conflicts before updating record
if record_has_field(record, '005') and not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
write_message(" -Upload Record has 005. Verifying Revision", verbose=2)
try:
rev_res = rev_verifier.verify_revision(record, original_record, opt_mode)
if rev_res:
opt_mode = rev_res[0]
record = rev_res[1]
affected_tags = rev_res[2]
revision_verified = True
write_message(lambda: " -Patch record generated. Changing opt_mode to correct.\nPatch:\n%s " % record_xml_output(record), verbose=2)
else:
write_message(" -No Patch Record.", verbose=2)
except InvenioBibUploadUnchangedRecordError, err:
msg = " -ISSUE: %s" % err
write_message(msg, verbose=1, stream=sys.stderr)
write_message(msg, " Continuing anyway in case there are FFT or other tags")
except InvenioBibUploadConflictingRevisionsError, err:
msg = " -ERROR: Conflicting Revisions - %s" % err
write_message(msg, verbose=1, stream=sys.stderr)
submit_ticket_for_holding_pen(rec_id, err, "Conflicting Revisions. Inserting record into holding pen.")
insert_record_into_holding_pen(record, str(rec_id))
return (2, int(rec_id), msg)
except InvenioBibUploadInvalidRevisionError, err:
msg = " -ERROR: Invalid Revision - %s" % err
write_message(msg)
submit_ticket_for_holding_pen(rec_id, err, "Invalid Revisions. Inserting record into holding pen.")
insert_record_into_holding_pen(record, str(rec_id))
return (2, int(rec_id), msg)
except InvenioBibUploadMissing005Error, err:
msg = " -ERROR: Missing 005 - %s" % err
write_message(msg)
submit_ticket_for_holding_pen(rec_id, err, "Missing 005. Inserting record into holding pen.")
insert_record_into_holding_pen(record, str(rec_id))
return (2, int(rec_id), msg)
else:
write_message(" - No 005 Tag Present. Resuming normal flow.", verbose=2)
# dictionaries to temporarily hold original recs tag-fields
existing_tags = {}
retained_tags = {}
# in case of delete operation affected tags should be deleted in delete_bibrec_bibxxx
# but should not be updated again in STAGE 4
# utilising the below flag
is_opt_mode_delete = False
if not revision_verified:
# either 005 was not present or opt_mode was not correct/replace
# in this case we still need to find out affected tags to process
write_message(" - Missing 005 or opt_mode!=Replace/Correct.Revision Verifier not called.", verbose=2)
# Identify affected tags
if opt_mode == 'correct' or opt_mode == 'replace' or opt_mode == 'replace_or_insert':
rec_diff = rev_verifier.compare_records(record, original_record, opt_mode)
affected_tags = rev_verifier.retrieve_affected_tags_with_ind(rec_diff)
elif opt_mode == 'delete':
# populate an intermediate dictionary
# used in upcoming step related to 'delete' mode
is_opt_mode_delete = True
for tag, fields in original_record.iteritems():
existing_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields]
elif opt_mode == 'append':
for tag, fields in record.iteritems():
if tag not in CFG_BIBUPLOAD_CONTROLFIELD_TAGS:
affected_tags[tag] = [(field[1], field[2]) for field in fields]
# In Replace mode, take over old strong tags if applicable:
if opt_mode == 'replace' or \
opt_mode == 'replace_or_insert':
copy_strong_tags_from_old_record(record, rec_old)
# Delete tags to correct in the record
if opt_mode == 'correct':
delete_tags_to_correct(record, rec_old)
write_message(" -Delete the old tags to correct in the old record: DONE",
verbose=2)
# Delete tags specified if in delete mode
if opt_mode == 'delete':
record = delete_tags(record, rec_old)
for tag, fields in record.iteritems():
retained_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields]
#identify the tags that have been deleted
for tag in existing_tags.keys():
if tag not in retained_tags:
for item in existing_tags[tag]:
tag_to_add = item[0:3]
ind1, ind2 = item[3], item[4]
if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]:
affected_tags[tag_to_add].append((ind1, ind2))
else:
affected_tags[tag_to_add] = [(ind1, ind2)]
else:
deleted = list(set(existing_tags[tag]) - set(retained_tags[tag]))
for item in deleted:
tag_to_add = item[0:3]
ind1, ind2 = item[3], item[4]
if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]:
affected_tags[tag_to_add].append((ind1, ind2))
else:
affected_tags[tag_to_add] = [(ind1, ind2)]
write_message(" -Delete specified tags in the old record: DONE", verbose=2)
# Append new tag to the old record and update the new record with the old_record modified
if opt_mode == 'append' or opt_mode == 'correct':
record = append_new_tag_to_old_record(record, rec_old)
write_message(" -Append new tags to the old record: DONE", verbose=2)
write_message(" -Affected Tags found after comparing upload and original records: %s"%(str(affected_tags)), verbose=2)
# A 005 tag should be added every time the record is modified.
# If an existing record is modified, its 005 tag should be overwritten with a new revision value.
if record.has_key('005'):
record_delete_field(record, '005')
write_message(" Deleted the existing 005 tag.", verbose=2)
last_revision = run_sql("SELECT MAX(job_date) FROM hstRECORD WHERE id_bibrec=%s", (rec_id, ))[0][0]
if last_revision and last_revision.strftime("%Y%m%d%H%M%S.0") == now.strftime("%Y%m%d%H%M%S.0"):
## We are updating the same record within the same seconds! It's less than
## the minimal granularity. Let's pause for 1 more second to take a breath :-)
time.sleep(1)
now = datetime.now()
error = record_add_field(record, '005', controlfield_value=now.strftime("%Y%m%d%H%M%S.0"))
if error is None:
msg = " Failed: Error during adding the 005 controlfield to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
write_message(lambda: " -Added tag 005: DONE. "+ str(record_get_field_value(record, '005', '', '')), verbose=2)
# adding 005 to affected tags will delete the existing 005 entry
# and update with the latest timestamp.
if '005' not in affected_tags:
affected_tags['005'] = [(' ', ' ')]
write_message(" -Stage COMPLETED", verbose=2)
record_deleted_p = False
try:
if not record_is_valid(record):
msg = "ERROR: record is not valid"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
# Have a look if we have FFT tags
write_message("Stage 2: Start (Process FFT tags if exist).", verbose=2)
record_had_FFT = False
if extract_tag_from_record(record, 'FFT') is not None:
record_had_FFT = True
if not writing_rights_p():
write_message(" Stage 2 failed: Error no rights to write fulltext files",
verbose=1, stream=sys.stderr)
task_update_status("ERROR")
sys.exit(1)
try:
record = elaborate_fft_tags(record, rec_id, opt_mode,
pretend=pretend, tmp_ids=tmp_ids,
tmp_vers=tmp_vers)
except Exception, e:
register_exception()
msg = " Stage 2 failed: Error while elaborating FFT tags: %s" % e
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if record is None:
msg = " Stage 2 failed: Error while elaborating FFT tags"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
# Have a look if we have FFT tags
write_message("Stage 2B: Start (Synchronize 8564 tags).", verbose=2)
if record_had_FFT or extract_tag_from_record(record, '856') is not None:
try:
record = synchronize_8564(rec_id, record, record_had_FFT, pretend=pretend)
# in case FFT is in the affected list, make appropriate changes
if ('4', ' ') not in affected_tags.get('856', []):
if '856' not in affected_tags:
affected_tags['856'] = [('4', ' ')]
elif ('4', ' ') not in affected_tags['856']:
affected_tags['856'].append(('4', ' '))
write_message(" -Modified field list updated with FFT details: %s" % str(affected_tags), verbose=2)
except Exception, e:
register_exception(alert_admin=True)
msg = " Stage 2B failed: Error while synchronizing 8564 tags: %s" % e
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if record is None:
msg = " Stage 2B failed: Error while synchronizing 8564 tags"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
write_message("Stage 3: Start (Apply fields deletion requests).", verbose=2)
write_message(lambda: " Record before deletion:\n%s" % record_xml_output(record), verbose=9)
# remove fields with __DELETE_FIELDS__
# NOTE: iterate over a temporary deep copy of the record to avoid a
# RuntimeError due to the dictionary changing size during iteration
tmp_rec = copy.deepcopy(record)
for tag in tmp_rec:
for data_tuple in record[tag]:
if (CFG_BIBUPLOAD_DELETE_CODE, CFG_BIBUPLOAD_DELETE_VALUE) in data_tuple[0]:
# delete the tag with particular indicator pairs from original record
record_delete_field(record, tag, data_tuple[1], data_tuple[2])
write_message(lambda: " Record after cleaning up fields to be deleted:\n%s" % record_xml_output(record), verbose=9)
# Update of the BibFmt
write_message("Stage 4: Start (Update bibfmt).", verbose=2)
updates_exist = not records_identical(record, original_record)
if updates_exist:
# if record_had_altered_bit, this must be set to true, since the
# record has been altered.
if record_had_altered_bit:
oai_provenance_fields = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
for oai_provenance_field in oai_provenance_fields:
for i, (code, dummy_value) in enumerate(oai_provenance_field[0]):
if code == CFG_OAI_PROVENANCE_ALTERED_SUBFIELD:
oai_provenance_field[0][i] = (code, 'true')
tmp_indicators = (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
if tmp_indicators not in affected_tags.get(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], []):
if CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3] not in affected_tags:
affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]] = [tmp_indicators]
else:
affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]].append(tmp_indicators)
write_message(lambda: " Updates exists:\n%s\n!=\n%s" % (record, original_record), verbose=9)
# format the single record as xml
rec_xml_new = record_xml_output(record)
# Update bibfmt with the format xm of this record
modification_date = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(record_get_field_value(record, '005'), '%Y%m%d%H%M%S.0'))
error = update_bibfmt_format(rec_id, rec_xml_new, 'xm', modification_date, pretend=pretend)
if error == 1:
msg = " Failed: error during update_bibfmt_format 'xm'"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
error = update_bibfmt_format(rec_id, marshal.dumps(record), 'recstruct', modification_date, pretend=pretend)
if error == 1:
msg = " Failed: error during update_bibfmt_format 'recstruct'"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
# archive MARCXML format of this record for version history purposes:
error = archive_marcxml_for_history(rec_id, pretend=pretend)
if error == 1:
msg = " Failed to archive MARCXML for history"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
write_message(" -Archived MARCXML for history: DONE", verbose=2)
# delete some formats like HB upon record change:
if updates_exist or record_had_FFT:
for format_to_delete in CFG_BIBUPLOAD_DELETE_FORMATS:
try:
delete_bibfmt_format(rec_id, format_to_delete, pretend=pretend)
except Exception:
# OK, some formats like HB may not have been deleted; no big deal
pass
write_message(" -Stage COMPLETED", verbose=2)
# Update the database MetaData
write_message("Stage 5: Start (Update the database with the metadata).",
verbose=2)
if insert_mode_p:
update_database_with_metadata(record, rec_id, oai_rec_id, pretend=pretend)
elif opt_mode in ('replace', 'replace_or_insert',
'append', 'correct', 'delete') and updates_exist:
# now we clear all the rows from bibrec_bibxxx from the old
record_deleted_p = True
delete_bibrec_bibxxx(rec_old, rec_id, affected_tags, pretend=pretend)
# The metadata update will insert tags that are available in affected_tags,
# but for delete, once the tags have been deleted from bibrec_bibxxx, they
# don't have to be inserted again, except for 005.
if is_opt_mode_delete:
tmp_affected_tags = copy.deepcopy(affected_tags)
for tag in tmp_affected_tags:
if tag != '005':
affected_tags.pop(tag)
write_message(" -Clean bibrec_bibxxx: DONE", verbose=2)
update_database_with_metadata(record, rec_id, oai_rec_id, affected_tags, pretend=pretend)
else:
write_message(" -Stage NOT NEEDED in mode %s" % opt_mode,
verbose=2)
write_message(" -Stage COMPLETED", verbose=2)
record_deleted_p = False
# Finally we update the bibrec table with the current date
write_message("Stage 6: Start (Update bibrec table with current date).",
verbose=2)
if opt_notimechange == 0 and (updates_exist or record_had_FFT):
bibrec_now = convert_datestruct_to_datetext(time.localtime())
write_message(" -Retrieved current localtime: DONE", verbose=2)
update_bibrec_date(bibrec_now, rec_id, insert_mode_p, pretend=pretend)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
# Increase statistics
if insert_mode_p:
stat['nb_records_inserted'] += 1
else:
stat['nb_records_updated'] += 1
# Upload of this record finish
write_message("Record "+str(rec_id)+" DONE", verbose=1)
return (0, int(rec_id), "")
finally:
if record_deleted_p:
## BibUpload has failed, leaving the record deleted. We should
## restore the original record then.
update_database_with_metadata(original_record, rec_id, oai_rec_id, pretend=pretend)
write_message(" Restored original record", verbose=1, stream=sys.stderr)
def record_is_valid(record):
"""
Check if the record is valid. Currently this simply checks if the record
has exactly one rec_id.
@param record: the record
@type record: recstruct
@return: True if the record is valid
@rtype: bool
"""
rec_ids = record_get_field_values(record, tag="001")
if len(rec_ids) != 1:
write_message(" The record is not valid: it has not a single rec_id: %s" % (rec_ids), stream=sys.stderr)
return False
return True
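# Illustrative sketch (assuming bibrecord-style recstructs, where a field is
# a (subfields, ind1, ind2, value, global_position) tuple):
#   record_is_valid({'001': [([], ' ', ' ', '17', 1)]})   # -> True
#   record_is_valid({})                                   # -> False, with a message on stderr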
def find_record_ids_by_oai_id(oaiId):
"""
Find the record identifiers matching the given OAI identifier.
Return an intbitset of the identifiers matching the given OAI identifier.
"""
# Is this record already in invenio (matching by oaiid)
if oaiId:
recids = search_pattern(p=oaiId, f=CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, m='e')
# Is this record already in invenio (matching by reportnumber i.e.
# particularly 037. Idea: to avoid double insertions)
repnumber = oaiId.split(":")[-1]
if repnumber:
recids |= search_pattern(p = repnumber,
f = "reportnumber",
m = 'e' )
# Is this record already in invenio (matching by reportnumber i.e.
# particularly 037. Idea: to avoid double insertions)
repnumber = "arXiv:" + oaiId.split(":")[-1]
recids |= search_pattern(p = repnumber,
f = "reportnumber",
m = 'e' )
return recids
else:
return intbitset()
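# Example (hypothetical identifier): for oaiId = "oai:arXiv.org:1234.5678"
# the function unions three searches: the OAI id itself, the report number
# "1234.5678", and the arXiv-prefixed report number "arXiv:1234.5678".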
def bibupload_post_phase(record, mode=None, rec_id="", pretend=False,
tmp_ids=None, tmp_vers=None):
def _elaborate_tag(record, tag, fun):
if extract_tag_from_record(record, tag) is not None:
try:
record = fun()
except Exception, e:
register_exception()
write_message(" Stage failed: Error while elaborating %s tags: %s" % (tag, e),
verbose=1, stream=sys.stderr)
return (1, int(rec_id)) # TODO: ?
if record is None:
write_message(" Stage failed: Error while elaborating %s tags" % (tag, ),
verbose=1, stream=sys.stderr)
return (1, int(rec_id))
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
if tmp_ids is None:
tmp_ids = {}
if tmp_vers is None:
tmp_vers = {}
_elaborate_tag(record, "BDR", lambda: elaborate_brt_tags(record, rec_id = rec_id,
mode = mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers))
_elaborate_tag(record, "BDM", lambda: elaborate_mit_tags(record, rec_id = rec_id,
mode = mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers))
def submit_ticket_for_holding_pen(rec_id, err, msg):
"""
Submit a ticket via BibCatalog to report about a record that has been put
into the Holding Pen.
@param rec_id: the affected record
@param err: the corresponding Exception
@param msg: verbose message
"""
from invenio import bibtask
from invenio.webuser import get_email_from_username, get_uid_from_email
user = task_get_task_param("user")
uid = None
if user:
try:
uid = get_uid_from_email(get_email_from_username(user))
except Exception, err:
write_message("WARNING: can't reliably retrieve uid for user %s: %s" % (user, err), stream=sys.stderr)
if check_bibcatalog():
text = """
%(msg)s found for record %(rec_id)s: %(err)s
See: <%(siteurl)s/record/edit/#state=edit&recid=%(rec_id)s>
BibUpload task information:
task_id: %(task_id)s
task_specific_name: %(task_specific_name)s
user: %(user)s
task_params: %(task_params)s
task_options: %(task_options)s""" % {
"msg": msg,
"rec_id": rec_id,
"err": err,
"siteurl": CFG_SITE_SECURE_URL,
"task_id": task_get_task_param("task_id"),
"task_specific_name": task_get_task_param("task_specific_name"),
"user": user,
"task_params": bibtask._TASK_PARAMS,
"task_options": bibtask._OPTIONS}
bibcatalog_system.ticket_submit(subject="%s: %s by %s" % (msg, rec_id, user), recordid=rec_id, text=text, queue=CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE, owner=uid)
def insert_record_into_holding_pen(record, oai_id, pretend=False):
query = "INSERT INTO bibHOLDINGPEN (oai_id, changeset_date, changeset_xml, id_bibrec) VALUES (%s, NOW(), %s, %s)"
xml_record = record_xml_output(record)
bibrec_ids = find_record_ids_by_oai_id(oai_id) # here determining the identifier of the record
if len(bibrec_ids) > 0:
bibrec_id = bibrec_ids.pop()
else:
# id not found by using the oai_id, let's use a wider search based
# on any information we might have.
bibrec_id = retrieve_rec_id(record, 'holdingpen', pretend=pretend)
if bibrec_id is None:
bibrec_id = 0
if not pretend:
run_sql(query, (oai_id, xml_record, bibrec_id))
# record_id is logged as 0! (We are not inserting into the main database)
log_record_uploading(oai_id, task_get_task_param('task_id', 0), 0, 'H', pretend=pretend)
stat['nb_holdingpen'] += 1
def print_out_bibupload_statistics():
"""Print the statistics of the process"""
out = "Task stats: %(nb_input)d input records, %(nb_updated)d updated, " \
"%(nb_inserted)d inserted, %(nb_errors)d errors, %(nb_holdingpen)d inserted to holding pen. " \
"Time %(nb_sec).2f sec." % { \
'nb_input': stat['nb_records_to_upload'],
'nb_updated': stat['nb_records_updated'],
'nb_inserted': stat['nb_records_inserted'],
'nb_errors': stat['nb_errors'],
'nb_holdingpen': stat['nb_holdingpen'],
'nb_sec': time.time() - time.mktime(stat['exectime']) }
write_message(out)
def open_marc_file(path):
"""Open a file and return the data"""
try:
# open the file containing the marc document
marc_file = open(path, 'r')
marc = marc_file.read()
marc_file.close()
except IOError, erro:
write_message("Error: %s" % erro, verbose=1, stream=sys.stderr)
write_message("Exiting.", sys.stderr)
if erro.errno == 2:
# No such file or directory
# Not scary
task_update_status("CERROR")
else:
task_update_status("ERROR")
sys.exit(1)
return marc
def xml_marc_to_records(xml_marc):
"""create the records"""
# Creation of the records from the xml Marc in argument
recs = create_records(xml_marc, 1, 1)
if recs == []:
write_message("Error: Cannot parse MARCXML file.", verbose=1, stream=sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("ERROR")
sys.exit(1)
elif recs[0][0] is None:
write_message("Error: MARCXML file has wrong format: %s" % recs,
verbose=1, stream=sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("CERROR")
sys.exit(1)
else:
recs = map((lambda x:x[0]), recs)
return recs
def find_record_format(rec_id, bibformat):
"""Look whether record REC_ID is formatted in FORMAT,
i.e. whether FORMAT exists in the bibfmt table for this record.
Return the number of times it is formatted: 0 if not, 1 if yes,
2 if found more than once (should never occur).
"""
out = 0
query = """SELECT COUNT(*) FROM bibfmt WHERE id_bibrec=%s AND format=%s"""
params = (rec_id, bibformat)
res = []
res = run_sql(query, params)
out = res[0][0]
return out
def find_record_from_recid(rec_id):
"""
Try to find record in the database from the REC_ID number.
Return record ID if found, None otherwise.
"""
res = run_sql("SELECT id FROM bibrec WHERE id=%s",
(rec_id,))
if res:
return res[0][0]
else:
return None
def find_record_from_sysno(sysno):
"""
Try to find record in the database from the external SYSNO number.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, sysno,))
if res:
return res[0][0]
else:
return None
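# Illustrative note: assuming e.g. CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG == '970__a'
# (a hypothetical configuration value), the lookup above runs against the
# bib97x and bibrec_bib97x tables, per the 'bib' + tag[0:2] + 'x' naming scheme.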
def find_records_from_extoaiid(extoaiid, extoaisrc=None):
"""
Try to find records in the database from the external EXTOAIID number.
Return list of record ID if found, None otherwise.
"""
assert(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5] == CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[:5])
bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
write_message(' Looking for extoaiid="%s" with extoaisrc="%s"' % (extoaiid, extoaisrc), verbose=9)
id_bibrecs = intbitset(run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, extoaiid,)))
write_message(' Partially found %s for extoaiid="%s"' % (id_bibrecs, extoaiid), verbose=9)
ret = intbitset()
for id_bibrec in id_bibrecs:
record = get_record(id_bibrec)
instances = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
write_message(' recid %s -> instances "%s"' % (id_bibrec, instances), verbose=9)
for instance in instances:
this_extoaisrc = field_get_subfield_values(instance, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5])
this_extoaisrc = this_extoaisrc and this_extoaisrc[0] or None
this_extoaiid = field_get_subfield_values(instance, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5])
this_extoaiid = this_extoaiid and this_extoaiid[0] or None
write_message(" this_extoaisrc -> %s, this_extoaiid -> %s" % (this_extoaisrc, this_extoaiid), verbose=9)
if this_extoaiid == extoaiid:
write_message(' recid %s -> provenance "%s"' % (id_bibrec, this_extoaisrc), verbose=9)
if this_extoaisrc == extoaisrc:
write_message('Found recid %s for extoaiid="%s" with provenance="%s"' % (id_bibrec, extoaiid, extoaisrc), verbose=9)
ret.add(id_bibrec)
break
if this_extoaisrc is None:
write_message('WARNING: Found recid %s for extoaiid="%s" that doesn\'t specify any provenance, while input record does.' % (id_bibrec, extoaiid), stream=sys.stderr)
if extoaisrc is None:
write_message('WARNING: Found recid %s for extoaiid="%s" that specify a provenance (%s), while input record does not have a provenance.' % (id_bibrec, extoaiid, this_extoaisrc), stream=sys.stderr)
return ret
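# Usage sketch (hypothetical values): find_records_from_extoaiid(
# "oai:example.org:42", "example.org") returns the intbitset of recids whose
# stored external OAI id matches and whose recorded provenance equals the source.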
def find_record_from_oaiid(oaiid):
"""
Try to find record in the database from the OAI ID number and OAI SRC.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib'+CFG_OAI_ID_FIELD[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_OAI_ID_FIELD, oaiid,))
if res:
return res[0][0]
else:
return None
def find_record_from_doi(doi):
"""
Try to find record in the database from the given DOI.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib02x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec, bb.field_number
FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
('0247_a', doi,))
# For each of the result, make sure that it is really tagged as doi
for (id_bibrec, field_number) in res:
res = run_sql("""SELECT bb.id_bibrec
FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id and bb.field_number=%%s and bb.id_bibrec=%%s""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
('0247_2', "doi", field_number, id_bibrec))
if res and res[0][0] == id_bibrec:
return res[0][0]
return None
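# Illustrative note: a matching record stores the DOI in MARC field 0247,
# e.g. $a "10.1234/example" (a hypothetical DOI) together with $2 "doi" in the
# same field instance; the second query above performs that $2 verification.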
def extract_tag_from_record(record, tag_number):
""" Extract the tag_number for record."""
# first step verify if the record is not already in the database
if record:
return record.get(tag_number, None)
return None
def retrieve_rec_id(record, opt_mode, pretend=False, post_phase = False):
"""Retrieve the record Id from a record by using tag 001 or SYSNO or OAI ID or DOI
tag. opt_mode is the desired mode.
@param post_phase: tells if we are calling this method in the post-processing phase. If true, we accept the presence of 001 fields even in the insert mode
@type post_phase: boolean
"""
rec_id = None
# 1st step: we look for the tag 001
tag_001 = extract_tag_from_record(record, '001')
if tag_001 is not None:
# We extract the record ID from the tag
rec_id = tag_001[0][3]
# if we are in insert mode => error
if opt_mode == 'insert' and not post_phase:
write_message(" Failed: tag 001 found in the xml" \
" submitted, you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)",
verbose=1, stream=sys.stderr)
return -1
else:
# we found the rec id and we are not in insert mode => continue
# we try to match rec_id against the database:
if find_record_from_recid(rec_id) is not None:
# okay, 001 corresponds to some known record
return int(rec_id)
elif opt_mode in ('replace', 'replace_or_insert'):
if task_get_option('force'):
# we found the rec_id but it's not in the system and we are
# requested to replace records. Therefore we create on the fly
# an empty record, allocating the recid.
write_message(" Warning: tag 001 found in the xml with"
" value %(rec_id)s, but rec_id %(rec_id)s does"
" not exist. Since the mode replace was"
" requested the rec_id %(rec_id)s is allocated"
" on-the-fly." % {"rec_id": rec_id},
stream=sys.stderr)
return create_new_record(rec_id=rec_id, pretend=pretend)
else:
# Since --force was not used we are going to raise an error
write_message(" Failed: tag 001 found in the xml"
" submitted with value %(rec_id)s. The"
" corresponding record however does not"
" exists. If you want to really create"
" such record, please use the --force"
" parameter when calling bibupload." % {
"rec_id": rec_id}, stream=sys.stderr)
return -1
else:
# The record doesn't exist yet. We shall try to check
# the SYSNO or OAI or DOI id later.
write_message(" -Tag 001 value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag 001 not found in the xml marc file.", verbose=9)
if rec_id is None:
# 2nd step we look for the SYSNO
sysnos = record_get_field_values(record,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:3],
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] or "",
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] or "",
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[5:6])
if sysnos:
sysno = sysnos[0] # there should be only one external SYSNO
write_message(" -Checking if SYSNO " + sysno + \
" exists in the database", verbose=9)
# try to find the corresponding rec id from the database
rec_id = find_record_from_sysno(sysno)
if rec_id is not None:
# rec_id found
pass
else:
# The record doesn't exist yet. We will try to check
# external and internal OAI ids later.
write_message(" -Tag SYSNO value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag SYSNO not found in the xml marc file.",
verbose=9)
if rec_id is None:
# 3rd step we look for the external OAIID
extoai_fields = record_get_field_instances(record,
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3],
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] or "",
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] or "")
if extoai_fields:
for field in extoai_fields:
extoaiid = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5:6])
extoaisrc = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5:6])
if extoaiid:
extoaiid = extoaiid[0]
if extoaisrc:
extoaisrc = extoaisrc[0]
else:
extoaisrc = None
write_message(" -Checking if EXTOAIID %s (%s) exists in the database" % (extoaiid, extoaisrc), verbose=9)
# try to find the corresponding rec id from the database
rec_ids = find_records_from_extoaiid(extoaiid, extoaisrc)
if rec_ids:
# rec_id found
rec_id = rec_ids.pop()
break
else:
# The record doesn't exist yet. We will try to check
# OAI id later.
write_message(" -Tag EXTOAIID value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag EXTOAIID not found in the xml marc file.", verbose=9)
if rec_id is None:
# 4th step we look for the OAI ID
oaiidvalues = record_get_field_values(record,
CFG_OAI_ID_FIELD[0:3],
CFG_OAI_ID_FIELD[3:4] != "_" and \
CFG_OAI_ID_FIELD[3:4] or "",
CFG_OAI_ID_FIELD[4:5] != "_" and \
CFG_OAI_ID_FIELD[4:5] or "",
CFG_OAI_ID_FIELD[5:6])
if oaiidvalues:
oaiid = oaiidvalues[0] # there should be only one OAI ID
write_message(" -Check if local OAI ID " + oaiid + \
" exist in the database", verbose=9)
# try to find the corresponding rec id from the database
rec_id = find_record_from_oaiid(oaiid)
if rec_id is not None:
# rec_id found
pass
else:
write_message(" -Tag OAI ID value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag SYSNO not found in the xml marc file.",
verbose=9)
if rec_id is None:
# 5th step we look for the DOI.
record_dois = record_extract_dois(record)
matching_recids = set()
if record_dois:
# try to find the corresponding rec id from the database
for record_doi in record_dois:
possible_recid = find_record_from_doi(record_doi)
if possible_recid:
matching_recids.add(possible_recid)
if len(matching_recids) > 1:
# Oops, this record refers to a DOI existing in multiple records.
# We don't know which one to choose.
write_message(" Failed: Multiple records found in the" \
" database %s that match the DOI(s) in the input" \
" MARCXML %s" % (repr(matching_recids), repr(record_dois)),
verbose=1, stream=sys.stderr)
return -1
elif len(matching_recids) == 1:
rec_id = matching_recids.pop()
if opt_mode == 'insert':
write_message(" Failed: DOI tag matching record #%s found in the xml" \
" submitted, you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)" % rec_id,
verbose=1, stream=sys.stderr)
return -1
else:
write_message(" - Tag DOI value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag DOI not found in the xml marc file.",
verbose=9)
# Now we should have detected rec_id from SYSNO or OAIID
# tags. (None otherwise.)
if rec_id:
if opt_mode == 'insert':
write_message(" Failed: Record found in the database," \
" you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)",
verbose=1, stream=sys.stderr)
return -1
else:
if opt_mode != 'insert' and \
opt_mode != 'replace_or_insert':
write_message(" Failed: Record not found in the database."\
" Please insert the file before updating it."\
" (-h for help)", verbose=1, stream=sys.stderr)
return -1
return rec_id and int(rec_id) or None
def check_record_doi_is_unique(rec_id, record):
"""
Check that DOI found in 'record' does not exist in any other
record than 'recid'.
Return (boolean, msg) where 'boolean' would be True if the DOI is
unique.
"""
record_dois = record_extract_dois(record)
if record_dois:
matching_recids = set()
for record_doi in record_dois:
possible_recid = find_record_from_doi(record_doi)
if possible_recid:
matching_recids.add(possible_recid)
if len(matching_recids) > 1:
# Oops, this record refers to a DOI existing in multiple records.
msg = " Failed: Multiple records found in the" \
" database %s that match the DOI(s) in the input" \
" MARCXML %s" % (repr(matching_recids), repr(record_dois))
return (False, msg)
elif len(matching_recids) == 1:
matching_recid = matching_recids.pop()
if str(matching_recid) != str(rec_id):
# Oops, this record refers to a DOI existing in a different record.
msg = " Failed: DOI(s) %s found in this record (#%s)" \
" already exist(s) in another record (#%s)" % \
(repr(record_dois), rec_id, matching_recid)
return (False, msg)
return (True, "")
### Insert functions
def create_new_record(rec_id=None, pretend=False):
"""
Create new record in the database
@param rec_id: if specified the new record will have this rec_id.
@type rec_id: int
@return: the allocated rec_id
@rtype: int
@note: in case of errors, None will be returned
"""
if rec_id is not None:
try:
rec_id = int(rec_id)
except (ValueError, TypeError), error:
write_message(" Error during the creation_new_record function: %s "
% error, verbose=1, stream=sys.stderr)
return None
if run_sql("SELECT id FROM bibrec WHERE id=%s", (rec_id, )):
write_message(" Error during the creation_new_record function: the requested rec_id %s already exists." % rec_id)
return None
if pretend:
if rec_id:
return rec_id
else:
return run_sql("SELECT max(id)+1 FROM bibrec")[0][0]
if rec_id is not None:
return run_sql("INSERT INTO bibrec (id, creation_date, modification_date) VALUES (%s, NOW(), NOW())", (rec_id, ))
else:
return run_sql("INSERT INTO bibrec (creation_date, modification_date) VALUES (NOW(), NOW())")
def insert_bibfmt(id_bibrec, marc, bibformat, modification_date='1970-01-01 00:00:00', pretend=False):
"""Insert the format in the table bibfmt"""
# compress the marc value
pickled_marc = compress(marc)
try:
time.strptime(modification_date, "%Y-%m-%d %H:%M:%S")
except ValueError:
modification_date = '1970-01-01 00:00:00'
query = """INSERT LOW_PRIORITY INTO bibfmt (id_bibrec, format, last_updated, value)
VALUES (%s, %s, %s, %s)"""
if not pretend:
row_id = run_sql(query, (id_bibrec, bibformat, modification_date, pickled_marc))
return row_id
else:
return 1
def insert_record_bibxxx(tag, value, pretend=False):
"""Insert the record into bibxxx"""
# determine into which table one should insert the record
table_name = 'bib'+tag[0:2]+'x'
# check if the tag, value combination exists in the table
query = """SELECT id,value FROM %s """ % table_name
query += """ WHERE tag=%s AND value=%s"""
params = (tag, value)
res = None
res = run_sql(query, params)
# Note: compare now the found values one by one and look for
# string binary equality (e.g. to respect lowercase/uppercase
# match), regardless of the charset etc settings. Ideally we
# could use a BINARY operator in the above SELECT statement, but
# we would have to check compatibility on various MySQLdb versions
# etc; this approach checks all matched values in Python, not in
# MySQL, which is less cool, but more conservative, so it should
# work better on most setups.
if res:
for row in res:
row_id = row[0]
row_value = row[1]
if row_value == value:
return (table_name, row_id)
# We got here only when the tag, value combination was not found,
# so it is now necessary to insert the tag, value combination into
# bibxxx table as new.
query = """INSERT INTO %s """ % table_name
query += """ (tag, value) values (%s , %s)"""
params = (tag, value)
if not pretend:
row_id = run_sql(query, params)
else:
return (table_name, 1)
return (table_name, row_id)
def insert_record_bibrec_bibxxx(table_name, id_bibxxx,
field_number, id_bibrec, pretend=False):
"""Insert the record into bibrec_bibxxx"""
# determine into which table one should insert the record
full_table_name = 'bibrec_'+ table_name
# insert the proper row into the table
query = """INSERT INTO %s """ % full_table_name
query += """(id_bibrec,id_bibxxx, field_number) values (%s , %s, %s)"""
params = (id_bibrec, id_bibxxx, field_number)
if not pretend:
res = run_sql(query, params)
else:
return 1
return res
def synchronize_8564(rec_id, record, record_had_FFT, pretend=False):
"""
Synchronize 8564_ tags and BibDocFile tables.
This function directly manipulates the record parameter.
@type rec_id: positive integer
@param rec_id: the record identifier.
@param record: the record structure as created by bibrecord.create_record
@type record_had_FFT: boolean
@param record_had_FFT: True if the incoming bibuploaded-record used FFT
@return: the manipulated record (which is also modified as a side effect)
"""
def merge_marc_into_bibdocfile(field, pretend=False):
"""
Internal function that reads a single field and stores its content
in BibDocFile tables.
@param field: the 8564_ field containing a BibDocFile URL.
"""
write_message('Merging field: %s' % (field, ), verbose=9)
url = field_get_subfield_values(field, 'u')[:1] or field_get_subfield_values(field, 'q')[:1]
description = field_get_subfield_values(field, 'y')[:1]
comment = field_get_subfield_values(field, 'z')[:1]
if url:
recid, docname, docformat = decompose_bibdocfile_url(url[0])
if recid != rec_id:
write_message("INFO: URL %s is not pointing to a fulltext owned by this record (%s)" % (url, recid), stream=sys.stderr)
else:
try:
bibdoc = BibRecDocs(recid).get_bibdoc(docname)
if description and not pretend:
bibdoc.set_description(description[0], docformat)
if comment and not pretend:
bibdoc.set_comment(comment[0], docformat)
except InvenioBibDocFileError:
## Apparently the referenced docname doesn't exist anymore.
## Too bad. Let's skip it.
write_message("WARNING: docname %s does not seem to exist for record %s. Has it been renamed outside FFT?" % (docname, recid), stream=sys.stderr)
def merge_bibdocfile_into_marc(field, subfields):
"""
Internal function that reads BibDocFile table entries referenced by
the URL in the given 8564_ field and integrate the given information
directly with the provided subfields.
@param field: the 8564_ field containing a BibDocFile URL.
@param subfields: the subfields corresponding to the BibDocFile URL
generated after BibDocFile tables.
"""
write_message('Merging subfields %s into field %s' % (subfields, field), verbose=9)
subfields = dict(subfields) ## We make a copy not to have side-effects
subfield_to_delete = []
for subfield_position, (code, value) in enumerate(field_get_subfield_instances(field)):
## For each subfield instance already existing...
if code in subfields:
## ...We substitute it with what is in BibDocFile tables
record_modify_subfield(record, '856', code, subfields[code],
subfield_position, field_position_global=field[4])
del subfields[code]
else:
## ...We delete it otherwise
subfield_to_delete.append(subfield_position)
subfield_to_delete.sort()
for counter, position in enumerate(subfield_to_delete):
## FIXME: Very hackish algorithm. Since deleting a subfield
## will alter the positions of the following subfields, we
## are taking note of this and adjusting further positions
## by using a counter.
record_delete_subfield_from(record, '856', position - counter,
field_position_global=field[4])
subfields = subfields.items()
subfields.sort()
for code, value in subfields:
## Let's add non-previously existing subfields
record_add_subfield_into(record, '856', code, value,
field_position_global=field[4])
def get_bibdocfile_managed_info():
"""
Internal function, returns a dictionary of
BibDocFile URL -> wanna-be subfields.
This information is retrieved from internal BibDoc
structures rather than from input MARC XML files
@rtype: mapping
@return: BibDocFile URL -> wanna-be subfields dictionary
"""
ret = {}
bibrecdocs = BibRecDocs(rec_id)
latest_files = bibrecdocs.list_latest_files(list_hidden=False)
for afile in latest_files:
url = afile.get_url()
ret[url] = {'u': url}
description = afile.get_description()
comment = afile.get_comment()
subformat = afile.get_subformat()
if description:
ret[url]['y'] = description
if comment:
ret[url]['z'] = comment
if subformat:
ret[url]['x'] = subformat
return ret
write_message("Synchronizing MARC of recid '%s' with:\n%s" % (rec_id, record), verbose=9)
tags856s = record_get_field_instances(record, '856', '%', '%')
write_message("Original 856%% instances: %s" % tags856s, verbose=9)
tags8564s_to_add = get_bibdocfile_managed_info()
write_message("BibDocFile instances: %s" % tags8564s_to_add, verbose=9)
positions_tags8564s_to_remove = []
for local_position, field in enumerate(tags856s):
if field[1] == '4' and field[2] == ' ':
write_message('Analysing %s' % (field, ), verbose=9)
for url in field_get_subfield_values(field, 'u') + field_get_subfield_values(field, 'q'):
if url in tags8564s_to_add:
# there exists a link in the MARC of the record and the connection exists in BibDoc tables
if record_had_FFT:
merge_bibdocfile_into_marc(field, tags8564s_to_add[url])
else:
merge_marc_into_bibdocfile(field, pretend=pretend)
del tags8564s_to_add[url]
break
elif bibdocfile_url_p(url) and decompose_bibdocfile_url(url)[0] == rec_id:
# The link exists and is a potentially correct-looking link to a document;
# moreover, it refers to the current record id ... but it does not exist in
# internal BibDoc structures. This could have happened when a document was renamed
# or removed. In both cases we have to remove the link... a new one will be created
positions_tags8564s_to_remove.append(local_position)
write_message("%s to be deleted and re-synchronized" % (field, ), verbose=9)
break
record_delete_fields(record, '856', positions_tags8564s_to_remove)
tags8564s_to_add = tags8564s_to_add.values()
tags8564s_to_add.sort()
for subfields in tags8564s_to_add:
subfields = subfields.items()
subfields.sort()
record_add_field(record, '856', '4', ' ', subfields=subfields)
write_message('Final record: %s' % record, verbose=9)
return record
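# Illustrative note: after synchronization, each latest BibDocFile of the
# record is represented by one 8564_ field carrying at least $u (the URL),
# plus $y/$z/$x when a description, comment or subformat is stored in BibDoc.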
def _get_subfield_value(field, subfield_code, default=None):
res = field_get_subfield_values(field, subfield_code)
if res:
return res[0]
else:
return default
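# Illustrative: _get_subfield_value(field, 'd', KEEP_OLD_VALUE) returns the
# first $d subfield value when present, and the supplied default otherwise.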
def elaborate_mit_tags(record, rec_id, mode, pretend = False, tmp_ids = {},
tmp_vers = {}):
"""
Uploading MoreInfo -> BDM tags
"""
tuple_list = extract_tag_from_record(record, 'BDM')
# Now gathering information from BDM tags - to be processed later
write_message("Processing BDM entries of the record ")
recordDocs = BibRecDocs(rec_id)
if tuple_list:
for mit in record_get_field_instances(record, 'BDM', ' ', ' '):
relation_id = _get_subfield_value(mit, "r")
bibdoc_id = _get_subfield_value(mit, "i")
# checking for a possibly temporary ID
if bibdoc_id is not None:
bibdoc_id = resolve_identifier(tmp_ids, bibdoc_id)
bibdoc_ver = _get_subfield_value(mit, "v")
if bibdoc_ver is not None:
bibdoc_ver = resolve_identifier(tmp_vers, bibdoc_ver)
bibdoc_name = _get_subfield_value(mit, "n")
bibdoc_fmt = _get_subfield_value(mit, "f")
moreinfo_str = _get_subfield_value(mit, "m")
if bibdoc_id is None:
if bibdoc_name is None:
raise StandardError("Incorrect relation. Neither the name nor the identifier of the document has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc_id = recordDocs.get_docid(bibdoc_name)
except Exception:
raise StandardError("BibDoc with the name %s does not exist within the record" % (bibdoc_name, ))
else:
if bibdoc_name is not None:
write_message("Warning: both the name and the id of the document have been specified. Ignoring the name")
if (moreinfo_str is None or mode in ("replace", "correct")) and (not pretend):
MoreInfo(docid=bibdoc_id , version = bibdoc_ver,
docformat = bibdoc_fmt, relation = relation_id).delete()
if (not moreinfo_str is None) and (not pretend):
MoreInfo.create_from_serialised(moreinfo_str,
docid=bibdoc_id,
version = bibdoc_ver,
docformat = bibdoc_fmt,
relation = relation_id)
return record
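# Illustrative BDM field (hypothetical values; the exact temporary-identifier
# syntax is whatever parse_identifier()/resolve_identifier() accept):
#   <datafield tag="BDM" ind1=" " ind2=" ">
#     <subfield code="i">...bibdoc id or temporary id...</subfield>
#     <subfield code="f">.pdf</subfield>
#     <subfield code="m">...serialised MoreInfo...</subfield>
#   </datafield>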
def elaborate_brt_tags(record, rec_id, mode, pretend=False, tmp_ids = {}, tmp_vers = {}):
"""
Process BDR tags describing relations between existing objects
"""
tuple_list = extract_tag_from_record(record, 'BDR')
# Now gathering information from BDR tags - to be processed later
relations_to_create = []
write_message("Processing BDR entries of the record ")
recordDocs = BibRecDocs(rec_id) #TODO: check what happens if there is no record yet ! Will the class represent an empty set?
if tuple_list:
for brt in record_get_field_instances(record, 'BDR', ' ', ' '):
relation_id = _get_subfield_value(brt, "r")
bibdoc1_id = None
bibdoc1_name = None
bibdoc1_ver = None
bibdoc1_fmt = None
bibdoc2_id = None
bibdoc2_name = None
bibdoc2_ver = None
bibdoc2_fmt = None
if not relation_id:
bibdoc1_id = _get_subfield_value(brt, "i")
bibdoc1_name = _get_subfield_value(brt, "n")
if bibdoc1_id is None:
if bibdoc1_name is None:
raise StandardError("Incorrect relation. Neither the name nor the identifier of the first object has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc1_id = recordDocs.get_docid(bibdoc1_name)
except Exception:
raise StandardError("BibDoc with the name %s does not exist within the record" % \
(bibdoc1_name, ))
else:
# resolving temporary identifier
bibdoc1_id = resolve_identifier(tmp_ids, bibdoc1_id)
if bibdoc1_name is not None:
write_message("Warning: both the name and the id of the first document of a relation have been specified. Ignoring the name")
bibdoc1_ver = _get_subfield_value(brt, "v")
if not (bibdoc1_ver is None):
bibdoc1_ver = resolve_identifier(tmp_vers, bibdoc1_ver)
bibdoc1_fmt = _get_subfield_value(brt, "f")
bibdoc2_id = _get_subfield_value(brt, "j")
bibdoc2_name = _get_subfield_value(brt, "o")
if bibdoc2_id is None:
if bibdoc2_name is None:
raise StandardError("Incorrect relation. Neither the name nor the identifier of the second object has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc2_id = recordDocs.get_docid(bibdoc2_name)
except Exception:
raise StandardError("BibDoc with the name %s does not exist within the record" % (bibdoc2_name, ))
else:
bibdoc2_id = resolve_identifier(tmp_ids, bibdoc2_id)
if bibdoc2_name is not None:
write_message("Warning: both the name and the id of the second document of a relation have been specified. Ignoring the name")
bibdoc2_ver = _get_subfield_value(brt, "w")
if not (bibdoc2_ver is None):
bibdoc2_ver = resolve_identifier(tmp_vers, bibdoc2_ver)
bibdoc2_fmt = _get_subfield_value(brt, "g")
control_command = _get_subfield_value(brt, "d")
relation_type = _get_subfield_value(brt, "t")
if not relation_type and not relation_id:
raise StandardError("The relation type must be specified")
more_info = _get_subfield_value(brt, "m")
# the relation id might be specified in the case of updating
# MoreInfo table instead of other fields
rel_obj = None
if not relation_id:
rels = BibRelation.get_relations(rel_type = relation_type,
bibdoc1_id = bibdoc1_id,
bibdoc2_id = bibdoc2_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc2_ver = bibdoc2_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_fmt = bibdoc2_fmt)
if len(rels) > 0:
rel_obj = rels[0]
relation_id = rel_obj.id
else:
rel_obj = BibRelation(rel_id=relation_id)
relations_to_create.append((relation_id, bibdoc1_id, bibdoc1_ver,
bibdoc1_fmt, bibdoc2_id, bibdoc2_ver,
bibdoc2_fmt, relation_type, more_info,
rel_obj, control_command))
record_delete_field(record, 'BDR', ' ', ' ')
if mode in ("insert", "replace_or_insert", "append", "correct", "replace"):
# now creating relations between objects based on the data
if not pretend:
for (relation_id, bibdoc1_id, bibdoc1_ver, bibdoc1_fmt,
bibdoc2_id, bibdoc2_ver, bibdoc2_fmt, rel_type,
more_info, rel_obj, control_command) in relations_to_create:
if rel_obj is None:
rel_obj = BibRelation.create(bibdoc1_id = bibdoc1_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_id = bibdoc2_id,
bibdoc2_ver = bibdoc2_ver,
bibdoc2_fmt = bibdoc2_fmt,
rel_type = rel_type)
relation_id = rel_obj.id
if mode in ("replace"):
# Clearing existing MoreInfo content
rel_obj.get_more_info().delete()
if more_info:
MoreInfo.create_from_serialised(more_info, relation = relation_id)
if control_command == "DELETE":
rel_obj.delete()
else:
write_message("BDR tag is not processed in the %s mode" % (mode, ))
return record
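# Illustrative BDR field (hypothetical values): relate two documents of the
# record by name with a relation type, optionally attaching MoreInfo via $m:
#   <datafield tag="BDR" ind1=" " ind2=" ">
#     <subfield code="n">main_document</subfield>
#     <subfield code="o">extracted_figures</subfield>
#     <subfield code="t">is_extracted_from</subfield>
#   </datafield>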
def elaborate_fft_tags(record, rec_id, mode, pretend=False,
tmp_ids = {}, tmp_vers = {}):
"""
Process FFT tags that should contain $a with file paths or URLs
to get the fulltext from. This function enriches record with
proper 8564 URL tags, downloads fulltext files and stores them
into var/data structure where appropriate.
CFG_BIBUPLOAD_WGET_SLEEP_TIME defines time to sleep in seconds in
between URL downloads.
Note: if an FFT tag contains multiple $a subfields, we upload them
into different 856 URL tags in the metadata. See regression test
case test_multiple_fft_insert_via_http().
"""
# Let's define some handy sub procedure.
def _add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False):
"""Adds a new format for a given bibdoc. Returns True when everything's fine."""
write_message('Add new format to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s, modification_date: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags, modification_date), verbose=9)
try:
if not url: # Not requesting a new url. Just updating comment & description
return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend)
try:
if not pretend:
bibdoc.add_file_new_format(url, description=description, comment=comment, flags=flags, modification_date=modification_date)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because format already exists (%s)." % (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr)
raise
except Exception, e:
write_message("Error in adding '%s' as a new format because of: %s" % (url, e), stream=sys.stderr)
raise
return True
def _add_new_version(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False):
"""Adds a new version for a given bibdoc. Returns True when everything's fine."""
write_message('Add new version to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags))
try:
if not url:
return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend)
try:
if not pretend:
bibdoc.add_file_new_version(url, description=description, comment=comment, flags=flags, modification_date=modification_date)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because '%s'." % (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr)
raise
except Exception, e:
write_message("Error in adding '%s' as a new version because of: %s" % (url, e), stream=sys.stderr)
raise
return True
def _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=False):
"""Directly update comments and descriptions."""
write_message('Just updating description and comment for %s with format %s with description %s, comment %s and flags %s' % (docname, docformat, description, comment, flags), verbose=9)
try:
if not pretend:
bibdoc.set_description(description, docformat)
bibdoc.set_comment(comment, docformat)
for flag in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
if flag in flags:
bibdoc.set_flag(flag, docformat)
else:
bibdoc.unset_flag(flag, docformat)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s') description and comment not updated because '%s'." % (docname, docformat, description, comment, flags, e))
raise
return True
def _process_document_moreinfos(more_infos, docname, version, docformat, mode):
if mode not in ('correct', 'append', 'replace_or_insert', 'replace', 'insert'):
print "exited because the mode is incorrect"
return
brd = BibRecDocs(rec_id)
docid = None
try:
docid = brd.get_docid(docname)
except Exception:
raise StandardError("MoreInfo: No document with the given name is associated with the record")
if not version:
# We have to retrieve the most recent version ...
version = brd.get_bibdoc(docname).get_latest_version()
doc_moreinfo_s, version_moreinfo_s, version_format_moreinfo_s, format_moreinfo_s = more_infos
if mode in ("replace", "replace_or_insert"):
if doc_moreinfo_s: #only if specified, otherwise do not touch
MoreInfo(docid = docid).delete()
if format_moreinfo_s: #only if specified... otherwise do not touch
MoreInfo(docid = docid, docformat = docformat).delete()
if doc_moreinfo_s is not None:
MoreInfo.create_from_serialised(ser_str = doc_moreinfo_s, docid = docid)
if version_moreinfo_s is not None:
MoreInfo.create_from_serialised(ser_str = version_moreinfo_s,
docid = docid, version = version)
if version_format_moreinfo_s is not None:
MoreInfo.create_from_serialised(ser_str = version_format_moreinfo_s,
docid = docid, version = version,
docformat = docformat)
if format_moreinfo_s is not None:
MoreInfo.create_from_serialised(ser_str = format_moreinfo_s,
docid = docid, docformat = docformat)
if mode == 'delete':
raise StandardError('FFT tag specified but bibupload executed in --delete mode')
tuple_list = extract_tag_from_record(record, 'FFT')
if tuple_list: # FFT Tags analysis
write_message("FFTs: "+str(tuple_list), verbose=9)
docs = {} # docnames and their data
for fft in record_get_field_instances(record, 'FFT', ' ', ' '):
# Very first, we retrieve the potentially temporary identifiers...
# even if the rest fails, we should include them in the dictionary
version = _get_subfield_value(fft, 'v', '')
# checking if version is temporary... if so, filling a different variable
is_tmp_ver, bibdoc_tmpver = parse_identifier(version)
if is_tmp_ver:
version = None
else:
bibdoc_tmpver = None
if not version: #treating cases of empty string etc...
version = None
bibdoc_tmpid = field_get_subfield_values(fft, 'i')
if bibdoc_tmpid:
bibdoc_tmpid = bibdoc_tmpid[0]
else:
bibdoc_tmpid = None
is_tmp_id, bibdoc_tmpid = parse_identifier(bibdoc_tmpid)
if not is_tmp_id:
bibdoc_tmpid = None
# In the case of temporary ids, we don't resolve them yet but signal that they have been used
# value -1 means that the identifier has been declared but not assigned a value yet
if bibdoc_tmpid:
if bibdoc_tmpid in tmp_ids:
write_message("WARNING: the temporary identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpid, ))
else:
tmp_ids[bibdoc_tmpid] = -1
if bibdoc_tmpver:
if bibdoc_tmpver in tmp_vers:
write_message("WARNING: the temporary version identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpver, ))
else:
tmp_vers[bibdoc_tmpver] = -1
# Let's discover the type of the document
# This is a legacy field and no particular check will be
# enforced on it.
doctype = _get_subfield_value(fft, 't', 'Main') #Default is Main
# Let's discover the url.
url = field_get_subfield_values(fft, 'a')
if url:
url = url[0]
try:
check_valid_url(url)
except StandardError, e:
raise StandardError, "fft '%s' specifies in $a a location ('%s') with problems: %s" % (fft, url, e)
else:
url = ''
#TODO: a lot of this code could be compacted using a similar syntax, which should be more readable in the long run
# maybe the right-hand side expressions look a bit cryptic, but the elaborate_fft function would be much clearer
if mode == 'correct' and doctype != 'FIX-MARC':
arg2 = ""
else:
arg2 = KEEP_OLD_VALUE
# Let's discover the description: in 'correct' mode (and not FIX-MARC) an
# unspecified description means the user really wants to clear it;
# otherwise the old description is kept (KEEP_OLD_VALUE)
description = _get_subfield_value(fft, 'd', arg2)
# Let's discover the desired docname to be created/altered
name = field_get_subfield_values(fft, 'n')
if name:
## Let's remove undesired extensions
name = file_strip_ext(name[0] + '.pdf')
else:
if url:
name = get_docname_from_url(url)
elif mode != 'correct' and doctype != 'FIX-MARC':
raise StandardError, "Warning: fft '%s' specifies neither a location in $a nor a docname in $n" % str(fft)
else:
continue
# Let's discover the desired new docname in case we want to change it
newname = field_get_subfield_values(fft, 'm')
if newname:
newname = file_strip_ext(newname[0] + '.pdf')
else:
newname = name
# Let's discover the desired format
docformat = field_get_subfield_values(fft, 'f')
if docformat:
docformat = normalize_format(docformat[0])
else:
if url:
docformat = guess_format_from_url(url)
else:
docformat = ""
# Let's discover the icon
icon = field_get_subfield_values(fft, 'x')
if icon != []:
icon = icon[0]
if icon != KEEP_OLD_VALUE:
try:
check_valid_url(icon)
except StandardError, e:
raise StandardError, "fft '%s' specifies in $x an icon ('%s') with problems: %s" % (fft, icon, e)
else:
icon = ''
# Let's discover the comment
comment = field_get_subfield_values(fft, 'z')
if comment != []:
comment = comment[0]
else:
if mode == 'correct' and doctype != 'FIX-MARC':
## See comment on description
comment = ''
else:
comment = KEEP_OLD_VALUE
# Let's discover the restriction
restriction = field_get_subfield_values(fft, 'r')
if restriction != []:
restriction = restriction[0]
else:
if mode == 'correct' and doctype != 'FIX-MARC':
## See comment on description
restriction = ''
else:
restriction = KEEP_OLD_VALUE
document_moreinfo = _get_subfield_value(fft, 'w')
version_moreinfo = _get_subfield_value(fft, 'p')
version_format_moreinfo = _get_subfield_value(fft, 'b')
format_moreinfo = _get_subfield_value(fft, 'u')
# Let's discover the timestamp of the file (if any)
timestamp = field_get_subfield_values(fft, 's')
if timestamp:
try:
timestamp = datetime(*(time.strptime(timestamp[0], "%Y-%m-%d %H:%M:%S")[:6]))
except ValueError:
write_message('Warning: The timestamp is not in the expected format, thus it will be ignored. The expected format is YYYY-MM-DD HH:MM:SS')
timestamp = ''
else:
timestamp = ''
flags = field_get_subfield_values(fft, 'o')
for flag in flags:
if flag not in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
raise StandardError, "fft '%s' specifies a non available flag: %s" % (fft, flag)
if name in docs: # new format considered
(doctype2, newname2, restriction2, version2, urls, dummybibdoc_moreinfos2, dummybibdoc_tmpid2, dummybibdoc_tmpver2 ) = docs[name]
if doctype2 != doctype:
raise StandardError, "fft '%s' specifies a different doctype from previous fft with docname '%s'" % (str(fft), name)
if newname2 != newname:
raise StandardError, "fft '%s' specifies a different newname from previous fft with docname '%s'" % (str(fft), name)
if restriction2 != restriction:
raise StandardError, "fft '%s' specifies a different restriction from previous fft with docname '%s'" % (str(fft), name)
if version2 != version:
raise StandardError, "fft '%s' specifies a different version than the previous fft with docname '%s'" % (str(fft), name)
for (dummyurl2, format2, dummydescription2, dummycomment2, dummyflags2, dummytimestamp2) in urls:
if docformat == format2:
raise StandardError, "fft '%s' specifies a second file '%s' with the same format '%s' from previous fft with docname '%s'" % (str(fft), url, docformat, name)
if url or docformat:
urls.append((url, docformat, description, comment, flags, timestamp))
if icon:
urls.append((icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp))
else:
if url or docformat:
docs[name] = (doctype, newname, restriction, version, [(url, docformat, description, comment, flags, timestamp)], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
if icon:
docs[name][4].append((icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp))
elif icon:
docs[name] = (doctype, newname, restriction, version, [(icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp)], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
else:
docs[name] = (doctype, newname, restriction, version, [], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
write_message('Result of FFT analysis:\n\tDocs: %s' % (docs,), verbose=9)
# Let's remove all FFT tags
record_delete_field(record, 'FFT', ' ', ' ')
# Preprocessed data elaboration
bibrecdocs = BibRecDocs(rec_id)
## Let's pre-download all the URLs to see if, in case of mode 'correct' or 'append'
## we can avoid creating a new revision.
for docname, (doctype, newname, restriction, version, urls, more_infos, bibdoc_tmpid, bibdoc_tmpver ) in docs.items():
downloaded_urls = []
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
except InvenioBibDocFileError:
## A bibdoc with the given docname does not exists.
## So there is no chance we are going to revise an existing
## format with an identical file :-)
bibdoc = None
new_revision_needed = False
for url, docformat, description, comment, flags, timestamp in urls:
if url:
try:
downloaded_url = download_url(url, docformat)
write_message("%s saved into %s" % (url, downloaded_url), verbose=9)
except Exception, err:
write_message("Error in downloading '%s' because of: %s" % (url, err), stream=sys.stderr)
raise
if mode == 'correct' and bibdoc is not None and not new_revision_needed:
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
if not bibrecdocs.check_file_exists(downloaded_url, docformat):
new_revision_needed = True
else:
write_message("WARNING: %s is already attached to bibdoc %s for recid %s" % (url, docname, rec_id), stream=sys.stderr)
elif mode == 'append' and bibdoc is not None:
if not bibrecdocs.check_file_exists(downloaded_url, docformat):
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
else:
write_message("WARNING: %s is already attached to bibdoc %s for recid %s" % (url, docname, rec_id), stream=sys.stderr)
else:
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
else:
downloaded_urls.append(('', docformat, description, comment, flags, timestamp))
if mode == 'correct' and bibdoc is not None and not new_revision_needed:
## Since we don't need a new revision (because none of the files
## being uploaded differs from the stored ones)
## we can simply remove the urls but keep the other information
write_message("No need to add a new revision for docname %s for recid %s" % (docname, rec_id), verbose=2)
docs[docname] = (doctype, newname, restriction, version, [('', docformat, description, comment, flags, timestamp) for (dummy, docformat, description, comment, flags, timestamp) in downloaded_urls], more_infos, bibdoc_tmpid, bibdoc_tmpver)
for downloaded_url, dummy, dummy, dummy, dummy, dummy in downloaded_urls:
## Let's free up some space :-)
if downloaded_url and os.path.exists(downloaded_url):
os.remove(downloaded_url)
else:
if downloaded_urls or mode != 'append':
docs[docname] = (doctype, newname, restriction, version, downloaded_urls, more_infos, bibdoc_tmpid, bibdoc_tmpver)
else:
## In case we are in append mode and there are no urls to append
## we discard the whole FFT
del docs[docname]
if mode == 'replace': # First we erase previous bibdocs
if not pretend:
for bibdoc in bibrecdocs.list_bibdocs():
bibdoc.delete()
bibrecdocs.build_bibdoc_list()
for docname, (doctype, newname, restriction, version, urls, more_infos, bibdoc_tmpid, bibdoc_tmpver) in docs.iteritems():
write_message("Elaborating olddocname: '%s', newdocname: '%s', doctype: '%s', restriction: '%s', urls: '%s', mode: '%s'" % (docname, newname, doctype, restriction, urls, mode), verbose=9)
if mode in ('insert', 'replace'): # new bibdocs, new docnames, new marc
if newname in bibrecdocs.get_bibdoc_names():
write_message("('%s', '%s') not inserted because docname already exists." % (newname, urls), stream=sys.stderr)
raise StandardError("('%s', '%s') not inserted because docname already exists." % (newname, urls))
try:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
else:
bibdoc = None
except Exception, e:
write_message("('%s', '%s', '%s') not inserted because: '%s'." % (doctype, newname, urls, e), stream=sys.stderr)
raise e
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
elif mode == 'replace_or_insert': # to be thought as correct_or_insert
for bibdoc in bibrecdocs.list_bibdocs():
brd = BibRecDocs(rec_id)
dn = brd.get_docname(bibdoc.id)
if dn == docname:
if doctype not in ('PURGE', 'DELETE', 'EXPUNGE', 'REVERT', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE'):
if newname != docname:
try:
if not pretend:
bibrecdocs.change_name(newname = newname, docid = bibdoc.id)
## Let's refresh the list of bibdocs.
bibrecdocs.build_bibdoc_list()
except StandardError, e:
write_message(e, stream=sys.stderr)
raise
found_bibdoc = False
for bibdoc in bibrecdocs.list_bibdocs():
brd = BibRecDocs(rec_id)
dn = brd.get_docname(bibdoc.id)
if dn == newname:
found_bibdoc = True
if doctype == 'PURGE':
if not pretend:
bibdoc.purge()
elif doctype == 'DELETE':
if not pretend:
bibdoc.delete()
elif doctype == 'EXPUNGE':
if not pretend:
bibdoc.expunge()
elif doctype == 'FIX-ALL':
if not pretend:
bibrecdocs.fix(docname)
elif doctype == 'FIX-MARC':
pass
elif doctype == 'DELETE-FILE':
if urls:
for (url, docformat, description, comment, flags, timestamp) in urls:
if not pretend:
bibdoc.delete_file(docformat, version)
elif doctype == 'REVERT':
try:
if not pretend:
bibdoc.revert(version)
except Exception, e:
write_message('(%s, %s) not correctly reverted: %s' % (newname, version, e), stream=sys.stderr)
raise
else:
if restriction != KEEP_OLD_VALUE:
if not pretend:
bibdoc.set_status(restriction)
# Since the docname already existed we have to first
# bump the version by pushing the first new file
# then pushing the other files.
if urls:
(first_url, first_format, first_description, first_comment, first_flags, first_timestamp) = urls[0]
other_urls = urls[1:]
assert(_add_new_version(bibdoc, first_url, first_format, docname, doctype, newname, first_description, first_comment, first_flags, first_timestamp, pretend=pretend))
for (url, docformat, description, comment, flags, timestamp) in other_urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
## Let's refresh the list of bibdocs.
bibrecdocs.build_bibdoc_list()
if not found_bibdoc:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
elif mode == 'correct':
for bibdoc in bibrecdocs.list_bibdocs():
brd = BibRecDocs(rec_id)
dn = brd.get_docname(bibdoc.id)
if dn == docname:
if doctype not in ('PURGE', 'DELETE', 'EXPUNGE', 'REVERT', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE'):
if newname != docname:
try:
if not pretend:
bibrecdocs.change_name(docid = bibdoc.id, newname=newname)
## Let's refresh the list of bibdocs.
bibrecdocs.build_bibdoc_list()
except StandardError, e:
write_message('Error in renaming %s to %s: %s' % (docname, newname, e), stream=sys.stderr)
raise
found_bibdoc = False
for bibdoc in bibrecdocs.list_bibdocs():
brd = BibRecDocs(rec_id)
dn = brd.get_docname(bibdoc.id)
if dn == newname:
found_bibdoc = True
if doctype == 'PURGE':
if not pretend:
bibdoc.purge()
elif doctype == 'DELETE':
if not pretend:
bibdoc.delete()
elif doctype == 'EXPUNGE':
if not pretend:
bibdoc.expunge()
elif doctype == 'FIX-ALL':
if not pretend:
bibrecdocs.fix(newname)
elif doctype == 'FIX-MARC':
pass
elif doctype == 'DELETE-FILE':
if urls:
for (url, docformat, description, comment, flags, timestamp) in urls:
if not pretend:
bibdoc.delete_file(docformat, version)
elif doctype == 'REVERT':
try:
if not pretend:
bibdoc.revert(version)
except Exception, e:
write_message('(%s, %s) not correctly reverted: %s' % (newname, version, e), stream=sys.stderr)
raise
else:
if restriction != KEEP_OLD_VALUE:
if not pretend:
bibdoc.set_status(restriction)
if doctype and doctype != KEEP_OLD_VALUE:
if not pretend:
bibdoc.change_doctype(doctype)
if urls:
(first_url, first_format, first_description, first_comment, first_flags, first_timestamp) = urls[0]
other_urls = urls[1:]
assert(_add_new_version(bibdoc, first_url, first_format, docname, doctype, newname, first_description, first_comment, first_flags, first_timestamp, pretend=pretend))
for (url, docformat, description, comment, flags, timestamp) in other_urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
## Let's refresh the list of bibdocs.
bibrecdocs.build_bibdoc_list()
if not found_bibdoc:
if doctype in ('PURGE', 'DELETE', 'EXPUNGE', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE', 'REVERT'):
write_message("('%s', '%s', '%s') not performed because the '%s' docname did not exist." % (doctype, newname, urls, docname), stream=sys.stderr)
raise StandardError("('%s', '%s', '%s') not performed because the '%s' docname did not exist." % (doctype, newname, urls, docname))
else:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
elif mode == 'append':
try:
found_bibdoc = False
for bibdoc in bibrecdocs.list_bibdocs():
brd = BibRecDocs(rec_id)
dn = brd.get_docname(bibdoc.id)
if dn == docname:
found_bibdoc = True
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
if not found_bibdoc:
try:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, docname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
except Exception, e:
register_exception()
write_message("('%s', '%s', '%s') not appended because: '%s'." % (doctype, newname, urls, e), stream=sys.stderr)
raise
except:
register_exception()
raise
if not pretend:
_process_document_moreinfos(more_infos, newname, version, urls and urls[0][1], mode)
# resolving temporary version and identifier
brd = BibRecDocs(rec_id)
if bibdoc_tmpid:
if bibdoc_tmpid in tmp_ids and tmp_ids[bibdoc_tmpid] != -1:
write_message("WARNING: the temporary identifier %s has been declared more than once. Ignoring the second occurrence" % (bibdoc_tmpid, ))
else:
tmp_ids[bibdoc_tmpid] = brd.get_docid(docname)
if bibdoc_tmpver:
if bibdoc_tmpver in tmp_vers and tmp_vers[bibdoc_tmpver] != -1:
write_message("WARNING: the temporary version identifier %s has been declared more than once. Ignoring the second occurrence" % (bibdoc_tmpver, ))
else:
if version is None:
tmp_vers[bibdoc_tmpver] = brd.get_bibdoc(docname).get_latest_version()
else:
tmp_vers[bibdoc_tmpver] = version
return record
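# Worked example (illustrative values): an FFT field such as
#
#   <datafield tag="FFT" ind1=" " ind2=" ">
#     <subfield code="a">http://example.org/file.pdf</subfield>
#     <subfield code="n">fulltext</subfield>
#     <subfield code="t">Main</subfield>
#   </datafield>
#
# downloads file.pdf and attaches it to the record as a bibdoc named
# 'fulltext' of doctype 'Main', with the format guessed from the URL.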
### Update functions
def update_bibrec_date(now, bibrec_id, insert_mode_p, pretend=False):
"""Update the date of the record in bibrec table """
if insert_mode_p:
query = """UPDATE bibrec SET creation_date=%s, modification_date=%s WHERE id=%s"""
params = (now, now, bibrec_id)
else:
query = """UPDATE bibrec SET modification_date=%s WHERE id=%s"""
params = (now, bibrec_id)
if not pretend:
run_sql(query, params)
write_message(" -Update record creation/modification date: DONE" , verbose=2)
def update_bibfmt_format(id_bibrec, format_value, format_name, modification_date=None, pretend=False):
"""Update the format in the table bibfmt"""
if modification_date is None:
modification_date = time.strftime('%Y-%m-%d %H:%M:%S')
else:
try:
time.strptime(modification_date, "%Y-%m-%d %H:%M:%S")
except ValueError:
modification_date = '1970-01-01 00:00:00'
# We check if the format is already in bibFmt
nb_found = find_record_format(id_bibrec, format_name)
if nb_found == 1:
# we are going to update the format
# compress the format_value
pickled_format_value = compress(format_value)
# update the format:
query = """UPDATE LOW_PRIORITY bibfmt SET last_updated=%s, value=%s WHERE id_bibrec=%s AND format=%s"""
params = (modification_date, pickled_format_value, id_bibrec, format_name)
if not pretend:
row_id = run_sql(query, params)
if not pretend and row_id is None:
write_message(" Failed: Error during update_bibfmt_format function", verbose=1, stream=sys.stderr)
return 1
else:
write_message(" -Update the format %s in bibfmt: DONE" % format_name , verbose=2)
return 0
elif nb_found > 1:
write_message(" Failed: Same format %s found several time in bibfmt for the same record." % format_name, verbose=1, stream=sys.stderr)
return 1
else:
# Insert the format information in BibFMT
res = insert_bibfmt(id_bibrec, format_value, format_name, modification_date, pretend=pretend)
if res is None:
write_message(" Failed: Error during insert_bibfmt", verbose=1, stream=sys.stderr)
return 1
else:
write_message(" -Insert the format %s in bibfmt: DONE" % format_name , verbose=2)
return 0
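# Usage sketch (illustrative values; marcxml_string stands for the
# serialised record): refreshing the stored MARCXML of record 10 either
# revises the existing bibfmt row or inserts a new one, depending on what
# find_record_format() reports:
#
#   update_bibfmt_format(10, marcxml_string, 'xm',
#                        modification_date='2013-01-01 12:00:00')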
def delete_bibfmt_format(id_bibrec, format_name, pretend=False):
"""
Delete format FORMAT_NAME from the bibfmt table for record ID_BIBREC.
"""
if not pretend:
run_sql("DELETE LOW_PRIORITY FROM bibfmt WHERE id_bibrec=%s and format=%s", (id_bibrec, format_name))
return 0
def archive_marcxml_for_history(recID, pretend=False):
"""
Archive current MARCXML format of record RECID from BIBFMT table
into hstRECORD table. Useful to keep MARCXML history of records.
Return 0 if everything went fine. Return 1 otherwise.
"""
res = run_sql("SELECT id_bibrec, value, last_updated FROM bibfmt WHERE format='xm' AND id_bibrec=%s",
(recID,))
if res and not pretend:
run_sql("""INSERT INTO hstRECORD (id_bibrec, marcxml, job_id, job_name, job_person, job_date, job_details)
VALUES (%s,%s,%s,%s,%s,%s,%s)""",
(res[0][0], res[0][1], task_get_task_param('task_id', 0), 'bibupload', task_get_task_param('user', 'UNKNOWN'), res[0][2],
'mode: ' + task_get_option('mode', 'UNKNOWN') + '; file: ' + task_get_option('file_path', 'UNKNOWN') + '.'))
return 0
def update_database_with_metadata(record, rec_id, oai_rec_id="oai", affected_tags=None, pretend=False):
"""Update the database tables with the record and the record id given in parameter"""
# extract only those tags that have been affected.
# check happens at subfield level. This is to prevent overhead
# associated with inserting already existing field with given ind pair
write_message("update_database_with_metadata: record=%s, rec_id=%s, oai_rec_id=%s, affected_tags=%s" % (record, rec_id, oai_rec_id, affected_tags), verbose=9)
tmp_record = {}
if affected_tags:
for tag in record.keys():
if tag in affected_tags.keys():
write_message(" -Tag %s found to be modified. Setting up for update" % tag, verbose=9)
# initialize new list to hold affected field
new_data_tuple_list = []
for data_tuple in record[tag]:
ind1 = data_tuple[1]
ind2 = data_tuple[2]
if (ind1, ind2) in affected_tags[tag]:
write_message(" -Indicator pair (%s, %s) added to update list" % (ind1, ind2), verbose=9)
new_data_tuple_list.append(data_tuple)
tmp_record[tag] = new_data_tuple_list
write_message(lambda: " -Modified fields: \n%s" % record_xml_output(tmp_record), verbose=2)
else:
tmp_record = record
for tag in tmp_record.keys():
# check if tag is not a special one:
if tag not in CFG_BIBUPLOAD_SPECIAL_TAGS:
# for each tag there is a list of tuples representing datafields
tuple_list = tmp_record[tag]
# this list should contain the elements of a full tag [tag, ind1, ind2, subfield_code]
tag_list = []
tag_list.append(tag)
for single_tuple in tuple_list:
# these are the contents of a single tuple
subfield_list = single_tuple[0]
ind1 = single_tuple[1]
ind2 = single_tuple[2]
# append the ind's to the full tag
if ind1 == '' or ind1 == ' ':
tag_list.append('_')
else:
tag_list.append(ind1)
if ind2 == '' or ind2 == ' ':
tag_list.append('_')
else:
tag_list.append(ind2)
datafield_number = single_tuple[4]
if tag in CFG_BIBUPLOAD_SPECIAL_TAGS:
# nothing to do for special tags (FFT, BDR, BDM)
pass
elif tag in CFG_BIBUPLOAD_CONTROLFIELD_TAGS and tag != "001":
value = single_tuple[3]
# get the full tag
full_tag = ''.join(tag_list)
# update the tables
write_message(" insertion of the tag "+full_tag+" with the value "+value, verbose=9)
# insert the tag and value into into bibxxx
(table_name, bibxxx_row_id) = insert_record_bibxxx(full_tag, value, pretend=pretend)
if table_name is None or bibxxx_row_id is None:
write_message(" Failed: during insert_record_bibxxx", verbose=1, stream=sys.stderr)
# connect bibxxx and bibrec with the table bibrec_bibxxx
res = insert_record_bibrec_bibxxx(table_name, bibxxx_row_id, datafield_number, rec_id, pretend=pretend)
if res is None:
write_message(" Failed: during insert_record_bibrec_bibxxx", verbose=1, stream=sys.stderr)
else:
# get the tag and value from the content of each subfield
for subfield in subfield_list:
subtag = subfield[0]
value = subfield[1]
tag_list.append(subtag)
# get the full tag
full_tag = ''.join(tag_list)
# update the tables
write_message(" insertion of the tag "+full_tag+" with the value "+value, verbose=9)
# insert the tag and value into into bibxxx
(table_name, bibxxx_row_id) = insert_record_bibxxx(full_tag, value, pretend=pretend)
if table_name is None or bibxxx_row_id is None:
write_message(" Failed: during insert_record_bibxxx", verbose=1, stream=sys.stderr)
# connect bibxxx and bibrec with the table bibrec_bibxxx
res = insert_record_bibrec_bibxxx(table_name, bibxxx_row_id, datafield_number, rec_id, pretend=pretend)
if res is None:
write_message(" Failed: during insert_record_bibrec_bibxxx", verbose=1, stream=sys.stderr)
# remove the subtag from the list
tag_list.pop()
tag_list.pop()
tag_list.pop()
tag_list.pop()
write_message(" -Update the database with metadata: DONE", verbose=2)
log_record_uploading(oai_rec_id, task_get_task_param('task_id', 0), rec_id, 'P', pretend=pretend)
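# Worked example (illustrative): a datafield '100' with blank indicators and
# a subfield $a 'Doe, John' yields the full tag '100__a'; the pair
# ('100__a', 'Doe, John') is stored in the bib10x table and linked to the
# record through bibrec_bib10x.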
def append_new_tag_to_old_record(record, rec_old):
"""Append new tags to a old record"""
def _append_tag(tag):
if tag in CFG_BIBUPLOAD_CONTROLFIELD_TAGS:
if tag == '001':
pass
else:
# if it is a controlfield, just access the value
for single_tuple in record[tag]:
controlfield_value = single_tuple[3]
# add the field to the old record
newfield_number = record_add_field(rec_old, tag,
controlfield_value=controlfield_value)
if newfield_number is None:
write_message(" Error when adding the field"+tag, verbose=1, stream=sys.stderr)
else:
# For each tag there is a list of tuples representing datafields
for single_tuple in record[tag]:
# We retrieve the information of the tag
subfield_list = single_tuple[0]
ind1 = single_tuple[1]
ind2 = single_tuple[2]
if '%s%s%s' % (tag, ind1 == ' ' and '_' or ind1, ind2 == ' ' and '_' or ind2) in (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5], CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[:5]):
## We don't want to append the external identifier
## if it is already existing.
if record_find_field(rec_old, tag, single_tuple)[0] is not None:
write_message(" Not adding tag: %s ind1=%s ind2=%s subfields=%s: it's already there" % (tag, ind1, ind2, subfield_list), verbose=9)
continue
# We add the datafield to the old record
write_message(" Adding tag: %s ind1=%s ind2=%s subfields=%s" % (tag, ind1, ind2, subfield_list), verbose=9)
newfield_number = record_add_field(rec_old, tag, ind1,
ind2, subfields=subfield_list)
if newfield_number is None:
write_message(" Error when adding the field"+tag, verbose=1, stream=sys.stderr)
# Go through each tag in the appended record
for tag in record:
_append_tag(tag)
return rec_old
def copy_strong_tags_from_old_record(record, rec_old):
"""
Look for strong tags in RECORD and REC_OLD. If no strong tags are
found in RECORD, then copy them over from REC_OLD. This function
modifies RECORD structure on the spot.
"""
for strong_tag in CFG_BIBUPLOAD_STRONG_TAGS:
if not record_get_field_instances(record, strong_tag, strong_tag[3:4] or '%', strong_tag[4:5] or '%'):
strong_tag_old_field_instances = record_get_field_instances(rec_old, strong_tag)
if strong_tag_old_field_instances:
for strong_tag_old_field_instance in strong_tag_old_field_instances:
sf_vals, fi_ind1, fi_ind2, controlfield, dummy = strong_tag_old_field_instance
record_add_field(record, strong_tag, fi_ind1, fi_ind2, controlfield, sf_vals)
return
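# Illustrative configuration: with CFG_BIBUPLOAD_STRONG_TAGS = ('100',), a
# replace upload that carries no 100 field keeps the 100 field already
# stored in the old record.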
### Delete functions
def delete_tags(record, rec_old):
"""
Returns a record structure with all the fields in rec_old minus the
fields in record.
@param record: The record containing tags to delete.
@type record: record structure
@param rec_old: The original record.
@type rec_old: record structure
@return: The modified record.
@rtype: record structure
"""
returned_record = copy.deepcopy(rec_old)
for tag, fields in record.iteritems():
if tag in ('001', ):
continue
for field in fields:
local_position = record_find_field(returned_record, tag, field)[1]
if local_position is not None:
record_delete_field(returned_record, tag, field_position_local=local_position)
return returned_record
def delete_tags_to_correct(record, rec_old):
"""
Delete tags from REC_OLD which are also existing in RECORD. When
deleting, pay attention not only to tags, but also to indicators,
so that fields with the same tags but different indicators are not
deleted.
"""
## Some fields are controlled via provenance information.
## We should re-add saved fields at the end.
fields_to_readd = {}
for tag in CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS:
if tag[:3] in record:
tmp_field_instances = record_get_field_instances(record, tag[:3], tag[3], tag[4]) ## Let's discover the provenance that will be updated
provenances_to_update = []
for instance in tmp_field_instances:
for code, value in instance[0]:
if code == tag[5]:
if value not in provenances_to_update:
provenances_to_update.append(value)
break
else:
## The provenance is not specified.
## let's add the special empty provenance.
if '' not in provenances_to_update:
provenances_to_update.append('')
potential_fields_to_readd = record_get_field_instances(rec_old, tag[:3], tag[3], tag[4]) ## Let's take all the fields corresponding to tag
## Let's save apart all the fields that should be updated, but
## since they have a different provenance not mentioned in record
## they should be preserved.
fields = []
for sf_vals, ind1, ind2, dummy_cf, dummy_line in potential_fields_to_readd:
for code, value in sf_vals:
if code == tag[5]:
if value not in provenances_to_update:
fields.append(sf_vals)
break
else:
if '' not in provenances_to_update:
## Empty provenance, let's protect in any case
fields.append(sf_vals)
fields_to_readd[tag] = fields
# browse through all the tags from the MARCXML file:
for tag in record:
# check if the tag exists in the old record too:
if tag in rec_old and tag != '001':
# the tag does exist, so delete all record's tag+ind1+ind2 combinations from rec_old
for dummy_sf_vals, ind1, ind2, dummy_cf, dummyfield_number in record[tag]:
write_message(" Delete tag: " + tag + " ind1=" + ind1 + " ind2=" + ind2, verbose=9)
record_delete_field(rec_old, tag, ind1, ind2)
## Ok, we readd necessary fields!
for tag, fields in fields_to_readd.iteritems():
for sf_vals in fields:
write_message(" Adding tag: " + tag[:3] + " ind1=" + tag[3] + " ind2=" + tag[4] + " code=" + str(sf_vals), verbose=9)
record_add_field(rec_old, tag[:3], tag[3], tag[4], subfields=sf_vals)
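# Worked example (illustrative): if CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS
# contains '6531_9' (tag 653, ind1 '1', ind2 blank, provenance subfield $9)
# and the incoming 653 fields all carry $9 'HEP', the stored 653 1_ fields
# with provenance 'HEP' are deleted and replaced, while fields with another
# provenance, say $9 'CDS', are saved apart and re-added at the end.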
def delete_bibrec_bibxxx(record, id_bibrec, affected_tags={}, pretend=False):
"""Delete the database record from the table bibxxx given in parameters"""
# we clear all the rows from bibrec_bibxxx from the old record
# clearing only those tags that have been modified.
write_message(lambda: "delete_bibrec_bibxxx(record=%s, id_bibrec=%s, affected_tags=%s)" % (record, id_bibrec, affected_tags), verbose=9)
for tag in affected_tags:
# sanity check with record keys just to make sure it is fine.
if tag not in CFG_BIBUPLOAD_SPECIAL_TAGS:
write_message("%s found in record"%tag, verbose=2)
# for each name construct the bibrec_bibxxx table name
table_name = 'bib'+tag[0:2]+'x'
bibrec_table = 'bibrec_'+table_name
# delete all the records with proper id_bibrec. Indicators matter for individual affected tags
tmp_ind_1 = ''
tmp_ind_2 = ''
# construct exact tag value using indicators
for ind_pair in affected_tags[tag]:
if ind_pair[0] == ' ':
tmp_ind_1 = '_'
else:
tmp_ind_1 = ind_pair[0]
if ind_pair[1] == ' ':
tmp_ind_2 = '_'
else:
tmp_ind_2 = ind_pair[1]
# need to escape the underscore so that MySQL treats it as a literal character
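# e.g. tag '100' with indicator pair ('1', ' ') gives the pattern '100\1\_%',
# where the escaped '_' matches a literal underscore in b.tag instead of
# acting as the single-character LIKE wildcard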
tag_val = tag+"\\"+tmp_ind_1+"\\"+tmp_ind_2 + '%'
query = """DELETE br.* FROM `%s` br,`%s` b where br.id_bibrec=%%s and br.id_bibxxx=b.id and b.tag like %%s""" % (bibrec_table, table_name)
params = (id_bibrec, tag_val)
write_message(query % params, verbose=9)
if not pretend:
run_sql(query, params)
else:
write_message("%s not found"%tag, verbose=2)
def main():
"""Main function that constructs the bibtask."""
task_init(authorization_action='runbibupload',
authorization_msg="BibUpload Task Submission",
description="""Receive MARC XML file and update appropriate database
tables according to options.
Examples:
$ bibupload -i input.xml
""",
help_specific_usage=""" -a, --append\t\tnew fields are appended to the existing record
-c, --correct\t\tfields are replaced by the new ones in the existing record, except
\t\t\twhen overridden by CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS
-i, --insert\t\tinsert the new record in the database
-r, --replace\t\tthe existing record is entirely replaced by the new one,
\t\t\texcept for fields in CFG_BIBUPLOAD_STRONG_TAGS
-d, --delete\t\tspecified fields are deleted in existing record
-n, --notimechange\tdo not change record last modification date when updating
-o, --holdingpen\tInsert record into holding pen instead of the normal database
--pretend\t\tdo not really insert/append/correct/replace the input file
--force\t\twhen --replace, use provided 001 tag values, even if the matching
\t\t\trecord does not exist (thus allocating it on-the-fly)
--callback-url\tSend via a POST request a JSON-serialized answer (see admin guide), in
\t\t\torder to provide a feedback to an external service about the outcome of the operation.
--nonce\t\twhen used together with --callback-url, add the nonce value to the JSON message.
--special-treatment=MODE\tif "oracle" is specified, when used together with --callback-url,
\t\t\tPOST an application/x-www-form-urlencoded request where the JSON message is encoded
\t\t\tinside a form field called "results".
""",
version=__revision__,
specific_params=("ircazdnoS:",
[
"insert",
"replace",
"correct",
"append",
"reference",
"delete",
"notimechange",
"holdingpen",
"pretend",
"force",
"callback-url=",
"nonce=",
"special-treatment=",
"stage=",
]),
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_run_fnc=task_run_core)
def task_submit_elaborate_specific_parameter(key, value, opts, args): # pylint: disable=W0613
""" Given the string key it checks its meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
task_set_option('number', value)
return True
return False
"""
# No time change option
if key in ("-n", "--notimechange"):
task_set_option('notimechange', 1)
# Insert mode option
elif key in ("-i", "--insert"):
if task_get_option('mode') == 'replace':
# if also replace found, then set to replace_or_insert
task_set_option('mode', 'replace_or_insert')
else:
task_set_option('mode', 'insert')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Replace mode option
elif key in ("-r", "--replace"):
if task_get_option('mode') == 'insert':
# if also insert found, then set to replace_or_insert
task_set_option('mode', 'replace_or_insert')
else:
task_set_option('mode', 'replace')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Holding pen mode option
elif key in ("-o", "--holdingpen"):
write_message("Holding pen mode", verbose=3)
task_set_option('mode', 'holdingpen')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Correct mode option
elif key in ("-c", "--correct"):
task_set_option('mode', 'correct')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Append mode option
elif key in ("-a", "--append"):
task_set_option('mode', 'append')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Deprecated reference mode option (now correct)
elif key in ("-z", "--reference"):
task_set_option('mode', 'correct')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("-d", "--delete"):
task_set_option('mode', 'delete')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--pretend",):
task_set_option('pretend', True)
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--force",):
task_set_option('force', True)
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--callback-url", ):
task_set_option('callback_url', value)
elif key in ("--nonce", ):
task_set_option('nonce', value)
elif key in ("--special-treatment", ):
if value.lower() in CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS:
if value.lower() == 'oracle':
task_set_option('oracle_friendly', True)
else:
print >> sys.stderr, """The specified value is not in the list of allowed special treatments codes: %s""" % CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS
return False
elif key in ("-S", "--stage"):
print >> sys.stderr, """WARNING: the --stage parameter is deprecated and ignored."""
else:
return False
return True
def task_submit_check_options():
""" Reimplement this method to check the options before submitting the
task, for example in order to provide default values. It must return
False if there are errors in the options.
"""
if task_get_option('mode') is None:
write_message("Please specify at least one update/insert mode!")
return False
if task_get_option('file_path') is None:
write_message("Missing filename! -h for help.")
return False
return True
def writing_rights_p():
"""Return True in case bibupload has the proper rights to write in the
fulltext file folder."""
if _WRITING_RIGHTS is not None:
return _WRITING_RIGHTS
try:
if not os.path.exists(CFG_BIBDOCFILE_FILEDIR):
os.makedirs(CFG_BIBDOCFILE_FILEDIR)
fd, filename = tempfile.mkstemp(suffix='.txt', prefix='test', dir=CFG_BIBDOCFILE_FILEDIR)
test = os.fdopen(fd, 'w')
test.write('TEST')
test.close()
if open(filename).read() != 'TEST':
raise IOError("Cannot successfully write and read back %s" % filename)
os.remove(filename)
except:
register_exception(alert_admin=True)
return False
return True
def post_results_to_callback_url(results, callback_url):
write_message("Sending feedback to %s" % callback_url)
if not CFG_JSON_AVAILABLE:
from warnings import warn
warn("--callback-url used but simplejson/json not available")
return
json_results = json.dumps(results)
write_message("Message to send: %s" % json_results, verbose=9)
## <scheme>://<netloc>/<path>?<query>#<fragment>
scheme, dummynetloc, dummypath, dummyquery, dummyfragment = urlparse.urlsplit(callback_url)
## See: http://stackoverflow.com/questions/111945/is-there-any-way-to-do-http-put-in-python
if scheme == 'http':
opener = urllib2.build_opener(urllib2.HTTPHandler)
elif scheme == 'https':
opener = urllib2.build_opener(urllib2.HTTPSHandler)
else:
raise ValueError("Scheme not handled %s for callback_url %s" % (scheme, callback_url))
if task_get_option('oracle_friendly'):
write_message("Oracle friendly mode requested", verbose=9)
request = urllib2.Request(callback_url, data=urllib.urlencode({'results': json_results}))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
else:
request = urllib2.Request(callback_url, data=json_results)
request.add_header('Content-Type', 'application/json')
request.add_header('User-Agent', make_user_agent_string('BibUpload'))
write_message("Headers about to be sent: %s" % request.headers, verbose=9)
write_message("Data about to be sent: %s" % request.data, verbose=9)
res = opener.open(request)
msg = res.read()
write_message("Result of posting the feedback: %s %s" % (res.code, res.msg), verbose=9)
write_message("Returned message is: %s" % msg, verbose=9)
return res
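# Illustrative payload (shape as assembled in bibupload_records(); the
# values are made up):
#
#   {"nonce": "1234",
#    "results": [{"recid": 10, "success": true,
#                 "marcxml": "<record>...</record>",
#                 "url": "http://example.org/record/10"}]}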
def bibupload_records(records, opt_mode=None, opt_notimechange=0,
pretend=False, callback_url=None, results_for_callback=None):
"""Perform the task of uploading a set of records.
Returns a list of (error_code, recid, error_message) tuples, one per record.
"""
#Dictionaries maintaining temporary identifiers
# Structure: identifier -> number
tmp_ids = {}
tmp_vers = {}
results = []
# The first phase -> assigning meaning to temporary identifiers
if opt_mode == 'reference':
## NOTE: reference mode has been deprecated in favour of 'correct'
opt_mode = 'correct'
record = None
for record in records:
record_id = record_extract_oai_id(record)
task_sleep_now_if_required(can_stop_too=True)
if opt_mode == "holdingpen":
#inserting into the holding pen
write_message("Inserting into holding pen", verbose=3)
insert_record_into_holding_pen(record, record_id)
else:
write_message("Inserting into main database", verbose=3)
error = bibupload(
record,
opt_mode = opt_mode,
opt_notimechange = opt_notimechange,
oai_rec_id = record_id,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers)
results.append(error)
if error[0] == 1:
if record:
write_message(lambda: record_xml_output(record),
stream=sys.stderr)
else:
write_message("Record could not be parsed",
stream=sys.stderr)
stat['nb_errors'] += 1
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
elif error[0] == 2:
if record:
write_message(lambda: record_xml_output(record),
stream=sys.stderr)
else:
write_message("Record could not be parsed",
stream=sys.stderr)
stat['nb_holdingpen'] += 1
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
elif error[0] == 0:
if callback_url:
from invenio.search_engine import print_record
results_for_callback['results'].append({'recid': error[1], 'success': True, "marcxml": print_record(error[1], 'xm'), 'url': "%s/%s/%s" % (CFG_SITE_URL, CFG_SITE_RECORD, error[1])})
else:
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
# stat is a global variable
task_update_progress("Done %d out of %d." % \
(stat['nb_records_inserted'] + \
stat['nb_records_updated'],
stat['nb_records_to_upload']))
# Second phase -> Now we can process all entries where temporary identifiers might appear (BDR, BDM)
write_message("Identifiers table after processing: %s versions: %s" % (str(tmp_ids), str(tmp_vers)))
write_message("Uploading BDR and BDM fields")
if opt_mode != "holdingpen":
for record in records:
record_id = retrieve_rec_id(record, opt_mode, pretend=pretend, post_phase = True)
bibupload_post_phase(record,
rec_id = record_id,
mode = opt_mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers)
return results
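# Illustrative: each element of the returned list is the tuple produced by
# bibupload() for one record; as handled above, error code 0 means success,
# 1 an error and 2 that the record went to the holding pen.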
def task_run_core():
""" Reimplement to add the body of the task."""
write_message("Input file '%s', input mode '%s'." %
(task_get_option('file_path'), task_get_option('mode')))
write_message("STAGE 0:", verbose=2)
if task_get_option('file_path') is not None:
write_message("start processing", verbose=3)
task_update_progress("Reading XML input")
recs = xml_marc_to_records(open_marc_file(task_get_option('file_path')))
stat['nb_records_to_upload'] = len(recs)
write_message(" -Open XML marc: DONE", verbose=2)
task_sleep_now_if_required(can_stop_too=True)
write_message("Entering records loop", verbose=3)
callback_url = task_get_option('callback_url')
results_for_callback = {'results': []}
if recs is not None:
# We process the records one by one
bibupload_records(records=recs, opt_mode=task_get_option('mode'),
opt_notimechange=task_get_option('notimechange'),
pretend=task_get_option('pretend'),
callback_url=callback_url,
results_for_callback=results_for_callback)
else:
write_message(" Error bibupload failed: No record found",
verbose=1, stream=sys.stderr)
callback_url = task_get_option("callback_url")
if callback_url:
nonce = task_get_option("nonce")
if nonce:
results_for_callback["nonce"] = nonce
post_results_to_callback_url(results_for_callback, callback_url)
if task_get_task_param('verbose') >= 1:
# Print out the statistics
print_out_bibupload_statistics()
# Check if there were errors
return stat['nb_errors'] == 0
def log_record_uploading(oai_rec_id, task_id, bibrec_id, insertion_db, pretend=False):
if oai_rec_id != "" and oai_rec_id is not None:
query = """UPDATE oaiHARVESTLOG SET date_inserted=NOW(), inserted_to_db=%s, id_bibrec=%s WHERE oai_id = %s AND bibupload_task_id = %s ORDER BY date_harvested LIMIT 1"""
if not pretend:
run_sql(query, (str(insertion_db), str(bibrec_id), str(oai_rec_id), str(task_id), ))
if __name__ == "__main__":
main()
| gpl-2.0 | 413,680,829,889,584,100 | 48.092281 | 287 | 0.555743 | false |
kivy/plyer | plyer/facades/wifi.py | 1 | 4169 | '''
Wifi Facade.
=============
The :class:`Wifi` is to provide access to the wifi of your mobile/ desktop
devices.
It currently supports `connecting`, `disconnecting`, `scanning`, `getting
available wifi network list` and `getting network information`.
Simple examples
---------------
To enable/ turn on wifi scanning::
>>> from plyer import wifi
>>> wifi.start_scanning()
Once the wifi is enabled/turned on, this command starts scanning
for all the nearby available wifi networks.
To get network info::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> wifi.get_network_info(name)
Returns network details of the network whose name/ssid is provided in the
`name` parameter.
To connect to a network::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> wifi.connect(network, parameters)
This connects to the network whose name/ssid is provided under the `network`
parameter, along with the other parameters necessary for the connection,
which depend upon the platform.
Please visit the following files for more details about the requirements
of the `parameters` argument in the `connect` method:
plyer/platforms/win/wifi.py
plyer/platforms/macosx/wifi.py
plyer/platforms/linux/wifi.py
To disconnect from wifi::
>>> from plyer import wifi
>>> wifi.disconnect()
This disconnects your device from any wifi network.
To get available wifi networks::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> wifi.get_available_wifi()
This returns all the available wifi networks near the device.
Supported Platforms
-------------------
Windows, OS X, Linux
Ex: 6
----------
from plyer import wifi
wifi.enable()
This enables the wifi device.
Ex: 7
----------
from plyer import wifi
wifi.disable()
This disables the wifi device.
'''
class Wifi:
'''
Wifi Facade.
'''
def is_enabled(self):
'''
Return enabled status of WiFi hardware.
'''
return self._is_enabled()
def is_connected(self, interface=None):
'''
Return connection state of WiFi interface.
.. versionadded:: 1.4.0
'''
return self._is_connected(interface=interface)
@property
def interfaces(self):
'''
List all available WiFi interfaces.
.. versionadded:: 1.4.0
'''
raise NotImplementedError()
def start_scanning(self, interface=None):
'''
Turn on scanning.
'''
return self._start_scanning(interface=interface)
def get_network_info(self, name):
'''
Return a dictionary of specified network.
'''
return self._get_network_info(name=name)
def get_available_wifi(self):
'''
Returns a list of all the available wifi.
'''
return self._get_available_wifi()
def connect(self, network, parameters, interface=None):
'''
Method to connect to some network.
'''
self._connect(
network=network,
parameters=parameters,
interface=interface
)
def disconnect(self, interface=None):
'''
To disconnect from some network.
'''
self._disconnect(interface=interface)
def enable(self):
'''
Wifi interface power state is set to "ON".
'''
self._enable()
def disable(self):
'''
Wifi interface power state is set to "OFF".
'''
self._disable()
# private
def _is_enabled(self):
raise NotImplementedError()
def _is_connected(self, interface=None):
raise NotImplementedError()
def _start_scanning(self, interface=None):
raise NotImplementedError()
def _get_network_info(self, **kwargs):
raise NotImplementedError()
def _get_available_wifi(self):
raise NotImplementedError()
def _connect(self, **kwargs):
raise NotImplementedError()
def _disconnect(self, interface=None):
raise NotImplementedError()
def _enable(self):
raise NotImplementedError()
def _disable(self):
raise NotImplementedError()
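# Minimal sketch (hypothetical backend, not an actual plyer platform module):
# a platform implementation subclasses Wifi and fills in the private hooks.
#
#   class MockWifi(Wifi):
#       def _is_enabled(self):
#           return True
#
#       def _get_available_wifi(self):
#           return ['network-a', 'network-b']
#
#   wifi = MockWifi()
#   assert wifi.is_enabled()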
| mit | -8,623,485,429,783,333,000 | 21.294118 | 74 | 0.623411 | false |
jakevdp/lombscargle | lombscargle/implementations/utils.py | 1 | 5934 | from __future__ import print_function, division
import numpy as np
try:
from scipy import special as scipy_special
except ImportError:
scipy_special = None
# Precomputed factorials
FACTORIALS = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800,
39916800, 479001600, 6227020800, 87178291200, 1307674368000]
def factorial(N):
"""Compute the factorial of N.
If N < 16, use a fast lookup table; otherwise use scipy.special.factorial
"""
if N < len(FACTORIALS):
return FACTORIALS[N]
elif scipy_special is None:
raise ValueError("need scipy for computing larger factorials")
else:
return int(scipy_special.factorial(N))
def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
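Examples
--------
A couple of quick sanity checks:
>>> bitceil(7)
8
>>> bitceil(8)
8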
"""
# Note: for Python 2.7 and 3.x, this is faster:
# return 1 << int(N - 1).bit_length()
N = int(N) - 1
for i in [1, 2, 4, 8, 16, 32]:
N |= N >> i
return N + 1
def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
using lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array_like
array of abscissas
y : array_like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Example
-------
>>> rng = np.random.RandomState(0)
>>> x = 100 * rng.rand(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
if not hasattr(np.ufunc, 'at'):
raise NotImplementedError("extirpolate functionality requires numpy "
"version 1.8 or newer")
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
# Now use Lagrange polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = (x % 1 == 0)
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1,
oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums:
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N-1.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array_like
array of input times
h : array_like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float (optional, default=0)
The low frequency to use
freq_factor : float (optional, default=1)
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the tradeoff between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarrays
summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
"""
df *= freq_factor
f0 *= freq_factor
assert df > 0
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
assert(Mfft > 0)
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)
if t0 != 0:
f = f0 + df * np.arange(Nfft)
fftgrid *= np.exp(2j * np.pi * t0 * f)
fftgrid = fftgrid[:N]
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C
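# Usage sketch (illustrative values): the FFT-based path approximates the
# direct O[N^2] sums; with the default ``oversampling`` and ``Mfft`` the
# two typically agree to within a small fraction of the sum magnitudes.
#
#   rng = np.random.RandomState(0)
#   t, h = 100 * rng.rand(50), rng.randn(50)
#   S_fft, C_fft = trig_sum(t, h, df=0.01, N=100, use_fft=True)
#   S_slow, C_slow = trig_sum(t, h, df=0.01, N=100, use_fft=False)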
| bsd-3-clause | 7,082,537,745,301,977,000 | 30.903226 | 79 | 0.582406 | false |
17zuoye/luigi | luigi/contrib/hdfs/snakebite_client.py | 1 | 10933 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A luigi file system client that wraps around snakebite
Originally written by Alan Brenner <[email protected]> github.com/alanbbr
"""
from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import error as hdfs_error
from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients
from luigi import six
import luigi.contrib.target
import logging
import datetime
import os
logger = logging.getLogger('luigi-interface')
class SnakebiteHdfsClient(hdfs_hadoopcli_clients.HdfsClient):
"""
A hdfs client using snakebite. Since Snakebite has a python API, it'll be
about 100 times faster than the hadoop cli client, which does shell out to
a java program on each file system operation.
"""
def __init__(self):
super(SnakebiteHdfsClient, self).__init__()
self._bite = None
self.pid = -1
@staticmethod
def list_path(path):
if isinstance(path, list) or isinstance(path, tuple):
return path
if isinstance(path, (str, unicode)):
return [path, ]
return [str(path), ]
def get_bite(self):
"""
If Luigi has forked, we have a different PID, and need to reconnect.
"""
config = hdfs_config.hdfs()
if self.pid != os.getpid() or not self._bite:
client_kwargs = dict(filter(
lambda k_v: k_v[1] is not None and k_v[1] != '', six.iteritems({
'hadoop_version': config.client_version,
'effective_user': config.effective_user,
})
))
if config.snakebite_autoconfig:
"""
This is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
This client tries to read ``${HADOOP_PATH}/conf/hdfs-site.xml`` to get the address of the namenode.
The behaviour is the same as Client.
"""
from snakebite.client import AutoConfigClient
self._bite = AutoConfigClient(**client_kwargs)
else:
from snakebite.client import Client
self._bite = Client(config.namenode_host, config.namenode_port, **client_kwargs)
return self._bite
def exists(self, path):
"""
Use snakebite.test to check file existence.
:param path: path to test
:type path: string
:return: boolean, True if path exists in HDFS
"""
try:
return self.get_bite().test(path, exists=True)
except Exception as err: # IGNORE:broad-except
raise hdfs_error.HDFSCliError("snakebite.test", -1, str(err), repr(err))
def rename(self, path, dest):
"""
Use snakebite.rename, if available.
:param path: source file(s)
:type path: either a string or sequence of strings
:param dest: destination file (single input) or directory (multiple)
:type dest: string
:return: list of renamed items
"""
parts = dest.rstrip('/').split('/')
if len(parts) > 1:
dir_path = '/'.join(parts[0:-1])
if not self.exists(dir_path):
self.mkdir(dir_path, parents=True)
return list(self.get_bite().rename(self.list_path(path), dest))
def rename_dont_move(self, path, dest):
"""
Use snakebite.rename_dont_move, if available.
:param path: source path (single input)
:type path: string
:param dest: destination path
:type dest: string
:return: True if succeeded
:raises: snakebite.errors.FileAlreadyExistsException
"""
from snakebite.errors import FileAlreadyExistsException
try:
self.get_bite().rename2(path, dest, overwriteDest=False)
return True
except FileAlreadyExistsException:
return False
def remove(self, path, recursive=True, skip_trash=False):
"""
Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
:param recursive: delete directory trees like \*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items
"""
return list(self.get_bite().delete(self.list_path(path), recurse=recursive))
def chmod(self, path, permissions, recursive=False):
"""
Use snakebite.chmod, if available.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param permissions: \*nix style permission number
:type permissions: octal
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
if isinstance(permissions, str):
permissions = int(permissions, 8)
return list(self.get_bite().chmod(self.list_path(path),
permissions, recursive))
def chown(self, path, owner, group, recursive=False):
"""
Use snakebite.chown/chgrp, if available.
One of owner or group must be set. Just setting group calls chgrp.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param owner: new owner, can be blank
:type owner: string
:param group: new group, can be blank
:type group: string
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
bite = self.get_bite()
if owner:
if group:
return all(bite.chown(self.list_path(path), "%s:%s" % (owner, group),
recurse=recursive))
return all(bite.chown(self.list_path(path), owner, recurse=recursive))
return list(bite.chgrp(self.list_path(path), group, recurse=recursive))
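    # Dispatch sketch (illustrative arguments): the owner/group combination
    # selects between snakebite's chown and chgrp calls:
    #
    #   client.chown('/data', 'alice', 'admins')   # chown to alice:admins
    #   client.chown('/data', 'alice', None)       # chown to alice only
    #   client.chown('/data', None, 'admins')      # falls through to chgrp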
def count(self, path):
"""
Use snakebite.count, if available.
:param path: directory to count the contents of
:type path: string
:return: dictionary with content_size, dir_count and file_count keys
"""
try:
            res = next(self.get_bite().count(self.list_path(path)))
dir_count = res['directoryCount']
file_count = res['fileCount']
content_size = res['spaceConsumed']
except StopIteration:
dir_count = file_count = content_size = 0
return {'content_size': content_size, 'dir_count': dir_count,
'file_count': file_count}
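    # Example result shape (hypothetical numbers); note that an empty iterator
    # from snakebite yields all-zero counts rather than raising StopIteration:
    #
    #   client.count('/data')
    #   # -> {'content_size': 1024, 'dir_count': 3, 'file_count': 12}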
def get(self, path, local_destination):
"""
Use snakebite.copyToLocal, if available.
:param path: HDFS file
:type path: string
:param local_destination: path on the system running Luigi
:type local_destination: string
"""
return list(self.get_bite().copyToLocal(self.list_path(path),
local_destination))
def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
"""
Use snakebite.mkdir, if available.
Snakebite's mkdir method allows control over full path creation, so by
default, tell it to build a full path to work like ``hadoop fs -mkdir``.
:param path: HDFS path to create
:type path: string
:param parents: create any missing parent directories
:type parents: boolean, default is True
:param mode: \*nix style owner/group/other permissions
:type mode: octal, default 0755
"""
result = list(self.get_bite().mkdir(self.list_path(path),
create_parent=parents, mode=mode))
        # match both "File exists" and "file exists" in the error message
        if raise_if_exists and "ile exists" in result[0].get('error', ''):
raise luigi.target.FileAlreadyExists("%s exists" % (path, ))
return result
def listdir(self, path, ignore_directories=False, ignore_files=False,
include_size=False, include_type=False, include_time=False,
recursive=False):
"""
Use snakebite.ls to get the list of items in a directory.
:param path: the directory to list
:type path: string
:param ignore_directories: if True, do not yield directory entries
:type ignore_directories: boolean, default is False
:param ignore_files: if True, do not yield file entries
:type ignore_files: boolean, default is False
:param include_size: include the size in bytes of the current item
:type include_size: boolean, default is False (do not include)
:param include_type: include the type (d or f) of the current item
:type include_type: boolean, default is False (do not include)
:param include_time: include the last modification time of the current item
:type include_time: boolean, default is False (do not include)
:param recursive: list subdirectory contents
:type recursive: boolean, default is False (do not recurse)
        :return: yields a string path or, if any of the include_* settings are
            true, a tuple starting with the path followed by the include_*
            items in order
"""
bite = self.get_bite()
for entry in bite.ls(self.list_path(path), recurse=recursive):
if ignore_directories and entry['file_type'] == 'd':
continue
if ignore_files and entry['file_type'] == 'f':
continue
rval = [entry['path'], ]
if include_size:
rval.append(entry['length'])
if include_type:
rval.append(entry['file_type'])
if include_time:
rval.append(datetime.datetime.fromtimestamp(entry['modification_time'] / 1000))
if len(rval) > 1:
yield tuple(rval)
else:
yield rval[0]
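    # Iteration sketch (hypothetical path): with include_* flags set, each
    # entry is a tuple ordered path, size, type, time:
    #
    #   for path, size, ftype in client.listdir(
    #           '/data', include_size=True, include_type=True):
    #       print(path, size, ftype)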
| apache-2.0 | -2,731,325,352,027,294,000 | 38.90146 | 120 | 0.60697 | false |
a25kk/biobee | docs/conf.py | 1 | 5997 | # -*- coding: utf-8 -*-
# Build configuration file.
# This file is execfile()d with the current directory set to its
# containing dir.
# Note that not all possible configuration values are present in this
# autogenerated file.
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
from datetime import datetime
project = u'biobee.buildout'
copyright = u'%s, Serge Davidov.' % datetime.now().year
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
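# For example, the built-in 'default' theme accepts options along these lines
# (names per the Sphinx built-in theme documentation; adjust as needed):
#html_theme_options = {
#    'rightsidebar': 'true',
#    'collapsiblesidebar': 'true',
#}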
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'buildoutdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index',
'buildout.tex',
u'biobee.buildout Documentation',
u'', 'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| mit | -6,147,307,173,122,633,000 | 31.770492 | 79 | 0.711689 | false |
zmathew/django-backbone | backbone/tests/tests.py | 1 | 28973 | from __future__ import unicode_literals
import datetime
from decimal import Decimal
import json
from django.contrib.auth.models import User, Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.translation import ugettext as _
from backbone.tests.models import Product, Brand, Category, ExtendedProduct, DisplayFieldsProduct
from backbone.tests.backbone_api import BrandBackboneView
class TestHelper(TestCase):
def parseJsonResponse(self, response, status_code=200):
self.assertEqual(response.status_code, status_code)
self.assertEqual(response['Content-Type'], 'application/json')
data = json.loads(response.content)
return data
def create_product(self, **kwargs):
defaults = {
'name': 'Test Product',
'price': '12.32',
'sku': '12345678',
}
if 'brand' not in kwargs:
defaults['brand'] = self.create_brand()
defaults.update(kwargs)
return Product.objects.create(**defaults)
def create_extended_product(self, **kwargs):
defaults = {
'name': 'Test Product',
'price': '12.32'
}
if 'brand' not in kwargs:
defaults['brand'] = self.create_brand()
defaults.update(kwargs)
return ExtendedProduct.objects.create(**defaults)
def create_displayfields_product(self, **kwargs):
defaults = {
'name': 'Beta Product',
'price': '13.32'
}
if 'brand' not in kwargs:
defaults['brand'] = self.create_brand()
defaults.update(kwargs)
return DisplayFieldsProduct.objects.create(**defaults)
def create_brand(self, **kwargs):
defaults = {
'name': 'Test Brand',
}
defaults.update(kwargs)
return Brand.objects.create(**defaults)
def create_category(self, **kwargs):
defaults = {
'name': 'Test Category',
}
defaults.update(kwargs)
return Category.objects.create(**defaults)
class CollectionTests(TestHelper):
def test_collection_view_returns_all_products_in_order(self):
p3 = self.create_product(order=3)
p1 = self.create_product(order=1)
p2 = self.create_product(order=2)
url = reverse('backbone:tests_product')
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 3)
self.assertEqual(data[0]['id'], p1.id)
self.assertEqual(data[0]['name'], p1.name)
self.assertEqual(data[1]['id'], p2.id)
        self.assertEqual(data[1]['name'], p2.name)
        self.assertEqual(data[2]['id'], p3.id)
        self.assertEqual(data[2]['name'], p3.name)
def test_collection_view_only_returns_fields_specified_in_display_fields(self):
self.create_product()
url = reverse('backbone:tests_product')
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 1)
fields = data[0].keys()
expected_fields = [
'id', 'creation_date', 'name', 'brand', 'categories', 'price', 'order',
'is_priced_under_10', 'get_first_category_id', 'sku', 'custom2'
]
self.assertEqual(set(expected_fields), set(fields))
self.assertTrue('is_hidden' not in fields)
def test_collection_view_foreign_key_is_returned_as_id(self):
brand = self.create_brand()
self.create_product(brand=brand)
url = reverse('backbone:tests_product')
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['brand'], brand.id)
def test_collection_view_m2m_field_is_returned_as_list_of_ids(self):
cat1 = self.create_category()
cat2 = self.create_category()
p = self.create_product()
p.categories = [cat1, cat2]
url = reverse('backbone:tests_product')
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['categories'], [cat1.id, cat2.id])
def test_collection_view_with_custom_queryset(self):
p1 = self.create_product()
self.create_product(is_hidden=True) # this should not appear
url = reverse('backbone:tests_product')
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['id'], p1.id)
def test_collection_view_put_request_returns_403(self):
url = reverse('backbone:tests_product')
response = self.client.put(url)
self.assertEqual(response.status_code, 403)
def test_collection_view_delete_request_returns_403(self):
url = reverse('backbone:tests_product')
response = self.client.delete(url)
self.assertEqual(response.status_code, 403)
def test_collection_view_pagination(self):
# Brand is paginated 2 per page
p1 = self.create_brand()
p2 = self.create_brand()
p3 = self.create_brand()
url = reverse('backbone:tests_brand')
# First page
response = self.client.get(url, {'page': 1})
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['id'], p1.id)
self.assertEqual(data[1]['id'], p2.id)
# Second Page
response = self.client.get(url, {'page': 2})
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['id'], p3.id)
def test_collection_view_page_parameter_out_of_range_returns_error(self):
url = reverse('backbone:tests_brand')
response = self.client.get(url, {'page': 2})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, _('Invalid `page` parameter: Out of range.'))
def test_collection_view_page_parameter_not_an_integer_returns_error(self):
url = reverse('backbone:tests_brand')
response = self.client.get(url, {'page': 'abcd'})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, _('Invalid `page` parameter: Not a valid integer.'))
def test_collection_view_that_is_not_paginated_ignores_page_parameter(self):
url = reverse('backbone:tests_product')
response = self.client.get(url, {'page': 999})
self.assertEqual(response.status_code, 200)
def test_collection_view_for_view_with_custom_url_slug(self):
brand = self.create_brand()
url = reverse('backbone:tests_brand_alternate')
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['id'], brand.id)
self.assertEqual(data[0]['custom'], 'foo')
class DetailTests(TestHelper):
def test_detail_view_returns_object_details(self):
product = self.create_product(price='3.00')
category = self.create_category()
product.categories.add(category)
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(data['id'], product.id)
self.assertEqual(data['name'], product.name)
self.assertEqual(data['brand'], product.brand.id)
self.assertEqual(data['categories'], [category.id])
self.assertEqual(data['price'], product.price)
self.assertEqual(data['order'], product.order)
# Attribute on model
self.assertEqual(data['is_priced_under_10'], True)
# Callable
self.assertEqual(data['sku'], '#: %s' % product.sku)
# Callable on admin class
self.assertEqual(data['custom2'], 'custom2: %s' % product.name)
# Callable on model
self.assertEqual(data['get_first_category_id'], category.id)
def test_detail_view_uses_display_detail_fields_when_defined(self):
display_fields_product = self.create_displayfields_product(price=111)
category = self.create_category()
display_fields_product.categories.add(category)
url = reverse('backbone:tests_displayfieldsproduct_detail', args=[display_fields_product.id])
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(data['id'], display_fields_product.id)
self.assertEqual(data['name'], display_fields_product.name)
self.assertEqual(data['brand'], display_fields_product.brand.id)
self.assertEqual(data['categories'], [category.id])
self.assertTrue('price' not in data)
self.assertTrue('order' not in data)
# Attribute on model
self.assertTrue('is_priced_under_10' not in data)
# Callable
self.assertTrue('sku' not in data)
# Callable on admin class
self.assertTrue('custom2' not in data)
# Callable on model
self.assertTrue('get_first_category_id' not in data)
self.assertEqual(len(data.keys()), 4)
def test_collection_view_uses_display_collection_fields_when_defined(self):
display_fields_product = self.create_displayfields_product(price=111)
category = self.create_category()
display_fields_product.categories.add(category)
url = reverse('backbone:tests_displayfieldsproduct')
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(len(data), 1)
data = data[0]
self.assertEqual(data['id'], display_fields_product.id)
self.assertEqual(data['name'], display_fields_product.name)
self.assertEqual(data['brand'], display_fields_product.brand.id)
self.assertEqual(data['categories'], [category.id])
self.assertTrue('price' not in data)
self.assertTrue('order' not in data)
# Attribute on model
self.assertTrue('is_priced_under_10' not in data)
# Callable
self.assertTrue('sku' not in data)
# Callable on admin class
self.assertTrue('custom2' not in data)
# Callable on model
self.assertTrue('get_first_category_id' not in data)
self.assertEqual(len(data.keys()), 4)
def test_detail_view_doesnt_return_unspecified_fields(self):
product = self.create_product()
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.get(url)
data = self.parseJsonResponse(response)
fields = data.keys()
self.assertTrue('is_hidden' not in fields)
def test_detail_view_returns_404_for_invalid_id(self):
url = reverse('backbone:tests_product_detail', args=[999])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_view_returns_404_for_object_not_in_custom_queryset(self):
product = self.create_product(is_hidden=True)
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_view_post_request_returns_403(self):
product = self.create_product()
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
def test_detail_view_for_view_with_custom_url_slug(self):
brand = self.create_brand()
url = reverse('backbone:tests_brand_alternate_detail', args=[brand.id])
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(data['id'], brand.id)
self.assertEqual(data['custom'], 'foo')
class AddTests(TestHelper):
def setUp(self):
self.user = User.objects.create_user(username='test', password='test', email='[email protected]')
self.client.login(username='test', password='test')
add_product = Permission.objects.get_by_natural_key('add_product', 'tests', 'product')
add_brand = Permission.objects.get_by_natural_key('add_brand', 'tests', 'brand')
self.user.user_permissions = [add_product, add_brand]
def test_post_request_on_product_collection_view_adds_product_to_db(self):
brand = self.create_brand()
cat1 = self.create_category()
cat2 = self.create_category()
data = json.dumps({
'name': 'Test',
'brand': brand.id,
'categories': [cat1.id, cat2.id],
'price': 12.34,
'order': 1,
'sale_date': '2006-10-25 14:30:59',
})
url = reverse('backbone:tests_product')
response = self.client.post(url, data, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(Product.objects.count(), 1)
product = Product.objects.order_by('-id')[0]
self.assertEqual(product.name, 'Test')
self.assertEqual(product.brand, brand)
self.assertEqual(product.categories.count(), 2)
self.assertEqual(product.categories.all()[0], cat1)
self.assertEqual(product.categories.all()[1], cat2)
self.assertEqual(product.price, Decimal('12.34'))
self.assertEqual(product.sale_date, datetime.datetime(2006, 10, 25, 14, 30, 59))
data = self.parseJsonResponse(response, status_code=201)
self.assertEqual(data['id'], product.id)
self.assertEqual(data['name'], product.name)
self.assertEqual(data['brand'], product.brand.id)
self.assertEqual(data['categories'], [cat1.id, cat2.id])
self.assertEqual(data['price'], '12.34')
self.assertTrue(response['Location'].endswith(
reverse('backbone:tests_product_detail', args=[product.id])
))
def test_post_request_on_product_collection_view_with_invalid_json_returns_error(self):
url = reverse('backbone:tests_product')
response = self.client.post(url, 'Some invalid json', content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, _('Unable to parse JSON request body.'))
def test_post_request_on_product_collection_view_with_validation_errors_returns_error_list_as_json(self):
data = json.dumps({
'name': '',
'brand': '',
'categories': [],
'price': None,
'order': '',
})
url = reverse('backbone:tests_product')
response = self.client.post(url, data, content_type='application/json')
self.assertEqual(Product.objects.count(), 0)
data = self.parseJsonResponse(response, status_code=400)
self.assertEqual(len(data), 3)
self.assertEqual(data['name'], [_('This field is required.')])
self.assertEqual(data['price'], [_('This field is required.')])
self.assertEqual(data['order'], [_('This field is required.')])
def test_post_request_on_product_collection_view_ignores_fields_not_specified(self):
brand = self.create_brand()
cat1 = self.create_category()
cat2 = self.create_category()
data = json.dumps({
'name': 'Test',
'brand': brand.id,
'categories': [cat1.id, cat2.id],
'price': 12.34,
'order': 1,
'is_hidden': True # User should not be able to alter is_hidden
})
url = reverse('backbone:tests_product')
response = self.client.post(url, data, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(Product.objects.count(), 1)
product = Product.objects.order_by('-id')[0]
self.assertEqual(product.is_hidden, False)
def test_post_request_on_product_collection_view_when_user_not_logged_in_returns_403(self):
self.client.logout()
url = reverse('backbone:tests_product')
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
self.assertEqual(Product.objects.count(), 0)
def test_post_request_on_product_collection_view_when_user_doesnt_have_add_permission_returns_403(self):
self.client.logout()
self.user.user_permissions.clear()
self.client.login(username='test', password='test')
url = reverse('backbone:tests_product')
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
def test_post_request_on_product_collection_view_violating_field_specific_permission_returns_403(self):
brand = self.create_brand()
cat1 = self.create_category()
data = json.dumps({
'name': 'NOTALLOWED',
'brand': brand.id,
'categories': [cat1.id],
'price': 12.34,
'order': 1
})
url = reverse('backbone:tests_product')
response = self.client.post(url, data, content_type='application/json')
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
def test_post_request_on_brand_collection_view_uses_custom_model_form(self):
data = json.dumps({
'name': 'this should give an error',
})
url = reverse('backbone:tests_brand')
response = self.client.post(url, data, content_type='application/json')
self.assertEqual(Brand.objects.count(), 0)
data = self.parseJsonResponse(response, status_code=400)
self.assertEqual(len(data), 1)
self.assertEqual(data['name'], [_('Brand name must start with a capital letter.')])
def test_post_request_on_custom_url_slug_view_contains_custom_url_in_location_header(self):
data = json.dumps({
'name': 'Foo',
})
url = reverse('backbone:tests_brand_alternate')
response = self.client.post(url, data, content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(Brand.objects.count(), 1)
self.assertTrue(response['Location'].endswith(reverse(
'backbone:tests_brand_alternate_detail', args=[Brand.objects.get().id]
)))
class UpdateTests(TestHelper):
def setUp(self):
self.user = User.objects.create_user(username='test', password='test', email='[email protected]')
self.client.login(username='test', password='test')
update_product = Permission.objects.get_by_natural_key('change_product', 'tests', 'product')
update_brand = Permission.objects.get_by_natural_key('change_brand', 'tests', 'brand')
self.user.user_permissions = [update_product, update_brand]
def test_put_request_on_product_detail_view_updates_product(self):
product = self.create_product()
brand = self.create_brand()
cat1 = self.create_category()
cat2 = self.create_category()
data = json.dumps({
'name': 'Test',
'brand': brand.id,
'categories': [cat1.id, cat2.id],
'price': 56.78,
'order': 2
})
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.put(url, data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(Product.objects.count(), 1)
product = Product.objects.get(id=product.id) # refresh from db
self.assertEqual(product.name, 'Test')
self.assertEqual(product.brand, brand)
self.assertEqual(product.categories.count(), 2)
self.assertEqual(product.categories.all()[0], cat1)
self.assertEqual(product.categories.all()[1], cat2)
self.assertEqual(product.price, Decimal('56.78'))
data = self.parseJsonResponse(response, status_code=200)
self.assertEqual(data['id'], product.id)
self.assertEqual(data['name'], product.name)
self.assertEqual(data['brand'], product.brand.id)
self.assertEqual(data['categories'], [cat1.id, cat2.id])
self.assertEqual(data['price'], '56.78')
def test_put_request_on_product_detail_view_with_invalid_json_returns_error(self):
product = self.create_product()
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.put(url, '', content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, _('Unable to parse JSON request body.'))
response = self.client.put(url, 'Some invalid json', content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, _('Unable to parse JSON request body.'))
def test_put_request_on_product_detail_view_with_validation_errors_returns_error_list_as_json(self):
product = self.create_product()
data = json.dumps({
'name': '',
'price': None,
'order': '',
})
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.put(url, data, content_type='application/json')
self.assertEqual(Product.objects.count(), 1)
data = self.parseJsonResponse(response, status_code=400)
self.assertEqual(len(data), 3)
self.assertEqual(data['name'], [_('This field is required.')])
self.assertEqual(data['price'], [_('This field is required.')])
self.assertEqual(data['order'], [_('This field is required.')])
def test_put_request_on_product_detail_view_ignores_fields_not_specified(self):
product = self.create_product()
brand = self.create_brand()
cat1 = self.create_category()
cat2 = self.create_category()
data = json.dumps({
'name': 'Test',
'brand': brand.id,
'categories': [cat1.id, cat2.id],
'price': 12.34,
'order': 1,
'is_hidden': True # User should not be able to alter is_hidden
})
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.put(url, data, content_type='application/json')
self.assertEqual(response.status_code, 200)
product = Product.objects.get(id=product.id) # refresh from db
self.assertEqual(product.is_hidden, False)
def test_put_request_on_product_detail_view_when_user_not_logged_in_returns_403(self):
product = self.create_product()
self.client.logout()
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.put(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
def test_put_request_on_product_detail_view_when_user_doesnt_have_update_permission_returns_403(self):
product = self.create_product()
self.client.logout()
self.user.user_permissions.clear()
self.client.login(username='test', password='test')
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.put(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
def test_put_request_on_product_detail_view_violating_field_specific_permission_returns_403(self):
product = self.create_product()
brand = self.create_brand()
cat1 = self.create_category()
data = json.dumps({
'name': 'NOTALLOWED',
'brand': brand.id,
'categories': [cat1.id],
'price': 12.34,
'order': 2
})
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.put(url, data, content_type='application/json')
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
def test_put_request_on_brand_collection_view_uses_custom_model_form(self):
brand = self.create_brand()
data = json.dumps({
'name': 'this should give an error',
})
url = reverse('backbone:tests_brand_detail', args=[brand.id])
response = self.client.put(url, data, content_type='application/json')
self.assertEqual(Product.objects.count(), 0)
data = self.parseJsonResponse(response, status_code=400)
self.assertEqual(len(data), 1)
self.assertEqual(data['name'], [_('Brand name must start with a capital letter.')])
class DeleteTests(TestHelper):
def setUp(self):
self.user = User.objects.create_user(username='test', password='test', email='[email protected]')
self.client.login(username='test', password='test')
delete_product = Permission.objects.get_by_natural_key('delete_product', 'tests', 'product')
delete_brand = Permission.objects.get_by_natural_key('delete_brand', 'tests', 'brand')
self.user.user_permissions.add(delete_product, delete_brand)
def test_delete_request_on_product_deletes_the_item(self):
product = self.create_product()
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.delete(url)
self.assertEqual(response.status_code, 204)
self.assertEqual(Product.objects.count(), 0)
def test_delete_request_on_product_when_user_not_logged_in_returns_403(self):
self.client.logout()
product = self.create_product()
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.delete(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
self.assertEqual(Product.objects.count(), 1)
def test_delete_request_on_product_when_user_doesnt_have_delete_permission_returns_403(self):
self.client.logout()
self.user.user_permissions.clear()
self.client.login(username='test', password='test')
product = self.create_product()
url = reverse('backbone:tests_product_detail', args=[product.id])
response = self.client.delete(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
self.assertEqual(Product.objects.count(), 1)
def test_delete_request_on_brand_returns_403(self):
brand = self.create_brand()
url = reverse('backbone:tests_brand_detail', args=[brand.id])
response = self.client.delete(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, _('You do not have permission to perform this action.'))
self.assertEqual(Brand.objects.count(), 1)
class InheritanceTests(TestHelper):
def test_detail_view_returns_inherited_object_details(self):
ext_product = self.create_extended_product(price='9.00')
category = self.create_category()
ext_product.categories.add(category)
url = reverse('backbone:tests_extendedproduct_detail', args=[ext_product.id])
response = self.client.get(url)
data = self.parseJsonResponse(response)
self.assertEqual(data['id'], ext_product.id)
self.assertEqual(data['name'], ext_product.name)
self.assertEqual(data['brand'], ext_product.brand.id)
self.assertEqual(data['categories'], [category.id])
self.assertEqual(data['price'], ext_product.price)
self.assertEqual(data['order'], ext_product.order)
self.assertEqual(data['description'], ext_product.description)
# Attribute on model
self.assertEqual(data['is_priced_under_10'], True)
# Callable on model
self.assertEqual(data['get_first_category_id'], category.id)
class InvalidViewTests(TestHelper):
def setUp(self):
BrandBackboneView.display_fields += ['invalid_field']
def tearDown(self):
BrandBackboneView.display_fields.remove('invalid_field')
def test_invalid_field_name_raises_attribute_error(self):
brand = self.create_brand()
url = reverse('backbone:tests_brand_detail', args=[brand.id])
try:
self.client.get(url)
        except AttributeError as err:
            self.assertEqual(str(err), "Invalid field: invalid_field")
        else:
            self.fail('AttributeError was not raised')
| bsd-3-clause | -2,077,347,136,853,208,800 | 42.243284 | 109 | 0.636731 | false |
kapilt/cloud-custodian | c7n/resources/s3.py | 1 | 112011 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""S3 Resource Manager
Filters:
The generic Values filters (jmespath) expression and Or filter are
available with all resources, including buckets; we include several
additional pieces of bucket data (Tags, Replication, Acl, Policy) as keys
within a bucket representation.
Actions:
encrypt-keys
Scan all keys in a bucket and optionally encrypt them in place.
global-grants
Check bucket acls for global grants
encryption-policy
Attach an encryption required policy to a bucket, this will break
applications that are not using encryption, including aws log
delivery.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import functools
import json
import itertools
import logging
import math
import os
import time
import ssl
import six
from botocore.client import Config
from botocore.exceptions import ClientError
from collections import defaultdict
from concurrent.futures import as_completed
from dateutil.parser import parse as parse_date
try:
from urllib3.exceptions import SSLError
except ImportError:
from botocore.vendored.requests.packages.urllib3.exceptions import SSLError
from c7n.actions import (
ActionRegistry, BaseAction, PutMetric, RemovePolicyBase)
from c7n.exceptions import PolicyValidationError
from c7n.filters import (
FilterRegistry, Filter, CrossAccountAccessFilter, MetricsFilter,
ValueFilter)
from c7n.manager import resources
from c7n import query
from c7n.resources.securityhub import PostFinding
from c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction
from c7n.utils import (
chunks, local_session, set_annotation, type_schema, filter_empty,
dumps, format_string_values, get_account_alias_from_sts)
log = logging.getLogger('custodian.s3')
filters = FilterRegistry('s3.filters')
actions = ActionRegistry('s3.actions')
filters.register('marked-for-op', TagActionFilter)
actions.register('put-metric', PutMetric)
MAX_COPY_SIZE = 1024 * 1024 * 1024 * 2
@resources.register('s3')
class S3(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 's3'
arn_type = ''
enum_spec = ('list_buckets', 'Buckets[]', None)
detail_spec = ('list_objects', 'Bucket', 'Contents[]')
name = id = 'Name'
date = 'CreationDate'
dimension = 'BucketName'
config_type = 'AWS::S3::Bucket'
filter_registry = filters
action_registry = actions
def get_arns(self, resources):
return ["arn:aws:s3:::{}".format(r["Name"]) for r in resources]
def get_source(self, source_type):
if source_type == 'describe':
return DescribeS3(self)
elif source_type == 'config':
return ConfigS3(self)
else:
return super(S3, self).get_source(source_type)
@classmethod
def get_permissions(cls):
perms = ["s3:ListAllMyBuckets"]
perms.extend([n[-1] for n in S3_AUGMENT_TABLE])
return perms
class DescribeS3(query.DescribeSource):
def augment(self, buckets):
with self.manager.executor_factory(
max_workers=min((10, len(buckets) + 1))) as w:
results = w.map(
assemble_bucket,
zip(itertools.repeat(self.manager.session_factory), buckets))
results = list(filter(None, results))
return results
def get_resources(self, bucket_names):
return [{'Name': b} for b in bucket_names]
class ConfigS3(query.ConfigSource):
def get_query_params(self, query):
q = super(ConfigS3, self).get_query_params(query)
if 'expr' in q:
q['expr'] = q['expr'].replace('select ', 'select awsRegion, ')
return q
def load_resource(self, item):
resource = super(ConfigS3, self).load_resource(item)
cfg = item['supplementaryConfiguration']
# aka standard
if 'awsRegion' in item and item['awsRegion'] != 'us-east-1':
resource['Location'] = {'LocationConstraint': item['awsRegion']}
else:
resource['Location'] = {}
# owner is under acl per describe
resource.pop('Owner', None)
resource['CreationDate'] = parse_date(resource['CreationDate'])
for k, null_value in S3_CONFIG_SUPPLEMENT_NULL_MAP.items():
if cfg.get(k) == null_value:
continue
method = getattr(self, "handle_%s" % k, None)
if method is None:
raise ValueError("unhandled supplementary config %s", k)
continue
v = cfg[k]
if isinstance(cfg[k], six.string_types):
v = json.loads(cfg[k])
method(resource, v)
for el in S3_AUGMENT_TABLE:
if el[1] not in resource:
resource[el[1]] = el[2]
return resource
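    # Dispatch sketch: each supplementary configuration key ``Foo`` above is
    # routed to a ``handle_Foo`` method on this class, e.g. for the
    # BucketPolicy key:
    #
    #   cfg = {'BucketPolicy': '{"policyText": "..."}'}
    #   # -> self.handle_BucketPolicy(resource, {'policyText': '...'})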
PERMISSION_MAP = {
'FullControl': 'FULL_CONTROL',
'Write': 'WRITE',
'WriteAcp': 'WRITE_ACP',
'Read': 'READ',
'ReadAcp': 'READ_ACP'}
GRANTEE_MAP = {
'AllUsers': "http://acs.amazonaws.com/groups/global/AllUsers",
'AuthenticatedUsers': "http://acs.amazonaws.com/groups/global/AuthenticatedUsers",
'LogDelivery': 'http://acs.amazonaws.com/groups/s3/LogDelivery'}
def handle_AccessControlList(self, resource, item_value):
# double serialized in config for some reason
if isinstance(item_value, six.string_types):
item_value = json.loads(item_value)
resource['Acl'] = {}
resource['Acl']['Owner'] = {'ID': item_value['owner']['id']}
if item_value['owner']['displayName']:
resource['Acl']['Owner']['DisplayName'] = item_value[
'owner']['displayName']
resource['Acl']['Grants'] = grants = []
for g in (item_value.get('grantList') or ()):
if 'id' not in g['grantee']:
assert g['grantee'] in self.GRANTEE_MAP, "unknown grantee %s" % g
rg = {'Type': 'Group', 'URI': self.GRANTEE_MAP[g['grantee']]}
else:
rg = {'ID': g['grantee']['id'], 'Type': 'CanonicalUser'}
if 'displayName' in g:
rg['DisplayName'] = g['displayName']
grants.append({
'Permission': self.PERMISSION_MAP[g['permission']],
'Grantee': rg,
})
def handle_BucketAccelerateConfiguration(self, resource, item_value):
# not currently auto-augmented by custodian
return
def handle_BucketLoggingConfiguration(self, resource, item_value):
if ('destinationBucketName' not in item_value or
item_value['destinationBucketName'] is None):
return {}
resource[u'Logging'] = {
'TargetBucket': item_value['destinationBucketName'],
'TargetPrefix': item_value['logFilePrefix']}
def handle_BucketLifecycleConfiguration(self, resource, item_value):
rules = []
for r in item_value.get('rules'):
rr = {}
rules.append(rr)
expiry = {}
for ek, ck in (
('Date', 'expirationDate'),
('ExpiredObjectDeleteMarker', 'expiredObjectDeleteMarker'),
('Days', 'expirationInDays')):
if ck in r and r[ck] and r[ck] != -1:
expiry[ek] = r[ck]
if expiry:
rr['Expiration'] = expiry
transitions = []
for t in (r.get('transitions') or ()):
tr = {}
for k in ('date', 'days', 'storageClass'):
if t[k]:
tr["%s%s" % (k[0].upper(), k[1:])] = t[k]
transitions.append(tr)
if transitions:
rr['Transitions'] = transitions
if r.get('abortIncompleteMultipartUpload'):
rr['AbortIncompleteMultipartUpload'] = {
'DaysAfterInitiation': r[
'abortIncompleteMultipartUpload']['daysAfterInitiation']}
if r.get('noncurrentVersionExpirationInDays'):
rr['NoncurrentVersionExpiration'] = {
'NoncurrentDays': r['noncurrentVersionExpirationInDays']}
nonc_transitions = []
for t in (r.get('noncurrentVersionTransitions') or ()):
nonc_transitions.append({
'NoncurrentDays': t['days'],
'StorageClass': t['storageClass']})
if nonc_transitions:
rr['NoncurrentVersionTransitions'] = nonc_transitions
rr['Status'] = r['status']
rr['ID'] = r['id']
if r.get('prefix'):
rr['Prefix'] = r['prefix']
if 'filter' not in r or not r['filter']:
continue
if r['filter']['predicate']:
rr['Filter'] = self.convertLifePredicate(r['filter']['predicate'])
resource['Lifecycle'] = {'Rules': rules}
def convertLifePredicate(self, p):
if p['type'] == 'LifecyclePrefixPredicate':
return {'Prefix': p['prefix']}
if p['type'] == 'LifecycleTagPredicate':
return {'Tags': [{'Key': p['tag']['key'], 'Value': p['tag']['value']}]}
if p['type'] == 'LifecycleAndOperator':
n = {}
for o in p['operands']:
ot = self.convertLifePredicate(o)
if 'Tags' in n and 'Tags' in ot:
n['Tags'].extend(ot['Tags'])
else:
n.update(ot)
return {'And': n}
raise ValueError("unknown predicate: %s" % p)
NotifyTypeMap = {
'QueueConfiguration': 'QueueConfigurations',
'LambdaConfiguration': 'LambdaFunctionConfigurations',
'CloudFunctionConfiguration': 'LambdaFunctionConfigurations',
'TopicConfiguration': 'TopicConfigurations'}
def handle_BucketNotificationConfiguration(self, resource, item_value):
d = {}
for nid, n in item_value['configurations'].items():
ninfo = {}
d.setdefault(self.NotifyTypeMap[n['type']], []).append(ninfo)
if n['type'] == 'QueueConfiguration':
ninfo['QueueArn'] = n['queueARN']
elif n['type'] == 'TopicConfiguration':
ninfo['TopicArn'] = n['topicARN']
elif n['type'] == 'LambdaConfiguration':
ninfo['LambdaFunctionArn'] = n['functionARN']
ninfo['Id'] = nid
ninfo['Events'] = n['events']
rules = []
if n['filter']:
for r in n['filter'].get('s3KeyFilter', {}).get('filterRules', []):
rules.append({'Name': r['name'], 'Value': r['value']})
if rules:
ninfo['Filter'] = {'Key': {'FilterRules': rules}}
resource['Notification'] = d
def handle_BucketReplicationConfiguration(self, resource, item_value):
d = {'Role': item_value['roleARN'], 'Rules': []}
for rid, r in item_value['rules'].items():
rule = {
'ID': rid,
'Status': r['status'],
'Prefix': r['prefix'],
'Destination': {
'Bucket': r['destinationConfig']['bucketARN']}
}
if r['destinationConfig']['storageClass']:
rule['Destination']['StorageClass'] = r['destinationConfig']['storageClass']
d['Rules'].append(rule)
resource['Replication'] = {'ReplicationConfiguration': d}
def handle_BucketPolicy(self, resource, item_value):
resource['Policy'] = item_value['policyText']
def handle_BucketTaggingConfiguration(self, resource, item_value):
resource['Tags'] = [
{"Key": k, "Value": v} for k, v in item_value['tagSets'][0]['tags'].items()]
def handle_BucketVersioningConfiguration(self, resource, item_value):
# Config defaults versioning to 'Off' for a null value
if item_value['status'] not in ('Enabled', 'Suspended'):
return
resource['Versioning'] = {'Status': item_value['status']}
if item_value['isMfaDeleteEnabled']:
resource['Versioning']['MFADelete'] = item_value[
'isMfaDeleteEnabled'].title()
def handle_BucketWebsiteConfiguration(self, resource, item_value):
website = {}
if item_value['indexDocumentSuffix']:
website['IndexDocument'] = {
'Suffix': item_value['indexDocumentSuffix']}
if item_value['errorDocument']:
website['ErrorDocument'] = {
'Key': item_value['errorDocument']}
if item_value['redirectAllRequestsTo']:
website['RedirectAllRequestsTo'] = {
'HostName': item_value['redirectAllRequestsTo']['hostName'],
'Protocol': item_value['redirectAllRequestsTo']['protocol']}
for r in item_value['routingRules']:
redirect = {}
rule = {'Redirect': redirect}
website.setdefault('RoutingRules', []).append(rule)
if 'condition' in r:
cond = {}
for ck, rk in (
('keyPrefixEquals', 'KeyPrefixEquals'),
('httpErrorCodeReturnedEquals',
'HttpErrorCodeReturnedEquals')):
if r['condition'][ck]:
cond[rk] = r['condition'][ck]
rule['Condition'] = cond
for ck, rk in (
('protocol', 'Protocol'),
('hostName', 'HostName'),
('replaceKeyPrefixWith', 'ReplaceKeyPrefixWith'),
('replaceKeyWith', 'ReplaceKeyWith'),
('httpRedirectCode', 'HttpRedirectCode')):
if r['redirect'][ck]:
redirect[rk] = r['redirect'][ck]
resource['Website'] = website
S3_CONFIG_SUPPLEMENT_NULL_MAP = {
'BucketLoggingConfiguration': u'{"destinationBucketName":null,"logFilePrefix":null}',
'BucketPolicy': u'{"policyText":null}',
'BucketVersioningConfiguration': u'{"status":"Off","isMfaDeleteEnabled":null}',
'BucketAccelerateConfiguration': u'{"status":null}',
'BucketNotificationConfiguration': u'{"configurations":{}}',
'BucketLifecycleConfiguration': None,
'AccessControlList': None,
'BucketTaggingConfiguration': None,
'BucketWebsiteConfiguration': None,
'BucketReplicationConfiguration': None
}
S3_AUGMENT_TABLE = (
('get_bucket_location', 'Location', {}, None, 's3:GetBucketLocation'),
('get_bucket_tagging', 'Tags', [], 'TagSet', 's3:GetBucketTagging'),
('get_bucket_policy', 'Policy', None, 'Policy', 's3:GetBucketPolicy'),
('get_bucket_acl', 'Acl', None, None, 's3:GetBucketAcl'),
('get_bucket_replication',
'Replication', None, None, 's3:GetReplicationConfiguration'),
('get_bucket_versioning', 'Versioning', None, None, 's3:GetBucketVersioning'),
('get_bucket_website', 'Website', None, None, 's3:GetBucketWebsite'),
('get_bucket_logging', 'Logging', None, 'LoggingEnabled', 's3:GetBucketLogging'),
('get_bucket_notification_configuration',
'Notification', None, None, 's3:GetBucketNotification'),
('get_bucket_lifecycle_configuration',
'Lifecycle', None, None, 's3:GetLifecycleConfiguration'),
# ('get_bucket_cors', 'Cors'),
)
def assemble_bucket(item):
"""Assemble a document representing all the config state around a bucket.
TODO: Refactor this, the logic here feels quite muddled.
"""
factory, b = item
s = factory()
c = s.client('s3')
# Bucket Location, Current Client Location, Default Location
b_location = c_location = location = "us-east-1"
methods = list(S3_AUGMENT_TABLE)
for minfo in methods:
m, k, default, select = minfo[:4]
try:
method = getattr(c, m)
v = method(Bucket=b['Name'])
v.pop('ResponseMetadata')
if select is not None and select in v:
v = v[select]
except (ssl.SSLError, SSLError) as e:
# Proxy issues? i assume
log.warning("Bucket ssl error %s: %s %s",
b['Name'], b.get('Location', 'unknown'),
e)
continue
except ClientError as e:
code = e.response['Error']['Code']
if code.startswith("NoSuch") or "NotFound" in code:
v = default
elif code == 'PermanentRedirect':
s = factory()
c = bucket_client(s, b)
# Requeue with the correct region given location constraint
methods.append((m, k, default, select))
continue
else:
log.warning(
"Bucket:%s unable to invoke method:%s error:%s ",
b['Name'], m, e.response['Error']['Message'])
# For auth failures, we don't bail out, continue processing if we can.
# Note this can lead to missing data, but in general is cleaner than
# failing hard, due to the common use of locked down s3 bucket policies
# that may cause issues fetching information across a fleet of buckets.
# This does mean s3 policies depending on augments should check denied
# methods annotation, generally though lacking get access to an augment means
# they won't have write access either.
# For other error types we raise and bail policy execution.
if e.response['Error']['Code'] == 'AccessDenied':
b.setdefault('c7n:DeniedMethods', []).append(m)
continue
raise
# As soon as we learn location (which generally works)
if k == 'Location' and v is not None:
b_location = v.get('LocationConstraint')
# Location == region for all cases but EU
# https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
if b_location is None:
b_location = "us-east-1"
elif b_location == 'EU':
b_location = "eu-west-1"
v['LocationConstraint'] = 'eu-west-1'
if v and v != c_location:
c = s.client('s3', region_name=b_location)
elif c_location != location:
c = s.client('s3', region_name=location)
b[k] = v
return b
def bucket_client(session, b, kms=False):
region = get_region(b)
if kms:
# Need v4 signature for aws:kms crypto, else let the sdk decide
# based on region support.
config = Config(
signature_version='s3v4',
read_timeout=200, connect_timeout=120)
else:
config = Config(read_timeout=200, connect_timeout=120)
return session.client('s3', region_name=region, config=config)
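# Usage sketch (hypothetical key name): resolve a region-correct client per
# bucket, asking for a v4-signing client when KMS-encrypted objects are read:
#
#   client = bucket_client(local_session(session_factory), bucket, kms=True)
#   client.head_object(Bucket=bucket['Name'], Key='some/key')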
def modify_bucket_tags(session_factory, buckets, add_tags=(), remove_tags=()):
for bucket in buckets:
client = bucket_client(local_session(session_factory), bucket)
# Bucket tags are set atomically for the set/document, we want
# to refetch against current to guard against any staleness in
# our cached representation across multiple policies or concurrent
# modifications.
if 'get_bucket_tagging' in bucket.get('c7n:DeniedMethods', []):
# avoid the additional API call if we already know that it's going
# to result in AccessDenied. The chances that the resource's perms
# would have changed between fetching the resource and acting on it
            # here are pretty low, so the check here should suffice.
            log.warning(
                "Unable to get new set of bucket tags needed to modify tags, "
                "skipping tag action for bucket: %s" % bucket["Name"])
continue
try:
bucket['Tags'] = client.get_bucket_tagging(
Bucket=bucket['Name']).get('TagSet', [])
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchTagSet':
raise
bucket['Tags'] = []
new_tags = {t['Key']: t['Value'] for t in add_tags}
for t in bucket.get('Tags', ()):
if (t['Key'] not in new_tags and t['Key'] not in remove_tags):
new_tags[t['Key']] = t['Value']
tag_set = [{'Key': k, 'Value': v} for k, v in new_tags.items()]
try:
client.put_bucket_tagging(
Bucket=bucket['Name'], Tagging={'TagSet': tag_set})
except ClientError as e:
log.exception(
'Exception tagging bucket %s: %s', bucket['Name'], e)
continue
def get_region(b):
"""Tries to get the bucket region from Location.LocationConstraint
Special cases:
LocationConstraint EU defaults to eu-west-1
LocationConstraint null defaults to us-east-1
Args:
b (object): A bucket object
Returns:
string: an aws region string
"""
remap = {None: 'us-east-1', 'EU': 'eu-west-1'}
region = b.get('Location', {}).get('LocationConstraint')
return remap.get(region, region)
@filters.register('metrics')
class S3Metrics(MetricsFilter):
"""S3 CW Metrics need special handling for attribute/dimension
    mismatch, and an additional required dimension.
"""
def get_dimensions(self, resource):
dims = [{'Name': 'BucketName', 'Value': resource['Name']}]
if (self.data['name'] == 'NumberOfObjects' and
'dimensions' not in self.data):
dims.append(
{'Name': 'StorageType', 'Value': 'AllStorageTypes'})
return dims
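    # Example output (illustrative bucket name): the NumberOfObjects metric
    # with no explicit dimensions yields:
    #
    #   [{'Name': 'BucketName', 'Value': 'mybucket'},
    #    {'Name': 'StorageType', 'Value': 'AllStorageTypes'}]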
@filters.register('cross-account')
class S3CrossAccountFilter(CrossAccountAccessFilter):
"""Filters cross-account access to S3 buckets
:example:
.. code-block:: yaml
policies:
- name: s3-acl
resource: s3
region: us-east-1
filters:
- type: cross-account
"""
permissions = ('s3:GetBucketPolicy',)
def get_accounts(self):
"""add in elb access by default
ELB Accounts by region
https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html
Redshift Accounts by region
https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#rs-db-auditing-cloud-trail-rs-acct-ids
Cloudtrail Accounts by region
https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html
"""
accounts = super(S3CrossAccountFilter, self).get_accounts()
return accounts.union(
[
# ELB accounts
'127311923021', # us-east-1
'033677994240', # us-east-2
'797873946194', # us-west-2
'027434742980', # us-west-1
'985666609251', # ca-central-1
'156460612806', # eu-west-1
'054676820928', # eu-central-1
'652711504416', # eu-west-2
'582318560864', # ap-northeast-1
'600734575887', # ap-northeast-2
'114774131450', # ap-southeast-1
'783225319266', # ap-southeast-2
'718504428378', # ap-south-1
'507241528517', # sa-east-1
'048591011584', # us-gov-west-1 or gov-cloud-1
'638102146993', # cn-north-1
# Redshift accounts
'368064434614', # us-east-1
'790247189693', # us-east-2
'703715109447', # us-east-1
'473191095985', # us-west-2
'408097707231', # ap-south-1
'713597048934', # ap-northeast-2
'960118270566', # ap-southeast-1
'485979073181', # ap-southeast-2
'615915377779', # ap-northeast-1
'764870610256', # ca-central-1
'434091160558', # eu-central-1
'246478207311', # eu-west-1
'885798887673', # eu-west-2
'392442076723', # sa-east-1
# Cloudtrail accounts (psa. folks should be using
# cloudtrail service in bucket policies)
'086441151436', # us-east-1
'475085895292', # us-west-2
'388731089494', # us-west-1
'113285607260', # us-west-2
'819402241893', # ca-central-1
'977081816279', # ap-south-1
'492519147666', # ap-northeast-2
'903692715234', # ap-southeast-1
'284668455005', # ap-southeast-2
'216624486486', # ap-northeast-1
'035351147821', # eu-central-1
'859597730677', # eu-west-1
'282025262664', # eu-west-2
'814480443879', # sa-east-1
])
@filters.register('global-grants')
class GlobalGrantsFilter(Filter):
"""Filters for all S3 buckets that have global-grants
*Note* by default this filter allows for read access
if the bucket has been configured as a website. This
can be disabled per the example below.
:example:
.. code-block:: yaml
policies:
- name: remove-global-grants
resource: s3
filters:
- type: global-grants
allow_website: false
actions:
- delete-global-grants
"""
schema = type_schema(
'global-grants',
allow_website={'type': 'boolean'},
operator={'type': 'string', 'enum': ['or', 'and']},
permissions={
'type': 'array', 'items': {
'type': 'string', 'enum': [
'READ', 'WRITE', 'WRITE_ACP', 'READ_ACP', 'FULL_CONTROL']}})
GLOBAL_ALL = "http://acs.amazonaws.com/groups/global/AllUsers"
AUTH_ALL = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
def process(self, buckets, event=None):
with self.executor_factory(max_workers=5) as w:
results = w.map(self.process_bucket, buckets)
results = list(filter(None, list(results)))
return results
def process_bucket(self, b):
acl = b.get('Acl', {'Grants': []})
if not acl or not acl['Grants']:
return
results = []
allow_website = self.data.get('allow_website', True)
perms = self.data.get('permissions', [])
for grant in acl['Grants']:
if 'URI' not in grant.get("Grantee", {}):
continue
if grant['Grantee']['URI'] not in [self.AUTH_ALL, self.GLOBAL_ALL]:
continue
if allow_website and grant['Permission'] == 'READ' and b['Website']:
continue
if not perms or (perms and grant['Permission'] in perms):
results.append(grant['Permission'])
if results:
set_annotation(b, 'GlobalPermissions', results)
return b
class BucketActionBase(BaseAction):
def get_permissions(self):
return self.permissions
def get_std_format_args(self, bucket):
return {
'account_id': self.manager.config.account_id,
'region': self.manager.config.region,
'bucket_name': bucket['Name'],
'bucket_region': get_region(bucket)
}
def process(self, buckets):
with self.executor_factory(max_workers=3) as w:
futures = {}
results = []
for b in buckets:
futures[w.submit(self.process_bucket, b)] = b
for f in as_completed(futures):
if f.exception():
                    self.log.error('error modifying bucket:%s\n%s',
                                   futures[f]['Name'], f.exception())
results += filter(None, [f.result()])
return results
class BucketFilterBase(Filter):
def get_std_format_args(self, bucket):
return {
'account_id': self.manager.config.account_id,
'region': self.manager.config.region,
'bucket_name': bucket['Name'],
'bucket_region': get_region(bucket)
}
@S3.action_registry.register("post-finding")
class BucketFinding(PostFinding):
def format_resource(self, r):
owner = r.get("Acl", {}).get("Owner", {})
resource = {
"Type": "AwsS3Bucket",
"Id": "arn:aws:s3:::{}".format(r["Name"]),
"Region": get_region(r),
"Tags": {t["Key"]: t["Value"] for t in r.get("Tags", [])},
"Details": {"AwsS3Bucket": {"OwnerId": owner.get('ID', 'Unknown')}}
}
if "DisplayName" in owner:
resource["Details"]["AwsS3Bucket"]["OwnerName"] = owner['DisplayName']
return filter_empty(resource)
@filters.register('has-statement')
class HasStatementFilter(BucketFilterBase):
"""Find buckets with set of policy statements.
:example:
.. code-block:: yaml
policies:
- name: s3-bucket-has-statement
resource: s3
filters:
- type: has-statement
statement_ids:
- RequiredEncryptedPutObject
policies:
- name: s3-public-policy
resource: s3
filters:
- type: has-statement
statements:
- Effect: Allow
Action: 's3:*'
Principal: '*'
"""
schema = type_schema(
'has-statement',
statement_ids={'type': 'array', 'items': {'type': 'string'}},
statements={
'type': 'array',
'items': {
'type': 'object',
'properties': {
'Sid': {'type': 'string'},
'Effect': {'type': 'string', 'enum': ['Allow', 'Deny']},
'Principal': {'anyOf': [
{'type': 'string'},
{'type': 'object'}, {'type': 'array'}]},
'NotPrincipal': {
'anyOf': [{'type': 'object'}, {'type': 'array'}]},
'Action': {
'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotAction': {
'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Resource': {
'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotResource': {
'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Condition': {'type': 'object'}
},
'required': ['Effect']
}
})
def process(self, buckets, event=None):
return list(filter(None, map(self.process_bucket, buckets)))
def process_bucket(self, b):
p = b.get('Policy')
if p is None:
return None
p = json.loads(p)
required = list(self.data.get('statement_ids', []))
statements = p.get('Statement', [])
for s in list(statements):
if s.get('Sid') in required:
required.remove(s['Sid'])
required_statements = format_string_values(list(self.data.get('statements', [])),
**self.get_std_format_args(b))
for required_statement in required_statements:
for statement in statements:
found = 0
for key, value in required_statement.items():
if key in statement and value == statement[key]:
found += 1
if found and found == len(required_statement):
required_statements.remove(required_statement)
break
if (self.data.get('statement_ids', []) and not required) or \
(self.data.get('statements', []) and not required_statements):
return b
return None
ENCRYPTION_STATEMENT_GLOB = {
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
"Condition": {
"StringNotEquals": {
"s3:x-amz-server-side-encryption": ["AES256", "aws:kms"]}}}
@filters.register('no-encryption-statement')
class EncryptionEnabledFilter(Filter):
"""Find buckets with missing encryption policy statements.
:example:
.. code-block:: yaml
policies:
- name: s3-bucket-not-encrypted
resource: s3
filters:
- type: no-encryption-statement
"""
schema = type_schema(
'no-encryption-statement')
def get_permissions(self):
perms = self.manager.get_resource_manager('s3').get_permissions()
return perms
def process(self, buckets, event=None):
return list(filter(None, map(self.process_bucket, buckets)))
def process_bucket(self, b):
p = b.get('Policy')
if p is None:
return b
p = json.loads(p)
encryption_statement = dict(ENCRYPTION_STATEMENT_GLOB)
statements = p.get('Statement', [])
check = False
for s in list(statements):
if 'Sid' in s:
encryption_statement["Sid"] = s["Sid"]
if 'Resource' in s:
encryption_statement["Resource"] = s["Resource"]
if s == encryption_statement:
check = True
break
if check:
return None
else:
return b
@filters.register('missing-statement')
@filters.register('missing-policy-statement')
class MissingPolicyStatementFilter(Filter):
"""Find buckets missing a set of named policy statements.
:example:
.. code-block:: yaml
policies:
- name: s3-bucket-missing-statement
resource: s3
filters:
- type: missing-statement
statement_ids:
- RequiredEncryptedPutObject
"""
schema = type_schema(
'missing-policy-statement',
aliases=('missing-statement',),
statement_ids={'type': 'array', 'items': {'type': 'string'}})
def __call__(self, b):
p = b.get('Policy')
if p is None:
return b
p = json.loads(p)
required = list(self.data.get('statement_ids', []))
statements = p.get('Statement', [])
for s in list(statements):
if s.get('Sid') in required:
required.remove(s['Sid'])
if not required:
return False
return True
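# Editorial note: in contrast to the has-statement filter above, __call__
# returns True (the bucket matches) when at least one required Sid is
# missing from the policy, and False once every required statement is found.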
@filters.register('bucket-notification')
class BucketNotificationFilter(ValueFilter):
"""Filter based on bucket notification configuration.
:example:
.. code-block:: yaml
policies:
- name: delete-incorrect-notification
resource: s3
filters:
- type: bucket-notification
kind: lambda
key: Id
value: "IncorrectLambda"
op: eq
actions:
- type: delete-bucket-notification
statement_ids: matched
"""
schema = type_schema(
'bucket-notification',
required=['kind'],
kind={'type': 'string', 'enum': ['lambda', 'sns', 'sqs']},
rinherit=ValueFilter.schema)
schema_alias = False
annotation_key = 'c7n:MatchedNotificationConfigurationIds'
permissions = ('s3:GetBucketNotification',)
FIELDS = {
'lambda': 'LambdaFunctionConfigurations',
'sns': 'TopicConfigurations',
'sqs': 'QueueConfigurations'
}
def process(self, buckets, event=None):
return super(BucketNotificationFilter, self).process(buckets, event)
def __call__(self, bucket):
field = self.FIELDS[self.data['kind']]
found = False
for config in bucket.get('Notification', {}).get(field, []):
if self.match(config):
set_annotation(
bucket,
BucketNotificationFilter.annotation_key,
config['Id'])
found = True
return found
@filters.register('bucket-logging')
class BucketLoggingFilter(Filter):
"""Filter based on bucket logging configuration.
:example:
.. code-block:: yaml
policies:
- name: add-bucket-logging-if-missing
resource: s3
filters:
- type: bucket-logging
op: disabled
actions:
- type: toggle-logging
target_bucket: "{account_id}-{region}-s3-logs"
target_prefix: "{source_bucket_name}/"
policies:
- name: update-incorrect-or-missing-logging
resource: s3
filters:
- type: bucket-logging
op: not-equal
target_bucket: "{account_id}-{region}-s3-logs"
target_prefix: "{account}/{source_bucket_name}/"
actions:
- type: toggle-logging
target_bucket: "{account_id}-{region}-s3-logs"
target_prefix: "{account}/{source_bucket_name}/"
"""
schema = type_schema(
'bucket-logging',
op={'enum': ['enabled', 'disabled', 'equal', 'not-equal', 'eq', 'ne']},
required=['op'],
target_bucket={'type': 'string'},
target_prefix={'type': 'string'})
schema_alias = False
account_name = None
permissions = ("s3:GetBucketLogging", "iam:ListAccountAliases")
def process(self, buckets, event=None):
return list(filter(None, map(self.process_bucket, buckets)))
def process_bucket(self, b):
if self.match_bucket(b):
return b
def match_bucket(self, b):
op = self.data.get('op')
logging = b.get('Logging', {})
if op == 'disabled':
return logging == {}
elif op == 'enabled':
return logging != {}
if self.account_name is None:
session = local_session(self.manager.session_factory)
self.account_name = get_account_alias_from_sts(session)
variables = {
'account_id': self.manager.config.account_id,
'account': self.account_name,
'region': self.manager.config.region,
'source_bucket_name': b['Name'],
'target_bucket_name': self.data.get('target_bucket'),
'target_prefix': self.data.get('target_prefix'),
}
data = format_string_values(self.data, **variables)
target_bucket = data.get('target_bucket')
target_prefix = data.get('target_prefix', b['Name'] + '/')
target_config = {
"TargetBucket": target_bucket,
"TargetPrefix": target_prefix
} if target_bucket else {}
if op in ('not-equal', 'ne'):
return logging != target_config
else:
return logging == target_config
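# Illustrative expansion (editorial, hypothetical values): with op: not-equal
# and target_bucket: "{account_id}-{region}-s3-logs", a bucket named
# 'example-bucket' in account 123456789012 / us-east-1 is compared against
# {'TargetBucket': '123456789012-us-east-1-s3-logs',
#  'TargetPrefix': 'example-bucket/'}; target_prefix defaults to the bucket
# name plus '/' when omitted.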
@actions.register('delete-bucket-notification')
class DeleteBucketNotification(BucketActionBase):
"""Action to delete S3 bucket notification configurations"""
schema = type_schema(
'delete-bucket-notification',
required=['statement_ids'],
statement_ids={'oneOf': [
{'enum': ['matched']},
{'type': 'array', 'items': {'type': 'string'}}]})
permissions = ('s3:PutBucketNotification',)
def process_bucket(self, bucket):
n = bucket['Notification']
if not n:
return
statement_ids = self.data.get('statement_ids')
if statement_ids == 'matched':
statement_ids = bucket.get(BucketNotificationFilter.annotation_key, ())
if not statement_ids:
return
cfg = defaultdict(list)
for t in six.itervalues(BucketNotificationFilter.FIELDS):
for c in n.get(t, []):
if c['Id'] not in statement_ids:
cfg[t].append(c)
client = bucket_client(local_session(self.manager.session_factory), bucket)
client.put_bucket_notification_configuration(
Bucket=bucket['Name'],
NotificationConfiguration=cfg)
@actions.register('no-op')
class NoOp(BucketActionBase):
schema = type_schema('no-op')
permissions = ('s3:ListAllMyBuckets',)
def process(self, buckets):
return None
@actions.register('set-statements')
class SetPolicyStatement(BucketActionBase):
"""Action to add or update policy statements to S3 buckets
:example:
.. code-block:: yaml
policies:
- name: force-s3-https
resource: s3
actions:
- type: set-statements
statements:
- Sid: "DenyHttp"
Effect: "Deny"
Action: "s3:GetObject"
Principal:
AWS: "*"
Resource: "arn:aws:s3:::{bucket_name}/*"
Condition:
Bool:
"aws:SecureTransport": false
"""
permissions = ('s3:PutBucketPolicy',)
schema = type_schema(
'set-statements',
**{
'statements': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'Sid': {'type': 'string'},
'Effect': {'type': 'string', 'enum': ['Allow', 'Deny']},
'Principal': {'anyOf': [{'type': 'string'},
{'type': 'object'}, {'type': 'array'}]},
'NotPrincipal': {'anyOf': [{'type': 'object'}, {'type': 'array'}]},
'Action': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotAction': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Resource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotResource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Condition': {'type': 'object'}
},
'required': ['Sid', 'Effect'],
'oneOf': [
{'required': ['Principal', 'Action', 'Resource']},
{'required': ['NotPrincipal', 'Action', 'Resource']},
{'required': ['Principal', 'NotAction', 'Resource']},
{'required': ['NotPrincipal', 'NotAction', 'Resource']},
{'required': ['Principal', 'Action', 'NotResource']},
{'required': ['NotPrincipal', 'Action', 'NotResource']},
{'required': ['Principal', 'NotAction', 'NotResource']},
{'required': ['NotPrincipal', 'NotAction', 'NotResource']}
]
}
}
}
)
def process_bucket(self, bucket):
policy = bucket.get('Policy') or '{}'
target_statements = format_string_values(
copy.deepcopy({s['Sid']: s for s in self.data.get('statements', [])}),
**self.get_std_format_args(bucket))
policy = json.loads(policy)
        bucket_statements = policy.setdefault('Statement', [])
        # Sids must be unique within a policy, so same-Sid statements are
        # replaced in place rather than appended as duplicates.
        changed = False
        for index, s in enumerate(bucket_statements):
            if s.get('Sid') not in target_statements:
                continue
            if s == target_statements[s['Sid']]:
                target_statements.pop(s['Sid'])
            else:
                bucket_statements[index] = target_statements.pop(s['Sid'])
                changed = True
        if not target_statements and not changed:
            return
        bucket_statements.extend(target_statements.values())
policy = json.dumps(policy)
s3 = bucket_client(local_session(self.manager.session_factory), bucket)
s3.put_bucket_policy(Bucket=bucket['Name'], Policy=policy)
return {'Name': bucket['Name'], 'Policy': policy}
@actions.register('remove-statements')
class RemovePolicyStatement(RemovePolicyBase):
"""Action to remove policy statements from S3 buckets
:example:
.. code-block:: yaml
policies:
- name: s3-remove-encrypt-put
resource: s3
filters:
- type: has-statement
statement_ids:
- RequireEncryptedPutObject
actions:
- type: remove-statements
statement_ids:
- RequiredEncryptedPutObject
"""
permissions = ("s3:PutBucketPolicy", "s3:DeleteBucketPolicy")
def process(self, buckets):
with self.executor_factory(max_workers=3) as w:
futures = {}
results = []
for b in buckets:
futures[w.submit(self.process_bucket, b)] = b
for f in as_completed(futures):
if f.exception():
b = futures[f]
self.log.error('error modifying bucket:%s\n%s',
b['Name'], f.exception())
                    continue
                results += filter(None, [f.result()])
return results
def process_bucket(self, bucket):
p = bucket.get('Policy')
if p is None:
return
p = json.loads(p)
statements, found = self.process_policy(
p, bucket, CrossAccountAccessFilter.annotation_key)
if not found:
return
s3 = bucket_client(local_session(self.manager.session_factory), bucket)
if not statements:
s3.delete_bucket_policy(Bucket=bucket['Name'])
else:
s3.put_bucket_policy(Bucket=bucket['Name'], Policy=json.dumps(p))
return {'Name': bucket['Name'], 'State': 'PolicyRemoved', 'Statements': found}
@actions.register('toggle-versioning')
class ToggleVersioning(BucketActionBase):
"""Action to enable/suspend versioning on a S3 bucket
Note versioning can never be disabled only suspended.
:example:
.. code-block:: yaml
policies:
- name: s3-enable-versioning
resource: s3
filters:
- or:
- type: value
key: Versioning.Status
value: Suspended
- type: value
key: Versioning.Status
value: absent
actions:
- type: toggle-versioning
enabled: true
"""
schema = type_schema(
'toggle-versioning',
enabled={'type': 'boolean'})
permissions = ("s3:PutBucketVersioning",)
def process_versioning(self, resource, state):
client = bucket_client(
local_session(self.manager.session_factory), resource)
try:
client.put_bucket_versioning(
Bucket=resource['Name'],
VersioningConfiguration={
'Status': state})
except ClientError as e:
if e.response['Error']['Code'] != 'AccessDenied':
                log.error(
                    "Unable to put bucket versioning on bucket %s: %s" % (
                        resource['Name'], e))
raise
log.warning(
"Access Denied Bucket:%s while put bucket versioning" % resource['Name'])
# mfa delete enablement looks like it needs the serial and a current token.
def process(self, resources):
enabled = self.data.get('enabled', True)
for r in resources:
if 'Versioning' not in r or not r['Versioning']:
r['Versioning'] = {'Status': 'Suspended'}
if enabled and (
r['Versioning']['Status'] == 'Suspended'):
self.process_versioning(r, 'Enabled')
if not enabled and r['Versioning']['Status'] == 'Enabled':
self.process_versioning(r, 'Suspended')
@actions.register('toggle-logging')
class ToggleLogging(BucketActionBase):
"""Action to enable/disable logging on a S3 bucket.
Target bucket ACL must allow for WRITE and READ_ACP Permissions
Not specifying a target_prefix will default to the current bucket name.
https://docs.aws.amazon.com/AmazonS3/latest/dev/enable-logging-programming.html
:example:
.. code-block:: yaml
policies:
- name: s3-enable-logging
resource: s3
filters:
- "tag:Testing": present
actions:
- type: toggle-logging
target_bucket: log-bucket
target_prefix: logs123/
policies:
- name: s3-force-standard-logging
resource: s3
filters:
- type: bucket-logging
op: not-equal
target_bucket: "{account_id}-{region}-s3-logs"
target_prefix: "{account}/{source_bucket_name}/"
actions:
- type: toggle-logging
target_bucket: "{account_id}-{region}-s3-logs"
target_prefix: "{account}/{source_bucket_name}/"
"""
schema = type_schema(
'toggle-logging',
enabled={'type': 'boolean'},
target_bucket={'type': 'string'},
target_prefix={'type': 'string'})
permissions = ("s3:PutBucketLogging", "iam:ListAccountAliases")
def validate(self):
if self.data.get('enabled', True):
if not self.data.get('target_bucket'):
raise PolicyValidationError(
"target_bucket must be specified on %s" % (
self.manager.data,))
return self
def process(self, resources):
enabled = self.data.get('enabled', True)
# Account name for variable expansion
session = local_session(self.manager.session_factory)
account_name = get_account_alias_from_sts(session)
for r in resources:
client = bucket_client(session, r)
is_logging = bool(r.get('Logging'))
if enabled:
variables = {
'account_id': self.manager.config.account_id,
'account': account_name,
'region': self.manager.config.region,
'source_bucket_name': r['Name'],
'target_bucket_name': self.data.get('target_bucket'),
'target_prefix': self.data.get('target_prefix'),
}
data = format_string_values(self.data, **variables)
config = {
'TargetBucket': data.get('target_bucket'),
'TargetPrefix': data.get('target_prefix', r['Name'] + '/')
}
if not is_logging or r.get('Logging') != config:
client.put_bucket_logging(
Bucket=r['Name'],
BucketLoggingStatus={'LoggingEnabled': config}
)
elif not enabled and is_logging:
client.put_bucket_logging(
Bucket=r['Name'], BucketLoggingStatus={})
@actions.register('attach-encrypt')
class AttachLambdaEncrypt(BucketActionBase):
"""Action attaches lambda encryption policy to S3 bucket
supports attachment via lambda bucket notification or sns notification
to invoke lambda. a special topic value of `default` will utilize an
extant notification or create one matching the bucket name.
:example:
.. code-block:: yaml
policies:
- name: attach-lambda-encrypt
resource: s3
filters:
- type: missing-policy-statement
actions:
- type: attach-encrypt
role: arn:aws:iam::123456789012:role/my-role
"""
schema = type_schema(
'attach-encrypt',
role={'type': 'string'},
tags={'type': 'object'},
topic={'type': 'string'})
permissions = (
"s3:PutBucketNotification", "s3:GetBucketNotification",
# lambda manager uses quite a few perms to provision lambdas
        # and event sources; hard to disambiguate, so punt for now.
"lambda:*",
)
def __init__(self, data=None, manager=None):
self.data = data or {}
self.manager = manager
def validate(self):
if (not getattr(self.manager.config, 'dryrun', True) and
not self.data.get('role', self.manager.config.assume_role)):
raise PolicyValidationError(
"attach-encrypt: role must be specified either "
"via assume or in config on %s" % (self.manager.data,))
return self
def process(self, buckets):
from c7n.mu import LambdaManager
from c7n.ufuncs.s3crypt import get_function
account_id = self.manager.config.account_id
topic_arn = self.data.get('topic')
func = get_function(
None, self.data.get('role', self.manager.config.assume_role),
account_id=account_id, tags=self.data.get('tags'))
regions = set([get_region(b) for b in buckets])
# session managers by region
region_sessions = {}
for r in regions:
region_sessions[r] = functools.partial(
self.manager.session_factory, region=r)
# Publish function to all of our buckets regions
region_funcs = {}
for r in regions:
lambda_mgr = LambdaManager(region_sessions[r])
lambda_mgr.publish(func)
region_funcs[r] = func
with self.executor_factory(max_workers=3) as w:
results = []
futures = []
for b in buckets:
region = get_region(b)
futures.append(
w.submit(
self.process_bucket,
region_funcs[region],
b,
topic_arn,
account_id,
region_sessions[region]
))
for f in as_completed(futures):
if f.exception():
log.exception(
"Error attaching lambda-encrypt %s" % (f.exception()))
                    continue
                results.append(f.result())
return list(filter(None, results))
def process_bucket(self, func, bucket, topic, account_id, session_factory):
from c7n.mu import BucketSNSNotification, BucketLambdaNotification
if topic:
topic = None if topic == 'default' else topic
source = BucketSNSNotification(session_factory, bucket, topic)
else:
source = BucketLambdaNotification(
{'account_s3': account_id}, session_factory, bucket)
return source.add(func)
@actions.register('encryption-policy')
class EncryptionRequiredPolicy(BucketActionBase):
"""Action to apply an encryption policy to S3 buckets
:example:
.. code-block:: yaml
policies:
- name: s3-enforce-encryption
resource: s3
mode:
type: cloudtrail
events:
- CreateBucket
actions:
- encryption-policy
"""
permissions = ("s3:GetBucketPolicy", "s3:PutBucketPolicy")
schema = type_schema('encryption-policy')
def __init__(self, data=None, manager=None):
self.data = data or {}
self.manager = manager
def process(self, buckets):
with self.executor_factory(max_workers=3) as w:
results = w.map(self.process_bucket, buckets)
results = list(filter(None, list(results)))
return results
def process_bucket(self, b):
p = b['Policy']
if p is None:
log.info("No policy found, creating new")
p = {'Version': "2012-10-17", "Statement": []}
else:
p = json.loads(p)
encryption_sid = "RequiredEncryptedPutObject"
encryption_statement = {
'Sid': encryption_sid,
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
"Resource": "arn:aws:s3:::%s/*" % b['Name'],
"Condition": {
# AWS Managed Keys or KMS keys, note policy language
# does not support custom kms (todo add issue)
"StringNotEquals": {
"s3:x-amz-server-side-encryption": ["AES256", "aws:kms"]}}}
statements = p.get('Statement', [])
for s in list(statements):
if s.get('Sid', '') == encryption_sid:
log.debug("Bucket:%s Found extant encrypt policy", b['Name'])
if s != encryption_statement:
log.info(
"Bucket:%s updating extant encrypt policy", b['Name'])
statements.remove(s)
else:
return
session = self.manager.session_factory()
s3 = bucket_client(session, b)
statements.append(encryption_statement)
p['Statement'] = statements
log.info('Bucket:%s attached encryption policy' % b['Name'])
try:
s3.put_bucket_policy(
Bucket=b['Name'],
Policy=json.dumps(p))
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucket':
return
self.log.exception(
"Error on bucket:%s putting policy\n%s error:%s",
b['Name'],
json.dumps(statements, indent=2), e)
raise
return {'Name': b['Name'], 'State': 'PolicyAttached'}
class BucketScanLog(object):
"""Offload remediated key ids to a disk file in batches
A bucket keyspace is effectively infinite, we need to store partial
results out of memory, this class provides for a json log on disk
with partial write support.
json output format:
- [list_of_serialized_keys],
- [] # Empty list of keys at end when we close the buffer
"""
def __init__(self, log_dir, name):
self.log_dir = log_dir
self.name = name
self.fh = None
self.count = 0
@property
def path(self):
return os.path.join(self.log_dir, "%s.json" % self.name)
def __enter__(self):
# Don't require output directories
if self.log_dir is None:
return
self.fh = open(self.path, 'w')
self.fh.write("[\n")
return self
def __exit__(self, exc_type=None, exc_value=None, exc_frame=None):
if self.fh is None:
return
# we need an empty marker list at end to avoid trailing commas
self.fh.write("[]")
# and close the surrounding list
self.fh.write("\n]")
self.fh.close()
if not self.count:
os.remove(self.fh.name)
self.fh = None
return False
def add(self, keys):
self.count += len(keys)
if self.fh is None:
return
self.fh.write(dumps(keys))
self.fh.write(",\n")
class ScanBucket(BucketActionBase):
permissions = ("s3:ListBucket",)
bucket_ops = {
'standard': {
'iterator': 'list_objects',
'contents_key': ['Contents'],
'key_processor': 'process_key'
},
'versioned': {
'iterator': 'list_object_versions',
'contents_key': ['Versions'],
'key_processor': 'process_version'
}
}
def __init__(self, data, manager=None):
super(ScanBucket, self).__init__(data, manager)
self.denied_buckets = set()
def get_bucket_style(self, b):
        return (
            'versioned'
            if b.get('Versioning', {'Status': ''}).get('Status') in (
                'Enabled', 'Suspended')
            else 'standard')
def get_bucket_op(self, b, op_name):
bucket_style = self.get_bucket_style(b)
op = self.bucket_ops[bucket_style][op_name]
if op_name == 'key_processor':
return getattr(self, op)
return op
def get_keys(self, b, key_set):
content_keys = self.get_bucket_op(b, 'contents_key')
keys = []
for ck in content_keys:
keys.extend(key_set.get(ck, []))
return keys
def process(self, buckets):
results = self._process_with_futures(self.process_bucket, buckets)
self.write_denied_buckets_file()
return results
def _process_with_futures(self, helper, buckets, max_workers=3):
results = []
with self.executor_factory(max_workers) as w:
futures = {}
for b in buckets:
futures[w.submit(helper, b)] = b
for f in as_completed(futures):
if f.exception():
b = futures[f]
self.log.error(
"Error on bucket:%s region:%s policy:%s error: %s",
b['Name'], b.get('Location', 'unknown'),
self.manager.data.get('name'), f.exception())
self.denied_buckets.add(b['Name'])
continue
result = f.result()
if result:
results.append(result)
return results
def write_denied_buckets_file(self):
if self.denied_buckets and self.manager.ctx.log_dir:
with open(
os.path.join(
self.manager.ctx.log_dir, 'denied.json'), 'w') as fh:
json.dump(list(self.denied_buckets), fh, indent=2)
self.denied_buckets = set()
def process_bucket(self, b):
log.info(
"Scanning bucket:%s visitor:%s style:%s" % (
b['Name'], self.__class__.__name__, self.get_bucket_style(b)))
s = self.manager.session_factory()
s3 = bucket_client(s, b)
        # The bulk of the _process_bucket function executes inline in the
        # calling thread/worker context; neither the paginator nor the
        # bucket scan log should be used across a worker boundary.
p = s3.get_paginator(
self.get_bucket_op(b, 'iterator')).paginate(Bucket=b['Name'])
with BucketScanLog(self.manager.ctx.log_dir, b['Name']) as key_log:
with self.executor_factory(max_workers=10) as w:
try:
return self._process_bucket(b, p, key_log, w)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucket':
log.warning(
"Bucket:%s removed while scanning" % b['Name'])
return
if e.response['Error']['Code'] == 'AccessDenied':
log.warning(
"Access Denied Bucket:%s while scanning" % b['Name'])
self.denied_buckets.add(b['Name'])
return
log.exception(
"Error processing bucket:%s paginator:%s" % (
b['Name'], p))
__call__ = process_bucket
def _process_bucket(self, b, p, key_log, w):
count = 0
for key_set in p:
keys = self.get_keys(b, key_set)
count += len(keys)
futures = []
for batch in chunks(keys, size=100):
if not batch:
continue
futures.append(w.submit(self.process_chunk, batch, b))
for f in as_completed(futures):
if f.exception():
log.exception("Exception Processing bucket:%s key batch %s" % (
b['Name'], f.exception()))
continue
r = f.result()
if r:
key_log.add(r)
# Log completion at info level, progress at debug level
if key_set['IsTruncated']:
log.debug('Scan progress bucket:%s keys:%d remediated:%d ...',
b['Name'], count, key_log.count)
else:
log.info('Scan Complete bucket:%s keys:%d remediated:%d',
b['Name'], count, key_log.count)
b['KeyScanCount'] = count
b['KeyRemediated'] = key_log.count
return {
'Bucket': b['Name'], 'Remediated': key_log.count, 'Count': count}
def process_chunk(self, batch, bucket):
raise NotImplementedError()
def process_key(self, s3, key, bucket_name, info=None):
raise NotImplementedError()
    def process_version(self, s3, key, bucket_name):
        raise NotImplementedError()
@actions.register('encrypt-keys')
class EncryptExtantKeys(ScanBucket):
"""Action to encrypt unencrypted S3 objects
:example:
.. code-block:: yaml
policies:
- name: s3-encrypt-objects
resource: s3
actions:
- type: encrypt-keys
crypto: aws:kms
key-id: 9c3983be-c6cf-11e6-9d9d-cec0c932ce01
"""
permissions = (
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObjectVersion",
"s3:RestoreObject",
) + ScanBucket.permissions
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['encrypt-keys']},
'report-only': {'type': 'boolean'},
'glacier': {'type': 'boolean'},
'large': {'type': 'boolean'},
'crypto': {'enum': ['AES256', 'aws:kms']},
'key-id': {'type': 'string'}
},
'dependencies': {
'key-id': {
'properties': {
'crypto': {'pattern': 'aws:kms'}
},
'required': ['crypto']
}
}
}
metrics = [
('Total Keys', {'Scope': 'Account'}),
('Unencrypted', {'Scope': 'Account'})]
def __init__(self, data, manager=None):
super(EncryptExtantKeys, self).__init__(data, manager)
self.kms_id = self.data.get('key-id')
def get_permissions(self):
perms = ("s3:GetObject", "s3:GetObjectVersion")
if self.data.get('report-only'):
perms += ('s3:DeleteObject', 's3:DeleteObjectVersion',
's3:PutObject',
's3:AbortMultipartUpload',
's3:ListBucket',
's3:ListBucketVersions')
return perms
def process(self, buckets):
t = time.time()
results = super(EncryptExtantKeys, self).process(buckets)
run_time = time.time() - t
remediated_count = object_count = 0
for r in results:
object_count += r['Count']
remediated_count += r['Remediated']
self.manager.ctx.metrics.put_metric(
"Unencrypted", r['Remediated'], "Count", Scope=r['Bucket'],
buffer=True)
self.manager.ctx.metrics.put_metric(
"Unencrypted", remediated_count, "Count", Scope="Account",
buffer=True
)
self.manager.ctx.metrics.put_metric(
"Total Keys", object_count, "Count", Scope="Account",
buffer=True
)
self.manager.ctx.metrics.flush()
log.info(
("EncryptExtant Complete keys:%d "
"remediated:%d rate:%0.2f/s time:%0.2fs"),
object_count,
remediated_count,
float(object_count) / run_time if run_time else 0,
run_time)
return results
def process_chunk(self, batch, bucket):
crypto_method = self.data.get('crypto', 'AES256')
s3 = bucket_client(
local_session(self.manager.session_factory), bucket,
kms=(crypto_method == 'aws:kms'))
b = bucket['Name']
results = []
key_processor = self.get_bucket_op(bucket, 'key_processor')
for key in batch:
r = key_processor(s3, key, b)
if r:
results.append(r)
return results
def process_key(self, s3, key, bucket_name, info=None):
k = key['Key']
if info is None:
info = s3.head_object(Bucket=bucket_name, Key=k)
# If the data is already encrypted with AES256 and this request is also
# for AES256 then we don't need to do anything
if info.get('ServerSideEncryption') == 'AES256' and not self.kms_id:
return False
if info.get('ServerSideEncryption') == 'aws:kms':
# If we're not looking for a specific key any key will do.
if not self.kms_id:
return False
# If we're configured to use a specific key and the key matches
# note this is not a strict equality match.
if self.kms_id in info.get('SSEKMSKeyId', ''):
return False
if self.data.get('report-only'):
return k
storage_class = info.get('StorageClass', 'STANDARD')
if storage_class == 'GLACIER':
if not self.data.get('glacier'):
return False
if 'Restore' not in info:
# This takes multiple hours, we let the next c7n
# run take care of followups.
s3.restore_object(
Bucket=bucket_name,
Key=k,
RestoreRequest={'Days': 30})
return False
elif not restore_complete(info['Restore']):
return False
storage_class = 'STANDARD'
crypto_method = self.data.get('crypto', 'AES256')
key_id = self.data.get('key-id')
# Note on copy we lose individual object acl grants
params = {'Bucket': bucket_name,
'Key': k,
'CopySource': "/%s/%s" % (bucket_name, k),
'MetadataDirective': 'COPY',
'StorageClass': storage_class,
'ServerSideEncryption': crypto_method}
if key_id and crypto_method == 'aws:kms':
params['SSEKMSKeyId'] = key_id
if info['ContentLength'] > MAX_COPY_SIZE and self.data.get(
'large', True):
return self.process_large_file(s3, bucket_name, key, info, params)
s3.copy_object(**params)
return k
def process_version(self, s3, key, bucket_name):
info = s3.head_object(
Bucket=bucket_name,
Key=key['Key'],
VersionId=key['VersionId'])
if 'ServerSideEncryption' in info:
return False
if self.data.get('report-only'):
return key['Key'], key['VersionId']
if key['IsLatest']:
r = self.process_key(s3, key, bucket_name, info)
# Glacier request processing, wait till we have the restored object
if not r:
return r
s3.delete_object(
Bucket=bucket_name,
Key=key['Key'],
VersionId=key['VersionId'])
return key['Key'], key['VersionId']
def process_large_file(self, s3, bucket_name, key, info, params):
"""For objects over 5gb, use multipart upload to copy"""
part_size = MAX_COPY_SIZE - (1024 ** 2)
num_parts = int(math.ceil(info['ContentLength'] / part_size))
source = params.pop('CopySource')
params.pop('MetadataDirective')
if 'Metadata' in info:
params['Metadata'] = info['Metadata']
upload_id = s3.create_multipart_upload(**params)['UploadId']
params = {'Bucket': bucket_name,
'Key': key['Key'],
'UploadId': upload_id,
'CopySource': source,
'CopySourceIfMatch': info['ETag']}
def upload_part(part_num):
part_params = dict(params)
part_params['CopySourceRange'] = "bytes=%d-%d" % (
part_size * (part_num - 1),
min(part_size * part_num - 1, info['ContentLength'] - 1))
part_params['PartNumber'] = part_num
response = s3.upload_part_copy(**part_params)
return {'ETag': response['CopyPartResult']['ETag'],
'PartNumber': part_num}
try:
with self.executor_factory(max_workers=2) as w:
parts = list(w.map(upload_part, range(1, num_parts + 1)))
except Exception:
log.warning(
"Error during large key copy bucket: %s key: %s, "
"aborting upload", bucket_name, key, exc_info=True)
s3.abort_multipart_upload(
Bucket=bucket_name, Key=key['Key'], UploadId=upload_id)
raise
s3.complete_multipart_upload(
Bucket=bucket_name, Key=key['Key'], UploadId=upload_id,
MultipartUpload={'Parts': parts})
return key['Key']
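# Worked example (editorial, hypothetical numbers, assuming true division):
# with part_size 100 bytes and ContentLength 250, num_parts is
# ceil(250 / 100) = 3, and upload_part() above copies bytes=0-99,
# bytes=100-199, then bytes=200-249; the min() clamps the final range to
# ContentLength - 1.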
def restore_complete(restore):
if ',' in restore:
ongoing, avail = restore.split(',', 1)
else:
ongoing = restore
return 'false' in ongoing
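# Illustrative values (per the S3 HEAD object 'Restore' header format): an
# in-flight Glacier restore reads 'ongoing-request="true"', for which
# restore_complete() returns False; a finished one reads something like
# 'ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 GMT"',
# where 'false' in the leading segment makes it return True.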
@filters.register('is-log-target')
class LogTarget(Filter):
"""Filter and return buckets are log destinations.
Not suitable for use in lambda on large accounts, This is a api
heavy process to detect scan all possible log sources.
Sources:
- elb (Access Log)
- s3 (Access Log)
- cfn (Template writes)
- cloudtrail
:example:
.. code-block:: yaml
policies:
- name: s3-log-bucket
resource: s3
filters:
- type: is-log-target
"""
schema = type_schema(
'is-log-target',
services={'type': 'array', 'items': {'enum': [
's3', 'elb', 'cloudtrail']}},
self={'type': 'boolean'},
value={'type': 'boolean'})
def get_permissions(self):
perms = self.manager.get_resource_manager('elb').get_permissions()
perms += ('elasticloadbalancing:DescribeLoadBalancerAttributes',)
return perms
def process(self, buckets, event=None):
log_buckets = set()
count = 0
services = self.data.get('services', ['elb', 's3', 'cloudtrail'])
self_log = self.data.get('self', False)
if 'elb' in services and not self_log:
for bucket, _ in self.get_elb_bucket_locations():
log_buckets.add(bucket)
count += 1
self.log.debug("Found %d elb log targets" % count)
if 's3' in services:
count = 0
for bucket, _ in self.get_s3_bucket_locations(buckets, self_log):
count += 1
log_buckets.add(bucket)
self.log.debug('Found %d s3 log targets' % count)
if 'cloudtrail' in services and not self_log:
for bucket, _ in self.get_cloud_trail_locations(buckets):
log_buckets.add(bucket)
self.log.info("Found %d log targets for %d buckets" % (
len(log_buckets), len(buckets)))
if self.data.get('value', True):
return [b for b in buckets if b['Name'] in log_buckets]
else:
return [b for b in buckets if b['Name'] not in log_buckets]
@staticmethod
def get_s3_bucket_locations(buckets, self_log=False):
"""return (bucket_name, prefix) for all s3 logging targets"""
for b in buckets:
if b.get('Logging'):
if self_log:
if b['Name'] != b['Logging']['TargetBucket']:
continue
yield (b['Logging']['TargetBucket'],
b['Logging']['TargetPrefix'])
if not self_log and b['Name'].startswith('cf-templates-'):
yield (b['Name'], '')
def get_cloud_trail_locations(self, buckets):
session = local_session(self.manager.session_factory)
client = session.client('cloudtrail')
names = set([b['Name'] for b in buckets])
for t in client.describe_trails().get('trailList', ()):
if t.get('S3BucketName') in names:
yield (t['S3BucketName'], t.get('S3KeyPrefix', ''))
def get_elb_bucket_locations(self):
elbs = self.manager.get_resource_manager('elb').resources()
get_elb_attrs = functools.partial(
_query_elb_attrs, self.manager.session_factory)
with self.executor_factory(max_workers=2) as w:
futures = []
for elb_set in chunks(elbs, 100):
futures.append(w.submit(get_elb_attrs, elb_set))
for f in as_completed(futures):
if f.exception():
log.error("Error while scanning elb log targets: %s" % (
f.exception()))
continue
for tgt in f.result():
yield tgt
def _query_elb_attrs(session_factory, elb_set):
session = local_session(session_factory)
client = session.client('elb')
log_targets = []
for e in elb_set:
try:
attrs = client.describe_load_balancer_attributes(
LoadBalancerName=e['LoadBalancerName'])[
'LoadBalancerAttributes']
if 'AccessLog' in attrs and attrs['AccessLog']['Enabled']:
log_targets.append((
attrs['AccessLog']['S3BucketName'],
attrs['AccessLog']['S3BucketPrefix']))
except Exception as err:
log.warning(
"Could not retrieve load balancer %s: %s" % (
e['LoadBalancerName'], err))
return log_targets
@actions.register('remove-website-hosting')
class RemoveWebsiteHosting(BucketActionBase):
"""Action that removes website hosting configuration."""
schema = type_schema('remove-website-hosting')
permissions = ('s3:DeleteBucketWebsite',)
def process(self, buckets):
session = local_session(self.manager.session_factory)
for bucket in buckets:
client = bucket_client(session, bucket)
client.delete_bucket_website(Bucket=bucket['Name'])
@actions.register('delete-global-grants')
class DeleteGlobalGrants(BucketActionBase):
"""Deletes global grants associated to a S3 bucket
:example:
.. code-block:: yaml
policies:
- name: s3-delete-global-grants
resource: s3
filters:
- type: global-grants
actions:
- delete-global-grants
"""
schema = type_schema(
'delete-global-grants',
grantees={'type': 'array', 'items': {'type': 'string'}})
permissions = ('s3:PutBucketAcl',)
def process(self, buckets):
with self.executor_factory(max_workers=5) as w:
return list(filter(None, list(w.map(self.process_bucket, buckets))))
def process_bucket(self, b):
grantees = self.data.get(
'grantees', [
GlobalGrantsFilter.AUTH_ALL, GlobalGrantsFilter.GLOBAL_ALL])
log.info(b)
acl = b.get('Acl', {'Grants': []})
if not acl or not acl['Grants']:
return
new_grants = []
for grant in acl['Grants']:
grantee = grant.get('Grantee', {})
if not grantee:
continue
# Yuck, 'get_bucket_acl' doesn't return the grantee type.
if 'URI' in grantee:
grantee['Type'] = 'Group'
else:
grantee['Type'] = 'CanonicalUser'
if ('URI' in grantee and
grantee['URI'] in grantees and not
(grant['Permission'] == 'READ' and b['Website'])):
# Remove this grantee.
pass
else:
new_grants.append(grant)
log.info({'Owner': acl['Owner'], 'Grants': new_grants})
c = bucket_client(self.manager.session_factory(), b)
try:
c.put_bucket_acl(
Bucket=b['Name'],
AccessControlPolicy={
'Owner': acl['Owner'], 'Grants': new_grants})
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucket':
return
return b
@actions.register('tag')
class BucketTag(Tag):
"""Action to create tags on a S3 bucket
:example:
.. code-block:: yaml
policies:
- name: s3-tag-region
resource: s3
region: us-east-1
filters:
- "tag:RegionName": absent
actions:
- type: tag
key: RegionName
value: us-east-1
"""
def process_resource_set(self, client, resource_set, tags):
modify_bucket_tags(self.manager.session_factory, resource_set, tags)
@actions.register('mark-for-op')
class MarkBucketForOp(TagDelayedAction):
"""Action schedules custodian to perform an action at a certain date
:example:
.. code-block:: yaml
policies:
- name: s3-encrypt
resource: s3
filters:
- type: missing-statement
statement_ids:
- RequiredEncryptedPutObject
actions:
- type: mark-for-op
op: attach-encrypt
days: 7
"""
schema = type_schema(
'mark-for-op', rinherit=TagDelayedAction.schema)
@actions.register('unmark')
@actions.register('remove-tag')
class RemoveBucketTag(RemoveTag):
"""Removes tag/tags from a S3 object
:example:
.. code-block:: yaml
policies:
- name: s3-remove-owner-tag
resource: s3
filters:
- "tag:BucketOwner": present
actions:
- type: remove-tag
tags: ['BucketOwner']
"""
def process_resource_set(self, client, resource_set, tags):
modify_bucket_tags(
self.manager.session_factory, resource_set, remove_tags=tags)
@filters.register('data-events')
class DataEvents(Filter):
schema = type_schema('data-events', state={'enum': ['present', 'absent']})
permissions = (
'cloudtrail:DescribeTrails',
'cloudtrail:GetEventSelectors')
def get_event_buckets(self, session, trails):
"""Return a mapping of bucket name to cloudtrail.
For wildcard trails the bucket name is ''.
"""
regions = {t.get('HomeRegion') for t in trails}
clients = {}
for region in regions:
clients[region] = session.client('cloudtrail', region_name=region)
event_buckets = {}
for t in trails:
for events in clients[t.get('HomeRegion')].get_event_selectors(
TrailName=t['Name']).get('EventSelectors', ()):
if 'DataResources' not in events:
continue
for data_events in events['DataResources']:
if data_events['Type'] != 'AWS::S3::Object':
continue
for b in data_events['Values']:
event_buckets[b.rsplit(':')[-1].strip('/')] = t['Name']
return event_buckets
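    # Editorial note: the rsplit/strip above turns a data-resource value such
    # as 'arn:aws:s3:::example-bucket/' into 'example-bucket', and the
    # all-buckets wildcard 'arn:aws:s3:::' into '', which is why process()
    # below also treats a '' entry as matching every bucket.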
def process(self, resources, event=None):
trails = self.manager.get_resource_manager('cloudtrail').resources()
session = local_session(self.manager.session_factory)
event_buckets = self.get_event_buckets(session, trails)
ops = {
'present': lambda x: (
x['Name'] in event_buckets or '' in event_buckets),
'absent': (
lambda x: x['Name'] not in event_buckets and ''
not in event_buckets)}
op = ops[self.data['state']]
results = []
for b in resources:
if op(b):
results.append(b)
return results
@filters.register('inventory')
class Inventory(ValueFilter):
"""Filter inventories for a bucket"""
schema = type_schema('inventory', rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('s3:GetInventoryConfiguration',)
def process(self, buckets, event=None):
results = []
with self.executor_factory(max_workers=2) as w:
futures = {}
for b in buckets:
futures[w.submit(self.process_bucket, b)] = b
for f in as_completed(futures):
b = futures[f]
if f.exception():
b.setdefault('c7n:DeniedMethods', []).append('GetInventoryConfiguration')
self.log.error(
"Error processing bucket: %s error: %s",
b['Name'], f.exception())
continue
if f.result():
results.append(b)
return results
def process_bucket(self, b):
if 'c7n:inventories' not in b:
client = bucket_client(local_session(self.manager.session_factory), b)
inventories = client.list_bucket_inventory_configurations(
Bucket=b['Name']).get('InventoryConfigurationList', [])
b['c7n:inventories'] = inventories
for i in b['c7n:inventories']:
if self.match(i):
return True
@actions.register('set-inventory')
class SetInventory(BucketActionBase):
"""Configure bucket inventories for an s3 bucket.
"""
schema = type_schema(
'set-inventory',
required=['name', 'destination'],
state={'enum': ['enabled', 'disabled', 'absent']},
name={'type': 'string', 'description': 'Name of inventory'},
destination={'type': 'string', 'description': 'Name of destination bucket'},
prefix={'type': 'string', 'description': 'Destination prefix'},
encryption={'enum': ['SSES3', 'SSEKMS']},
key_id={'type': 'string', 'description': 'Optional Customer KMS KeyId for SSE-KMS'},
versions={'enum': ['All', 'Current']},
schedule={'enum': ['Daily', 'Weekly']},
format={'enum': ['CSV', 'ORC', 'Parquet']},
fields={'type': 'array', 'items': {'enum': [
'Size', 'LastModifiedDate', 'StorageClass', 'ETag',
'IsMultipartUploaded', 'ReplicationStatus', 'EncryptionStatus',
'ObjectLockRetainUntilDate', 'ObjectLockMode', 'ObjectLockLegalHoldStatus',
'IntelligentTieringAccessTier']}})
permissions = ('s3:PutInventoryConfiguration', 's3:GetInventoryConfiguration')
def process(self, buckets):
with self.executor_factory(max_workers=2) as w:
futures = {w.submit(self.process_bucket, bucket): bucket for bucket in buckets}
for future in as_completed(futures):
bucket = futures[future]
try:
future.result()
except Exception as e:
self.log.error('Message: %s Bucket: %s', e, bucket['Name'])
def process_bucket(self, b):
inventory_name = self.data.get('name')
destination = self.data.get('destination')
prefix = self.data.get('prefix', '')
schedule = self.data.get('schedule', 'Daily')
fields = self.data.get('fields', ['LastModifiedDate', 'Size'])
versions = self.data.get('versions', 'Current')
state = self.data.get('state', 'enabled')
encryption = self.data.get('encryption')
inventory_format = self.data.get('format', 'CSV')
if not prefix:
prefix = "Inventories/%s" % (self.manager.config.account_id)
client = bucket_client(local_session(self.manager.session_factory), b)
if state == 'absent':
try:
client.delete_bucket_inventory_configuration(
Bucket=b['Name'], Id=inventory_name)
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchConfiguration':
raise
return
bucket = {
'Bucket': "arn:aws:s3:::%s" % destination,
'Format': inventory_format
}
inventory = {
'Destination': {
'S3BucketDestination': bucket
},
            'IsEnabled': state == 'enabled',
'Id': inventory_name,
'OptionalFields': fields,
'IncludedObjectVersions': versions,
'Schedule': {
'Frequency': schedule
}
}
if prefix:
bucket['Prefix'] = prefix
if encryption:
bucket['Encryption'] = {encryption: {}}
if encryption == 'SSEKMS' and self.data.get('key_id'):
bucket['Encryption'] = {encryption: {
'KeyId': self.data['key_id']
}}
found = self.get_inventory_delta(client, inventory, b)
if found:
return
if found is False:
self.log.debug("updating bucket:%s inventory configuration id:%s",
b['Name'], inventory_name)
client.put_bucket_inventory_configuration(
Bucket=b['Name'], Id=inventory_name, InventoryConfiguration=inventory)
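    # Editorial note: get_inventory_delta() below is tri-state: None means no
    # inventory with this Id exists yet, True means an identical one is
    # already in place (process_bucket returns early above), and False means
    # one exists but differs and gets overwritten.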
def get_inventory_delta(self, client, inventory, b):
inventories = client.list_bucket_inventory_configurations(Bucket=b['Name'])
found = None
for i in inventories.get('InventoryConfigurationList', []):
if i['Id'] != inventory['Id']:
continue
found = True
for k, v in inventory.items():
if k not in i:
found = False
continue
if isinstance(v, list):
v.sort()
i[k].sort()
if i[k] != v:
found = False
return found
@actions.register('delete')
class DeleteBucket(ScanBucket):
"""Action deletes a S3 bucket
:example:
.. code-block:: yaml
policies:
- name: delete-unencrypted-buckets
resource: s3
filters:
- type: missing-statement
statement_ids:
- RequiredEncryptedPutObject
actions:
- type: delete
remove-contents: true
"""
schema = type_schema('delete', **{'remove-contents': {'type': 'boolean'}})
permissions = ('s3:*',)
bucket_ops = {
'standard': {
'iterator': 'list_objects',
'contents_key': ['Contents'],
'key_processor': 'process_key'
},
'versioned': {
'iterator': 'list_object_versions',
'contents_key': ['Versions', 'DeleteMarkers'],
'key_processor': 'process_version'
}
}
def process_delete_enablement(self, b):
"""Prep a bucket for deletion.
Clear out any pending multi-part uploads.
Disable versioning on the bucket, so deletes don't
generate fresh deletion markers.
"""
client = bucket_client(
local_session(self.manager.session_factory), b)
# Stop replication so we can suspend versioning
if b.get('Replication') is not None:
client.delete_bucket_replication(Bucket=b['Name'])
# Suspend versioning, so we don't get new delete markers
# as we walk and delete versions
if (self.get_bucket_style(b) == 'versioned' and b['Versioning']['Status'] == 'Enabled' and
self.data.get('remove-contents', True)):
client.put_bucket_versioning(
Bucket=b['Name'],
VersioningConfiguration={'Status': 'Suspended'})
# Clear our multi-part uploads
uploads = client.get_paginator('list_multipart_uploads')
for p in uploads.paginate(Bucket=b['Name']):
for u in p.get('Uploads', ()):
client.abort_multipart_upload(
Bucket=b['Name'],
Key=u['Key'],
UploadId=u['UploadId'])
def process(self, buckets):
# might be worth sanity checking all our permissions
# on the bucket up front before disabling versioning/replication.
if self.data.get('remove-contents', True):
self._process_with_futures(self.process_delete_enablement, buckets)
self.empty_buckets(buckets)
results = self._process_with_futures(self.delete_bucket, buckets)
self.write_denied_buckets_file()
return results
def delete_bucket(self, b):
s3 = bucket_client(self.manager.session_factory(), b)
try:
self._run_api(s3.delete_bucket, Bucket=b['Name'])
except ClientError as e:
if e.response['Error']['Code'] == 'BucketNotEmpty':
self.log.error(
"Error while deleting bucket %s, bucket not empty" % (
b['Name']))
else:
raise e
def empty_buckets(self, buckets):
t = time.time()
results = super(DeleteBucket, self).process(buckets)
run_time = time.time() - t
object_count = 0
for r in results:
object_count += r['Count']
self.manager.ctx.metrics.put_metric(
"Total Keys", object_count, "Count", Scope=r['Bucket'],
buffer=True)
self.manager.ctx.metrics.put_metric(
"Total Keys", object_count, "Count", Scope="Account", buffer=True)
self.manager.ctx.metrics.flush()
log.info(
"EmptyBucket buckets:%d Complete keys:%d rate:%0.2f/s time:%0.2fs",
len(buckets), object_count,
float(object_count) / run_time if run_time else 0, run_time)
return results
def process_chunk(self, batch, bucket):
s3 = bucket_client(local_session(self.manager.session_factory), bucket)
objects = []
for key in batch:
obj = {'Key': key['Key']}
if 'VersionId' in key:
obj['VersionId'] = key['VersionId']
objects.append(obj)
results = s3.delete_objects(
Bucket=bucket['Name'], Delete={'Objects': objects}).get('Deleted', ())
if self.get_bucket_style(bucket) != 'versioned':
return results
@actions.register('configure-lifecycle')
class Lifecycle(BucketActionBase):
"""Action applies a lifecycle policy to versioned S3 buckets
The schema to supply to the rule follows the schema here:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_bucket_lifecycle_configuration
To delete a lifecycle rule, supply Status=absent
:example:
.. code-block:: yaml
policies:
- name: s3-apply-lifecycle
resource: s3
actions:
- type: configure-lifecycle
rules:
- ID: my-lifecycle-id
Status: Enabled
Prefix: foo/
Transitions:
- Days: 60
StorageClass: GLACIER
"""
schema = type_schema(
'configure-lifecycle',
**{
'rules': {
'type': 'array',
'items': {
'type': 'object',
'required': ['ID', 'Status'],
'additionalProperties': False,
'properties': {
'ID': {'type': 'string'},
# c7n intercepts `absent`
'Status': {'enum': ['Enabled', 'Disabled', 'absent']},
'Prefix': {'type': 'string'},
'Expiration': {
'type': 'object',
'additionalProperties': False,
'properties': {
'Date': {'type': 'string'}, # Date
'Days': {'type': 'integer'},
'ExpiredObjectDeleteMarker': {'type': 'boolean'},
},
},
'Filter': {
'type': 'object',
'minProperties': 1,
'maxProperties': 1,
'additionalProperties': False,
'properties': {
'Prefix': {'type': 'string'},
'Tag': {
'type': 'object',
'required': ['Key', 'Value'],
'additionalProperties': False,
'properties': {
'Key': {'type': 'string'},
'Value': {'type': 'string'},
},
},
'And': {
'type': 'object',
'additionalProperties': False,
'properties': {
'Prefix': {'type': 'string'},
'Tags': {
'type': 'array',
'items': {
'type': 'object',
'required': ['Key', 'Value'],
'additionalProperties': False,
'properties': {
'Key': {'type': 'string'},
'Value': {'type': 'string'},
},
},
},
},
},
},
},
'Transitions': {
'type': 'array',
'items': {
'type': 'object',
'additionalProperties': False,
'properties': {
'Date': {'type': 'string'}, # Date
'Days': {'type': 'integer'},
'StorageClass': {'type': 'string'},
},
},
},
'NoncurrentVersionTransitions': {
'type': 'array',
'items': {
'type': 'object',
'additionalProperties': False,
'properties': {
'NoncurrentDays': {'type': 'integer'},
'StorageClass': {'type': 'string'},
},
},
},
'NoncurrentVersionExpiration': {
'type': 'object',
'additionalProperties': False,
'properties': {
'NoncurrentDays': {'type': 'integer'},
},
},
'AbortIncompleteMultipartUpload': {
'type': 'object',
'additionalProperties': False,
'properties': {
'DaysAfterInitiation': {'type': 'integer'},
},
},
},
},
},
}
)
permissions = ('s3:GetLifecycleConfiguration', 's3:PutLifecycleConfiguration')
def process(self, buckets):
with self.executor_factory(max_workers=3) as w:
futures = {}
results = []
for b in buckets:
futures[w.submit(self.process_bucket, b)] = b
for future in as_completed(futures):
if future.exception():
bucket = futures[future]
self.log.error('error modifying bucket lifecycle: %s\n%s',
bucket['Name'], future.exception())
                    continue
                results += filter(None, [future.result()])
return results
def process_bucket(self, bucket):
s3 = bucket_client(local_session(self.manager.session_factory), bucket)
if 'get_bucket_lifecycle_configuration' in bucket.get('c7n:DeniedMethods', []):
log.warning("Access Denied Bucket:%s while reading lifecycle" % bucket['Name'])
return
# Adjust the existing lifecycle by adding/deleting/overwriting rules as necessary
config = (bucket.get('Lifecycle') or {}).get('Rules', [])
for rule in self.data['rules']:
for index, existing_rule in enumerate(config):
if rule['ID'] == existing_rule['ID']:
if rule['Status'] == 'absent':
config[index] = None
else:
config[index] = rule
break
else:
if rule['Status'] != 'absent':
config.append(rule)
# The extra `list` conversion is required for python3
config = list(filter(None, config))
try:
if not config:
s3.delete_bucket_lifecycle(Bucket=bucket['Name'])
else:
s3.put_bucket_lifecycle_configuration(
Bucket=bucket['Name'], LifecycleConfiguration={'Rules': config})
except ClientError as e:
if e.response['Error']['Code'] == 'AccessDenied':
log.warning("Access Denied Bucket:%s while applying lifecycle" % bucket['Name'])
else:
raise e
class KMSKeyResolverMixin(object):
"""Builds a dictionary of region specific ARNs"""
def __init__(self, data, manager=None):
self.arns = dict()
self.data = data
self.manager = manager
def resolve_keys(self, buckets):
if 'key' not in self.data:
return None
regions = {get_region(b) for b in buckets}
for r in regions:
client = local_session(self.manager.session_factory).client('kms', region_name=r)
try:
self.arns[r] = client.describe_key(
KeyId=self.data.get('key')
).get('KeyMetadata').get('Arn')
except ClientError as e:
self.log.error('Error resolving kms ARNs for set-bucket-encryption: %s key: %s' % (
e, self.data.get('key')))
def get_key(self, bucket):
if 'key' not in self.data:
return None
region = get_region(bucket)
key = self.arns.get(region)
if not key:
self.log.warning('Unable to resolve key %s for bucket %s in region %s',
                             self.data.get('key'), bucket.get('Name'), region)
return key
@filters.register('bucket-encryption')
class BucketEncryption(KMSKeyResolverMixin, Filter):
"""Filters for S3 buckets that have bucket-encryption
    :example:
.. code-block:: yaml
policies:
- name: s3-bucket-encryption-AES256
resource: s3
region: us-east-1
filters:
- type: bucket-encryption
state: True
crypto: AES256
- name: s3-bucket-encryption-KMS
resource: s3
region: us-east-1
filters:
- type: bucket-encryption
state: True
crypto: aws:kms
key: alias/some/alias/key
- name: s3-bucket-encryption-off
resource: s3
region: us-east-1
filters:
- type: bucket-encryption
state: False
"""
schema = type_schema('bucket-encryption',
state={'type': 'boolean'},
crypto={'type': 'string', 'enum': ['AES256', 'aws:kms']},
key={'type': 'string'})
permissions = ('s3:GetEncryptionConfiguration', 'kms:DescribeKey')
def process(self, buckets, event=None):
self.resolve_keys(buckets)
results = []
with self.executor_factory(max_workers=2) as w:
futures = {w.submit(self.process_bucket, b): b for b in buckets}
for future in as_completed(futures):
b = futures[future]
if future.exception():
self.log.error("Message: %s Bucket: %s", future.exception(),
b['Name'])
continue
if future.result():
results.append(b)
return results
def process_bucket(self, b):
client = bucket_client(local_session(self.manager.session_factory), b)
rules = []
try:
be = client.get_bucket_encryption(Bucket=b['Name'])
b['c7n:bucket-encryption'] = be
            rules = be.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])
except ClientError as e:
if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError':
raise
# default `state` to True as previous impl assumed state == True
# to preserve backwards compatibility
if self.data.get('state', True):
for sse in rules:
return self.filter_bucket(b, sse)
return False
else:
for sse in rules:
return not self.filter_bucket(b, sse)
return True
def filter_bucket(self, b, sse):
allowed = ['AES256', 'aws:kms']
key = self.get_key(b)
crypto = self.data.get('crypto')
rule = sse.get('ApplyServerSideEncryptionByDefault')
algo = rule.get('SSEAlgorithm')
if not crypto and algo in allowed:
return True
if crypto == 'AES256' and algo == 'AES256':
return True
elif crypto == 'aws:kms' and algo == 'aws:kms':
if key:
if rule.get('KMSMasterKeyID') == key:
return True
else:
return False
else:
return True
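# Illustrative response shape (editorial, hypothetical key ARN): each `sse`
# rule examined by filter_bucket() above looks like
#   {'ApplyServerSideEncryptionByDefault': {
#       'SSEAlgorithm': 'aws:kms',
#       'KMSMasterKeyID': 'arn:aws:kms:us-east-1:123456789012:key/1234abcd'}}
# with KMSMasterKeyID absent for plain AES256 configurations.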
@actions.register('set-bucket-encryption')
class SetBucketEncryption(KMSKeyResolverMixin, BucketActionBase):
"""Action enables default encryption on S3 buckets
    `enabled`: boolean. Optional: defaults to True.
    `crypto`: `aws:kms` | `AES256`. Optional: defaults to AES256.
    `key`: arn, alias, or kms key id.
:example:
.. code-block:: yaml
policies:
- name: s3-enable-default-encryption-kms
resource: s3
actions:
- type: set-bucket-encryption
# enabled: true <------ optional (true by default)
crypto: aws:kms
key: 1234abcd-12ab-34cd-56ef-1234567890ab
- name: s3-enable-default-encryption-kms-alias
resource: s3
actions:
- type: set-bucket-encryption
# enabled: true <------ optional (true by default)
crypto: aws:kms
key: alias/some/alias/key
- name: s3-enable-default-encryption-aes256
resource: s3
actions:
- type: set-bucket-encryption
# crypto: AES256 <----- optional (AES256 by default)
# enabled: true <------ optional (true by default)
- name: s3-disable-default-encryption
resource: s3
actions:
- type: set-bucket-encryption
enabled: false
"""
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['set-bucket-encryption']},
'enabled': {'type': 'boolean'},
'crypto': {'enum': ['aws:kms', 'AES256']},
'key': {'type': 'string'}
},
'dependencies': {
'key': {
'properties': {
'crypto': {'pattern': 'aws:kms'}
},
'required': ['crypto']
}
}
}
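    # Note: the JSON-schema 'dependencies' clause above makes 'key' valid
    # only when 'crypto' is also supplied and set to aws:kms.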
permissions = ('s3:PutEncryptionConfiguration', 's3:GetEncryptionConfiguration',
'kms:ListAliases', 'kms:DescribeKey')
def process(self, buckets):
if self.data.get('enabled', True):
self.resolve_keys(buckets)
with self.executor_factory(max_workers=3) as w:
futures = {w.submit(self.process_bucket, b): b for b in buckets}
for future in as_completed(futures):
if future.exception():
self.log.error('Message: %s Bucket: %s', future.exception(),
futures[future]['Name'])
def process_bucket(self, bucket):
s3 = bucket_client(local_session(self.manager.session_factory), bucket)
if not self.data.get('enabled', True):
s3.delete_bucket_encryption(Bucket=bucket['Name'])
return
algo = self.data.get('crypto', 'AES256')
config = {'Rules': [
{'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': algo}}
]}
if algo == 'aws:kms':
key = self.get_key(bucket)
if not key:
raise Exception('Valid KMS Key required but does not exist')
(config['Rules'][0]['ApplyServerSideEncryptionByDefault']
['KMSMasterKeyID']) = key
s3.put_bucket_encryption(
Bucket=bucket['Name'],
ServerSideEncryptionConfiguration=config
)
| apache-2.0 | -1,985,373,751,195,002,600 | 34.820595 | 132 | 0.518708 | false |
jonathanstrong/functor | setup.py | 1 | 1091 | #!/usr/bin/env python
# Bootstrap installation of Distribute
import distribute_setup
distribute_setup.use_setuptools()
import os
from setuptools import setup
PROJECT = u'Functor'
VERSION = '0.1'
URL = ''
AUTHOR = u'Jonathan Strong'
AUTHOR_EMAIL = u'[email protected]'
DESC = "Implements a function-object pattern in Python."
def read_file(file_name):
file_path = os.path.join(
os.path.dirname(__file__),
file_name
)
return open(file_path).read()
setup(
name=PROJECT,
version=VERSION,
description=DESC,
long_description=read_file('README.md'),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=read_file('LICENSE'),
namespace_packages=[],
packages=[u'functor'],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Requirements -*-
],
entry_points = {
# -*- Entry points -*-
},
classifiers=[
# see http://pypi.python.org/pypi?:action=list_classifiers
# -*- Classifiers -*-
"Programming Language :: Python",
],
)
| mit | 4,604,809,639,414,675,000 | 20.82 | 63 | 0.628781 | false |
fusic-com/flask-webcache | tests/test_storage.py | 1 | 12927 | from __future__ import unicode_literals
import unittest
from datetime import timedelta, datetime
from six.moves.cPickle import dumps, loads
from six import iteritems
from flask import Flask, send_file
from werkzeug.wrappers import Response
from werkzeug.datastructures import HeaderSet
from werkzeug.contrib.cache import SimpleCache
from flask_webcache.storage import Config, Metadata, Store, Retrieval
from flask_webcache.storage import (CacheMiss, NoResourceMetadata, NoMatchingRepresentation, NotFreshEnoughForClient,
RecacheRequested)
from flask_webcache.recache import RECACHE_HEADER
from flask_webcache.utils import werkzeug_cache_get_or_add
from testutils import compare_numbers
a = Flask(__name__)
class UtilsTestCase(unittest.TestCase):
def test_config_kwargs(self):
with self.assertRaises(TypeError):
Config(foo=1)
def test_metadata_datastructure(self):
def check_metadata(m):
self.assertEquals(m.salt, 'qux')
self.assertIn('foo', m.vary)
self.assertIn('bar', m.vary)
m = Metadata(HeaderSet(('foo', 'bar')), 'qux')
check_metadata(m)
check_metadata(loads(dumps(m)))
m2 = Metadata(HeaderSet(('foo', 'bar')), 'qux')
self.assertEquals(m, m2)
m3 = Metadata(HeaderSet(('foo', 'bar')), 'notqux')
self.assertNotEquals(m2, m3)
class StorageTestCase(unittest.TestCase):
def setUp(self):
self.c = SimpleCache()
self.s = Store(self.c)
self.r = Retrieval(self.c)
def test_basic_cachability(self):
with a.test_request_context('/foo'):
self.assertFalse(self.s.should_cache_response(Response(x for x in 'foo')))
self.assertTrue(self.s.should_cache_response(Response(status=204)))
self.assertFalse(self.s.should_cache_response(Response(status=500)))
self.assertTrue(self.s.should_cache_response(Response('foo')))
self.assertTrue(self.s.should_cache_response(Response()))
r = Response()
r.vary.add('*')
self.assertFalse(self.s.should_cache_response(r))
with a.test_request_context('/foo', method='HEAD'):
self.assertFalse(self.s.should_cache_response(Response('foo')))
with a.test_request_context('/foo', method='POST'):
self.assertFalse(self.s.should_cache_response(Response('foo')))
def test_cache_control_cachability(self):
def check_response_with_cache_control(**cc):
r = Response()
for k, v in iteritems(cc):
setattr(r.cache_control, k, v)
return self.s.should_cache_response(r)
with a.test_request_context():
self.assertTrue(check_response_with_cache_control(max_age=10))
self.assertTrue(check_response_with_cache_control(must_revalidate=True))
self.assertFalse(check_response_with_cache_control(max_age=0))
self.assertFalse(check_response_with_cache_control(private=True))
self.assertFalse(check_response_with_cache_control(no_cache=True))
self.assertFalse(check_response_with_cache_control(no_store=True))
def test_expire_cachability(self):
def check_response_with_expires(dt):
r = Response()
r.expires = dt
return self.s.should_cache_response(r)
with a.test_request_context():
self.assertFalse(check_response_with_expires(datetime.utcnow() - timedelta(seconds=1)))
self.assertTrue(check_response_with_expires(datetime.utcnow() + timedelta(seconds=1)))
def test_default_cachability(self):
with a.test_request_context('/foo'):
self.assertTrue(self.s.should_cache_response(Response()))
with a.test_request_context('/foo', query_string='?bar'):
self.assertFalse(self.s.should_cache_response(Response()))
def test_x_cache_headers(self):
r = Response()
self.s.mark_cache_hit(r)
self.assertEquals(r.headers[self.s.X_CACHE_HEADER], 'hit')
self.s.mark_cache_miss(r)
self.assertEquals(r.headers[self.s.X_CACHE_HEADER], 'miss')
def test_metadata_miss(self):
with self.assertRaises(NoResourceMetadata):
with a.test_request_context('/foo'):
self.r.fetch_metadata()
def test_response_miss(self):
with self.assertRaises(NoResourceMetadata):
with a.test_request_context('/foo'):
self.r.fetch_response()
def test_store_retrieve_cycle(self):
with a.test_request_context('/foo'):
r = Response('foo')
self.s.cache_response(r)
self.assertEquals(len(self.c._cache), 2)
r2 = self.r.fetch_response()
self.assertEquals(r.data, r2.data)
def test_vary_miss(self):
with a.test_request_context('/foo', headers=(('accept-encoding', 'gzip'),)):
r = Response('foo')
r.vary.add('accept-encoding')
r.content_encoding = 'gzip'
self.s.cache_response(r)
with self.assertRaises(NoMatchingRepresentation):
with a.test_request_context('/foo'):
self.r.fetch_response()
def test_invalidation_condition(self):
with a.test_request_context('/foo', method="PUT"):
r = Response('foo')
self.assertTrue(self.s.should_invalidate_resource(r))
r = Response('foo', status=500)
self.assertFalse(self.s.should_invalidate_resource(r))
with a.test_request_context('/foo'):
r = Response('foo')
self.assertFalse(self.s.should_invalidate_resource(r))
def test_invalidation(self):
with a.test_request_context('/foo'):
r = Response('foo')
self.s.cache_response(r)
self.assertEquals(len(self.c._cache), 2)
with a.test_request_context('/foo', method="PUT"):
r = Response('foo')
self.assertTrue(self.s.should_invalidate_resource(r))
self.s.invalidate_resource()
self.assertEquals(len(self.c._cache), 1)
with self.assertRaises(CacheMiss):
with a.test_request_context('/foo'):
self.r.fetch_response()
def test_master_salt_invalidation(self):
with a.test_request_context('/foo'):
r = Response('foo')
self.s.cache_response(r)
self.assertEquals(self.r.fetch_response().data, b'foo')
self.r.config.master_salt = 'newsalt'
with self.assertRaises(NoMatchingRepresentation):
self.r.fetch_response()
def test_request_cache_controls(self):
with a.test_request_context('/foo'):
self.assertTrue(self.r.should_fetch_response())
with a.test_request_context('/foo', method='HEAD'):
self.assertTrue(self.r.should_fetch_response())
with a.test_request_context('/foo', method='POST'):
self.assertFalse(self.r.should_fetch_response())
with a.test_request_context('/foo', headers=(('cache-control', 'no-cache'),)):
self.assertFalse(self.r.should_fetch_response())
with a.test_request_context('/foo', headers=(('pragma', 'no-cache'),)):
self.assertFalse(self.r.should_fetch_response())
with a.test_request_context('/foo', headers=(('cache-control', 'max-age=0'),)):
self.assertFalse(self.r.should_fetch_response())
with a.test_request_context('/foo', headers=(('cache-control', 'max-age=5'),)):
self.assertTrue(self.r.should_fetch_response())
def test_response_freshness_seconds(self):
        # this test is racy; if it takes about a second to run, it might fail
r = Response()
self.assertEquals(0, self.r.response_freshness_seconds(r))
r.date = datetime.utcnow()
self.assertTrue(compare_numbers(self.s.DEFAULT_EXPIRATION_SECONDS,
self.r.response_freshness_seconds(r),
1))
r.expires = datetime.utcnow() + timedelta(seconds=345)
self.assertTrue(compare_numbers(345, self.r.response_freshness_seconds(r), 1))
r.cache_control.max_age=789
self.assertTrue(compare_numbers(789, self.r.response_freshness_seconds(r), 1))
def test_min_fresh(self):
        # this test is racy; if it takes about a second to run, it might fail
r = Response()
r.date = datetime.utcnow() - timedelta(seconds=100)
r.cache_control.max_age = 200
f = self.r.response_freshness_seconds(r)
with a.test_request_context('/foo', headers=(('cache-control', 'min-fresh=50'),)):
try:
self.r.verify_response_freshness_or_miss(r, f)
except CacheMiss:
self.fail('unexpected CacheMiss on reasonably fresh response')
with a.test_request_context('/foo', headers=(('cache-control', 'min-fresh=150'),)):
self.assertRaises(NotFreshEnoughForClient, self.r.verify_response_freshness_or_miss, r, f)
def test_request_cache_control_disobedience(self):
c = SimpleCache()
cfg = Config(request_controls_cache=False)
s = Store(c, cfg)
r = Retrieval(c, cfg)
with a.test_request_context('/foo', headers=(('cache-control', 'no-store'),)):
self.assertTrue(r.should_fetch_response())
with a.test_request_context('/foo', headers=(('cache-control', 'no-store'),)):
self.assertTrue(s.should_cache_response(Response()))
with a.test_request_context('/foo', headers=(('cache-control', 'no-store'),)):
self.assertTrue(s.should_cache_response(Response()))
resp = Response()
resp.date = datetime.utcnow() - timedelta(seconds=100)
resp.cache_control.max_age = 200
with a.test_request_context('/foo', headers=(('cache-control', 'min-fresh=150'),)):
f = self.r.response_freshness_seconds(resp)
try:
r.verify_response_freshness_or_miss(resp, f)
except CacheMiss:
self.fail('unexpected CacheMiss when ignoring request cache control')
def test_sequence_converted_responses(self):
with a.test_request_context('/foo'):
r = Response(f for f in 'foo')
r.make_sequence()
self.assertFalse(self.s.should_cache_response(r))
r = send_file(__file__)
r.make_sequence()
self.assertFalse(self.s.should_cache_response(r))
class RecacheTestCase(unittest.TestCase):
def setUp(self):
self.recached = False
def dispatcher(salt):
self.recached = True
self.c = SimpleCache()
cfg = Config(preemptive_recache_seconds=10, preemptive_recache_callback=dispatcher)
self.s = Store(self.c, cfg)
self.r = Retrieval(self.c, cfg)
def test_preemptive_recaching_predicate(self):
m = Metadata(HeaderSet(('foo', 'bar')), 'qux')
def mkretr(**kwargs):
return Retrieval(self.c, Config(**kwargs))
with a.test_request_context('/'):
self.assertFalse(mkretr(preemptive_recache_seconds=10).should_recache_preemptively(10, m))
self.assertFalse(mkretr(preemptive_recache_callback=lambda x: 0).should_recache_preemptively(10, m))
self.assertFalse(self.r.should_recache_preemptively(11, m))
self.assertTrue(self.r.should_recache_preemptively(10, m))
self.assertFalse(self.r.should_recache_preemptively(10, m))
self.c.clear()
self.assertTrue(self.r.should_recache_preemptively(10, m))
def test_preemptive_recaching_cache_bypass(self):
fresh = Response('foo')
with a.test_request_context('/foo'):
self.s.cache_response(fresh)
metadata = self.r.fetch_metadata()
with a.test_request_context('/foo'):
cached = self.r.fetch_response()
self.assertEquals(cached.headers[self.r.X_CACHE_HEADER], 'hit')
with a.test_request_context('/foo', headers={RECACHE_HEADER: metadata.salt}):
self.assertRaises(RecacheRequested, self.r.fetch_response)
with a.test_request_context('/foo', headers={RECACHE_HEADER: 'incorrect-salt'}):
try:
self.r.fetch_response()
except RecacheRequested:
self.fail('unexpected RecacheRequested for incorrect salt')
class UtilityTestCase(unittest.TestCase):
def setUp(self):
self.c = SimpleCache()
def test_werkzeug_cache_get_or_add_missing_key(self):
self.assertEquals('bar', werkzeug_cache_get_or_add(self.c, 'foo', 'bar', 10))
def test_werkzeug_cache_get_or_add_existing_key(self):
self.c.set('foo', 'bar')
self.assertEquals('bar', werkzeug_cache_get_or_add(self.c, 'foo', 'qux', 10))
| mit | -3,667,972,195,193,208,300 | 45.003559 | 117 | 0.61886 | false |
eroicaleo/LearningPython | interview/leet/124_Binary_Tree_Maximum_Path_Sum.py | 1 | 1054 | #!/usr/bin/env python
from tree import *
class Solution:
def maxPathSum(self, root):
"""
:type root: TreeNode
:rtype: int
"""
        if root is None:
return 0
self.maxSum = root.val
self.maxPathSumNode(root)
return self.maxSum
    def maxPathSumNode(self, node):
        # Returns the best path sum that starts at `node` and descends into
        # at most one subtree, while updating self.maxSum with the best path
        # seen so far (which may join both subtrees through `node`).
        if node is None:
            return 0
        leftSum = self.maxPathSumNode(node.left)
        rightSum = self.maxPathSumNode(node.right)
        self.maxSum = max(leftSum+node.val, rightSum+node.val,
                          leftSum+node.val+rightSum, self.maxSum, node.val)
        # Only one branch may be extended upward to the parent.
        return max(leftSum+node.val, rightSum+node.val, node.val)
sol = Solution()
nodeString = "[-10,9,20,null,null,15,7]"
nodeString = "[1,2,3]"
nodeString = "[1,-2,-3,1,3,-2,null,-1]"
root = treeBuilder(nodeString)
traverse(root)
print(sol.maxPathSum(root))
| mit | -2,902,822,416,752,697,000 | 30 | 118 | 0.598672 | false |
ChileanVirtualObservatory/flask_endpoint | endpoint/run.py | 1 | 1314 | #This file is part of ChiVO, the Chilean Virtual Observatory
#A project sponsored by FONDEF (D11I1060)
#Copyright (C) 2015 Universidad Tecnica Federico Santa Maria Mauricio Solar
# Marcelo Mendoza
# Universidad de Chile Diego Mardones
# Pontificia Universidad Catolica Karim Pichara
# Universidad de Concepcion Ricardo Contreras
# Universidad de Santiago Victor Parada
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from app import app
app.run(debug=True)
| gpl-3.0 | 6,914,507,212,773,058,000 | 47.666667 | 81 | 0.667428 | false |
niklasf/python-prompt-toolkit | prompt_toolkit/layout/utils.py | 1 | 2590 | from __future__ import unicode_literals
from prompt_toolkit.utils import get_cwidth
__all__ = (
'token_list_len',
'token_list_width',
'token_list_to_text',
'explode_tokens',
'find_window_for_buffer_name',
)
def token_list_len(tokenlist):
"""
Return the amount of characters in this token list.
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples.
"""
return sum(len(item[1]) for item in tokenlist)
def token_list_width(tokenlist):
"""
Return the character width of this token list.
(Take double width characters into account.)
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples.
"""
return sum(get_cwidth(c) for item in tokenlist for c in item[1])
def token_list_to_text(tokenlist):
"""
Concatenate all the text parts again.
"""
return ''.join(item[1] for item in tokenlist)
def iter_token_lines(tokenlist):
"""
Iterator that yields tokenlists for each line.
"""
line = []
for token, c in explode_tokens(tokenlist):
line.append((token, c))
if c == '\n':
yield line
line = []
yield line
def split_lines(tokenlist):
"""
Take a single list of (Token, text) tuples and yield one such list for each
line.
"""
line = []
for token, string in tokenlist:
items = string.split('\n')
for item in items[:-1]:
if item:
line.append((token, item))
yield line
line = []
line.append((token, items[-1]))
if line:
yield line
def explode_tokens(tokenlist):
"""
Turn a list of (token, text) tuples into another list where each string is
exactly one character.
:param tokenlist: List of (token, text) tuples.
"""
result = []
for token, string in tokenlist:
for c in string:
result.append((token, c))
return result
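# A minimal sketch of what explode_tokens yields, using plain strings in
# place of real Token objects:
#
#     explode_tokens([('A', 'ab'), ('B', 'c')])
#     # -> [('A', 'a'), ('A', 'b'), ('B', 'c')]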
def find_window_for_buffer_name(layout, buffer_name):
"""
Look for a :class:`~prompt_toolkit.layout.containers.Window` in the Layout
that contains the :class:`~prompt_toolkit.layout.controls.BufferControl`
for the given buffer and return it. If no such Window is found, return None.
"""
from .containers import Window
from .controls import BufferControl
for l in layout.walk():
if isinstance(l, Window) and isinstance(l.content, BufferControl):
if l.content.buffer_name == buffer_name:
return l
| bsd-3-clause | 693,291,599,595,765,100 | 23.205607 | 80 | 0.602317 | false |
devbitstudio/portfolio | settings.py | 1 | 5950 | # Django settings for devbitstudio project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DOMAIN = 'devbitstudio.com'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
#~ DEFAULT_FROM_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
EMAIL_SUBJECT_PREFIX = 'DevBitStudio - '
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__).decode('utf-8'))
PROJECT_DIR = os.path.dirname(__file__)
RESULTS_PER_PAGE = 12
ADMINS = (
('William Ibarra Rodriguez', '[email protected]'),
('Miguel Pelfort Paz', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'devbitstudio', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': 'root', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'uploads/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''  # os.path.join(PROJECT_DIR, 'static/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'iea9ivk!*ms-#$i%ix0i0b3p=u&30v+h*)&c5!%byv^i6^15%3'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'marketing.urlcanon.URLCanonicalizationMiddleware',
)
ROOT_URLCONF = 'devbitstudio.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__),'templates').replace('\\', '/'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'main',
'django.contrib.sitemaps',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# for use with URL Canonicalization Middleware:
# this is the canonical hostname to be used by your app (required)
CANON_URL_HOST = 'devbitstudio.com'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | -7,184,461,981,706,605,000 | 32.806818 | 120 | 0.689412 | false |
ChromeDevTools/devtools-frontend | scripts/deps/roll_deps.py | 2 | 2410 | #!/usr/bin/env vpython
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Update manually maintained dependencies from Chromium.
"""
import argparse
import os
import shutil
import subprocess
import sys
# Files whose location within devtools-frontend matches the upstream location.
FILES = [
'v8/include/js_protocol.pdl',
'third_party/blink/renderer/core/css/css_properties.json5',
'third_party/blink/renderer/core/html/aria_properties.json5',
'third_party/blink/public/devtools_protocol/browser_protocol.pdl',
]
# Files whose location within devtools-frontend differs from the upstream location.
FILE_MAPPINGS = {
# chromium_path => devtools_frontend_path
'components/variations/proto/devtools/client_variations.js':
'front_end/third_party/chromium/client-variations/ClientVariations.js',
'third_party/axe-core/axe.d.ts': 'front_end/third_party/axe-core/axe.d.ts',
'third_party/axe-core/axe.js': 'front_end/third_party/axe-core/axe.js',
'third_party/axe-core/axe.min.js':
'front_end/third_party/axe-core/axe.min.js',
'third_party/axe-core/LICENSE': 'front_end/third_party/axe-core/LICENSE',
}
for f in FILES:
FILE_MAPPINGS[f] = f
def parse_options(cli_args):
parser = argparse.ArgumentParser(description='Roll dependencies from Chromium.')
parser.add_argument('chromium_dir', help='path to chromium/src directory')
parser.add_argument('devtools_dir',
help='path to devtools/devtools-frontend directory')
return parser.parse_args(cli_args)
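# Typical invocation (paths illustrative):
#   ./roll_deps.py ~/chromium/src ~/devtools/devtools-frontend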
def update(options):
subprocess.check_call(['git', 'fetch', 'origin'], cwd=options.chromium_dir)
subprocess.check_call(['git', 'checkout', 'origin/main'],
cwd=options.chromium_dir)
subprocess.check_call(['gclient', 'sync'], cwd=options.chromium_dir)
def copy_files(options):
for from_path, to_path in FILE_MAPPINGS.items():
from_path = os.path.normpath(from_path)
to_path = os.path.normpath(to_path)
print('%s => %s' % (from_path, to_path))
shutil.copy(os.path.join(options.chromium_dir, from_path),
os.path.join(options.devtools_dir, to_path))
if __name__ == '__main__':
OPTIONS = parse_options(sys.argv[1:])
update(OPTIONS)
copy_files(OPTIONS)
| bsd-3-clause | 2,989,384,766,150,173,000 | 36.076923 | 84 | 0.692946 | false |
google/makani | avionics/motor/motor_client.py | 1 | 50178 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line client for controlling motors."""
import collections
import os
import re
import socket
import subprocess
import tempfile
import threading
import time
import makani
from makani.avionics.common import actuator_types
from makani.avionics.common import aio
from makani.avionics.common import cmd_client
from makani.avionics.common import pack_avionics_messages
from makani.avionics.common import safety_codes
from makani.avionics.firmware.params import client as param_client
from makani.avionics.motor.firmware import config_params
from makani.avionics.motor.firmware import flags
from makani.avionics.network import aio_labels
from makani.avionics.network import aio_node
from makani.avionics.network import message_type
from makani.lib.python import c_helpers
import numpy as np
from scipy import interpolate
# TODO: implement NetworkConfig() to replace all these EnumHelper's.
aio_node_helper = c_helpers.EnumHelper('AioNode', aio_node)
motor_label_helper = c_helpers.EnumHelper('MotorLabel', aio_labels,
prefix='kMotor')
motor_error_helper = c_helpers.EnumHelper('MotorError', flags)
motor_warning_helper = c_helpers.EnumHelper('MotorWarning', flags)
def BuildMotorParamDict():
"""Builds a dict mapping motor param names to their indices."""
# Build up parameter list.
filename = os.path.join(makani.HOME, 'avionics/motor/firmware/io.c')
with open(filename) as f:
f_text = f.read()
# Get parameter array string.
re_string = r'static float \*g_mutable_param_addrs\[\] = {\s*^([\s\S]*)^};'
array_string = re.search(re_string, f_text, re.MULTILINE)
re_string = r'^ *&[\w\[\]]+.([\w\.\[\]]+)'
motor_param_keys = re.findall(re_string, array_string.group(0), re.MULTILINE)
return {key: ind for ind, key in enumerate(motor_param_keys)}
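# A sketch of the io.c layout the regexes above expect (field names are
# illustrative, not the real parameter list):
#
#   static float *g_mutable_param_addrs[] = {
#     &g_motor_limits.bus_current_limit,
#     &g_io_state.command_filter[0],
#   };
#
# which would yield the keys 'bus_current_limit' and 'command_filter[0]'.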
# Constants.
MOTORS = [mot.upper() for mot in motor_label_helper.ShortNames()]
CONTROLLER = 'kAioNodeControllerA'
OPERATOR = 'kAioNodeOperator'
MOTOR_PARAMS = BuildMotorParamDict()
MOTOR_ERROR_NAMES = collections.OrderedDict(
(error_bitmask, motor_error_helper.Name(error_bitmask))
for error_bitmask in motor_error_helper.Values()
if motor_error_helper.Name(error_bitmask) != 'kMotorErrorAll')
MOTOR_WARNING_NAMES = collections.OrderedDict(
(warning_bitmask, motor_warning_helper.Name(warning_bitmask))
for warning_bitmask in motor_warning_helper.Values()
if motor_warning_helper.Name(warning_bitmask) != 'kMotorWarningAll')
MOTOR_STATUS_NAMES = {val: key for key, val in flags.__dict__.items()
if key.startswith('kMotorStatus')}
GEN_TABLE_PATH = os.path.join(makani.HOME,
'avionics/motor/gen_lookup_table.py')
OMEGA_MIN_LIMIT = -260.0
OMEGA_MAX_LIMIT = 260.0
TORQUE_MIN_LIMIT = -600.0
TORQUE_MAX_LIMIT = 600.0
EPS32 = np.finfo(np.float32).eps
class MotorClientError(cmd_client.WingClientError):
pass
def MotorsAsBits(motor_list):
"""Returns a bitmask describing the motors in `motor_list`."""
return sum(1 << motor_label_helper.Value(motor.capitalize())
for motor in motor_list)
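# For illustration: if kMotorSbo has label value 0 and kMotorSbi label value
# 1 in the MotorLabel enum, then MotorsAsBits(['SBO', 'SBI']) == 0b11.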
def AioNodeNameFromMotorNickname(motor):
"""Returns AIO node name for the specified motor."""
return 'kAioNodeMotor' + motor.capitalize()
def AioNodeNameFromDynoNickname(motor):
"""Returns AIO node name for the specified dyno motor."""
return 'kAioNodeDynoMotor' + motor.capitalize()
def GetMotorErrorNames(error_bitmask):
"""Returns a list of error names corresponding to the specified bitmask."""
return GetFlagNames(error_bitmask, MOTOR_ERROR_NAMES, 0)
def GetMotorWarningNames(warning_bitmask):
"""Returns a list of warning names corresponding to the specified bitmask."""
return GetFlagNames(warning_bitmask, MOTOR_WARNING_NAMES, 0)
def GetFlagNames(bitmask, bitmask_dict, default_key=None):
"""Returns a list based on bitmask_dict corresponding to set bits in bitmask.
Args:
bitmask: Integer containing a bitmask of desired fields.
bitmask_dict: Dictionary with power-of-two integer keys and values
containing names of the corresponding bits.
default_key: Key to use if bitmask == 0. Set to None to return [].
Returns:
A list with the values of bitmask_dict specified by bitmask.
"""
if bitmask:
return [name for bit, name in bitmask_dict.iteritems() if bit & bitmask]
else:
if default_key is None:
return []
else:
return [bitmask_dict[default_key]]
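# Example with hypothetical flag names: GetFlagNames(0b101, {1: 'A', 2: 'B',
# 4: 'C'}) returns ['A', 'C'] (in dict iteration order), and
# GetFlagNames(0, {1: 'A'}, default_key=1) returns ['A'].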
def GenerateCommandData(args):
"""Generates the data to use for a given speed or torque command.
Args:
args: List containing command input file & optional loop parameter.
Returns:
data: Numpy array of time, torque and speed limits.
loop: Boolean of optional loop parameter.
Raises:
MotorClientError: An invalid filename or file format was specified.
"""
cmd_file = args[0]
if not os.path.isfile(cmd_file):
raise MotorClientError('Invalid filename: %s' % cmd_file)
# Handle 1st arg i.e. the command file.
if cmd_file.endswith(('.py', '.pycmd')): # Treat as a Python file.
with tempfile.NamedTemporaryFile() as table_file:
popen = subprocess.Popen([GEN_TABLE_PATH, '--input_file', cmd_file,
'--binary'],
stdout=table_file, stderr=subprocess.PIPE)
_, stderr = popen.communicate()
if popen.returncode != 0:
raise MotorClientError('Generation of lookup table from %s failed. '
'stderr:\n%s' % (cmd_file, stderr))
data = np.load(table_file.name)
print 'Using %s to generate command profile.' % cmd_file
else: # Treat as a text file for interpolation.
try:
data = np.loadtxt(cmd_file)
except (IOError, ValueError):
      raise MotorClientError(
          'Invalid input text file: %s. Should contain a table of time, '
          'torques, and speed limits with rows of the form:\n\n'
          'time torque1 torque2 ... torque8 omega_lower1 omega_lower2 ... '
          'omega_lower8 omega_upper1 omega_upper2 ... omega_upper8' % cmd_file)
print 'Using interpolated values from %s for command profile.' % cmd_file
if data.shape[1] != 25:
raise MotorClientError(
'Invalid number of columns in command table. Expected 25, got %d. '
'Revise input file to generate rows of the form:\n'
        'time torque1 torque2 ... torque8 omega_lower1 omega_lower2 ... '
'omega_lower8 omega_upper1 omega_upper2 ... omega_upper8'
% data.shape[1])
# Handle 2nd arg i.e. the optional parameter to repeat.
if len(args) == 1:
loop = False
print 'Defaulting to \"noloop\".'
else:
if args[1] == 'loop':
loop = True
elif args[1] == 'noloop':
loop = False
else:
raise MotorClientError('Invalid option: %s. Expecting \"loop\" or '
'[default] \"noloop\".' % args[1])
return data, loop
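# A minimal two-row interpolation table (values illustrative) has 25
# whitespace-separated columns per row:
#
#   0.0  0 0 0 0 0 0 0 0  -50 -50 -50 -50 -50 -50 -50 -50  50 50 50 50 50 50 50 50
#   1.0  0 0 0 0 0 0 0 0  -50 -50 -50 -50 -50 -50 -50 -50  50 50 50 50 50 50 50 50
#
# i.e. time, eight torques, eight lower speed limits, eight upper speed
# limits.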
def CheckCommandLimits(
cmd_min, cmd_max, cmd_min_limit, cmd_max_limit, cmd_type):
if cmd_min < cmd_min_limit or cmd_max > cmd_max_limit:
raise MotorClientError('Extreme %s outside of limits [%f, %f] '
'detected. Command not set.' %
(cmd_type, cmd_min_limit, cmd_max_limit))
if cmd_min > cmd_max:
    raise MotorClientError('Invalid %s range: min value %f is greater '
                           'than max value %f.' % (cmd_type, cmd_min, cmd_max))
class CommandProfile(object):
"""Maintains a lookup table of motor commands while running motors."""
def __init__(
self, t, motor_cmd, cmd_min_limit, cmd_max_limit, cmd_type,
loop_back=False):
self._loop_back = loop_back
self._t = t
self._motor_cmd_func = interpolate.interp1d(self._t, motor_cmd, axis=0)
cmd_max = np.max(motor_cmd)
cmd_min = np.min(motor_cmd)
print ('\nWith {t_start:.2f}s < t < {t_end:.2f}s:'
'\n min({type}) = {min:f}\n max({type}) = {max:f}\n'.format(
t_start=t[0], t_end=t[-1], type=cmd_type,
min=cmd_min, max=cmd_max))
CheckCommandLimits(cmd_min, cmd_max, cmd_min_limit, cmd_max_limit, cmd_type)
def __call__(self, t):
if self._loop_back:
t = np.mod(t, self._t[-1])
elif t > self._t[-1]:
return None
return list(self._motor_cmd_func(t))
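# Illustrative behavior: a profile built over t in [0, 10] with
# loop_back=True maps t = 12 back to t = 2; with loop_back=False, any
# t > 10 returns None.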
class MotorCommandClient(cmd_client.WingCommandClient):
"""Command line client for running M600 motors."""
prompt = '(motor_client) '
_NUM_RETRIES = 10
_MOTORS = 'motors'
_DYNOS = 'dynos'
def __init__(self, *args, **kwargs):
cmd_client.WingCommandClient.__init__(self, *args, **kwargs)
self._motors_selected = set()
self._dynos_selected = set()
self._spin_dir = {}
self._motor_runner = Runner(self._motors_selected, self._spin_dir)
self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
dyno_mode=True)
self._motor_listener = None
self._dyno_listener = None
self._torque = 0.0
self._omega_lower_limit = 0.0
self._omega_upper_limit = 0.0
self._arm_aio_client = aio.AioClient(
['kMessageTypeMotorSetState', 'kMessageTypeDynoMotorSetState'],
timeout=0.1)
self._set_param_aio_client = aio.AioClient(
['kMessageTypeMotorSetParam', 'kMessageTypeDynoMotorSetParam'],
timeout=0.1)
# The long range radio requires at least 2x160 ms for a complete command-
# response cycle.
self._ack_param_aio_client = aio.AioClient(
['kMessageTypeMotorAckParam'], timeout=0.35)
self._get_param_aio_client = aio.AioClient(
['kMessageTypeMotorGetParam', 'kMessageTypeDynoMotorGetParam'],
timeout=0.1)
self._param_client = param_client.Client(timeout=0.1)
def TryStopThreads(self):
self._motor_runner.TryStop()
self._dyno_runner.TryStop()
if self._motor_listener:
self._motor_listener.TryStop()
if self._dyno_listener:
self._dyno_listener.TryStop()
def _GetListenerAndRunner(self, node_type):
if node_type == self._MOTORS:
return self._motor_listener, self._motor_runner
elif node_type == self._DYNOS:
return self._dyno_listener, self._dyno_runner
else:
raise MotorClientError('Unknown node type.')
def _CheckStatus(self, valid_statuses, node_type):
listener, _ = self._GetListenerAndRunner(node_type)
if not listener:
status = flags.kMotorStatusInit
else:
status = listener.GetMostRestrictiveMotorStatus()
if status not in valid_statuses:
raise MotorClientError(
'Invalid %s status. %s' % (
node_type.capitalize(), MOTOR_STATUS_NAMES[status]))
return True
def _CheckMotorStatus(self, valid_statuses):
self._CheckStatus(valid_statuses, self._MOTORS)
def _CheckDynoStatus(self, valid_statuses):
self._CheckStatus(valid_statuses, self._DYNOS)
def _CheckTargetsSelected(self):
if self._motors_selected or self._dynos_selected:
return True
else:
raise MotorClientError('Invalid set of targets. Use either: '
'"set_targets" or "set_targets_dyno".')
def _SetTargets(self, line, node_type):
"""Sets motor or dyno targets.
Args:
line: User supplied arguments specifying target motors.
node_type: String specifying type of targets i.e. 'motors' or 'dynos'.
Raises:
MotorClientError: An invalid set of targets was specified.
"""
targets_selected, _ = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, require_all=True,
select_all=True, require_one=False)
if node_type == self._MOTORS:
self._motors_selected = targets_selected
motor_params = self._QueryConfig(self._motors_selected, self._MOTORS)
self._spin_dir = self._GetSpinDir(motor_params)
elif node_type == self._DYNOS:
self._dynos_selected = targets_selected
self._QueryConfig(self._dynos_selected, self._DYNOS)
self.TryStopThreads()
if self._motors_selected:
print 'Motors selected: %s.' % ', '.join(self._motors_selected)
self._motor_runner = Runner(self._motors_selected, self._spin_dir)
self._motor_listener = Listener(self._motor_runner.StopRun,
self._motors_selected)
if self._dynos_selected:
print 'Dynos selected: %s.' % ', '.join(self._dynos_selected)
self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
dyno_mode=True)
self._dyno_listener = Listener(self._dyno_runner.StopRun,
self._dynos_selected, dyno_mode=True)
@cmd_client.Command()
def do_set_targets(self, line): # pylint: disable=invalid-name
"""Sets motor targets e.g. "set_targets SBO SBI"."""
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._SetTargets(line, self._MOTORS)
@cmd_client.Command()
def do_set_targets_dyno(self, line): # pylint: disable=invalid-name
"""Sets dyno targets e.g. "set_targets_dyno SBO SBI"."""
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._SetTargets(line, self._DYNOS)
@cmd_client.Command()
def do_get_targets(self, line): # pylint: disable=invalid-name
"""Displays selected motor & dyno targets."""
print 'Current targets.\nMotors: %s.\nDynos: %s.' % (
', '.join(self._motors_selected), ', '.join(self._dynos_selected))
@cmd_client.Command()
def do_clear_targets(self, line): # pylint: disable=invalid-name
"""Clears selected motor & dyno targets."""
old_motors = self._motors_selected.copy()
old_dynos = self._dynos_selected.copy()
self.TryStopThreads()
self._motors_selected = set()
self._dynos_selected = set()
self._spin_dir = {}
self._motor_runner = Runner(self._motors_selected, self._spin_dir)
self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
dyno_mode=True)
self._motor_listener = None
self._dyno_listener = None
print 'Cleared old targets.\nOld Motors: %s.\nOld Dynos: %s.' % (
', '.join(old_motors), ', '.join(old_dynos))
def complete_set_targets(self, text, *unused_args): # pylint: disable=invalid-name
return self._CompleteArg(text, sorted(MOTORS) + ['All'])
complete_set_targets_dyno = complete_set_targets
def _GetSpinDir(self, params):
"""Determine the nominal spin direction based off of the motor load type."""
# List of props that need to spin in the positive x direction / in the
# negative omega sense.
# Additional loads are to be added in future commits.
reversed_loads = [config_params.MotorLoadType.PROP_REV2_POSITIVE_X]
return {key: -1 if param and param.load_type in reversed_loads else 1
for key, param in params.iteritems()}
def _QueryConfig(self, targets, target_type):
"""Test if targets are on the network and query their configurations."""
params = {}
for target in targets:
if target_type == self._DYNOS:
node = aio_node_helper.Value(AioNodeNameFromDynoNickname(target))
elif target_type == self._MOTORS:
node = aio_node_helper.Value(AioNodeNameFromMotorNickname(target))
section = param_client.SECTION_CONFIG
try:
params[target] = self._param_client.GetSection(node, section)
except socket.timeout:
params[target] = None
self._PrintConfig(targets, params)
return params
def _PrintConfig(self, motors, params):
"""Print portions of the selected motor config params."""
load_types = [load_type.CName()[len('kMotorLoadType'):]
for load_type in config_params.MotorLoadType.Names()]
motor_types = [motor_type.CName()[len('kMotorType'):]
for motor_type in config_params.MotorType.Names()]
load_type_max_str_len = max([len(name) for name in load_types])
motor_type_max_str_len = max([len(name) for name in motor_types])
for motor in sorted(motors):
if params[motor] is None:
print '%s: unknown' % motor
else:
print '{name}: motor_type: {motor_type} load_type: {load_type}'.format(
name=motor,
motor_type=(motor_types[params[motor].motor_type]
.ljust(motor_type_max_str_len)),
load_type=(load_types[params[motor].load_type]
.ljust(load_type_max_str_len)))
print ''
@cmd_client.Command()
def do_query_config(self, line): # pylint: disable=invalid-name
targets_selected, _ = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, require_all=True,
select_all=True, require_one=False)
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._QueryConfig(targets_selected, self._MOTORS)
@cmd_client.Command()
def do_query_config_dyno(self, line): # pylint: disable=invalid-name
targets_selected, _ = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, require_all=True,
select_all=True, require_one=False)
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._QueryConfig(targets_selected, self._DYNOS)
def _TryArm(self, arm_msg, arm_msg_type, node_type):
listener, _ = self._GetListenerAndRunner(node_type)
for _ in xrange(self._NUM_RETRIES):
self._arm_aio_client.Send(arm_msg, arm_msg_type, OPERATOR)
time.sleep(0.1)
if listener.AllMotorsArmed():
print 'Successfully armed %s.' % node_type
return
else:
raise MotorClientError('Failed to arm %s.' % node_type)
@cmd_client.Command(num_args=0)
def do_arm(self, unused_line): # pylint: disable=invalid-name
"""Arms the selected motors and/or dynos."""
if self._motors_selected:
self._CheckMotorStatus([flags.kMotorStatusInit])
if self._dynos_selected:
self._CheckDynoStatus([flags.kMotorStatusInit])
self._CheckTargetsSelected()
if self._motors_selected:
motor_arm_msg = pack_avionics_messages.MotorSetStateMessage()
motor_arm_msg.command = actuator_types.kActuatorStateCommandArm
motor_arm_msg.command_data = safety_codes.MOTOR_ARMING_SIGNAL
print 'Arming motors.'
motor_arm_msg.selected_motors = MotorsAsBits(
self._motor_listener.GetUnarmedMotors())
self._TryArm(
motor_arm_msg, 'kMessageTypeMotorSetState', self._MOTORS)
if self._dynos_selected:
dyno_arm_msg = pack_avionics_messages.DynoMotorSetStateMessage()
dyno_arm_msg.command = actuator_types.kActuatorStateCommandArm
dyno_arm_msg.command_data = safety_codes.MOTOR_ARMING_SIGNAL
print 'Arming dynos.'
dyno_arm_msg.selected_motors = MotorsAsBits(
self._dyno_listener.GetUnarmedMotors())
self._TryArm(
dyno_arm_msg, 'kMessageTypeDynoMotorSetState', self._DYNOS)
def _SetParam(self, line, message, node_type): # pylint: disable=invalid-name
"""Sets a param for a specified motor or dyno."""
targets, args = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, select_all=True)
param, args = cmd_client.SelectArgs(
args, MOTOR_PARAMS.keys(), require_one=True, select_all=False)
if node_type == self._DYNOS:
targets = ['DYNO_%s' % t.upper() for t in targets]
try:
value = float(args[0])
except ValueError:
raise MotorClientError('Invalid value: "%s".' % args[0])
message.id = MOTOR_PARAMS[param]
message.value = value
failed_targets = []
for target in targets:
print 'Setting %s to %g on %s.' % (param, value, target)
if target.startswith('DYNO_'):
message.selected_motors = MotorsAsBits([target[len('DYNO_'):]])
aio_target = AioNodeNameFromDynoNickname(target[len('DYNO_'):])
success = self._TrySetParam(
message, 'kMessageTypeDynoMotorSetParam', param, target, aio_target)
else:
message.selected_motors = MotorsAsBits([target])
aio_target = AioNodeNameFromMotorNickname(target)
success = self._TrySetParam(
message, 'kMessageTypeMotorSetParam', param, target, aio_target)
if not success:
failed_targets.append(target)
if failed_targets:
raise MotorClientError('Failed to verify %s from %s.'
% (param, failed_targets))
def _TrySetParam(self, message, msg_type, param, target, aio_target):
for _ in xrange(self._NUM_RETRIES):
self._set_param_aio_client.Send(message, msg_type, OPERATOR)
for _ in xrange(self._NUM_RETRIES):
try:
_, header, ack = self._ack_param_aio_client.Recv()
if (header.source == aio_node_helper.Value(aio_target)
and header.type == message_type.kMessageTypeMotorAckParam
and ack.id == message.id and ack.value == message.value):
print '%s %s: %g' % (target, param, ack.value)
return True
except socket.timeout:
return False
return False
@cmd_client.Command(num_args=3)
def do_set_param(self, line): # pylint: disable=invalid-name
"""Sets param for a specified motor, e.g. "set_motor_param SBO Ld 3.14"."""
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.MotorSetParamMessage()
self._SetParam(line, message, self._MOTORS)
@cmd_client.Command(num_args=3)
def do_set_param_dyno(self, line): # pylint: disable=invalid-name
"""Sets param for a specified dyno, e.g. "set_dyno_param SBO Ld 3.14"."""
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.DynoMotorSetParamMessage()
self._SetParam(line, message, self._DYNOS)
def complete_set_param(self, text, line, *unused_args): # pylint: disable=invalid-name
arg_number = len(line.split())
if not text:
arg_number += 1
if arg_number == 2:
return self._CompleteArg(text, sorted(MOTORS) + ['All'])
elif arg_number == 3:
return self._CompleteArg(text, sorted(MOTOR_PARAMS.keys()))
else:
return []
complete_set_param_dyno = complete_set_param
def _GetParam(self, line, message, node_type):
targets, args = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, select_all=True)
param, _ = cmd_client.SelectArgs(
args, MOTOR_PARAMS.keys(), require_one=True, select_all=False)
if node_type == self._DYNOS:
targets = ['DYNO_%s' % t.upper() for t in targets]
message.id = MOTOR_PARAMS[param]
failed_targets = []
for target in targets:
print 'Getting %s from %s...' % (param, target)
if target.startswith('DYNO_'):
message.selected_motors = MotorsAsBits([target[len('DYNO_'):]])
aio_target = AioNodeNameFromDynoNickname(target[len('DYNO_'):])
success = self._TryGetParam(
message, 'kMessageTypeDynoMotorGetParam', param, target, aio_target)
else:
message.selected_motors = MotorsAsBits([target])
aio_target = AioNodeNameFromMotorNickname(target)
success = self._TryGetParam(
message, 'kMessageTypeMotorGetParam', param, target, aio_target)
if not success:
failed_targets.append(target)
if failed_targets:
raise MotorClientError('Failed to get %s from %s.'
% (param, failed_targets))
def _TryGetParam(self, message, msg_type, param, target, aio_target):
for _ in xrange(self._NUM_RETRIES):
self._get_param_aio_client.Send(message, msg_type, OPERATOR)
for _ in xrange(self._NUM_RETRIES):
try:
_, header, ack = self._ack_param_aio_client.Recv()
if (header.source == aio_node_helper.Value(aio_target)
and header.type == message_type.kMessageTypeMotorAckParam
and ack.id == message.id):
print '%s %s: %g' % (target, param, ack.value)
return True
except socket.timeout:
return False
return False
@cmd_client.Command()
def do_get_param(self, line): # pylint: disable=invalid-name
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.MotorGetParamMessage()
self._GetParam(line, message, self._MOTORS)
@cmd_client.Command()
def do_get_param_dyno(self, line): # pylint: disable=invalid-name
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.DynoMotorGetParamMessage()
self._GetParam(line, message, self._DYNOS)
complete_get_param = complete_set_param
complete_get_param_dyno = complete_get_param
@cmd_client.Command()
def do_run(self, line): # pylint: disable=invalid-name
"""Runs the selected motors and/or dynos.
Specify a duration in "s" or "ms". E.g. "run 10s" or "run 300ms".
Args:
line: Command to this function.
Raises:
MotorClientError: An invalid duration was specified.
"""
if self._motors_selected:
self._CheckMotorStatus([flags.kMotorStatusArmed])
if self._dynos_selected:
self._CheckDynoStatus([flags.kMotorStatusArmed])
self._CheckTargetsSelected()
if line.endswith('ms'):
line = line[:-2]
multiplier = 1e-3
elif line.endswith('s'):
line = line[:-1]
multiplier = 1.0
else:
      raise MotorClientError('Usage: run <duration>{s|ms}, e.g. "run 10s".')
try:
duration = float(line) * multiplier
except ValueError:
raise MotorClientError('Invalid run time: \'%s\'' % line)
if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
raise MotorClientError('Already running.')
if self._motors_selected:
if not self._motor_listener.AllMotorsArmed():
raise MotorClientError('Motors not armed.')
self._motor_runner.StartRun(duration)
if self._dynos_selected:
if not self._dyno_listener.AllMotorsArmed():
raise MotorClientError('Dynos not armed.')
self._dyno_runner.StartRun(duration)
print 'Running...'
@cmd_client.Command(num_args=0)
def do_stop(self, unused_line): # pylint: disable=invalid-name
"""Stops the motors and/or dynos."""
if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
self._motor_runner.StopRun()
self._dyno_runner.StopRun()
else:
raise MotorClientError('Not running.')
print 'Run stopped.'
def _GetCommandFunction(self, line):
"""Returns a complete command function for each selected motor and/or dyno.
Args:
line: Command to this function.
Raises:
MotorClientError: Motors and/or dynos are running.
Returns:
torque_func: A function that returns torque commands.
omega_lower_func: A function that returns omega_lower commands.
omega_upper_func: A function that returns omega_upper commands.
freeze_command: Specifies if last command should persist on stop.
"""
if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
raise MotorClientError('Motors and/or dynos are running.')
args = line.split()
data, loop = GenerateCommandData(args)
t = data[:, 0]
torque_cmd = data[:, 1:9]
omega_lower_cmd = data[:, 9:17]
omega_upper_cmd = data[:, 17:25]
torque_func = CommandProfile(t, torque_cmd, TORQUE_MIN_LIMIT,
TORQUE_MAX_LIMIT, 'torque', loop)
omega_lower_func = CommandProfile(t, omega_lower_cmd, OMEGA_MIN_LIMIT,
OMEGA_MAX_LIMIT, 'omega', loop)
omega_upper_func = CommandProfile(t, omega_upper_cmd, OMEGA_MIN_LIMIT,
OMEGA_MAX_LIMIT, 'omega', loop)
freeze_command = False
return (torque_func, omega_lower_func, omega_upper_func, freeze_command)
@cmd_client.Command(num_args=[1, 2])
def do_set_command_function(self, line): # pylint: disable=invalid-name, g-doc-args
# pylint: disable=g-doc-args
"""Sets a command function for motor(s).
Specify a filename which may be:
- A Python file (must have .py suffix) corresponding to an input to
gen_lookup_table.py
      - A text file containing a lookup table formatted per the output of
gen_lookup_table.py.
"""
self._CheckMotorStatus(
[flags.kMotorStatusInit, flags.kMotorStatusArmed,
flags.kMotorStatusError])
cmd_args = self._GetCommandFunction(line)
self._motor_runner.SetCommandFunction(*cmd_args)
@cmd_client.Command(num_args=[1, 2])
def do_set_command_function_dyno(self, line): # pylint: disable=invalid-name
# pylint: disable=g-doc-args
"""Sets a command function for dyno(s).
Specify a filename which may be:
- A Python file (must have .py suffix) corresponding to an input to
gen_lookup_table.py
      - A text file containing a lookup table formatted per the output of
gen_lookup_table.py.
"""
self._CheckDynoStatus(
[flags.kMotorStatusInit, flags.kMotorStatusArmed,
flags.kMotorStatusError])
cmd_args = self._GetCommandFunction(line)
self._dyno_runner.SetCommandFunction(*cmd_args)
  def complete_set_command_function(self, _, line, *unused_args):  # pylint: disable=invalid-name
"""Completes arguments for the "set_command_function" command."""
args = line.split(None, 2)
if len(args) > 2 or (len(args) == 2 and line.endswith(' ')):
suggestions = ['noloop', 'loop']
if len(args) == 3:
if args[2] in suggestions:
return []
suggestions = [x for x in suggestions if x.startswith(args[2])]
else:
path = args[1] if len(args) == 2 else ''
suggestions = cmd_client.CompleteFile(path)
suggestions = [x for x in suggestions
if (x.endswith(('/', '.py', '.pycmd', '.txt', '.dat'))
or x.find('.') < 0)]
return suggestions
  complete_set_command_function_dyno = complete_set_command_function
@cmd_client.Command(num_args=2)
def do_set_speed_limits(self, line): # pylint: disable=invalid-name
"""Sets the speed limits for torque-mode e.g. set_speed_limits 100 200."""
if not self._dynos_selected:
raise MotorClientError('No dynos selected. Use "set_targets_dyno".')
args = line.split()
try:
omega_lower = float(args[0])
omega_upper = float(args[1])
except ValueError:
raise MotorClientError('Invalid argument(s): \'{:s}\''.format(line))
CheckCommandLimits(
omega_lower, omega_upper, OMEGA_MIN_LIMIT, OMEGA_MAX_LIMIT, 'omega')
self._omega_lower_limit = omega_lower
self._omega_upper_limit = omega_upper
print 'Omega limits set to: %.2f rad/s, %.2f rad/s.' % (
self._omega_lower_limit, self._omega_upper_limit)
torque_func = lambda _: self._torque
omega_lower_func = lambda _: self._omega_lower_limit
omega_upper_func = lambda _: self._omega_upper_limit
freeze_command = True
self._dyno_runner.SetCommandFunction(torque_func, omega_lower_func,
omega_upper_func, freeze_command)
@cmd_client.Command(num_args=1)
def do_set_torque(self, line): # pylint: disable=invalid-name
"""Sets motor torque."""
if not self._dynos_selected:
raise MotorClientError('No dynos selected. Use "set_targets_dyno".')
try:
torque = float(line)
except ValueError:
raise MotorClientError('Invalid argument(s): \'{:s}\''.format(line))
if self._omega_lower_limit == 0 and self._omega_upper_limit == 0:
raise MotorClientError('Omega limits not set. Use "set_speed_limits".')
CheckCommandLimits(
torque, torque, TORQUE_MIN_LIMIT, TORQUE_MAX_LIMIT, 'torque')
self._torque = torque
print 'Torque desired: %.2f Nm. Speed limits: %.2f rad/s, %.2f rad/s.' % (
torque, self._omega_lower_limit, self._omega_upper_limit)
torque_func = lambda _: self._torque
omega_lower_func = lambda _: self._omega_lower_limit
omega_upper_func = lambda _: self._omega_upper_limit
freeze_command = True
self._dyno_runner.SetCommandFunction(torque_func, omega_lower_func,
omega_upper_func, freeze_command)
@cmd_client.Command(num_args=1)
def do_set_omega(self, line): # pylint: disable=invalid-name
"""Sets motor speed."""
if not self._motors_selected:
raise MotorClientError('No motors selected. Use "set_targets".')
try:
omega = float(line)
except ValueError:
raise MotorClientError('Invalid omega: \'{:s}\''.format(line))
CheckCommandLimits(omega, omega, OMEGA_MIN_LIMIT, OMEGA_MAX_LIMIT, 'omega')
print 'Omega desired: %s rad/s' % omega
torque_func = lambda _: 0.0
omega_lower_func = lambda _: omega
omega_upper_func = lambda _: omega
freeze_command = True
self._motor_runner.SetCommandFunction(torque_func, omega_lower_func,
omega_upper_func, freeze_command)
def _RampCommand(self, line, cmd_type, runner):
"""Sets a motor speed or torque ramp.
Args:
line: Command to this function.
cmd_type: Torque or Omega command to ramp.
runner: Runner instance to use for setting command.
Raises:
MotorClientError: An invalid parameter was specified.
"""
args = line.split(None, 2)
try:
cmd = float(args[0])
except ValueError:
raise MotorClientError('Invalid %s: \'{:s}\''.format(args[0]) % cmd_type)
if len(args) == 2:
try:
dt = self._dt = float(args[1])
except ValueError:
raise MotorClientError('Invalid time: \'{:s}\''.format(args[1]))
else:
dt = 1.0
if runner.IsRunning():
t0 = runner.GetTime()
motor_cmd = runner.GetCommand()
cmd0 = motor_cmd[cmd_type]
else:
t0 = 0.0
cmd0 = 0.0
dcmd_dt = (cmd - cmd0) / dt if abs(dt) > 10.0 * EPS32 else 0.0
def Ramp(t):
if t > t0 + dt:
return cmd
elif t > t0:
return dcmd_dt * (t - t0) + cmd0
else:
return cmd0
if cmd_type == 'omega_upper':
torque_func = lambda _: 0.0
omega_lower_func = Ramp
omega_upper_func = Ramp
elif cmd_type == 'torque':
torque_func = Ramp
omega_lower_func = lambda _: self._omega_lower_limit
omega_upper_func = lambda _: self._omega_upper_limit
else:
raise MotorClientError('Invalid command type: %s' % cmd_type)
freeze_command = True
runner.SetCommandFunction(
torque_func, omega_lower_func, omega_upper_func, freeze_command)
display_cmd = cmd_type.split('_')[0].capitalize()
print (' Ramping over dt = %4.2f:\n'
' %s(t0) = %4.1f\n'
' %s(t0 + dt) = %4.1f' % (dt, display_cmd, cmd0, display_cmd, cmd))
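  # Worked example (illustrative): with cmd0 = 50, cmd = 100 and dt = 2.0,
  # dcmd_dt = 25, so Ramp(t0 + 1.0) returns 75 and Ramp saturates at 100 for
  # t >= t0 + dt.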
@cmd_client.Command(num_args=[1, 2])
def do_ramp_omega(self, line): # pylint: disable=invalid-name
# pylint: disable=g-doc-args
"""Sets a motor speed ramp.
Specify a linear angular rate ramp from the present speed omega0 to a final
speed omega1 over some time dt (in seconds) with the command:
ramp_omega [omega1] [dt]
    The second argument is optional; if not specified, dt = 1 s is assumed.
"""
self._RampCommand(line, 'omega_upper', self._motor_runner)
@cmd_client.Command(num_args=[1, 2])
def do_ramp_torque(self, line): # pylint: disable=invalid-name
# pylint: disable=g-doc-args
"""Sets a dyno torque ramp.
Specify a linear torque ramp from the present torque T0 to a final
torque T1 over some time dt (in seconds) with the command:
ramp_torque [T1] [dt]
    The second argument is optional; if not specified, dt = 1 s is assumed.
"""
self._RampCommand(line, 'torque', self._dyno_runner)
@cmd_client.Command(num_args=0)
def do_clear_errors(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._motor_listener.ClearErrors()
self._motor_runner.ClearErrors()
if self._dynos_selected:
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._dyno_listener.ClearErrors()
self._dyno_runner.ClearErrors()
print 'Errors cleared.'
def _TryDisarm(self, node_type):
listener, runner = self._GetListenerAndRunner(node_type)
for _ in xrange(self._NUM_RETRIES):
runner.Disarm()
time.sleep(0.1)
if listener.AllMotorsDisarmed():
print 'Successfully disarmed %s.' % node_type
return
raise MotorClientError('Failed to disarm %s.' % node_type)
@cmd_client.Command(num_args=0)
def do_disarm(self, unused_line): # pylint: disable=invalid-name
"""Disarms the motors."""
self._CheckTargetsSelected()
print 'Disarming.'
if self._motors_selected:
self._TryDisarm(self._MOTORS)
if self._dynos_selected:
self._TryDisarm(self._DYNOS)
@cmd_client.Command()
def do_get_errors(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_listener.PrintErrors()
if self._dynos_selected:
self._dyno_listener.PrintErrors()
@cmd_client.Command()
def do_request_control_log(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_runner.RequestControlLog()
if self._dynos_selected:
self._dyno_runner.RequestControlLog()
@cmd_client.Command()
def do_request_adc_log(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_runner.RequestAdcLog()
if self._dynos_selected:
self._dyno_runner.RequestAdcLog()
class Listener(cmd_client.AioThread):
"""Continuously listens to MotorStatusMessages."""
def __init__(self, error_callback, motors, dyno_mode=False):
self._motors = motors.copy()
t_now = time.time()
self._errors = {m: flags.kMotorErrorNone for m in MOTORS}
self._warnings = {m: flags.kMotorWarningNone for m in MOTORS}
self._error_lock = threading.Lock()
self._clear_errors_stop_time = t_now
self._motor_status = {m: flags.kMotorStatusInit
for m in self._motors}
self._motor_status_lock = threading.Lock()
self._t_message = {m: t_now for m in self._motors}
self._t_message_lock = threading.Lock()
self._dyno_mode = dyno_mode
if dyno_mode:
sources = {AioNodeNameFromDynoNickname(m): m for m in self._motors}
else:
sources = {AioNodeNameFromMotorNickname(m): m for m in self._motors}
self._motor_sources = {aio.aio_node_helper.Value(k): sources[k]
for k in sources.keys()}
self._error_callback = error_callback
super(Listener, self).__init__(['kMessageTypeMotorStatus'],
allowed_sources=sources.keys(), timeout=0.1)
self.start()
def ClearErrors(self):
with self._error_lock:
for motor in self._errors.keys():
self._errors[motor] = flags.kMotorErrorNone
self._warnings[motor] = flags.kMotorWarningNone
    self._clear_errors_stop_time = time.time() + 5 * 10e-3  # ~50 ms grace window; see _RunOnce.
def GetMostRestrictiveMotorStatus(self):
"""Returns the most restrictive status across all motors."""
with self._motor_status_lock:
motor_statuses = self._motor_status.values()
if flags.kMotorStatusRunning in motor_statuses:
return flags.kMotorStatusRunning
elif flags.kMotorStatusArmed in motor_statuses:
return flags.kMotorStatusArmed
elif flags.kMotorStatusError in motor_statuses:
return flags.kMotorStatusError
return flags.kMotorStatusInit
def AllMotorsArmed(self):
with self._motor_status_lock:
motor_statuses = self._motor_status.values()
return all(x == flags.kMotorStatusArmed for x in motor_statuses)
def AnyMotorsArmed(self):
with self._motor_status_lock:
motor_statuses = self._motor_status.values()
return any(x == flags.kMotorStatusArmed for x in motor_statuses)
def AllMotorsDisarmed(self):
with self._motor_status_lock:
motor_statuses = self._motor_status.values()
return all(x != flags.kMotorStatusArmed
and x != flags.kMotorStatusRunning
for x in motor_statuses)
def GetUnarmedMotors(self):
with self._motor_status_lock:
return [motor for motor, status in self._motor_status.iteritems()
if status == flags.kMotorStatusInit]
def PrintErrors(self):
with self._error_lock:
if (any([e != flags.kMotorErrorNone for e in self._errors.itervalues()])
or any([w != flags.kMotorWarningNone
for w in self._warnings.itervalues()])):
print 'Errors:'
for motor in MOTORS:
error = self._errors[motor]
warning = self._warnings[motor]
if error != flags.kMotorErrorNone:
print '%s: %s' % (motor, ' | '.join(GetMotorErrorNames(error)))
            motor = ' ' * len(motor)  # Do not print the motor name again.
if warning != flags.kMotorWarningNone:
print '%s: %s' % (motor, ' | '.join(GetMotorWarningNames(warning)))
else:
print 'No errors or warnings.'
def _RunOnce(self):
try:
_, header, msg = self._client.Recv()
motor = self._motor_sources[header.source]
t_now = time.time()
with self._t_message_lock:
self._t_message[motor] = t_now
      stale = {m: t_now - self._t_message[m] > 0.05 for m in self._motors}  # >50 ms silent counts as stale.
new_status = False
execute_callback = False
with self._error_lock, self._motor_status_lock:
# New errors.
if t_now > self._clear_errors_stop_time:
newline = '\n'
error_diff = self._errors[motor] ^ msg.motor_error
if msg.motor_error and error_diff:
self._errors[motor] |= msg.motor_error
print ('%sNew motor error(s) %s: %s' %
(newline, motor, ' | '.join(GetMotorErrorNames(error_diff))))
newline = '' # Group errors and warning from the same motor.
warning_diff = self._warnings[motor] ^ msg.motor_warning
if warning_diff:
self._warnings[motor] = msg.motor_warning
if msg.motor_warning & warning_diff:
print ('%sNew motor warning(s) %s: %s' %
(newline, motor,
' | '.join(GetMotorWarningNames(warning_diff
& msg.motor_warning))))
else:
print ('%sCleared motor warning(s) %s: %s' %
(newline, motor,
' | '.join(GetMotorWarningNames(warning_diff
& ~msg.motor_warning))))
# Change in status.
if self._motor_status[motor] != msg.motor_status:
new_status = True
self._motor_status[motor] = msg.motor_status
# Invoke error callback after giving up self._error_lock and
# self._status_lock just in case.
if (new_status and
          any(self._errors.values()) and
          all(self._motor_status[motor] &
              ~(flags.kMotorStatusRunning | flags.kMotorStatusWindDown) or
              stale[motor] for motor in self._motors)):
execute_callback = True
if execute_callback:
self._error_callback()
except socket.timeout:
pass
class Runner(cmd_client.AioThread):
"""Continuously sends ControllerCommandMessages."""
def __init__(self, motors, spin_dir, dyno_mode=False):
self._motors = motors.copy()
self._spin_dir = [spin_dir.get(motor, 1) for motor in MOTORS]
self._clear_error_retries = 0
self._disarm_retries = 0
self._request_control_log = False
self._request_adc_log = False
self._dyno_mode = dyno_mode
if dyno_mode:
self._command = pack_avionics_messages.DynoCommandMessage()
else:
self._command = pack_avionics_messages.ControllerCommandMessage()
self._command.motor_command = flags.kMotorCommandNone
self._command_lock = threading.Lock()
self._command_function_lock = threading.Lock()
self._torque_func = lambda _: 0.0
self._omega_lower_func = lambda _: 0.0
self._omega_upper_func = lambda _: 0.0
self._freeze_command = False # Replace command with a constant on stop.
self._WriteMotorCommand()
super(Runner, self).__init__(['kMessageTypeControllerCommand',
'kMessageTypeDynoCommand'])
self.start()
def SetCommand(self, command_mask):
with self._command_lock:
self._command.motor_command |= command_mask
def _ClearCommand(self, command_mask):
with self._command_lock:
self._command.motor_command &= ~command_mask
def IsRunning(self):
return self._command.motor_command & flags.kMotorCommandRun
def StartRun(self, duration):
self._start_time = time.time()
self._stop_time = self._start_time + duration
self.SetCommand(flags.kMotorCommandRun)
def StopRun(self):
if self._freeze_command:
motor_cmd = self.GetCommand()
with self._command_function_lock:
self._torque_func = lambda _: motor_cmd['torque']
self._omega_lower_func = lambda _: motor_cmd['omega_lower']
self._omega_upper_func = lambda _: motor_cmd['omega_upper']
self._ClearCommand(flags.kMotorCommandRun)
def GetCommand(self):
"""Generates motor commands at the current time.
Returns:
motor_cmd: Command to send to motors or dynos at the current time.
"""
if self.IsRunning():
curr_time = time.time() - self._start_time
else:
curr_time = 0.0
with self._command_function_lock:
motor_cmd = {'torque': self._torque_func(curr_time),
'omega_lower': self._omega_lower_func(curr_time),
'omega_upper': self._omega_upper_func(curr_time)}
return motor_cmd
def _CheckCommand(self, cmd_dict):
for _, val in cmd_dict.iteritems():
assert isinstance(val, list)
assert len(val) == len(MOTORS)
def _WriteMotorCommand(self):
motor_cmd = self.GetCommand()
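    # Scalar commands (the common case) are broadcast into one entry per
    # motor below so that _CheckCommand and the packing loop always see
    # per-motor lists.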
for cmd, val in motor_cmd.iteritems():
if isinstance(val, int) or isinstance(val, float):
motor_cmd[cmd] = [val for _ in MOTORS]
self._CheckCommand(motor_cmd)
torque = motor_cmd['torque']
omega_lower = motor_cmd['omega_lower']
omega_upper = motor_cmd['omega_upper']
with self._command_lock:
for i, motor in enumerate(MOTORS):
spin = self._spin_dir[i]
if motor in self._motors:
self._command.motor_torque[i] = torque[i] * spin
self._command.motor_speed_lower_limit[i] = omega_lower[i] * spin
self._command.motor_speed_upper_limit[i] = omega_upper[i] * spin
else:
self._command.motor_torque[i] = 0.0
self._command.motor_speed_lower_limit[i] = 0.0
self._command.motor_speed_upper_limit[i] = 0.0
def SetCommandFunction(self, torque_func, omega_lower_func,
omega_upper_func, freeze_command):
with self._command_function_lock:
self._torque_func = torque_func
self._omega_lower_func = omega_lower_func
self._omega_upper_func = omega_upper_func
self._freeze_command = freeze_command
self._WriteMotorCommand()
def GetTime(self):
return time.time() - self._start_time if self.IsRunning() else 0.0
def ClearErrors(self):
self.SetCommand(flags.kMotorCommandClearError)
self._clear_error_retries = 3
def Disarm(self):
self.SetCommand(flags.kMotorCommandDisarm)
self._disarm_retries = 3
def RequestControlLog(self):
self._request_control_log = True
def RequestAdcLog(self):
self._request_adc_log = True
def _RunOnce(self):
"""Modifies and sends the ControllerCommandMessage."""
if self.IsRunning():
if time.time() > self._stop_time:
self.StopRun()
print '\nFinished run.'
else:
try:
self._WriteMotorCommand()
except AssertionError:
print ('Warning: Command(t) did not return a scalar or list with '
'elements for all motors.')
self.StopRun()
if self._clear_error_retries <= 0:
self._ClearCommand(flags.kMotorCommandClearError)
else:
self._clear_error_retries -= 1
if self._disarm_retries <= 0:
self._ClearCommand(flags.kMotorCommandDisarm)
else:
self._disarm_retries -= 1
if self._request_control_log:
self.SetCommand(flags.kMotorCommandSendControlLog)
self._request_control_log = False
else:
self._ClearCommand(flags.kMotorCommandSendControlLog)
if self._request_adc_log:
self.SetCommand(flags.kMotorCommandSendAdcLog)
self._request_adc_log = False
else:
self._ClearCommand(flags.kMotorCommandSendAdcLog)
with self._command_lock:
if self._dyno_mode:
self._client.Send(self._command, 'kMessageTypeDynoCommand', OPERATOR)
else:
self._client.Send(self._command, 'kMessageTypeControllerCommand',
CONTROLLER)
time.sleep(0.0095)
if __name__ == '__main__':
client = MotorCommandClient()
try:
client.cmdloop()
except BaseException:
client.TryStopThreads()
raise
| apache-2.0 | -5,446,123,190,470,756,000 | 35.626277 | 103 | 0.645343 | false |
JarbasAI/JarbasAI | jarbas_models/tf_tacotron/models/modules.py | 1 | 3455 | import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell
def prenet(inputs, is_training, layer_sizes=[256, 128], scope=None):
x = inputs
drop_rate = 0.5 if is_training else 0.0
with tf.variable_scope(scope or 'prenet'):
for i, size in enumerate(layer_sizes):
dense = tf.layers.dense(x, units=size, activation=tf.nn.relu,
name='dense_%d' % (i + 1))
x = tf.layers.dropout(dense, rate=drop_rate,
name='dropout_%d' % (i + 1))
return x
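# Shape sketch (illustrative, batch B / time T): prenet maps [B, T, D] inputs
# to [B, T, 128] via the dense+dropout stack above, the 128 coming from the
# default layer_sizes=[256, 128].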
def encoder_cbhg(inputs, input_lengths, is_training):
return cbhg(
inputs,
input_lengths,
is_training,
scope='encoder_cbhg',
K=16,
projections=[128, 128])
def post_cbhg(inputs, input_dim, is_training):
return cbhg(
inputs,
None,
is_training,
scope='post_cbhg',
K=8,
projections=[256, input_dim])
def cbhg(inputs, input_lengths, is_training, scope, K, projections):
with tf.variable_scope(scope):
with tf.variable_scope('conv_bank'):
# Convolution bank: concatenate on the last axis to stack channels from all convolutions
conv_outputs = tf.concat(
[conv1d(inputs, k, 128, tf.nn.relu, is_training,
'conv1d_%d' % k) for k in range(1, K + 1)],
axis=-1
)
# Maxpooling:
maxpool_output = tf.layers.max_pooling1d(
conv_outputs,
pool_size=2,
strides=1,
padding='same')
# Two projection layers:
proj1_output = conv1d(maxpool_output, 3, projections[0], tf.nn.relu,
is_training, 'proj_1')
proj2_output = conv1d(proj1_output, 3, projections[1], None,
is_training, 'proj_2')
# Residual connection:
highway_input = proj2_output + inputs
# Handle dimensionality mismatch:
if highway_input.shape[2] != 128:
highway_input = tf.layers.dense(highway_input, 128)
# 4-layer HighwayNet:
for i in range(4):
highway_input = highwaynet(highway_input, 'highway_%d' % (i + 1))
rnn_input = highway_input
# Bidirectional RNN
outputs, states = tf.nn.bidirectional_dynamic_rnn(
GRUCell(128),
GRUCell(128),
rnn_input,
sequence_length=input_lengths,
dtype=tf.float32)
return tf.concat(outputs, axis=2) # Concat forward and backward
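# Shape sketch for cbhg (illustrative, batch B / time T): the conv bank stacks
# to [B, T, 128*K], the two projections reduce that to projections[-1]
# channels, the highway layers keep 128, and the forward/backward GRU concat
# above yields [B, T, 256].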
def highwaynet(inputs, scope):
with tf.variable_scope(scope):
H = tf.layers.dense(
inputs,
units=128,
activation=tf.nn.relu,
name='H')
T = tf.layers.dense(
inputs,
units=128,
activation=tf.nn.sigmoid,
name='T',
bias_initializer=tf.constant_initializer(-1.0))
return H * T + inputs * (1.0 - T)
def conv1d(inputs, kernel_size, channels, activation, is_training, scope):
with tf.variable_scope(scope):
conv1d_output = tf.layers.conv1d(
inputs,
filters=channels,
kernel_size=kernel_size,
activation=activation,
padding='same')
return tf.layers.batch_normalization(conv1d_output,
training=is_training)
| gpl-3.0 | 2,314,599,216,257,785,300 | 31.28972 | 100 | 0.541245 | false |
zhangg/trove | trove/guestagent/datastore/mysql/service.py | 1 | 3685 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from trove.common.i18n import _
from trove.guestagent.datastore.mysql_common import service
LOG = logging.getLogger(__name__)
CONF = service.CONF
class KeepAliveConnection(service.BaseKeepAliveConnection):
pass
class MySqlAppStatus(service.BaseMySqlAppStatus):
pass
class LocalSqlClient(service.BaseLocalSqlClient):
pass
class MySqlApp(service.BaseMySqlApp):
def __init__(self, status):
super(MySqlApp, self).__init__(status, LocalSqlClient,
KeepAliveConnection)
    # DEPRECATED: Maintained for API compatibility.
def get_txn_count(self):
LOG.info(_("Retrieving latest txn id."))
txn_count = 0
with self.local_sql_client(self.get_engine()) as client:
result = client.execute('SELECT @@global.gtid_executed').first()
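            # gtid_executed is a comma-separated set of GTID ranges, e.g.
            # '3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5:11-18' (a server UUID
            # followed by one or more transaction-id intervals).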
for uuid_set in result[0].split(','):
for interval in uuid_set.split(':')[1:]:
if '-' in interval:
iparts = interval.split('-')
txn_count += int(iparts[1]) - int(iparts[0])
else:
txn_count += 1
return txn_count
def _get_slave_status(self):
with self.local_sql_client(self.get_engine()) as client:
return client.execute('SHOW SLAVE STATUS').first()
def _get_master_UUID(self):
slave_status = self._get_slave_status()
return slave_status and slave_status['Master_UUID'] or None
def _get_gtid_executed(self):
with self.local_sql_client(self.get_engine()) as client:
return client.execute('SELECT @@global.gtid_executed').first()[0]
def get_last_txn(self):
master_UUID = self._get_master_UUID()
last_txn_id = '0'
gtid_executed = self._get_gtid_executed()
for gtid_set in gtid_executed.split(','):
uuid_set = gtid_set.split(':')
if uuid_set[0] == master_UUID:
last_txn_id = uuid_set[-1].split('-')[-1]
break
return master_UUID, int(last_txn_id)
def get_latest_txn_id(self):
LOG.info(_("Retrieving latest txn id."))
return self._get_gtid_executed()
def wait_for_txn(self, txn):
LOG.info(_("Waiting on txn '%s'."), txn)
with self.local_sql_client(self.get_engine()) as client:
client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
% txn)
class MySqlRootAccess(service.BaseMySqlRootAccess):
def __init__(self):
super(MySqlRootAccess, self).__init__(LocalSqlClient,
MySqlApp(MySqlAppStatus.get()))
class MySqlAdmin(service.BaseMySqlAdmin):
def __init__(self):
super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(),
MySqlApp)
get_engine = MySqlApp.get_engine
| apache-2.0 | -3,997,285,619,942,811,600 | 33.764151 | 78 | 0.612212 | false |
mdomke/signaling | tests/test_signals.py | 1 | 3866 | import mock
import pytest
from signaling.exceptions import InvalidEmit
from signaling.exceptions import InvalidSlot
from signaling import Signal
class Receiver(object):
def __init__(self):
self.m = mock.Mock()
def slot(self):
self.m()
class TestSignalSlot(object):
def setup_method(self, method):
self.signal = Signal(name='emitter')
self.sentinel_a = mock.Mock()
self.sentinel_b = mock.Mock()
def slot_a(self):
self.sentinel_a()
def slot_b(self):
self.sentinel_b()
def test_connect(self):
self.signal.connect(self.slot_a)
assert self.slot_a in self.signal.slots
self.signal.connect(self.slot_b)
assert self.slot_a in self.signal.slots
assert self.slot_b in self.signal.slots
def test_connect_with_incompatible_slot_arg_count(self):
def slot_a():
pass
with pytest.raises(InvalidSlot):
Signal(args=['foo']).connect(slot_a)
def slot_b(foo):
pass
with pytest.raises(InvalidSlot):
Signal().connect(slot_b)
def test_connect_with_incompatible_slot_arg_name(self):
def slot(foo):
pass
with pytest.raises(InvalidSlot):
Signal(args=['bar']).connect(slot)
def test_disconnect(self):
self.test_connect()
self.signal.disconnect(self.slot_a)
assert self.slot_a not in self.signal.slots
assert self.slot_b in self.signal.slots
self.signal.disconnect(self.slot_b)
assert self.slot_a not in self.signal.slots
assert self.slot_b not in self.signal.slots
def test_emit_with_one_slot(self):
self.signal.connect(self.slot_a)
self.signal.emit()
self.sentinel_a.assert_called_once_with()
assert self.sentinel_b.call_count == 0
def test_emit_with_two_slots(self):
self.signal.connect(self.slot_a)
self.signal.connect(self.slot_b)
self.signal.emit()
self.sentinel_a.assert_called_once_with()
self.sentinel_b.assert_called_once_with()
def test_emit_with_args(self):
def slot(foo, bar):
self.sentinel_a(foo=foo, bar=bar)
signal = Signal(args=['foo', 'bar'])
signal.connect(slot)
signal.emit(foo=1, bar=2)
self.sentinel_a.assert_called_once_with(foo=1, bar=2)
def test_emit_with_missing_args(self):
def slot(foo, bar):
self.sentinel_a(foo, bar)
signal = Signal(args=['foo', 'bar'])
signal.connect(slot)
with pytest.raises(InvalidEmit):
signal.emit(foo=1)
self.sentinel_a.assert_not_called()
def test_emit_with_superfluous_args(self):
def slot(foo):
self.sentinel_a(foo)
signal = Signal(args=['foo'])
signal.connect(slot)
with pytest.raises(InvalidEmit):
signal.emit(foo=1, bar=2)
self.sentinel_a.assert_not_called()
def test_emit_with_superfluous_args_none_expected(self):
def slot():
self.sentinel_a()
signal = Signal()
signal.connect(slot)
with pytest.raises(InvalidEmit):
signal.emit(foo=1)
self.sentinel_a.assert_not_called()
def test_emit_with_method_slot(self):
signal = Signal()
receiver = Receiver()
signal.connect(receiver.slot)
signal.emit()
receiver.m.assert_called_with()
def test_repr(self):
signal = Signal()
assert repr(signal) == u"<Signal: 'anonymous'. Slots=0>"
signal.connect(self.slot_a)
assert repr(signal) == u"<Signal: 'anonymous'. Slots=1>"
def test_equality(self):
other = Signal()
assert self.signal == other
self.signal.connect(self.slot_a)
assert self.signal != other
| mit | 992,258,306,603,419,400 | 27.014493 | 64 | 0.600103 | false |
mikemintz/neutron | modules/iq.py | 1 | 4752 | # -*- coding: koi8-r -*-
## OJAB iq module
## Copyright (C) Boris Kotov <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
# Modified by me :) Gh0st AKA Bohdan Turkynewych
import os, xmpp, time
messages=None
global version
global vername
ver_queue={}
time_queue={}
iq_id=1
def versioncmd(conn, msg, args, replyto):
if args=="":
target=msg.getFrom()
else:
target=("%s/%s"%(replyto, args))
req=xmpp.protocol.Iq('get', xmpp.NS_VERSION, {}, target)
req.setID(iq_id)
ver_queue[str(iq_id)]=[replyto, msg.getFrom().getResource(), False]
conn.send(req)
globals()['iq_id']+=1
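# Flow sketch: versioncmd/pingcmd send a jabber:iq:version <iq type='get'/> to
# the chosen occupant and stash the request id in ver_queue; the reply (or
# error) is matched back up in versionresultCB/versionerrorCB below.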
def pingcmd(conn, msg, args, replyto):
if args=="":
target=msg.getFrom()
else:
target=("%s/%s"%(replyto, args))
req=xmpp.protocol.Iq('get', xmpp.NS_VERSION, {}, target)
req.setID(iq_id)
ver_queue[str(iq_id)]=[replyto, msg.getFrom().getResource(), time.time()]
conn.send(req)
globals()['iq_id']+=1
def timecmd(conn, msg, args, replyto):
if args=="":
target=msg.getFrom()
else:
target=("%s/%s"%(replyto, args))
req=xmpp.protocol.Iq('get', xmpp.NS_TIME, {}, target)
req.setID(iq_id)
time_queue[str(iq_id)]=[replyto, msg.getFrom().getResource()]
conn.send(req)
globals()['iq_id']+=1
def versionCB(conn, iq_obj):
uname=os.popen("uname -sr", 'r')
osver=uname.read().strip()
uname.close()
    pipe = os.popen('sh -c "python -V 2>&1"')
python_ver = pipe.read(1024).strip()
osver = osver + ' ' + python_ver
iq_obj=iq_obj.buildReply('result')
qp=iq_obj.getTag('query')
qp.setTagData('name', vername)
qp.setTagData('version', version)
qp.setTagData('os', osver)
conn.send(iq_obj)
raise xmpp.NodeProcessed
def versionresultCB(conn, iq_obj):
qp=iq_obj.getTag('query')
rname=qp.getTagData('name')
rversion=qp.getTagData('version')
ros=qp.getTagData('os')
rid=iq_obj.getID()
if ver_queue.has_key(rid):
if ver_queue[rid][2]:
if ver_queue[rid][1]==iq_obj.getFrom().getResource():
conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['yourping']%(ver_queue[rid][1], str(round(time.time()-ver_queue[rid][2],3))), 'groupchat'))
else:
conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['ping']%(ver_queue[rid][1], iq_obj.getFrom().getResource(), str(round(time.time()-ver_queue[rid][2],3))), 'groupchat'))
else:
if ver_queue[rid][1]==iq_obj.getFrom().getResource():
conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['yourversion']%(ver_queue[rid][1], rname, rversion, ros), 'groupchat'))
else:
conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['version']%(ver_queue[rid][1], iq_obj.getFrom().getResource(), rname, rversion, ros), 'groupchat'))
def versionerrorCB(conn, iq_obj):
rid=iq_obj.getID()
if ver_queue.has_key(rid):
if ver_queue[rid][2]:
conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['ping_error']%(ver_queue[rid][1], iq_obj.getFrom().getResource()), 'groupchat'))
else:
conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['version_error']%(ver_queue[rid][1], iq_obj.getFrom().getResource()), 'groupchat'))
def timeCB(conn, iq_obj):
timep=os.popen("date -u '+%Y%m%dT%T'", 'r'); futc=timep.read(17); timep.close()
timep=os.popen("date '+%Z|%d/%m/%Y %T|'", 'r'); ftime=timep.read(); timep.close()
iq_obj = iq_obj.buildReply('result')
qp = iq_obj.getTag('query')
qp.setTagData('utc', futc)
qp.setTagData('tz', ftime.split("|")[0])
qp.setTagData('display', ftime.split("|")[1])
conn.send(iq_obj)
raise xmpp.NodeProcessed
def timeresultCB(conn, iq_obj):
qp=iq_obj.getTag('query')
rdisplay=qp.getTagData('display')
rid=iq_obj.getID()
if time_queue.has_key(rid):
if time_queue[rid][1]==iq_obj.getFrom().getResource():
conn.send(xmpp.protocol.Message(time_queue[rid][0], messages['yourtime']%(time_queue[rid][1], rdisplay), 'groupchat'))
else:
conn.send(xmpp.protocol.Message(time_queue[rid][0], messages['time']%(time_queue[rid][1], iq_obj.getFrom().getResource(), rdisplay), 'groupchat'))
def timeerrorCB(conn, iq_obj):
rid=iq_obj.getID()
if time_queue.has_key(rid):
conn.send(xmpp.protocol.Message(time_queue[rid][0], messages['time_error']%(time_queue[rid][1], iq_obj.getFrom().getResource()), 'groupchat'))
| gpl-2.0 | -6,113,828,099,328,659,000 | 39.271186 | 195 | 0.62016 | false |
smurfix/pybble | pybble/cache/__init__.py | 1 | 1978 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including an optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
regions = None
from dogpile.cache.api import NO_VALUE
def keystr(args):
# Take care to keep this idempotent: keystr(x) == keystr(keystr(x))
return '|'.join(str(x) for x in args)
## TODO: add keyword-only region param
def delete(*args):
"""Delete a cache value (or a bunch of them)."""
global regions
if regions is None:
from .config import regions
if not regions:
return
# TODO: this only works with redis
r = regions['default'].backend.client
n = 0
if "*" in args:
for k in r.keys(keystr(args)):
r.delete(k)
n += 1
else:
r.delete(keystr(args))
n = 1
return n
def get(*args):
"""Get a cache value, or NO_VALUE if not set."""
global regions
if regions is None:
from .config import regions
if not regions:
return NO_VALUE
r = regions['default']
return r.get(keystr(args))
def set(val, *args):
"""Set a cache value. You really should use cached() instead."""
global regions
if regions is None:
from .config import regions
if not regions:
return
r = regions['default']
r.set(keystr(args),val)
def cached(func, *args):
"""Cache this function's result. Runs the function exactly once."""
global regions
if regions is None:
from .config import regions
if not regions:
return func()
r = regions['default']
return r.get_or_create(keystr(args), func)
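# Usage sketch (names illustrative): page = cached(lambda: render(page_id),
# "page", page_id) runs render() once per ("page", page_id) key and serves
# later calls from the default region.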
| gpl-3.0 | 7,012,280,615,499,374,000 | 24.960526 | 82 | 0.695895 | false |
natj/bender | paper/figs/fig9.py | 1 | 4141 | import numpy as np
import math
from pylab import *
from palettable.wesanderson import Zissou_5 as wsZ
import matplotlib.ticker as mtick
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
from scipy.signal import savgol_filter
def smooth(xx, yy):
yy = savgol_filter(yy, 7, 2)
np.clip(yy, 0.0, 1000.0, out=yy)
yy[0] = 0.0
yy[-1] = 0.0
return xx, yy
#Read JN files
def read_lineprof(fname):
da = np.genfromtxt(fname, delimiter=",")
des = np.diff(da[:,0])[2]
norm = np.sum(des*da[:,1])
return da[:,0],da[:,1]/norm
#Read JN files
def read_csv(fname):
da = np.genfromtxt(fname, delimiter=",")
des = np.diff(da[:,0])[2]
norm = np.sum(des*da[:,1])
return da[:,0],da[:,1] #/norm
## Plot
fig = figure(figsize=(5,3), dpi=80)
rc('font', family='serif')
rc('xtick', labelsize='xx-small')
rc('ytick', labelsize='xx-small')
gs = GridSpec(1, 1)
#gs.update(wspace = 0.34)
#gs.update(hspace = 0.4)
lsize = 10.0
xmin = 0.69
xmax = 0.82
#error window limits
eymin = -0.5
eymax = 0.5
#path to files
#path_JN = "../../out3/lines/"
path_JN = "../../out/lines2/"
#labels size
tsize = 10.0
nu = '700'
#fig.text(0.5, 0.92, '$\\theta_s = 18^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.72, '$\\theta_s = 45^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.52, '$\\theta_s = 90^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.32, 'Hopf $\\theta_s = 45^{\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.12, 'Phase',ha='center', va='center', size=lsize)
ax1 = subplot(gs[0,0])
ax1.minorticks_on()
ax1.set_xlim(xmin, xmax)
ax1.set_ylim(0.0, 30)
ax1.set_ylabel('Normalized flux',size=lsize)
ax1.set_xlabel('Energy $E/E\'$',size=lsize)
#xx1, yy1 = read_lineprof(path_JN+'lineprof_f700pbbr10m1.4i20.csv')
#ax1.plot(xx1, yy1, "k--")
#xx2, yy2 = read_lineprof(path_JN+'lineprof_obl_HTq0_f700pbbr10m1.4i20.csv')
#ax1.plot(xx2, yy2, "k-")
#lineprof_obl_HTq3_f700pbbr10m1.4i20.csv
#lineprof_obl_HTq5_f700pbbr10m1.4i20.csv
#lineprof_obl_HTq2_f700pbbr10m1.4i20.csv
files_JN = [
"lineprof_f700pbbr10m1.4i20.csv",
"lineprof_obl_f700pbbr10m1.4i20.csv",
#"lineprof_sph2_HTqfix_f700pbbr10m1.4i20.csv"]
#"lineprof_obl_HTq0_f700pbbr10m1.4i20.csv",
"lineprof_obl_HTq1_f700pbbr10m1.4i20.csv"]
#"lineprof_obl_HTq4_f700pbbr10m1.4i20.csv"]
files_JN = ['sch/lineprofile_f700_bb_r10_m1.4_i20.csv',
'obl/lineprofile_f700_bb_r10_m1.4_i20.csv',
'q/lineprofile_f700_bb_r10_m1.4_i20.csv']
cols = ["black",
"blue",
"red",
"magenta"]
i = 0
for file_name in files_JN:
xx, yy = read_lineprof(path_JN+file_name)
xx, yy = smooth(xx, yy)
ax1.plot(xx, yy, color=cols[i], linestyle="solid")
i += 1
#path_JN = "../../out3/lines/"
xx, yy = read_lineprof("../../out3/lines/lineprof_obl_HTq4_f700pbbr10m1.4i20.csv")
ax1.plot(xx, yy, color="red", linestyle="dashed")
#files_Bau = [
#"sch+dopp.csv",
#"sch+dopp+obl.csv",
#"HT.csv",
#"HT_obl.csv"]
files_Bau = ['sch.csv', 'obl.csv', 'ht.csv']
i = 0
for file_name in files_Bau:
xx, yy = read_csv(path_JN+file_name)
#rescale xx for correct scaling
#xx = (xx-0.72)/(0.89-0.72)*(0.8-0.72) + 0.72
#ax1.plot(xx, yy, color=cols[i], linestyle="dashed")
i += 1
############ q's
#xx3, yy3 = read_lineprof(path_JN+'lineprof_obl_HTq1_f700pbbr10m1.4i20.csv')
#ax1.plot(xx3, yy3, "k-", label="$q = -0.268$")
#
#xx4, yy4 = read_lineprof(path_JN+'lineprof_obl_HTq2_f700pbbr10m1.4i20.csv')
#ax1.plot(xx4, yy4, "r-", label="$q \\times 2$")
#
#xx5, yy5 = read_lineprof(path_JN+'lineprof_obl_HTq3_f700pbbr10m1.4i20.csv')
#ax1.plot(xx5, yy5, "g-", label="$q \\times 3$")
#
#xx6, yy6 = read_lineprof(path_JN+'lineprof_obl_HTq4_f700pbbr10m1.4i20.csv')
#ax1.plot(xx6, yy6, "b-", label="$q \\times 4$")
#
#xx7, yy7 = read_lineprof(path_JN+'lineprof_obl_HTq5_f700pbbr10m1.4i20.csv')
#ax1.plot(xx7, yy7, "m-", label="$q \\times 5$")
#
#legend = ax1.legend(loc='upper left', shadow=False, labelspacing=0.1)
#for label in legend.get_texts():
# label.set_fontsize('x-small')
savefig('fig9_testi.pdf', bbox_inches='tight')
| mit | 426,456,998,150,507,200 | 23.358824 | 92 | 0.63632 | false |
gaborvecsei/Color-Tracker | examples/tracking.py | 1 | 2306 | import argparse
from functools import partial
import cv2
import color_tracker
# You can determine these values with the HSVColorRangeDetector()
HSV_LOWER_VALUE = [155, 103, 82]
HSV_UPPER_VALUE = [178, 255, 255]
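# Note: OpenCV's 8-bit hue axis runs 0-179 (not 0-359), so 155-178 above
# selects the red/magenta band; tune the range with HSVColorRangeDetector.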
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-low", "--low", nargs=3, type=int, default=HSV_LOWER_VALUE,
help="Lower value for the HSV range. Default = 155, 103, 82")
parser.add_argument("-high", "--high", nargs=3, type=int, default=HSV_UPPER_VALUE,
help="Higher value for the HSV range. Default = 178, 255, 255")
parser.add_argument("-c", "--contour-area", type=float, default=2500,
help="Minimum object contour area. This controls how small objects should be detected. Default = 2500")
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()
return args
def tracking_callback(tracker: color_tracker.ColorTracker, verbose: bool = True):
# Visualizing the original frame and the debugger frame
cv2.imshow("original frame", tracker.frame)
cv2.imshow("debug frame", tracker.debug_frame)
# Stop the script when we press ESC
key = cv2.waitKey(1)
if key == 27:
tracker.stop_tracking()
if verbose:
for obj in tracker.tracked_objects:
print("Object {0} center {1}".format(obj.id, obj.last_point))
def main():
args = get_args()
# Creating a kernel for the morphology operations
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
# Init the ColorTracker object
tracker = color_tracker.ColorTracker(max_nb_of_objects=5, max_nb_of_points=20, debug=True)
# Setting a callback which is called at every iteration
callback = partial(tracking_callback, verbose=args.verbose)
tracker.set_tracking_callback(tracking_callback=callback)
# Start tracking with a camera
with color_tracker.WebCamera(video_src=0) as webcam:
# Start the actual tracking of the object
tracker.track(webcam,
hsv_lower_value=args.low,
hsv_upper_value=args.high,
min_contour_area=args.contour_area,
kernel=kernel)
if __name__ == "__main__":
main()
| mit | -4,765,477,397,888,369,000 | 34.476923 | 127 | 0.647008 | false |
jefftc/changlab | Betsy/Betsy/modules/convert_simplevariantfile_to_matrix.py | 1 | 8224 | from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
out_filename):
from genomicode import filelib
from genomicode import SimpleVariantMatrix
from genomicode import AnnotationMatrix
simple_file = in_data.identifier
metadata = {}
# Read all in memory. Hopefully, not too big.
ds = []
for d in filelib.read_row(simple_file, header=-1):
ds.append(d)
#if len(ds) > 50000: # DEBUG
# break
# MuSE sometimes has alternates.
# Alt A,C
# Num_Alt 13,0
# VAF 0.19,0.0
# Detect this and fix it. Take the alternate with the highest VAF.
for d in ds:
if d.Num_Alt.find(",") < 0:
continue
x1 = d.Num_Alt.split(",")
x2 = d.VAF.split(",")
assert len(x1) == len(x2)
x1 = map(int, x1)
x2 = map(float, x2)
max_vaf = max_i = None
for i in range(len(x2)):
if max_vaf is None or x2[i] > max_vaf:
max_vaf = x2[i]
max_i = i
assert max_i is not None
d.Num_Alt = str(x1[max_i])
d.VAF = str(x2[max_i])
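        # e.g. Alt "A,C" with Num_Alt "13,0" and VAF "0.19,0.0" collapses to
        # the single highest-VAF alternate: Num_Alt "13", VAF "0.19".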
# Make a list of all the positions.
positions = {} # (Chrom, Pos) -> 1
for d in ds:
positions[(d.Chrom, int(d.Pos))] = 1
positions = sorted(positions)
# Make a list of all the callers.
callers = {}
for d in ds:
callers[d.Caller] = 1
callers = sorted(callers)
# Make a list of all the samples.
samples = {}
for d in ds:
samples[d.Sample] = 1
samples = sorted(samples)
# Make a list of the coordinates.
coord_data = {}
for d in ds:
x = d.Chrom, int(d.Pos), d.Ref, d.Alt
coord_data[x] = 1
coord_data = sorted(coord_data)
# Make a list of all DNA calls.
call_data = []
for d in ds:
assert d.Source in ["DNA", "RNA"]
if d.Source != "DNA":
continue
num_ref = num_alt = vaf = None
if d.Num_Ref:
num_ref = int(d.Num_Ref)
if d.Num_Alt:
num_alt = int(d.Num_Alt)
if d.VAF:
vaf = float(d.VAF)
if num_ref is None and num_alt is None and vaf is None:
continue
call = SimpleVariantMatrix.Call(num_ref, num_alt, vaf)
x = d.Chrom, int(d.Pos), d.Ref, d.Alt, d.Sample, d.Caller, call
call_data.append(x)
# sample -> caller -> chrom, pos, ref, alt -> call
samp2caller2coord2call = {}
for x in call_data:
chrom, pos, ref, alt, sample, caller, call = x
coord = chrom, pos, ref, alt
if sample not in samp2caller2coord2call:
samp2caller2coord2call[sample] = {}
caller2coord2call = samp2caller2coord2call[sample]
if caller not in caller2coord2call:
caller2coord2call[caller] = {}
coord2call = caller2coord2call[caller]
# A (sample, caller, coord) may have multiple calls. For
# example, for germline samples that are called with each
# tumor sample. If this is the case, then take the call
# with the highest coverage.
if coord in coord2call:
old_call = coord2call[coord]
cov = old_cov = None
if call.num_ref is not None and call.num_alt is not None:
cov = call.num_ref + call.num_alt
if old_call.num_ref is not None and \
old_call.num_alt is not None:
old_cov = old_call.num_ref + old_call.num_alt
if cov is None and old_cov is not None:
call = old_call
elif cov is not None and old_cov is not None and cov < old_cov:
call = old_call
coord2call[coord] = call
# Count the number of callers that called a variant at each
# position for each sample.
samp2coord2caller = {} # sample -> chrom, pos, ref, alt -> caller -> 1
# Need to do this first, to make sure each caller is counted
# at most once. This is to account for germline samples that
# is called by each caller multiple times.
for x in call_data:
chrom, pos, ref, alt, sample, caller, call = x
coord = chrom, pos, ref, alt
if sample not in samp2coord2caller:
samp2coord2caller[sample] = {}
if coord not in samp2coord2caller[sample]:
samp2coord2caller[sample][coord] = {}
samp2coord2caller[sample][coord][caller] = 1
samp2coord2nc = {} # sample -> chrom, pos, ref, alt -> num_callers
for sample in samp2coord2caller:
samp2coord2nc[sample] = {}
for coord in samp2coord2caller[sample]:
samp2coord2nc[sample][coord] = len(
samp2coord2caller[sample][coord])
#for x in call_data:
# chrom, pos, ref, alt, sample, caller, call = x
# coord = chrom, pos, ref, alt
# if sample not in samp2coord2nc:
# samp2coord2nc[sample] = {}
# nc = samp2coord2nc[sample].get(coord, 0) + 1
# samp2coord2nc[sample][coord] = nc
# Format everything into an annotation matrix.
headers0 = []
headers1 = []
headers2 = []
all_annots = []
# Add the positions.
headers0 += ["", "", "", ""]
headers1 += ["", "", "", ""]
headers2 += ["Chrom", "Pos", "Ref", "Alt"]
for i in range(4):
x = [x[i] for x in coord_data]
x = [str(x) for x in x]
all_annots.append(x)
# Add the number of callers information.
headers0 += ["Num Callers"] * len(samples)
headers1 += [""] * len(samples)
headers2 += samples
for sample in samples:
annots = []
for coord in coord_data:
nc = samp2coord2nc.get(sample, {}).get(coord, "")
annots.append(nc)
all_annots.append(annots)
# Add information about calls.
for sample in samples:
caller2coord2call = samp2caller2coord2call.get(sample, {})
for i, caller in enumerate(callers):
h0 = ""
if not i:
h0 = sample
h1 = caller
h2 = "Ref/Alt/VAF"
headers0.append(h0)
headers1.append(h1)
headers2.append(h2)
coord2call = caller2coord2call.get(caller, {})
annots = []
for coord in coord_data:
x = ""
call = coord2call.get(coord)
if call:
x = SimpleVariantMatrix._format_call(call)
annots.append(x)
all_annots.append(annots)
# Set the headers.
assert len(headers0) == len(headers1)
assert len(headers0) == len(headers2)
assert len(headers0) == len(all_annots)
headers = [None] * len(headers0)
for i, x in enumerate(zip(headers0, headers1, headers2)):
x = "___".join(x)
headers[i] = x
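        # Header sketch: the three header rows are joined with "___", so a
        # call column looks like "Sample1___mutect___Ref/Alt/VAF" (names
        # illustrative) and the coordinate columns come out as "______Chrom",
        # "______Pos", and so on.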
matrix = AnnotationMatrix.create_from_annotations(headers, all_annots)
SimpleVariantMatrix.write_from_am(out_filename, matrix)
#annot_header = ["Chrom", "Pos", "Ref", "Alt"]
#matrix = SimpleVariantMatrix.make_matrix(
# samples, callers, annot_header, coord_data, named_data,
# call_data)
#SimpleVariantMatrix.write(out_filename, matrix)
return metadata
def name_outfile(self, antecedents, user_options):
return "calls.txt"
| mit | 3,985,949,250,645,467,600 | 36.552511 | 79 | 0.508512 | false |
wrenchzc/photomanager | tests/test_command_update.py | 1 | 1307 | from tests.utils import remove_file
from photomanager.lib.pmconst import PMDBNAME
from photomanager.commands.index import CommandIndex
from photomanager.commands.update import CommandUpdate
from photomanager.db.dbutils import get_db_session, close_db_session
from photomanager.db.models import ImageMeta
cmd_inx_test_root = 'tests/data'
class TestDisplayImg(object):
@classmethod
def setup_class(cls):
cls._clear()
cls._do_index()
@classmethod
def teardown_class(cls):
cls._clear()
db_filename = cmd_inx_test_root + '/' + PMDBNAME
remove_file(db_filename)
@staticmethod
def _clear():
db_filename = cmd_inx_test_root + '/' + PMDBNAME
close_db_session(db_filename)
@staticmethod
def _do_index():
command_index = CommandIndex(cmd_inx_test_root, {})
cnt = command_index.do()
def setup_method(self):
self._clear()
def teardown_method(self):
self._clear()
def test_update_address_by_geoinfo(self):
command_update = CommandUpdate(cmd_inx_test_root, {"geoinfo": True})
command_update.do()
test2_meta = command_update.handler.session.query(ImageMeta).filter(ImageMeta.filename == "test2.jpg").first()
assert ("汝城" in test2_meta.address)
| mit | -8,557,492,756,590,856,000 | 27.955556 | 118 | 0.66462 | false |
dzorlu/sdc-segmentation | train.py | 1 | 4118 | import sys
import tensorflow as tf
from tensorflow.python.ops import math_ops
sys.path.append("slim/")
slim = tf.contrib.slim
TRAIN_DIR = "/tmp/tf"
class Trainer(object):
def __init__(self, nb_classes, optimizer, learning_rate):
self.nb_classes = nb_classes
# learning rate can be a placeholder tensor
self.learning_rate = learning_rate
self.optimizer = optimizer(learning_rate)
self.train_op = None
self.prediction = None
def build(self, predictions, labels, one_hot=False):
with tf.name_scope('training'):
if one_hot:
labels = tf.one_hot(labels, depth=self.nb_classes)
labels = tf.squeeze(labels, axis=2)
label_shape = tf.shape(labels)[:2]
predictions = tf.image.resize_bilinear(predictions, label_shape, name='resize_predictions')
else:
                labels = tf.reshape(labels, (-1, self.nb_classes))
predictions = tf.reshape(predictions, (-1, self.nb_classes))
self.prediction = predictions
labels = tf.expand_dims(labels, 0)
print("pred shape {}, label shape {}".format(predictions.get_shape(), labels.get_shape()))
            # Wraps softmax cross-entropy with logits and adds it to the loss collection.
tf.losses.softmax_cross_entropy(logits=predictions, onehot_labels=labels)
# include the regulization losses in the loss collection.
total_loss = tf.losses.get_total_loss()
self.train_op = slim.learning.create_train_op(total_loss,
optimizer=self.optimizer)
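    # Usage sketch (values illustrative): t = Trainer(21,
    # tf.train.AdamOptimizer, 1e-4); t.build(logits, labels, one_hot=True)
    # wires up the loss and the train_op consumed by train() below.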
def add_summaries(self):
# Add summaries for images, variables and losses.
global_summaries = set([])
# image summary
image_summary = tf.get_default_graph().get_tensor_by_name('IteratorGetNext:0')
image_summary = tf.expand_dims(image_summary, 0)
image_summary = tf.summary.image('image', image_summary)
global_summaries.add(image_summary)
# prediction summary
prediction = tf.argmax(self.prediction, axis=3)
prediction = tf.cast(prediction, tf.float32)
prediction = tf.expand_dims(prediction, 3)
image_summary = tf.summary.image('prediction', prediction)
global_summaries.add(image_summary)
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram(model_var.op.name, model_var))
# total loss
total_loss_tensor = tf.get_default_graph().get_tensor_by_name('training/total_loss:0')
global_summaries.add(tf.summary.scalar(total_loss_tensor.op.name, total_loss_tensor))
# Merge all summaries together.
summary_op = tf.summary.merge(list(global_summaries), name='summary_op')
return summary_op
def train(self, iterator,
filename,
restore_fn=None,
_add_summaries = True,
number_of_steps=10000,
save_interval_secs = 12000,
              save_summaries_secs=120,
keep_checkpoint_every_n_hours=5):
summary_op = None
if _add_summaries:
summary_op = self.add_summaries()
# Save checkpoints regularly.
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
# init fn for the dataset ops and checkpointin
def initializer_fn(sess):
input_tensor = tf.get_default_graph().get_tensor_by_name('training_data/input:0')
sess.run(iterator.initializer, feed_dict={input_tensor: filename})
if restore_fn:
restore_fn(sess)
init_fn = initializer_fn
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
# train
slim.learning.train(train_op=self.train_op,
logdir=TRAIN_DIR,
session_config=session_config,
summary_op=summary_op,
init_fn=init_fn,
save_interval_secs = save_interval_secs,
number_of_steps=number_of_steps,
save_summaries_secs=same_summaries_secs,
saver=saver)
| mit | 8,798,008,712,275,257,000 | 41.453608 | 99 | 0.639631 | false |
ge0rgi/cinder | cinder/volume/drivers/dell_emc/vmax/provision_v3.py | 1 | 46116 | # Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
import six
from cinder import coordination
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.dell_emc.vmax import utils
LOG = logging.getLogger(__name__)
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
EMC_ROOT = 'root/emc'
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
INFO_SRC_V3 = 3
ACTIVATESNAPVX = 4
DEACTIVATESNAPVX = 19
SNAPSYNCTYPE = 7
RDF_FAILOVER = 10
RDF_FAILBACK = 11
RDF_RESYNC = 14
RDF_SYNC_MODE = 2
RDF_SYNCHRONIZED = 6
RDF_FAILEDOVER = 12
class VMAXProvisionV3(object):
"""Provisioning Class for SMI-S based EMC volume drivers.
This Provisioning class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
def __init__(self, prtcl):
self.protocol = prtcl
self.utils = utils.VMAXUtils(prtcl)
def delete_volume_from_pool(
self, conn, storageConfigservice, volumeInstanceName, volumeName,
extraSpecs):
"""Given the volume instance remove it from the pool.
:param conn: connection to the ecom server
:param storageConfigservice: volume created from job
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param extraSpecs: additional info
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
startTime = time.time()
if isinstance(volumeInstanceName, list):
theElements = volumeInstanceName
volumeName = 'Bulk Delete'
else:
theElements = [volumeInstanceName]
rc, job = conn.InvokeMethod(
'ReturnElementsToStoragePool', storageConfigservice,
TheElements=theElements)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Delete Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod ReturnElementsToStoragePool took: "
"%(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc
def create_volume_from_sg(
self, conn, storageConfigService, volumeName,
sgInstanceName, volumeSize, extraSpecs):
"""Create the volume and associate it with a storage group.
We use EMCCollections parameter to supply a Device Masking Group
to contain a newly created storage volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage configuration service
:param volumeName: the volume name (String)
:param sgInstanceName: the storage group instance name
associated with an SLO
:param volumeSize: volume size (String)
:param extraSpecs: additional info
:returns: dict -- volumeDict - the volume dict
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
try:
storageGroupInstance = conn.GetInstance(sgInstanceName)
except Exception:
exceptionMessage = (_(
"Unable to get the name of the storage group"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
sgName = storageGroupInstance['ElementName']
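        # The coordination lock below is keyed on the storage group name, so
        # concurrent volume creates against the same SG are serialized.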
@coordination.synchronized("emc-sg-{storageGroup}")
def do_create_volume_from_sg(storageGroup):
startTime = time.time()
rc, job = conn.InvokeMethod(
'CreateOrModifyElementFromStoragePool',
storageConfigService, ElementName=volumeName,
EMCCollections=[sgInstanceName],
ElementType=self.utils.get_num(THINPROVISIONING, '16'),
Size=self.utils.get_num(volumeSize, '64'))
LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.",
{'volumename': volumeName,
'rc': rc})
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Create Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
# Find the newly created volume.
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
return volumeDict, rc
return do_create_volume_from_sg(sgName)
def _find_new_storage_group(
self, conn, maskingGroupDict, storageGroupName):
"""After creating an new storage group find it and return it.
:param conn: connection to the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:param storageGroupName: storage group name (String)
:returns: maskingGroupDict['MaskingGroup'] or None
"""
foundStorageGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundStorageGroupInstanceName
def get_volume_dict_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return self.create_volume_dict(associators[0].path)
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def get_volume_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return associators[0]
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def create_volume_dict(self, volumeInstanceName):
"""Create volume dictionary
:param volumeInstanceName: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
volpath = volumeInstanceName
volumeDict = {}
volumeDict['classname'] = volpath.classname
keys = {}
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
volumeDict['keybindings'] = keys
return volumeDict
def get_or_create_default_sg(self, conn, extraSpecs, storageSystemName,
doDisableCompression):
"""Get or create default storage group for a replica.
:param conn: the connection to the ecom server
:param extraSpecs: the extra specifications
:param storageSystemName: the storage system name
:param doDisableCompression: flag for compression
:returns: sgInstanceName, instance of storage group
"""
pool = extraSpecs[self.utils.POOL]
slo = extraSpecs[self.utils.SLO]
workload = extraSpecs[self.utils.WORKLOAD]
storageGroupName, controllerConfigService, sgInstanceName = (
self.utils.get_v3_default_sg_instance_name(
conn, pool, slo, workload, storageSystemName,
doDisableCompression))
if sgInstanceName is None:
sgInstanceName = self.create_storage_group_v3(
conn, controllerConfigService, storageGroupName,
pool, slo, workload, extraSpecs, doDisableCompression)
return sgInstanceName
def create_element_replica(
self, conn, repServiceInstanceName,
cloneName, syncType, sourceInstance, extraSpecs,
targetInstance=None, rsdInstance=None, copyState=None):
"""Make SMI-S call to create replica for source element.
:param conn: the connection to the ecom server
:param repServiceInstanceName: replication service
:param cloneName: clone volume name
:param syncType: 7=snapshot, 8=clone
:param sourceInstance: source volume instance
:param extraSpecs: additional info
:param targetInstance: Target volume instance. Default None
:param rsdInstance: replication settingdata instance. Default None
:returns: int -- rc - return code
:returns: job - job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
startTime = time.time()
LOG.debug("Create replica: %(clone)s "
"syncType: %(syncType)s Source: %(source)s.",
{'clone': cloneName,
'syncType': syncType,
'source': sourceInstance.path})
storageSystemName = sourceInstance['SystemName']
doDisableCompression = self.utils.is_compression_disabled(extraSpecs)
sgInstanceName = (
self.get_or_create_default_sg(
conn, extraSpecs, storageSystemName, doDisableCompression))
try:
storageGroupInstance = conn.GetInstance(sgInstanceName)
except Exception:
exceptionMessage = (_(
"Unable to get the name of the storage group"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
@coordination.synchronized("emc-sg-{storageGroupName}")
def do_create_element_replica(storageGroupName):
if targetInstance is None and rsdInstance is None:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=self.utils.get_num(syncType, '16'),
SourceElement=sourceInstance.path,
Collections=[sgInstanceName])
else:
rc, job = self._create_element_replica_extra_params(
conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, rsdInstance,
sgInstanceName, copyState=copyState)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Create Cloned Volume: %(cloneName)s "
"Return code: %(rc)lu. Error: %(error)s.")
% {'cloneName': cloneName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateElementReplica "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc, job
return do_create_element_replica(storageGroupInstance['ElementName'])
def create_remote_element_replica(
self, conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, rdfGroupInstance, extraSpecs):
"""Create a replication relationship between source and target.
:param conn: the ecom connection
:param repServiceInstanceName: the replication service
:param cloneName: the name of the target volume
:param syncType: the synchronization type
:param sourceInstance: the source volume instance
:param targetInstance: the target volume instance
:param rdfGroupInstance: the rdf group instance
:param extraSpecs: additional info
:return: rc, job
"""
startTime = time.time()
LOG.debug("Setup replication relationship: %(source)s "
"syncType: %(syncType)s Source: %(target)s.",
{'source': sourceInstance.path,
'syncType': syncType,
'target': targetInstance.path})
rc, job = self._create_element_replica_extra_params(
conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, None, None, rdfGroupInstance)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (
_("Error Create Cloned Volume: %(cloneName)s "
"Return code: %(rc)lu. Error: %(error)s.")
% {'cloneName': cloneName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateElementReplica "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc, job
def _create_element_replica_extra_params(
self, conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, rsdInstance, sgInstanceName,
rdfGroupInstance=None, copyState=None):
"""CreateElementReplica using extra parameters.
:param conn: the connection to the ecom server
:param repServiceInstanceName: replication service
:param cloneName: clone volume name
:param syncType: 7=snapshot, 8=clone
:param sourceInstance: source volume instance
:param targetInstance: Target volume instance. Default None
:param rsdInstance: replication settingdata instance. Default None
:param sgInstanceName: pool instance name
:returns: int -- rc - return code
:returns: job - job object of the replica creation operation
"""
syncType = self.utils.get_num(syncType, '16')
modeType = self.utils.get_num(RDF_SYNC_MODE, '16')
if targetInstance and rsdInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path,
ReplicationSettingData=rsdInstance)
elif targetInstance and rdfGroupInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
SyncType=syncType,
Mode=modeType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path,
ConnectivityCollection=rdfGroupInstance)
elif rsdInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
ReplicationSettingData=rsdInstance,
Collections=[sgInstanceName],
WaitForCopyState=copyState)
elif targetInstance and copyState:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path,
WaitForCopyState=copyState)
elif targetInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path)
return rc, job
def break_replication_relationship(
self, conn, repServiceInstanceName, syncInstanceName,
operation, extraSpecs, force=False):
"""Deletes the relationship between the clone/snap and source volume.
Makes an SMI-S call to break clone relationship between the clone
volume and the source.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param operation: operation code
:param extraSpecs: additional info
:param force: force to break replication relationship if True
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
LOG.debug("Break replication relationship: %(sv)s "
"operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs, force)
def create_storage_group_v3(self, conn, controllerConfigService,
groupName, srp, slo, workload, extraSpecs,
doDisableCompression):
"""Create the volume in the specified pool.
:param conn: the connection information to the ecom server
:param controllerConfigService: the controller configuration service
:param groupName: the group name (String)
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
:param extraSpecs: additional info
:param doDisableCompression: disable compression flag
:returns: storageGroupInstanceName - storage group instance name
"""
startTime = time.time()
@coordination.synchronized("emc-sg-{sgGroupName}")
def do_create_storage_group_v3(sgGroupName):
            if doDisableCompression and slo and workload:
                rc, job = conn.InvokeMethod(
                    'CreateGroup',
                    controllerConfigService,
                    GroupName=sgGroupName,
                    Type=self.utils.get_num(4, '16'),
                    EMCSRP=srp,
                    EMCSLO=slo,
                    EMCWorkload=workload,
                    EMCDisableCompression=True)
            elif slo and workload:
                rc, job = conn.InvokeMethod(
                    'CreateGroup',
                    controllerConfigService,
                    GroupName=sgGroupName,
                    Type=self.utils.get_num(4, '16'),
                    EMCSRP=srp,
                    EMCSLO=slo,
                    EMCWorkload=workload)
            else:
                # Without an SLO/workload pair, create a plain group.
                rc, job = conn.InvokeMethod(
                    'CreateGroup',
                    controllerConfigService,
                    GroupName=sgGroupName,
                    Type=self.utils.get_num(4, '16'))
            if rc != 0:
                rc, errordesc = self.utils.wait_for_job_complete(
                    conn, job, extraSpecs)
                if rc != 0:
                    exceptionMessage = (_(
                        "Error Create Group: %(groupName)s. "
                        "Return code: %(rc)lu. Error: %(error)s.")
                        % {'groupName': sgGroupName,
                           'rc': rc,
                           'error': errordesc})
                    LOG.error(exceptionMessage)
                    raise exception.VolumeBackendAPIException(
                        data=exceptionMessage)
LOG.debug("InvokeMethod CreateGroup "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
foundStorageGroupInstanceName = self._find_new_storage_group(
conn, job, groupName)
return foundStorageGroupInstanceName
return do_create_storage_group_v3(groupName)
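    # Illustrative call creating an SLO-backed group (all names and values
    # are made up for the example):
    #
    #     sg_name = provision.create_storage_group_v3(
    #         conn, ctrl_config_service, 'OS-SRP_1-Diamond-OLTP-SG',
    #         'SRP_1', 'Diamond', 'OLTP', extra_specs, False)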
def get_storage_pool_capability(self, conn, poolInstanceName):
"""Get the pool capability.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance
:returns: the storage pool capability instance. None if not found
"""
storagePoolCapability = None
associators = (
conn.AssociatorNames(poolInstanceName,
ResultClass='Symm_StoragePoolCapabilities'))
if len(associators) > 0:
storagePoolCapability = associators[0]
return storagePoolCapability
def get_storage_pool_setting(
self, conn, storagePoolCapability, slo, workload):
"""Get the pool setting for pool capability.
:param conn: the connection information to the ecom server
:param storagePoolCapability: the storage pool capability instance
:param slo: the slo string e.g Bronze
:param workload: the workload string e.g DSS_REP
:returns: the storage pool setting instance
"""
foundStoragePoolSetting = None
storagePoolSettings = (
conn.AssociatorNames(storagePoolCapability,
ResultClass='CIM_storageSetting'))
for storagePoolSetting in storagePoolSettings:
settingInstanceID = storagePoolSetting['InstanceID']
matchString = ("%(slo)s:%(workload)s"
% {'slo': slo,
'workload': workload})
if matchString in settingInstanceID:
foundStoragePoolSetting = storagePoolSetting
break
if foundStoragePoolSetting is None:
exceptionMessage = (_(
"The array does not support the storage pool setting "
"for SLO %(slo)s and workload %(workload)s. Please "
"check the array for valid SLOs and workloads.")
% {'slo': slo,
'workload': workload})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return foundStoragePoolSetting
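    # The InstanceID match above is a plain substring test: with slo='Bronze'
    # and workload='DSS', any setting whose InstanceID contains 'Bronze:DSS'
    # is selected.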
def _get_supported_size_range_for_SLO(
self, conn, storageConfigService,
srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs):
"""Gets available performance capacity per SLO.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage configuration service instance
:param srpPoolInstanceName: the SRP storage pool instance
:param storagePoolSettingInstanceName: the SLO type, e.g Bronze
:param extraSpecs: additional info
:returns: dict -- supportedSizeDict - the supported size dict
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, supportedSizeDict = conn.InvokeMethod(
'GetSupportedSizeRange',
srpPoolInstanceName,
ElementType=self.utils.get_num(3, '16'),
Goal=storagePoolSettingInstanceName)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(
conn, supportedSizeDict, extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Cannot get supported size range for %(sps)s "
"Return code: %(rc)lu. Error: %(error)s.")
% {'sps': storagePoolSettingInstanceName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod GetSupportedSizeRange "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return supportedSizeDict
def get_volume_range(
self, conn, storageConfigService, poolInstanceName, slo, workload,
extraSpecs):
"""Get upper and lower range for volume for slo/workload combination.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service
:param poolInstanceName: the pool instance
:param slo: slo string e.g Bronze
:param workload: workload string e.g DSS
:param extraSpecs: additional info
:returns: supportedSizeDict
"""
supportedSizeDict = {}
storagePoolCapabilityInstanceName = self.get_storage_pool_capability(
conn, poolInstanceName)
if storagePoolCapabilityInstanceName:
storagePoolSettingInstanceName = self.get_storage_pool_setting(
conn, storagePoolCapabilityInstanceName, slo, workload)
supportedSizeDict = self._get_supported_size_range_for_SLO(
conn, storageConfigService, poolInstanceName,
storagePoolSettingInstanceName, extraSpecs)
return supportedSizeDict
def activate_snap_relationship(
self, conn, repServiceInstanceName, syncInstanceName, extraSpecs):
"""Activate snap relationship and start copy operation.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param extraSpecs: additional info
:returns: int -- return code
:returns: job object of the replica creation operation
"""
# Operation 4: activate the snapVx.
operation = ACTIVATESNAPVX
LOG.debug("Activate snap: %(sv)s operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs)
def return_to_resource_pool(self, conn, repServiceInstanceName,
syncInstanceName, extraSpecs):
"""Return the snap target resources back to the pool.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
:param extraSpecs: additional info
:returns: rc - return code
:returns: job object of the replica creation operation
"""
        # Deactivate the snapVx to return the target resources to the pool.
operation = DEACTIVATESNAPVX
LOG.debug("Return snap resource back to pool: "
"%(sv)s operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs)
def _modify_replica_synchronization(
self, conn, repServiceInstanceName, syncInstanceName,
operation, extraSpecs, force=False):
"""Modify the relationship between the clone/snap and source volume.
Helper function that makes an SMI-S call to break clone relationship
between the clone volume and the source.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param operation: operation code
:param extraSpecs: additional info
:param force: force to modify replication synchronization if True
:returns: int -- return code
:returns: job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, job = conn.InvokeMethod(
'ModifyReplicaSynchronization', repServiceInstanceName,
Operation=self.utils.get_num(operation, '16'),
Synchronization=syncInstanceName,
Force=force)
LOG.debug("_modify_replica_synchronization: %(sv)s "
"operation: %(operation)s Return code: %(rc)lu.",
{'sv': syncInstanceName, 'operation': operation, 'rc': rc})
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error modify replica synchronization: %(sv)s "
"operation: %(operation)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'sv': syncInstanceName, 'operation': operation,
'rc': rc, 'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod ModifyReplicaSynchronization "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc, job
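    # The break/activate/return-to-pool wrappers above and the failover and
    # failback helpers below all funnel into this single
    # ModifyReplicaSynchronization call, differing only in operation code.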
def create_group_replica(
self, conn, replicationService,
srcGroupInstanceName, tgtGroupInstanceName, relationName,
extraSpecs):
"""Make SMI-S call to create replica for source group.
:param conn: the connection to the ecom server
:param replicationService: replication service
:param srcGroupInstanceName: source group instance name
:param tgtGroupInstanceName: target group instance name
:param relationName: replica relationship name
:param extraSpecs: additional info
:returns: int -- return code
:returns: job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
LOG.debug(
"Creating CreateGroupReplica V3: "
"replicationService: %(replicationService)s "
"RelationName: %(relationName)s "
"sourceGroup: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'replicationService': replicationService,
'relationName': relationName,
'srcGroup': srcGroupInstanceName,
'tgtGroup': tgtGroupInstanceName})
rc, job = conn.InvokeMethod(
'CreateGroupReplica',
replicationService,
RelationshipName=relationName,
SourceGroup=srcGroupInstanceName,
TargetGroup=tgtGroupInstanceName,
SyncType=self.utils.get_num(SNAPSYNCTYPE, '16'),
WaitForCopyState=self.utils.get_num(4, '16'))
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMsg = (_("Error CreateGroupReplica: "
"source: %(source)s target: %(target)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'source': srcGroupInstanceName,
'target': tgtGroupInstanceName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMsg)
raise exception.VolumeBackendAPIException(data=exceptionMsg)
return rc, job
def get_srp_pool_stats(self, conn, arrayInfo):
"""Get the totalManagedSpace, remainingManagedSpace.
:param conn: the connection to the ecom server
:param arrayInfo: the array dict
:returns: totalCapacityGb
:returns: remainingCapacityGb
:returns: subscribedCapacityGb
:returns: array_reserve_percent
:returns: wlpEnabled
"""
totalCapacityGb = -1
remainingCapacityGb = -1
subscribedCapacityGb = -1
array_reserve_percent = -1
wlpEnabled = False
storageSystemInstanceName = self.utils.find_storageSystem(
conn, arrayInfo['SerialNumber'])
srpPoolInstanceNames = conn.AssociatorNames(
storageSystemInstanceName,
ResultClass='Symm_SRPStoragePool')
for srpPoolInstanceName in srpPoolInstanceNames:
poolnameStr = self.utils.get_pool_name(conn, srpPoolInstanceName)
if six.text_type(arrayInfo['PoolName']) == (
six.text_type(poolnameStr)):
try:
                    # Check that the pool hasn't suddenly been deleted.
srpPoolInstance = conn.GetInstance(srpPoolInstanceName)
propertiesList = srpPoolInstance.properties.items()
for properties in propertiesList:
if properties[0] == 'TotalManagedSpace':
cimProperties = properties[1]
totalManagedSpace = cimProperties.value
totalCapacityGb = self.utils.convert_bits_to_gbs(
totalManagedSpace)
elif properties[0] == 'RemainingManagedSpace':
cimProperties = properties[1]
remainingManagedSpace = cimProperties.value
remainingCapacityGb = (
self.utils.convert_bits_to_gbs(
remainingManagedSpace))
elif properties[0] == 'EMCSubscribedCapacity':
cimProperties = properties[1]
subscribedManagedSpace = cimProperties.value
subscribedCapacityGb = (
self.utils.convert_bits_to_gbs(
subscribedManagedSpace))
elif properties[0] == 'EMCPercentReservedCapacity':
cimProperties = properties[1]
array_reserve_percent = int(cimProperties.value)
                except Exception:
                    # Pool stats are best-effort; keep the -1 defaults.
                    pass
remainingSLOCapacityGb = (
self._get_remaining_slo_capacity_wlp(
conn, srpPoolInstanceName, arrayInfo,
storageSystemInstanceName['Name']))
if remainingSLOCapacityGb != -1:
remainingCapacityGb = remainingSLOCapacityGb
wlpEnabled = True
else:
LOG.warning(_LW(
"Remaining capacity %(remainingCapacityGb)s "
"GBs is determined from SRP pool capacity "
"and not the SLO capacity. Performance may "
"not be what you expect."),
{'remainingCapacityGb': remainingCapacityGb})
return (totalCapacityGb, remainingCapacityGb, subscribedCapacityGb,
array_reserve_percent, wlpEnabled)
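    # Sketch of how a driver stats path might consume the tuple (the
    # variable names are illustrative):
    #
    #     (total_gb, free_gb, provisioned_gb,
    #      reserved_pct, wlp_enabled) = self.get_srp_pool_stats(conn, info)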
def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName,
arrayInfo, systemName):
"""Get the remaining SLO capacity.
This is derived from the WLP portion of Unisphere. Please
see the SMIProvider doc and the readme doc for details.
:param conn: the connection to the ecom server
:param srpPoolInstanceName: SRP instance name
:param arrayInfo: the array dict
:param systemName: the system name
:returns: remainingCapacityGb
"""
remainingCapacityGb = -1
if arrayInfo['SLO']:
storageConfigService = (
self.utils.find_storage_configuration_service(
conn, systemName))
supportedSizeDict = (
self.get_volume_range(
conn, storageConfigService, srpPoolInstanceName,
arrayInfo['SLO'], arrayInfo['Workload'],
None))
try:
if supportedSizeDict['EMCInformationSource'] == INFO_SRC_V3:
remainingCapacityGb = self.utils.convert_bits_to_gbs(
supportedSizeDict['EMCRemainingSLOCapacity'])
LOG.debug("Received remaining SLO Capacity "
"%(remainingCapacityGb)s GBs for SLO "
"%(SLO)s and workload %(workload)s.",
{'remainingCapacityGb': remainingCapacityGb,
'SLO': arrayInfo['SLO'],
'workload': arrayInfo['Workload']})
except KeyError:
pass
return remainingCapacityGb
def extend_volume_in_SG(
self, conn, storageConfigService, volumeInstanceName,
volumeName, volumeSize, extraSpecs):
"""Extend a volume instance.
:param conn: connection to the ecom server
        :param storageConfigService: the storage configuration service
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param volumeSize: the volume size
:param extraSpecs: additional info
:returns: volumeDict
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, job = conn.InvokeMethod(
'CreateOrModifyElementFromStoragePool',
storageConfigService, TheElement=volumeInstanceName,
Size=self.utils.get_num(volumeSize, '64'))
LOG.debug("Extend Volume: %(volumename)s. Return code: %(rc)lu.",
{'volumename': volumeName,
'rc': rc})
if rc != 0:
rc, error_desc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Extend Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': error_desc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
        # Find the extended volume.
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
return volumeDict, rc
def get_rdf_group_instance(self, conn, repServiceInstanceName,
RDFGroupName):
"""Get the SRDF group instance.
:param conn: the connection to the ecom server
:param repServiceInstanceName: the replication service
:param RDFGroupName: the element name of the RDF group
:return: foundRDFGroupInstanceName
"""
foundRDFGroupInstanceName = None
RDFGroupInstances = (
conn.Associators(repServiceInstanceName,
ResultClass='CIM_ConnectivityCollection'))
for RDFGroupInstance in RDFGroupInstances:
if RDFGroupName == (
six.text_type(RDFGroupInstance['ElementName'])):
# Check that it has not been deleted recently.
instance = self.utils.get_existing_instance(
conn, RDFGroupInstance.path)
if instance is None:
# SRDF group not found.
foundRDFGroupInstanceName = None
else:
foundRDFGroupInstanceName = (
RDFGroupInstance.path)
break
return foundRDFGroupInstanceName
def failover_volume(self, conn, repServiceInstanceName,
storageSynchronizationSv,
extraSpecs):
"""Failover a volume to its target device.
:param conn: the connection to the ecom server
:param repServiceInstanceName: the replication service
:param storageSynchronizationSv: the storage synchronized object
:param extraSpecs: the extra specifications
"""
operation = RDF_FAILOVER
# check if volume already in failover state
syncState = self._check_sync_state(conn, storageSynchronizationSv)
if syncState == RDF_FAILEDOVER:
return
else:
LOG.debug("Failover: %(sv)s operation: %(operation)s.",
{'sv': storageSynchronizationSv, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, storageSynchronizationSv,
operation, extraSpecs)
def failback_volume(self, conn, repServiceInstanceName,
storageSynchronizationSv,
extraSpecs):
"""Failback a volume to the source device.
:param conn: the connection to the ecom server
:param repServiceInstanceName: the replication service
:param storageSynchronizationSv: the storage synchronized object
:param extraSpecs: the extra specifications
"""
failback_operation = RDF_FAILBACK
# check if volume already in failback state
syncState = self._check_sync_state(conn, storageSynchronizationSv)
if syncState == RDF_SYNCHRONIZED:
return
else:
LOG.debug("Failback: %(sv)s operation: %(operation)s.",
{'sv': storageSynchronizationSv,
'operation': failback_operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, storageSynchronizationSv,
failback_operation, extraSpecs)
def _check_sync_state(self, conn, syncName):
"""Get the copy state of a sync name.
:param conn: the connection to the ecom server
:param syncName: the storage sync sv name
:return: the copy state
"""
try:
syncInstance = conn.GetInstance(syncName,
LocalOnly=False)
syncState = syncInstance['syncState']
LOG.debug("syncState is %(syncState)lu.",
{'syncState': syncState})
return syncState
except Exception as ex:
exceptionMessage = (
_("Getting sync instance failed with: %(ex)s.")
% {'ex': six.text_type(ex)})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
| apache-2.0 | -5,951,031,079,355,960,000 | 42.382879 | 79 | 0.574334 | false |
Aorjoa/aiyara-ceph-dash | .tox/flake8/lib/python2.7/site-packages/flake8/formatting/default.py | 1 | 2191 | """Default formatting class for Flake8."""
from flake8.formatting import base
class SimpleFormatter(base.BaseFormatter):
"""Simple abstraction for Default and Pylint formatter commonality.
Sub-classes of this need to define an ``error_format`` attribute in order
to succeed. The ``format`` method relies on that attribute and expects the
``error_format`` string to use the old-style formatting strings with named
parameters:
* code
* text
* path
* row
* col
"""
error_format = None
def format(self, error):
"""Format and write error out.
If an output filename is specified, write formatted errors to that
file. Otherwise, print the formatted error to standard out.
"""
return self.error_format % {
"code": error.code,
"text": error.text,
"path": error.filename,
"row": error.line_number,
"col": error.column_number,
}
class Default(SimpleFormatter):
"""Default formatter for Flake8.
This also handles backwards compatibility for people specifying a custom
format string.
"""
error_format = '%(path)s:%(row)d:%(col)d: %(code)s %(text)s'
def after_init(self):
"""Check for a custom format string."""
if self.options.format.lower() != 'default':
self.error_format = self.options.format
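        # Anything other than the literal 'default' becomes the format
        # string, so running e.g.
        #     flake8 --format='%(path)s|%(row)d|%(code)s'
        # makes this formatter emit pipe-separated lines.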
class Pylint(SimpleFormatter):
"""Pylint formatter for Flake8."""
error_format = '%(path)s:%(row)d: [%(code)s] %(text)s'
class FilenameOnly(SimpleFormatter):
"""Only print filenames, e.g., flake8 -q."""
error_format = '%(path)s'
def after_init(self):
"""Initialize our set of filenames."""
self.filenames_already_printed = set()
def format(self, error):
"""Ensure we only print each error once."""
if error.filename not in self.filenames_already_printed:
self.filenames_already_printed.add(error.filename)
return super(FilenameOnly, self).format(error)
class Nothing(base.BaseFormatter):
"""Print absolutely nothing."""
def format(self, error):
"""Do nothing."""
pass
| bsd-2-clause | -733,534,517,707,735,400 | 26.3875 | 78 | 0.624829 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/endpoint_tests.py | 1 | 6296 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import endpoint
from .fhirdate import FHIRDate
class EndpointTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Endpoint", js["resourceType"])
return endpoint.Endpoint(js)
def testEndpoint1(self):
inst = self.instantiate_from("endpoint-example-iid.json")
self.assertIsNotNone(inst, "Must have instantiated a Endpoint instance")
self.implEndpoint1(inst)
js = inst.as_json()
self.assertEqual("Endpoint", js["resourceType"])
inst2 = endpoint.Endpoint(js)
self.implEndpoint1(inst2)
def implEndpoint1(self, inst):
self.assertEqual(inst.address, "https://pacs.hospital.org/IHEInvokeImageDisplay")
self.assertEqual(inst.connectionType.code, "ihe-iid")
self.assertEqual(inst.connectionType.system, "http://terminology.hl7.org/CodeSystem/endpoint-connection-type")
self.assertEqual(inst.id, "example-iid")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "PACS Hospital Invoke Image Display endpoint")
self.assertEqual(inst.payloadType[0].text, "DICOM IID")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.status, "generated")
def testEndpoint2(self):
inst = self.instantiate_from("endpoint-example-direct.json")
self.assertIsNotNone(inst, "Must have instantiated a Endpoint instance")
self.implEndpoint2(inst)
js = inst.as_json()
self.assertEqual("Endpoint", js["resourceType"])
inst2 = endpoint.Endpoint(js)
self.implEndpoint2(inst2)
def implEndpoint2(self, inst):
self.assertEqual(inst.address, "mailto:[email protected]")
self.assertEqual(inst.connectionType.code, "direct-project")
self.assertEqual(inst.id, "direct-endpoint")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "MARTIN SMIETANKA")
self.assertEqual(inst.payloadType[0].coding[0].code, "urn:hl7-org:sdwg:ccda-structuredBody:1.1")
self.assertEqual(inst.payloadType[0].coding[0].system, "urn:oid:1.3.6.1.4.1.19376.1.2.3")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.status, "generated")
def testEndpoint3(self):
inst = self.instantiate_from("endpoint-example-wadors.json")
self.assertIsNotNone(inst, "Must have instantiated a Endpoint instance")
self.implEndpoint3(inst)
js = inst.as_json()
self.assertEqual("Endpoint", js["resourceType"])
inst2 = endpoint.Endpoint(js)
self.implEndpoint3(inst2)
def implEndpoint3(self, inst):
self.assertEqual(inst.address, "https://pacs.hospital.org/wado-rs")
self.assertEqual(inst.connectionType.code, "dicom-wado-rs")
self.assertEqual(inst.connectionType.system, "http://terminology.hl7.org/CodeSystem/endpoint-connection-type")
self.assertEqual(inst.id, "example-wadors")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "PACS Hospital DICOM WADO-RS endpoint")
self.assertEqual(inst.payloadMimeType[0], "application/dicom")
self.assertEqual(inst.payloadType[0].text, "DICOM WADO-RS")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.status, "generated")
def testEndpoint4(self):
inst = self.instantiate_from("endpoint-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Endpoint instance")
self.implEndpoint4(inst)
js = inst.as_json()
self.assertEqual("Endpoint", js["resourceType"])
inst2 = endpoint.Endpoint(js)
self.implEndpoint4(inst2)
def implEndpoint4(self, inst):
self.assertEqual(inst.address, "http://fhir3.healthintersections.com.au/open/CarePlan")
self.assertEqual(inst.connectionType.code, "hl7-fhir-rest")
self.assertEqual(inst.connectionType.system, "http://terminology.hl7.org/CodeSystem/endpoint-connection-type")
self.assertEqual(inst.contact[0].system, "email")
self.assertEqual(inst.contact[0].use, "work")
self.assertEqual(inst.contact[0].value, "[email protected]")
self.assertEqual(inst.header[0], "bearer-code BASGS534s4")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "http://example.org/enpoint-identifier")
self.assertEqual(inst.identifier[0].value, "epcp12")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Health Intersections CarePlan Hub")
self.assertEqual(inst.payloadMimeType[0], "application/fhir+xml")
self.assertEqual(inst.payloadType[0].coding[0].code, "CarePlan")
self.assertEqual(inst.payloadType[0].coding[0].system, "http://hl7.org/fhir/resource-types")
self.assertEqual(inst.period.start.date, FHIRDate("2014-09-01").date)
self.assertEqual(inst.period.start.as_json(), "2014-09-01")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | 9,115,488,887,219,823,000 | 48.968254 | 118 | 0.676302 | false |
WisniewskiP/meson | install_meson.py | 1 | 3639 | #!/usr/bin/env python3
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script installs Meson. We can't use Meson to install itself
# because of the bootstrap problem. We can't use any other build system
# either because that would be just silly.
import os, sys, glob, shutil, gzip
from optparse import OptionParser
usage_info = '%prog [--prefix PREFIX] [--destdir DESTDIR]'
parser = OptionParser(usage=usage_info)
parser.add_option('--prefix', default='/usr/local', dest='prefix',
help='the installation prefix (default: %default)')
parser.add_option('--destdir', default='', dest='destdir',
help='the destdir (default: %default)')
(options, args) = parser.parse_args(sys.argv)
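# Illustrative invocations (paths are examples):
#   ./install_meson.py                            # install under /usr/local
#   ./install_meson.py --prefix /usr --destdir "$PKGDIR"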
if options.prefix[0] != '/':
print('Error, prefix must be an absolute path.')
sys.exit(1)
if options.destdir == '':
install_root = options.prefix
else:
install_root = os.path.join(options.destdir, options.prefix[1:])
script_dir = os.path.join(install_root, 'share/meson')
bin_dir = os.path.join(install_root, 'bin')
bin_script = os.path.join(script_dir, 'meson.py')
gui_script = os.path.join(script_dir, 'mesongui.py')
conf_script = os.path.join(script_dir, 'mesonconf.py')
bin_name = os.path.join(bin_dir, 'meson')
gui_name = os.path.join(bin_dir, 'mesongui')
conf_name = os.path.join(bin_dir, 'mesonconf')
man_dir = os.path.join(install_root, 'share/man/man1')
in_manfile = 'man/meson.1'
out_manfile = os.path.join(man_dir, 'meson.1.gz')
in_guimanfile = 'man/mesongui.1'
out_guimanfile = os.path.join(man_dir, 'mesongui.1.gz')
in_confmanfile = 'man/mesonconf.1'
out_confmanfile = os.path.join(man_dir, 'mesonconf.1.gz')
symlink_value = os.path.relpath(bin_script, os.path.dirname(bin_name))
guisymlink_value = os.path.relpath(gui_script, os.path.dirname(gui_name))
confsymlink_value = os.path.relpath(conf_script, os.path.dirname(conf_name))
files = glob.glob('*.py')
files += glob.glob('*.ui')
noinstall = ['compile_meson.py', 'install_meson.py', 'run_tests.py', 'run_cross_test.py']
files = [x for x in files if x not in noinstall]
os.makedirs(script_dir, exist_ok=True)
os.makedirs(bin_dir, exist_ok=True)
os.makedirs(man_dir, exist_ok=True)
for f in files:
print('Installing %s to %s.' %(f, script_dir))
outfilename = os.path.join(script_dir, f)
shutil.copyfile(f, outfilename)
shutil.copystat(f, outfilename)
try:
os.remove(bin_name)
except OSError:
pass
print('Creating symlinks %s and %s.' % (bin_name, gui_name))
try:
os.unlink(bin_name)
except FileNotFoundError:
pass
try:
os.unlink(gui_name)
except FileNotFoundError:
pass
try:
os.unlink(conf_name)
except FileNotFoundError:
pass
os.symlink(symlink_value, bin_name)
os.symlink(guisymlink_value, gui_name)
os.symlink(confsymlink_value, conf_name)
print('Installing manfiles to %s.' % man_dir)
open(out_manfile, 'wb').write(gzip.compress(open(in_manfile, 'rb').read()))
open(out_confmanfile, 'wb').write(gzip.compress(open(in_confmanfile, 'rb').read()))
open(out_guimanfile, 'wb').write(gzip.compress(open(in_guimanfile, 'rb').read()))
| apache-2.0 | -5,287,735,341,432,281,000 | 35.029703 | 89 | 0.710085 | false |
Gricha/django-empty | django-empty-auth/newproject/settings.py | 1 | 3305 | """
Django settings for newproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
from newproject.settings_local import (
SECRET_KEY,
DEBUG,
LESSC_PATH,
USE_SYSLOG,
TEMPLATE_DEBUG,
ALLOWED_HOSTS,
COMPRESS_ENABLED,
DATABASES,
ADMINS)
AUTHENTICATION_BACKENDS = (
'newproject.auth_backends.CustomUserModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
'django.core.context_processors.request',)
AUTH_PROFILE_MODULE = 'newproject.apps.account.CustomUser'
CUSTOM_USER_MODEL = 'newproject.apps.account.CustomUser'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'newproject.apps.account',
'newproject.apps.main',
'compressor',
'south',
'registration',
'widget_tweaks',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'newproject.urls'
WSGI_APPLICATION = 'newproject.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
'newproject/templates/',
)
LOGIN_REDIRECT_URL = '/'
COMPRESS_ROOT = os.path.join(BASE_DIR, 'newproject', 'static')
COMPRESS_PRECOMPILERS = (
('text/less', '%s {infile} {outfile}' % LESSC_PATH),
)
| unlicense | 1,989,905,234,812,430,300 | 25.653226 | 75 | 0.722844 | false |
SKIRT/PTS | magic/region/panda.py | 1 | 4117 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.region.panda Contains the PandaRegion class and subclasses.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.coordinates import Angle
from astropy.units import Quantity
# Import the relevant PTS classes and modules
from .region import Region, PixelRegion, SkyRegion, PhysicalRegion
from ..basics.coordinate import PixelCoordinate, SkyCoordinate, PhysicalCoordinate
# -----------------------------------------------------------------
class PandaRegion(Region):
"""
This class ...
"""
def __init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Check the angle
#if not isinstance(angle, Angle): raise ValueError("Angle must be a Astropy Angle object")
# Set the attributes
self.center = center
self.start_angle = start_angle
self.stop_angle = stop_angle
self.nangle = nangle
self.inner = inner
self.outer = outer
self.nradius = nradius
# Call the constructor of the base class
super(PandaRegion, self).__init__(**kwargs)
# -----------------------------------------------------------------
class PixelPandaRegion(PandaRegion, PixelRegion):
"""
This class ...
"""
def __init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs):
"""
This function ...
"""
# Check the start coordinate
#if not isinstance(start, PixelCoordinate): raise ValueError("Start must be pixel coordinate")
# Check the length
#if not isinstance(length, float): raise ValueError("Length must be float")
# Call the constructor of VectorRegion class
PandaRegion.__init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs)
# -----------------------------------------------------------------
class SkyPandaRegion(PandaRegion, SkyRegion):
"""
This class ...
"""
def __init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs):
"""
This function ...
:param start:
:param length:
:param angle:
:param kwargs:
"""
# Check the start coordinate
#if not isinstance(start, SkyCoordinate): raise ValueError("Start must be sky coordinate")
# Check the length
#if not isinstance(length, Quantity): raise ValueError("Length must be an angular quantity")
# Call the constructor of VectorRegion class
PandaRegion.__init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs)
# -----------------------------------------------------------------
class PhysicalPandaRegion(PandaRegion, PhysicalRegion):
"""
This class ...
"""
def __init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs):
"""
This function ...
:param start:
:param length:
:param angle:
:param kwargs:
"""
# Check the start coordinate
#if not isinstance(start, PhysicalCoordinate): raise ValueError("Start must be physical coordinate")
# Check the length
#if not isinstance(length, Quantity): raise ValueError("Length must be a physical quantity of length")
# Call the constructor of VectorRegion class
PandaRegion.__init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs)
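        # Illustrative construction (the coordinate, angle and radii values
        # are made up, and PhysicalCoordinate's exact signature is assumed):
        #
        #     region = PhysicalPandaRegion(
        #         PhysicalCoordinate(0, 0), Angle(10, "deg"),
        #         Angle(120, "deg"), 3, 10.0, 40.0, 4)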
# -----------------------------------------------------------------
| agpl-3.0 | -4,254,293,722,206,895,000 | 30.661538 | 110 | 0.548105 | false |
jor-/matrix-decomposition | setup.py | 1 | 2841 | # Copyright (C) 2017-2018 Joscha Reimer [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A setuptools based setup module.
https://packaging.python.org/en/latest/distributing.html
"""
import setuptools
import os.path
import versioneer_extended
# Get the long description from the README file
readme_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst')
with open(readme_file, mode='r', encoding='utf-8') as f:
long_description = f.read()
# Setup
setuptools.setup(
# general informations
name='matrix-decomposition',
description='This library allows to approximate Hermitian (dense and sparse) matrices by positive definite matrices. Furthermore it allows to decompose (factorize) positive definite matrices and solve associated systems of linear equations.',
long_description=long_description,
keywords='approximation Hermitian dense sparse matrix matrices positive definite decompose factorize decomposition factorization linear equation equations Cholesky',
url='https://github.com/jor-/matrix_decomposition',
author='Joscha Reimer',
author_email='[email protected]',
license='AGPL',
classifiers=[
# Development Status
'Development Status :: 5 - Production/Stable',
# Intended Audience, Topic
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
# Licence (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
# Supported Python versions
'Programming Language :: Python',
],
# version
version=versioneer_extended.get_version(),
cmdclass=versioneer_extended.get_cmdclass(),
# packages to install
packages=setuptools.find_packages(),
# dependencies
python_requires='>=3.7',
setup_requires=[
'setuptools>=0.8',
'pip>=1.4',
],
install_requires=[
'numpy>=1.15',
'scipy>=0.19',
],
extras_require={
'decompose_sparse': ['scikit-sparse>=0.4.2'],
},
)
| agpl-3.0 | -9,103,944,859,064,064,000 | 35.423077 | 246 | 0.699754 | false |
ella/mypage | mypage/pages/migrations/0003_change_default_value_on_site_fk.py | 1 | 3108 |
from south.db import db
from django.conf import settings
from django.db import models
from mypage.pages.models import *
class Migration:
def forwards(self, orm):
# Changing field 'Page.site'
db.alter_column('pages_page', 'site_id', models.ForeignKey(orm['sites.Site'], default= lambda :settings.SITE_ID))
def backwards(self, orm):
# Changing field 'Page.site'
db.alter_column('pages_page', 'site_id', models.ForeignKey(orm['sites.Site']))
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'db_table': "'django_site'"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.widgetinpage': {
'Meta': {'unique_together': "(('page','widget',),)"},
'config_json': ('models.TextField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'page': ('models.ForeignKey', ["orm['pages.Page']"], {'verbose_name': "_('Page')"}),
'rendered_widget': ('models.ForeignKey', ["orm['widgets.RenderedWidget']"], {'null': 'False'}),
'state': ('models.SmallIntegerField', [], {'default': '2'}),
'widget': ('models.ForeignKey', ["orm['widgets.Widget']"], {'verbose_name': "_('Widget')"})
},
'auth.user': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'widgets.widget': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.page': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'layout_json': ('models.TextField', [], {}),
'site': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID'}),
'skin': ('models.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'template': ('models.CharField', [], {'default': "'page.html'", 'max_length': '100'}),
'widgets': ('models.ManyToManyField', ["orm['widgets.Widget']"], {'through': "'WidgetInPage'"})
},
'widgets.renderedwidget': {
'Meta': {'unique_together': "(('widget','state','site',),)"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.userpage': {
'Meta': {'_bases': ['mypage.pages.models.Page']},
'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
'user': ('models.ForeignKey', ["orm['auth.User']"], {'unique': 'True'})
},
'pages.sessionpage': {
'Meta': {'_bases': ['mypage.pages.models.Page']},
'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
'session_key': ('models.CharField', ["_('session key')"], {'unique': 'True', 'max_length': '40'}),
'updated': ('models.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'False'})
}
}
complete_apps = ['pages']
| bsd-3-clause | -1,818,358,948,859,231,000 | 42.774648 | 121 | 0.485521 | false |
bpsinc-native/src_third_party_libjingle_source_talk | PRESUBMIT.py | 2 | 5115 | # libjingle
# Copyright 2013 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# List of files that should not be committed to
DO_NOT_SUBMIT_FILES = [
"talk/media/webrtc/webrtcmediaengine.h",
"talk/media/webrtc/webrtcvideoengine.cc",
"talk/media/webrtc/webrtcvideoengine.h",
"talk/media/webrtc/webrtcvideoengine_unittest.cc"]
def _LicenseHeader(input_api):
"""Returns the license header regexp."""
# Accept any year number from start of project to the current year
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2004, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
years_re = '%s(--%s)?' % (years_re, years_re)
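  # With a current year of e.g. 2014 this accepts any single year from 2004
  # through 2014, as well as ranges written like "2004--2014".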
license_header = (
r'.*? libjingle\n'
r'.*? Copyright %(year)s,? Google Inc\.\n'
r'.*?\n'
r'.*? Redistribution and use in source and binary forms, with or without'
r'\n'
r'.*? modification, are permitted provided that the following conditions '
r'are met:\n'
r'.*?\n'
r'.*? 1\. Redistributions of source code must retain the above copyright '
r'notice,\n'
r'.*? this list of conditions and the following disclaimer\.\n'
r'.*? 2\. Redistributions in binary form must reproduce the above '
r'copyright notice,\n'
r'.*? this list of conditions and the following disclaimer in the '
r'documentation\n'
r'.*? and/or other materials provided with the distribution\.\n'
r'.*? 3\. The name of the author may not be used to endorse or promote '
r'products\n'
r'.*? derived from this software without specific prior written '
r'permission\.\n'
r'.*?\n'
r'.*? THIS SOFTWARE IS PROVIDED BY THE AUTHOR \`\`AS IS\'\' AND ANY '
r'EXPRESS OR IMPLIED\n'
r'.*? WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES '
r'OF\n'
r'.*? MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE '
r'DISCLAIMED\. IN NO\n'
r'.*? EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, '
r'INCIDENTAL,\n'
r'.*? SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \(INCLUDING, '
r'BUT NOT LIMITED TO,\n'
r'.*? PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR '
r'PROFITS;\n'
r'.*? OR BUSINESS INTERRUPTION\) HOWEVER CAUSED AND ON ANY THEORY OF '
r'LIABILITY,\n'
r'.*? WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \(INCLUDING '
r'NEGLIGENCE OR\n'
r'.*? OTHERWISE\) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, '
r'EVEN IF\n'
r'.*? ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\.\n'
) % {
'year': years_re,
}
return license_header
def _ProtectedFiles(input_api, output_api):
results = []
changed_files = []
for f in input_api.AffectedFiles():
changed_files.append(f.LocalPath())
bad_files = list(set(DO_NOT_SUBMIT_FILES) & set(changed_files))
if bad_files:
error_type = output_api.PresubmitError
results.append(error_type(
'The following affected files are only allowed to be updated when '
'importing libjingle',
bad_files))
return results
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.CheckLicense(
input_api, output_api, _LicenseHeader(input_api)))
results.extend(_ProtectedFiles(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
| bsd-3-clause | -1,188,175,575,473,091,600 | 43.094828 | 80 | 0.682502 | false |
EvangelouSotiris/flightradiationcalc | main.py | 1 | 5469 | import time
import requests
##############################################################
############## REQUESTS MANAGEMENT/ LINKS ####################
##############################################################
headers = {'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'}
flight = input('Enter the number of your flight: ')
firstpart = 'https://data-live.flightradar24.com/clickhandler/?version=1.5&flight='
r = requests.get(firstpart + flight, headers=headers)
jsoned_response = r.json()
limit = len(jsoned_response['trail'])  # module-level; read by the functions below
##############################################################
################### INITIALISATIONS ##########################
##############################################################
each_derivative = [None] * (limit - 1)
risingSpeed = [None]*limit
enRouteAlts = [0]*12
enRouteTimestamps = [0]*12
enRouteCounter = 0 #position on enRoute matrixes
possibleLastTimestamp = 0 #last timestamp of enroute flight - start of descension
y = [None] * (limit) #initialisation y-altitude , x-time and dy/dx derivative
x = [None] * (limit)
first_time_derivative_flag = 0
first_time_derivative_zero_flag = 0
##############################################################
################# MAIN PROGRAM/ LOOPS ########################
##############################################################
## Getting requests - heights , timestamps , and alt changing speed measurement ## needed in functions ##
for i in range(0, limit):
    y[i] = jsoned_response['trail'][limit - 1 - i]['alt']  # altitude value
    if y[i] is None:
        print('y is none in ' + str(i))
        break
    x[i] = jsoned_response['trail'][limit - 1 - i]['ts']  # timestamp value
    if x[i] is None:
        print('x is none in ' + str(i))
        break  # a None x or y value cannot be used
    if i > 0 and x[i - 1] is not None and y[i - 1] is not None:
        # derivative = rate of altitude change between successive samples
        each_derivative[i - 1] = (float(y[i] - y[i - 1])
                                  / float(x[i] - x[i - 1]))
        print(x[i])
        print(y[i])
        print(each_derivative[i - 1])
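# Worked example: a climb from 1,000 ft to 1,480 ft over 60 s gives a
# derivative of (1480 - 1000) / 60 = 8 ft/s, inside the (0, 10) band that
# get_ascension_point() below treats as steady ascension.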
## The ascension/descension points are computed below, after the function
## definitions (calling the functions up here would raise a NameError).
##############################################################
################### FUNCTIONS ################################
##############################################################
## Functions for ascension and descension points
def get_ascension_point(jsoned_response):
    """Return the index where the initial climb levels off, or None."""
    counter_ascend = 0  # successive climbing samples seen so far
    in_ascension = False
    for i in range(0, limit - 1):  # each_derivative has limit - 1 entries
        if each_derivative[i] is not None and 0 < each_derivative[i] < 10:
            counter_ascend = counter_ascend + 1
            print("counter_ascend = ", counter_ascend)
        else:
            counter_ascend = 0
        if counter_ascend >= 15:  # 15+ successive climbing points => ascension
            in_ascension = True
        if (in_ascension and i + 2 < limit - 1
                and None not in each_derivative[i:i + 3]
                and max(each_derivative[i:i + 3]) < 5):
            print("snap_ascend")  # the climb has levelled off here
            return i
    return None
def get_descension_point(jsoned_response):
    """Return the index where the final descent begins, or None."""
    counter_descend = 0
    possible_descension_point = None
    for i in range(0, limit - 1):  # each_derivative has limit - 1 entries
        if (each_derivative[i] is not None
                and -10 < each_derivative[i] < 0
                and y[i] > 18000):  # descending while still at cruise altitude
            if counter_descend == 0:
                possible_descension_point = i
            counter_descend = counter_descend + 1
            print("descend = ", counter_descend)
        else:
            counter_descend = 0
            possible_descension_point = None
        if counter_descend >= 15:  # sustained descent => descension stage
            print("snap_descend")
            return possible_descension_point
    return None

## Getting the response points where ascension ends and descension starts
ascensionFinishPoint = get_ascension_point(jsoned_response)
descensionStartingPoint = get_descension_point(jsoned_response)
##############################################################
############### OLD COMMITS/MAYBE USEFUL #####################
##############################################################
##ARTIFACT
######## EN ROUTE STAGE
# if (each_derivative>-5 and each_derivative<5): #En route stage of flight #######CHANGEABLE
# counter_ascend = 0
# counter_descend = 0
# # print ("snap_enroute")
# if (enRouteAlts[enRouteCounter] == 0): #1st time into en route stage
# enRouteAlts[enRouteCounter] = y[i]
# enRouteTimestamps[enRouteCounter] = x[i] #x1 time airplane got into that altitude
# if (abs(y[i]-enRouteAlts[enRouteCounter])>1000): #more than 1000 feet is considered another en route alt #######CHANGEABLE
# enRouteTimestamps[enRouteCounter] = x[i]-enRouteTimestamps[enRouteCounter] #x2-x1 time airplane stayed into former alt
# enRouteCounter = enRouteCounter + 1 #next altitude/timestamp matrix pos
# enRouteAlts[enRouteCounter] = y[i] #new alt
# enRouteTimestamps[enRouteCounter] = x[i] #x1 timestamp of new alt
| gpl-3.0 | -8,451,631,178,383,616,000 | 41.069231 | 135 | 0.588956 | false |
KSG-IT/ksg-nett | api/serializers.py | 1 | 4959 | from django.conf import settings
from rest_framework import serializers
from rest_framework_simplejwt.serializers import TokenObtainSlidingSerializer
from api.exceptions import InsufficientFundsException, NoSociSessionError
from economy.models import SociProduct, ProductOrder, SociSession, SociBankAccount
class CustomTokenObtainSlidingSerializer(TokenObtainSlidingSerializer):
"""
Overridden so we can obtain a token for a user based only on the card uuid.
"""
username_field = "card_uuid"
def __init__(self, *args, **kwargs):
"""
Overridden from `TokenObtainSerializer` since this adds a required
field `password` to the serializer that we don't need.
"""
super().__init__(*args, **kwargs)
del self.fields['password']
def validate(self, attrs):
"""
Overridden from `TokenObtainSlidingSerializer` since
this expects a username and password to be supplied.
"""
data = {}
token = self.get_token(self.context['request'].user)
data['token'] = str(token)
return data
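        # Wired to a token view, a card-reader client might POST a body like
        # {"card_uuid": "<uuid read from the card>"} and receive
        # {"token": "<sliding JWT>"} back; the exact framing depends on the
        # view configuration.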
# ===============================
# ECONOMY
# ===============================
from sensors.consts import MEASUREMENT_TYPE_CHOICES
from sensors.models import SensorMeasurement
class SociProductSerializer(serializers.Serializer):
sku_number = serializers.CharField(read_only=True, label="Product SKU number")
name = serializers.CharField(read_only=True, label="Product name")
price = serializers.IntegerField(read_only=True, label="Product price in NOK")
description = serializers.CharField(read_only=True, allow_blank=True, allow_null=True, label="Product description",
help_text="Returns `null` if no description exists")
icon = serializers.CharField(read_only=True, label="Product icon descriptor")
class CheckBalanceSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True, label="This soci bank account ID")
    user = serializers.CharField(source='user.get_full_name', read_only=True, label="User's full name")
balance = serializers.IntegerField(read_only=True, label="Balance in NOK",
help_text="Should not be displayed publicly")
class ChargeSociBankAccountDeserializer(serializers.Serializer):
sku = serializers.CharField(label="Product SKU number to charge for")
order_size = serializers.IntegerField(default=1, required=False, label="Order size for this product",
help_text="Defaults to 1 if not supplied")
@staticmethod
def validate_sku(value):
if not SociProduct.objects.filter(sku_number=value).exists():
raise serializers.ValidationError('SKU number is invalid.')
return value
@staticmethod
def validate_order_size(value):
if value <= 0:
raise serializers.ValidationError('Order size must be positive.')
return value
def validate(self, attrs):
if attrs['sku'] != settings.DIRECT_CHARGE_SKU:
attrs['amount'] = SociProduct.objects.get(sku_number=attrs['sku']).price
else:
attrs['amount'] = 1
self.context['total'] += attrs['amount'] * attrs['order_size']
if self.context['total'] > self.context['soci_bank_account'].balance:
raise InsufficientFundsException()
if SociSession.get_active_session() is None:
raise NoSociSessionError()
return attrs
def create(self, validated_data):
product_order = ProductOrder.objects.create(
product=SociProduct.objects.get(sku_number=validated_data.pop('sku')), **validated_data
)
return product_order
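# Hedged sketch (not from the original codebase) of driving this deserializer:
# validate() reads 'total' and 'soci_bank_account' from the serializer context,
# so the caller must seed both. 'account' and the payload shape below are
# illustrative assumptions only.
#   context = {'total': 0, 'soci_bank_account': account}
#   for entry in request.data:  # e.g. [{'sku': '42', 'order_size': 2}]
#       serializer = ChargeSociBankAccountDeserializer(data=entry, context=context)
#       serializer.is_valid(raise_exception=True)
#       serializer.save(source=account)  # assumes ProductOrder has a 'source' FK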
class PurchaseSerializer(serializers.Serializer):
    amount_charged = serializers.IntegerField(read_only=True, source='total_amount',
                                              label="Amount that was charged from user's Soci account")
    amount_remaining = serializers.IntegerField(read_only=True, source='source.balance',
                                                label="Remaining balance in user's Soci account",
                                                help_text="Should not be displayed publicly")
products_purchased = serializers.ListField(read_only=True, child=serializers.CharField(),
help_text="The products that were purchased")
class SensorMeasurementSerializer(serializers.Serializer):
type = serializers.ChoiceField(
choices=MEASUREMENT_TYPE_CHOICES,
label="The type of measurement.",
)
value = serializers.FloatField(
label="The value of the measurement",
)
created_at = serializers.DateTimeField(
label="The time of the measurement",
)
def create(self, validated_data):
return SensorMeasurement.objects.create(**validated_data)
| gpl-3.0 | 6,613,007,558,135,177,000 | 36.545455 | 119 | 0.650525 | false |
fordcars/SDL3D | tools/Frameworkify/frameworkify.py | 1 | 3860 | #!/usr/bin/env python -S
# -*- coding: utf-8 -*-
r"""
frameworkify
~~~~~~~~~~~~
A small command line tool that can rewrite the paths to dynamic
loaded libraries in .dylib files so that they reference other
paths. By default it will rewrite the path so that it points to
the bundle's Frameworks folder. This can be paired with a CMake
post build action to make proper bundles without having to
recompile a bunch of dylibs to reference the framework.
Usage::
$ frameworkify.py MyApplication.app/Contents/MacOS/MyApplication \
> /path/to/mylib.dylib
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from optparse import OptionParser
def find_bundle(executable):
executable = os.path.abspath(executable)
if not os.path.isfile(executable):
raise RuntimeError('Executable does not exist')
folder, exe_name = os.path.split(executable)
content_path, folder = os.path.split(folder)
if folder != 'MacOS':
raise RuntimeError('Executable not located inside a bundle')
return content_path
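# Illustration with a hypothetical path: find_bundle('MyApp.app/Contents/MacOS/MyApp')
# returns 'MyApp.app/Contents', the directory that Frameworks/ is created under.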
def find_baked_dylibs(executable):
from subprocess import Popen, PIPE
c = Popen(['otool', '-L', executable], stdout=PIPE)
lines = c.communicate()[0].splitlines()
return [x.strip().split(' (')[0] for x in lines[1:]]
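# Example of the `otool -L` output this parses (hypothetical paths):
#   MyApp:
#           /usr/local/lib/libfoo.1.dylib (compatibility version 1.0.0, current version 1.2.0)
#           /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1226.10.1)
# which this function reduces to
#   ['/usr/local/lib/libfoo.1.dylib', '/usr/lib/libSystem.B.dylib']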
def find_matching_dylib(dylibs, basename):
lbasename = basename.lower()
for dylib in dylibs:
if os.path.basename(dylib).lower() == lbasename:
return dylib
def rewrite_path(executable, old, new):
from subprocess import Popen
Popen(['install_name_tool', '-change', old, new, executable]).wait()
def copy_to_framework(bundle_path, filename, target_name):
from shutil import copy2
framework_path = os.path.join(bundle_path, 'Frameworks')
if not os.path.isdir(framework_path):
os.mkdir(framework_path)
copy2(filename, os.path.join(framework_path, target_name))
def perform_rewrite_operation(rewrites, executable, bundle_path, copy=True):
for old_path, new_path, dylib_path in rewrites:
rewrite_path(executable, old_path, new_path)
if copy:
copy_to_framework(bundle_path, dylib_path,
os.path.basename(new_path))
def frameworkify(executable, dylibs, nocopy, path):
bundle = find_bundle(executable)
baked_dylibs = find_baked_dylibs(executable)
def _make_new_path(dylib_name):
if path:
return os.path.join(path, dylib_name)
return '@executable_path/../Frameworks/' + dylib_name
rewrites = []
for dylib in dylibs:
dylib_name = os.path.basename(dylib)
dylib_path_match = find_matching_dylib(baked_dylibs, dylib_name)
if dylib_path_match is None:
raise Exception('dylib "%s" is not referenced by "%s"' % (
dylib_name,
executable
))
rewrites.append((dylib_path_match, _make_new_path(dylib_name), dylib))
perform_rewrite_operation(rewrites, executable, bundle, not nocopy)
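# Hedged usage sketch mirroring the module docstring (hypothetical paths):
#   frameworkify('MyApp.app/Contents/MacOS/MyApp',
#                ['/usr/local/lib/libfoo.dylib'], nocopy=False, path=None)
# rewrites the libfoo install name to @executable_path/../Frameworks/libfoo.dylib
# and copies the dylib into the bundle's Contents/Frameworks folder.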
def main():
parser = OptionParser()
parser.add_option('-p', '--path', dest='path', metavar='PATH',
help='alternative path to dylib')
parser.add_option('-C', '--nocopy', dest='nocopy', action='store_true',
help='don\'t copy dylib to framework folder')
opts, args = parser.parse_args()
if len(args) < 2:
parser.error('Not enough arguments: executable and a list of dylibs')
if opts.path and not opts.nocopy:
parser.error('Path combined with copy operation is not supported')
try:
frameworkify(args[0], args[1:], opts.nocopy, opts.path)
    except Exception as e:
parser.error(str(e))
sys.exit(1)
if __name__ == '__main__':
main() | gpl-3.0 | -5,696,709,541,733,503,000 | 31.720339 | 78 | 0.644041 | false |
william-richard/moto | tests/test_ec2/test_instances.py | 1 | 62435 | from __future__ import unicode_literals
from botocore.exceptions import ClientError
import pytest
from unittest import SkipTest
import base64
import ipaddress
import six
import boto
import boto3
from boto.ec2.instance import Reservation, InstanceAttribute
from boto.exception import EC2ResponseError
from freezegun import freeze_time
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2, settings
from tests import EXAMPLE_AMI_ID
from tests.helpers import requires_boto_gte
if six.PY2:
decode_method = base64.decodestring
else:
decode_method = base64.decodebytes
################ Test Readme ###############
def add_servers(ami_id, count):
conn = boto.connect_ec2()
for index in range(count):
conn.run_instances(ami_id)
@mock_ec2_deprecated
def test_add_servers():
add_servers(EXAMPLE_AMI_ID, 2)
conn = boto.connect_ec2()
reservations = conn.get_all_reservations()
assert len(reservations) == 2
instance1 = reservations[0].instances[0]
assert instance1.image_id == EXAMPLE_AMI_ID
############################################
@freeze_time("2014-01-01 05:00:00")
@mock_ec2_deprecated
def test_instance_launch_and_terminate():
conn = boto.ec2.connect_to_region("us-east-1")
with pytest.raises(EC2ResponseError) as ex:
reservation = conn.run_instances(EXAMPLE_AMI_ID, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set"
)
reservation = conn.run_instances(EXAMPLE_AMI_ID)
reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1)
instance = reservation.instances[0]
instance.state.should.equal("pending")
reservations = conn.get_all_reservations()
reservations.should.have.length_of(1)
reservations[0].id.should.equal(reservation.id)
instances = reservations[0].instances
instances.should.have.length_of(1)
instance = instances[0]
instance.id.should.equal(instance.id)
instance.state.should.equal("running")
instance.launch_time.should.equal("2014-01-01T05:00:00.000Z")
instance.vpc_id.shouldnt.equal(None)
instance.placement.should.equal("us-east-1a")
root_device_name = instance.root_device_name
instance.block_device_mapping[root_device_name].status.should.equal("in-use")
volume_id = instance.block_device_mapping[root_device_name].volume_id
volume_id.should.match(r"vol-\w+")
volume = conn.get_all_volumes(volume_ids=[volume_id])[0]
volume.attach_data.instance_id.should.equal(instance.id)
volume.status.should.equal("in-use")
with pytest.raises(EC2ResponseError) as ex:
conn.terminate_instances([instance.id], dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set"
)
conn.terminate_instances([instance.id])
reservations = conn.get_all_reservations()
instance = reservations[0].instances[0]
instance.state.should.equal("terminated")
@mock_ec2
def test_instance_terminate_discard_volumes():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[
{
"DeviceName": "/dev/sda1",
"Ebs": {"VolumeSize": 50, "DeleteOnTermination": True},
}
],
)
instance = result[0]
instance_volume_ids = []
for volume in instance.volumes.all():
instance_volume_ids.append(volume.volume_id)
instance.terminate()
instance.wait_until_terminated()
assert not list(ec2_resource.volumes.all())
@mock_ec2
def test_instance_terminate_keep_volumes_explicit():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[
{
"DeviceName": "/dev/sda1",
"Ebs": {"VolumeSize": 50, "DeleteOnTermination": False},
}
],
)
instance = result[0]
instance_volume_ids = []
for volume in instance.volumes.all():
instance_volume_ids.append(volume.volume_id)
instance.terminate()
instance.wait_until_terminated()
assert len(list(ec2_resource.volumes.all())) == 1
@mock_ec2
def test_instance_terminate_keep_volumes_implicit():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}],
)
instance = result[0]
instance_volume_ids = []
for volume in instance.volumes.all():
instance_volume_ids.append(volume.volume_id)
instance.terminate()
instance.wait_until_terminated()
assert len(instance_volume_ids) == 1
volume = ec2_resource.Volume(instance_volume_ids[0])
volume.state.should.equal("available")
@mock_ec2
def test_instance_terminate_detach_volumes():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[
{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},
{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}},
],
)
instance = result[0]
for volume in instance.volumes.all():
response = instance.detach_volume(VolumeId=volume.volume_id)
response["State"].should.equal("detaching")
instance.terminate()
instance.wait_until_terminated()
assert len(list(ec2_resource.volumes.all())) == 2
@mock_ec2
def test_instance_detach_volume_wrong_path():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},],
)
instance = result[0]
for volume in instance.volumes.all():
with pytest.raises(ClientError) as ex:
instance.detach_volume(VolumeId=volume.volume_id, Device="/dev/sdf")
ex.value.response["Error"]["Code"].should.equal("InvalidAttachment.NotFound")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["Error"]["Message"].should.equal(
"The volume {0} is not attached to instance {1} as device {2}".format(
volume.volume_id, instance.instance_id, "/dev/sdf"
)
)
@mock_ec2_deprecated
def test_terminate_empty_instances():
conn = boto.connect_ec2("the_key", "the_secret")
conn.terminate_instances.when.called_with([]).should.throw(EC2ResponseError)
@freeze_time("2014-01-01 05:00:00")
@mock_ec2_deprecated
def test_instance_attach_volume():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
vol1 = conn.create_volume(size=36, zone=conn.region.name)
vol1.attach(instance.id, "/dev/sda1")
vol1.update()
vol2 = conn.create_volume(size=65, zone=conn.region.name)
vol2.attach(instance.id, "/dev/sdb1")
vol2.update()
vol3 = conn.create_volume(size=130, zone=conn.region.name)
vol3.attach(instance.id, "/dev/sdc1")
vol3.update()
reservations = conn.get_all_reservations()
instance = reservations[0].instances[0]
instance.block_device_mapping.should.have.length_of(3)
for v in conn.get_all_volumes(
volume_ids=[instance.block_device_mapping["/dev/sdc1"].volume_id]
):
v.attach_data.instance_id.should.equal(instance.id)
        # attach_time equals launch_time because freeze_time pins the clock.
        v.attach_data.attach_time.should.equal(instance.launch_time)
        # create_time equals launch_time because freeze_time pins the clock.
        v.create_time.should.equal(instance.launch_time)
v.region.name.should.equal(instance.region.name)
v.status.should.equal("in-use")
@mock_ec2_deprecated
def test_get_instances_by_id():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=2)
instance1, instance2 = reservation.instances
reservations = conn.get_all_reservations(instance_ids=[instance1.id])
reservations.should.have.length_of(1)
reservation = reservations[0]
reservation.instances.should.have.length_of(1)
reservation.instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_reservations(instance_ids=[instance1.id, instance2.id])
reservations.should.have.length_of(1)
reservation = reservations[0]
reservation.instances.should.have.length_of(2)
instance_ids = [instance.id for instance in reservation.instances]
instance_ids.should.equal([instance1.id, instance2.id])
# Call get_all_reservations with a bad id should raise an error
with pytest.raises(EC2ResponseError) as cm:
conn.get_all_reservations(instance_ids=[instance1.id, "i-1234abcd"])
cm.value.code.should.equal("InvalidInstanceID.NotFound")
cm.value.status.should.equal(400)
cm.value.request_id.should_not.be.none
@mock_ec2
def test_get_paginated_instances():
client = boto3.client("ec2", region_name="us-east-1")
conn = boto3.resource("ec2", "us-east-1")
for i in range(100):
conn.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
resp = client.describe_instances(MaxResults=50)
reservations = resp["Reservations"]
reservations.should.have.length_of(50)
next_token = resp["NextToken"]
next_token.should_not.be.none
resp2 = client.describe_instances(NextToken=next_token)
reservations.extend(resp2["Reservations"])
reservations.should.have.length_of(100)
assert "NextToken" not in resp2.keys()
@mock_ec2
def test_create_with_tags():
ec2 = boto3.client("ec2", region_name="us-west-2")
instances = ec2.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [
{"Key": "MY_TAG1", "Value": "MY_VALUE1"},
{"Key": "MY_TAG2", "Value": "MY_VALUE2"},
],
},
{
"ResourceType": "instance",
"Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE3"}],
},
],
)
assert "Tags" in instances["Instances"][0]
len(instances["Instances"][0]["Tags"]).should.equal(3)
@mock_ec2_deprecated
def test_get_instances_filtering_by_state():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
conn.terminate_instances([instance1.id])
reservations = conn.get_all_reservations(filters={"instance-state-name": "running"})
reservations.should.have.length_of(1)
# Since we terminated instance1, only instance2 and instance3 should be
# returned
instance_ids = [instance.id for instance in reservations[0].instances]
set(instance_ids).should.equal(set([instance2.id, instance3.id]))
reservations = conn.get_all_reservations(
[instance2.id], filters={"instance-state-name": "running"}
)
reservations.should.have.length_of(1)
instance_ids = [instance.id for instance in reservations[0].instances]
instance_ids.should.equal([instance2.id])
reservations = conn.get_all_reservations(
[instance2.id], filters={"instance-state-name": "terminated"}
)
list(reservations).should.equal([])
# get_all_reservations should still return all 3
reservations = conn.get_all_reservations()
reservations[0].instances.should.have.length_of(3)
conn.get_all_reservations.when.called_with(
filters={"not-implemented-filter": "foobar"}
).should.throw(NotImplementedError)
@mock_ec2_deprecated
def test_get_instances_filtering_by_instance_id():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
reservations = conn.get_all_reservations(filters={"instance-id": instance1.id})
# get_all_reservations should return just instance1
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_reservations(
filters={"instance-id": [instance1.id, instance2.id]}
)
# get_all_reservations should return two
reservations[0].instances.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"instance-id": "non-existing-id"})
reservations.should.have.length_of(0)
@mock_ec2_deprecated
def test_get_instances_filtering_by_instance_type():
conn = boto.connect_ec2()
reservation1 = conn.run_instances(EXAMPLE_AMI_ID, instance_type="m1.small")
instance1 = reservation1.instances[0]
reservation2 = conn.run_instances(EXAMPLE_AMI_ID, instance_type="m1.small")
instance2 = reservation2.instances[0]
reservation3 = conn.run_instances(EXAMPLE_AMI_ID, instance_type="t1.micro")
instance3 = reservation3.instances[0]
reservations = conn.get_all_reservations(filters={"instance-type": "m1.small"})
# get_all_reservations should return instance1,2
reservations.should.have.length_of(2)
reservations[0].instances.should.have.length_of(1)
reservations[1].instances.should.have.length_of(1)
instance_ids = [reservations[0].instances[0].id, reservations[1].instances[0].id]
set(instance_ids).should.equal(set([instance1.id, instance2.id]))
reservations = conn.get_all_reservations(filters={"instance-type": "t1.micro"})
# get_all_reservations should return one
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance3.id)
reservations = conn.get_all_reservations(
filters={"instance-type": ["t1.micro", "m1.small"]}
)
reservations.should.have.length_of(3)
reservations[0].instances.should.have.length_of(1)
reservations[1].instances.should.have.length_of(1)
reservations[2].instances.should.have.length_of(1)
instance_ids = [
reservations[0].instances[0].id,
reservations[1].instances[0].id,
reservations[2].instances[0].id,
]
set(instance_ids).should.equal(set([instance1.id, instance2.id, instance3.id]))
reservations = conn.get_all_reservations(filters={"instance-type": "bogus"})
# bogus instance-type should return none
reservations.should.have.length_of(0)
@mock_ec2_deprecated
def test_get_instances_filtering_by_reason_code():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.stop()
instance2.terminate()
reservations = conn.get_all_reservations(
filters={"state-reason-code": "Client.UserInitiatedShutdown"}
)
# get_all_reservations should return instance1 and instance2
reservations[0].instances.should.have.length_of(2)
set([instance1.id, instance2.id]).should.equal(
set([i.id for i in reservations[0].instances])
)
reservations = conn.get_all_reservations(filters={"state-reason-code": ""})
# get_all_reservations should return instance 3
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance3.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_source_dest_check():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=2)
instance1, instance2 = reservation.instances
conn.modify_instance_attribute(
instance1.id, attribute="sourceDestCheck", value=False
)
source_dest_check_false = conn.get_all_reservations(
filters={"source-dest-check": "false"}
)
source_dest_check_true = conn.get_all_reservations(
filters={"source-dest-check": "true"}
)
source_dest_check_false[0].instances.should.have.length_of(1)
source_dest_check_false[0].instances[0].id.should.equal(instance1.id)
source_dest_check_true[0].instances.should.have.length_of(1)
source_dest_check_true[0].instances[0].id.should.equal(instance2.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_vpc_id():
conn = boto.connect_vpc("the_key", "the_secret")
vpc1 = conn.create_vpc("10.0.0.0/16")
subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27")
reservation1 = conn.run_instances(EXAMPLE_AMI_ID, min_count=1, subnet_id=subnet1.id)
instance1 = reservation1.instances[0]
vpc2 = conn.create_vpc("10.1.0.0/16")
subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27")
reservation2 = conn.run_instances(EXAMPLE_AMI_ID, min_count=1, subnet_id=subnet2.id)
instance2 = reservation2.instances[0]
reservations1 = conn.get_all_reservations(filters={"vpc-id": vpc1.id})
reservations1.should.have.length_of(1)
reservations1[0].instances.should.have.length_of(1)
reservations1[0].instances[0].id.should.equal(instance1.id)
reservations1[0].instances[0].vpc_id.should.equal(vpc1.id)
reservations1[0].instances[0].subnet_id.should.equal(subnet1.id)
reservations2 = conn.get_all_reservations(filters={"vpc-id": vpc2.id})
reservations2.should.have.length_of(1)
reservations2[0].instances.should.have.length_of(1)
reservations2[0].instances[0].id.should.equal(instance2.id)
reservations2[0].instances[0].vpc_id.should.equal(vpc2.id)
reservations2[0].instances[0].subnet_id.should.equal(subnet2.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_architecture():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=1)
    instances = reservation.instances
reservations = conn.get_all_reservations(filters={"architecture": "x86_64"})
# get_all_reservations should return the instance
reservations[0].instances.should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_image_id():
client = boto3.client("ec2", region_name="us-east-1")
conn = boto3.resource("ec2", "us-east-1")
conn.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
reservations = client.describe_instances(
Filters=[{"Name": "image-id", "Values": [EXAMPLE_AMI_ID]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_account_id():
client = boto3.client("ec2", region_name="us-east-1")
conn = boto3.resource("ec2", "us-east-1")
conn.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
reservations = client.describe_instances(
Filters=[{"Name": "owner-id", "Values": ["123456789012"]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_private_dns():
client = boto3.client("ec2", region_name="us-east-1")
conn = boto3.resource("ec2", "us-east-1")
conn.create_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1, PrivateIpAddress="10.0.0.1"
)
reservations = client.describe_instances(
Filters=[{"Name": "private-dns-name", "Values": ["ip-10-0-0-1.ec2.internal"]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_ni_private_dns():
client = boto3.client("ec2", region_name="us-west-2")
conn = boto3.resource("ec2", "us-west-2")
conn.create_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1, PrivateIpAddress="10.0.0.1"
)
reservations = client.describe_instances(
Filters=[
{
"Name": "network-interface.private-dns-name",
"Values": ["ip-10-0-0-1.us-west-2.compute.internal"],
}
]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_instance_group_name():
client = boto3.client("ec2", region_name="us-east-1")
client.create_security_group(Description="test", GroupName="test_sg")
client.run_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1, SecurityGroups=["test_sg"]
)
reservations = client.describe_instances(
Filters=[{"Name": "instance.group-name", "Values": ["test_sg"]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_instance_group_id():
client = boto3.client("ec2", region_name="us-east-1")
create_sg = client.create_security_group(Description="test", GroupName="test_sg")
group_id = create_sg["GroupId"]
client.run_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1, SecurityGroups=["test_sg"]
)
reservations = client.describe_instances(
Filters=[{"Name": "instance.group-id", "Values": [group_id]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_subnet_id():
client = boto3.client("ec2", region_name="us-east-1")
vpc_cidr = ipaddress.ip_network("192.168.42.0/24")
subnet_cidr = ipaddress.ip_network("192.168.42.0/25")
resp = client.create_vpc(CidrBlock=str(vpc_cidr),)
vpc_id = resp["Vpc"]["VpcId"]
resp = client.create_subnet(CidrBlock=str(subnet_cidr), VpcId=vpc_id)
subnet_id = resp["Subnet"]["SubnetId"]
client.run_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1, SubnetId=subnet_id,
)
reservations = client.describe_instances(
Filters=[{"Name": "subnet-id", "Values": [subnet_id]}]
)["Reservations"]
reservations.should.have.length_of(1)
@mock_ec2_deprecated
def test_get_instances_filtering_by_tag():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag("tag1", "value1")
instance1.add_tag("tag2", "value2")
instance2.add_tag("tag1", "value1")
instance2.add_tag("tag2", "wrong value")
instance3.add_tag("tag2", "value2")
reservations = conn.get_all_reservations(filters={"tag:tag0": "value0"})
# get_all_reservations should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_reservations(filters={"tag:tag1": "value1"})
# get_all_reservations should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_reservations(
filters={"tag:tag1": "value1", "tag:tag2": "value2"}
)
# get_all_reservations should return the instance with both tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_reservations(
filters={"tag:tag1": "value1", "tag:tag2": "value2"}
)
# get_all_reservations should return the instance with both tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_reservations(filters={"tag:tag2": ["value2", "bogus"]})
# get_all_reservations should return both instances with one of the
# acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance3.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_tag_value():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag("tag1", "value1")
instance1.add_tag("tag2", "value2")
instance2.add_tag("tag1", "value1")
instance2.add_tag("tag2", "wrong value")
instance3.add_tag("tag2", "value2")
reservations = conn.get_all_reservations(filters={"tag-value": "value0"})
# get_all_reservations should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_reservations(filters={"tag-value": "value1"})
# get_all_reservations should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_reservations(
filters={"tag-value": ["value2", "value1"]}
)
    # get_all_reservations should return all three instances since each has
    # one of the acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(3)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations[0].instances[2].id.should.equal(instance3.id)
reservations = conn.get_all_reservations(filters={"tag-value": ["value2", "bogus"]})
# get_all_reservations should return both instances with one of the
# acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance3.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_tag_name():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag("tag1")
instance1.add_tag("tag2")
instance2.add_tag("tag1")
instance2.add_tag("tag2X")
instance3.add_tag("tag3")
reservations = conn.get_all_reservations(filters={"tag-key": "tagX"})
# get_all_reservations should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_reservations(filters={"tag-key": "tag1"})
# get_all_reservations should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_reservations(filters={"tag-key": ["tag1", "tag3"]})
    # get_all_reservations should return all three instances since each has
    # one of the acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(3)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations[0].instances[2].id.should.equal(instance3.id)
@mock_ec2_deprecated
def test_instance_start_and_stop():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=2)
instances = reservation.instances
instances.should.have.length_of(2)
instance_ids = [instance.id for instance in instances]
with pytest.raises(EC2ResponseError) as ex:
stopped_instances = conn.stop_instances(instance_ids, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set"
)
stopped_instances = conn.stop_instances(instance_ids)
for instance in stopped_instances:
instance.state.should.equal("stopping")
with pytest.raises(EC2ResponseError) as ex:
started_instances = conn.start_instances([instances[0].id], dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set"
)
started_instances = conn.start_instances([instances[0].id])
started_instances[0].state.should.equal("pending")
@mock_ec2_deprecated
def test_instance_reboot():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.reboot(dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set"
)
instance.reboot()
instance.state.should.equal("pending")
@mock_ec2_deprecated
def test_instance_attribute_instance_type():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.modify_attribute("instanceType", "m1.small", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set"
)
instance.modify_attribute("instanceType", "m1.small")
instance_attribute = instance.get_attribute("instanceType")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("instanceType").should.equal("m1.small")
@mock_ec2_deprecated
def test_modify_instance_attribute_security_groups():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
sg_id = conn.create_security_group(
"test security group", "this is a test security group"
).id
sg_id2 = conn.create_security_group(
"test security group 2", "this is a test security group 2"
).id
with pytest.raises(EC2ResponseError) as ex:
instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set"
)
instance.modify_attribute("groupSet", [sg_id, sg_id2])
instance_attribute = instance.get_attribute("groupSet")
instance_attribute.should.be.a(InstanceAttribute)
group_list = instance_attribute.get("groupSet")
any(g.id == sg_id for g in group_list).should.be.ok
any(g.id == sg_id2 for g in group_list).should.be.ok
@mock_ec2_deprecated
def test_instance_attribute_user_data():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.modify_attribute("userData", "this is my user data", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set"
)
instance.modify_attribute("userData", "this is my user data")
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("userData").should.equal("this is my user data")
@mock_ec2_deprecated
def test_instance_attribute_source_dest_check():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
# Default value is true
instance.sourceDestCheck.should.equal("true")
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(True)
# Set to false (note: Boto converts bool to string, eg 'false')
with pytest.raises(EC2ResponseError) as ex:
instance.modify_attribute("sourceDestCheck", False, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set"
)
instance.modify_attribute("sourceDestCheck", False)
instance.update()
instance.sourceDestCheck.should.equal("false")
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(False)
# Set back to true
instance.modify_attribute("sourceDestCheck", True)
instance.update()
instance.sourceDestCheck.should.equal("true")
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(True)
@mock_ec2_deprecated
def test_user_data_with_run_instance():
user_data = b"some user data"
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, user_data=user_data)
instance = reservation.instances[0]
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
retrieved_user_data = instance_attribute.get("userData").encode("utf-8")
decoded_user_data = decode_method(retrieved_user_data)
decoded_user_data.should.equal(b"some user data")
@mock_ec2_deprecated
def test_run_instance_with_security_group_name():
conn = boto.connect_ec2("the_key", "the_secret")
with pytest.raises(EC2ResponseError) as ex:
group = conn.create_security_group("group1", "some description", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set"
)
group = conn.create_security_group("group1", "some description")
reservation = conn.run_instances(EXAMPLE_AMI_ID, security_groups=["group1"])
instance = reservation.instances[0]
instance.groups[0].id.should.equal(group.id)
instance.groups[0].name.should.equal("group1")
@mock_ec2_deprecated
def test_run_instance_with_security_group_id():
conn = boto.connect_ec2("the_key", "the_secret")
group = conn.create_security_group("group1", "some description")
reservation = conn.run_instances(EXAMPLE_AMI_ID, security_group_ids=[group.id])
instance = reservation.instances[0]
instance.groups[0].id.should.equal(group.id)
instance.groups[0].name.should.equal("group1")
@mock_ec2_deprecated
def test_run_instance_with_instance_type():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, instance_type="t1.micro")
instance = reservation.instances[0]
instance.instance_type.should.equal("t1.micro")
@mock_ec2_deprecated
def test_run_instance_with_default_placement():
conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.placement.should.equal("us-east-1a")
@mock_ec2_deprecated
def test_run_instance_with_placement():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, placement="us-east-1b")
instance = reservation.instances[0]
instance.placement.should.equal("us-east-1b")
@mock_ec2
def test_run_instance_with_subnet_boto3():
client = boto3.client("ec2", region_name="eu-central-1")
ip_networks = [
(ipaddress.ip_network("10.0.0.0/16"), ipaddress.ip_network("10.0.99.0/24")),
(
ipaddress.ip_network("192.168.42.0/24"),
ipaddress.ip_network("192.168.42.0/25"),
),
]
    # Test that instances are created with the correct IPs
for vpc_cidr, subnet_cidr in ip_networks:
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy="default",
)
vpc_id = resp["Vpc"]["VpcId"]
resp = client.create_subnet(CidrBlock=str(subnet_cidr), VpcId=vpc_id)
subnet_id = resp["Subnet"]["SubnetId"]
resp = client.run_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1, SubnetId=subnet_id
)
instance = resp["Instances"][0]
instance["SubnetId"].should.equal(subnet_id)
priv_ipv4 = ipaddress.ip_address(six.text_type(instance["PrivateIpAddress"]))
subnet_cidr.should.contain(priv_ipv4)
@mock_ec2
def test_run_instance_with_specified_private_ipv4():
client = boto3.client("ec2", region_name="eu-central-1")
vpc_cidr = ipaddress.ip_network("192.168.42.0/24")
subnet_cidr = ipaddress.ip_network("192.168.42.0/25")
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy="default",
)
vpc_id = resp["Vpc"]["VpcId"]
resp = client.create_subnet(CidrBlock=str(subnet_cidr), VpcId=vpc_id)
subnet_id = resp["Subnet"]["SubnetId"]
resp = client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MaxCount=1,
MinCount=1,
SubnetId=subnet_id,
PrivateIpAddress="192.168.42.5",
)
instance = resp["Instances"][0]
instance["SubnetId"].should.equal(subnet_id)
instance["PrivateIpAddress"].should.equal("192.168.42.5")
@mock_ec2
def test_run_instance_mapped_public_ipv4():
client = boto3.client("ec2", region_name="eu-central-1")
vpc_cidr = ipaddress.ip_network("192.168.42.0/24")
subnet_cidr = ipaddress.ip_network("192.168.42.0/25")
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy="default",
)
vpc_id = resp["Vpc"]["VpcId"]
resp = client.create_subnet(CidrBlock=str(subnet_cidr), VpcId=vpc_id)
subnet_id = resp["Subnet"]["SubnetId"]
client.modify_subnet_attribute(
SubnetId=subnet_id, MapPublicIpOnLaunch={"Value": True}
)
resp = client.run_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1, SubnetId=subnet_id
)
instance = resp["Instances"][0]
instance.should.contain("PublicDnsName")
instance.should.contain("PublicIpAddress")
len(instance["PublicDnsName"]).should.be.greater_than(0)
len(instance["PublicIpAddress"]).should.be.greater_than(0)
@mock_ec2_deprecated
def test_run_instance_with_nic_autocreated():
conn = boto.connect_vpc("the_key", "the_secret")
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group(
"test security group #1", "this is a test security group"
)
security_group2 = conn.create_security_group(
"test security group #2", "this is a test security group"
)
private_ip = "10.0.0.1"
reservation = conn.run_instances(
EXAMPLE_AMI_ID,
subnet_id=subnet.id,
security_groups=[security_group1.name],
security_group_ids=[security_group2.id],
private_ip_address=private_ip,
)
instance = reservation.instances[0]
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(1)
eni = all_enis[0]
instance.interfaces.should.have.length_of(1)
instance.interfaces[0].id.should.equal(eni.id)
instance.subnet_id.should.equal(subnet.id)
instance.groups.should.have.length_of(2)
set([group.id for group in instance.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
eni.subnet_id.should.equal(subnet.id)
eni.groups.should.have.length_of(2)
set([group.id for group in eni.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
eni.private_ip_addresses.should.have.length_of(1)
eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)
@mock_ec2_deprecated
def test_run_instance_with_nic_preexisting():
conn = boto.connect_vpc("the_key", "the_secret")
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group(
"test security group #1", "this is a test security group"
)
security_group2 = conn.create_security_group(
"test security group #2", "this is a test security group"
)
private_ip = "54.0.0.1"
eni = conn.create_network_interface(
subnet.id, private_ip, groups=[security_group1.id]
)
# Boto requires NetworkInterfaceCollection of NetworkInterfaceSpecifications...
# annoying, but generates the desired querystring.
from boto.ec2.networkinterface import (
NetworkInterfaceSpecification,
NetworkInterfaceCollection,
)
interface = NetworkInterfaceSpecification(
network_interface_id=eni.id, device_index=0
)
interfaces = NetworkInterfaceCollection(interface)
# end Boto objects
reservation = conn.run_instances(
EXAMPLE_AMI_ID,
network_interfaces=interfaces,
security_group_ids=[security_group2.id],
)
instance = reservation.instances[0]
instance.subnet_id.should.equal(subnet.id)
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(1)
instance.interfaces.should.have.length_of(1)
instance_eni = instance.interfaces[0]
instance_eni.id.should.equal(eni.id)
instance_eni.subnet_id.should.equal(subnet.id)
instance_eni.groups.should.have.length_of(2)
set([group.id for group in instance_eni.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
instance_eni.private_ip_addresses.should.have.length_of(1)
instance_eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)
@requires_boto_gte("2.32.0")
@mock_ec2_deprecated
def test_instance_with_nic_attach_detach():
conn = boto.connect_vpc("the_key", "the_secret")
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group(
"test security group #1", "this is a test security group"
)
security_group2 = conn.create_security_group(
"test security group #2", "this is a test security group"
)
reservation = conn.run_instances(
EXAMPLE_AMI_ID, security_group_ids=[security_group1.id]
)
instance = reservation.instances[0]
eni = conn.create_network_interface(subnet.id, groups=[security_group2.id])
# Check initial instance and ENI data
instance.interfaces.should.have.length_of(1)
eni.groups.should.have.length_of(1)
set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
# Attach
with pytest.raises(EC2ResponseError) as ex:
conn.attach_network_interface(eni.id, instance.id, device_index=1, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set"
)
conn.attach_network_interface(eni.id, instance.id, device_index=1)
# Check attached instance and ENI data
instance.update()
instance.interfaces.should.have.length_of(2)
instance_eni = instance.interfaces[1]
instance_eni.id.should.equal(eni.id)
instance_eni.groups.should.have.length_of(2)
set([group.id for group in instance_eni.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
eni = conn.get_all_network_interfaces(filters={"network-interface-id": eni.id})[0]
eni.groups.should.have.length_of(2)
set([group.id for group in eni.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
# Detach
with pytest.raises(EC2ResponseError) as ex:
conn.detach_network_interface(instance_eni.attachment.id, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set"
)
conn.detach_network_interface(instance_eni.attachment.id)
# Check detached instance and ENI data
instance.update()
instance.interfaces.should.have.length_of(1)
eni = conn.get_all_network_interfaces(filters={"network-interface-id": eni.id})[0]
eni.groups.should.have.length_of(1)
set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
# Detach with invalid attachment ID
with pytest.raises(EC2ResponseError) as cm:
conn.detach_network_interface("eni-attach-1234abcd")
cm.value.code.should.equal("InvalidAttachmentID.NotFound")
cm.value.status.should.equal(400)
cm.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_ec2_classic_has_public_ip_address():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
instance = reservation.instances[0]
instance.ip_address.should_not.equal(None)
instance.public_dns_name.should.contain(instance.ip_address.replace(".", "-"))
instance.private_ip_address.should_not.equal(None)
instance.private_dns_name.should.contain(
instance.private_ip_address.replace(".", "-")
)
@mock_ec2_deprecated
def test_run_instance_with_keypair():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
instance = reservation.instances[0]
instance.key_name.should.equal("keypair_name")
@mock_ec2
def test_run_instance_with_block_device_mappings():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}],
}
ec2_client.run_instances(**kwargs)
instances = ec2_client.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(50)
@mock_ec2
def test_run_instance_with_block_device_mappings_missing_ebs():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}],
}
with pytest.raises(ClientError) as ex:
ec2_client.run_instances(**kwargs)
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter ebs"
)
@mock_ec2
def test_run_instance_with_block_device_mappings_missing_size():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}}
],
}
with pytest.raises(ClientError) as ex:
ec2_client.run_instances(**kwargs)
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter size or snapshotId"
)
@mock_ec2
def test_run_instance_with_block_device_mappings_from_snapshot():
ec2_client = boto3.client("ec2", region_name="us-east-1")
ec2_resource = boto3.resource("ec2", region_name="us-east-1")
volume_details = {
"AvailabilityZone": "1a",
"Size": 30,
}
volume = ec2_resource.create_volume(**volume_details)
snapshot = volume.create_snapshot()
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"SnapshotId": snapshot.snapshot_id}}
],
}
ec2_client.run_instances(**kwargs)
instances = ec2_client.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(30)
volumes["Volumes"][0]["SnapshotId"].should.equal(snapshot.snapshot_id)
@mock_ec2_deprecated
def test_describe_instance_status_no_instances():
conn = boto.connect_ec2("the_key", "the_secret")
all_status = conn.get_all_instance_status()
len(all_status).should.equal(0)
@mock_ec2_deprecated
def test_describe_instance_status_with_instances():
conn = boto.connect_ec2("the_key", "the_secret")
conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
all_status = conn.get_all_instance_status()
len(all_status).should.equal(1)
all_status[0].instance_status.status.should.equal("ok")
all_status[0].system_status.status.should.equal("ok")
@mock_ec2_deprecated
def test_describe_instance_status_with_instance_filter_deprecated():
conn = boto.connect_ec2("the_key", "the_secret")
# We want to filter based on this one
reservation = conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
instance = reservation.instances[0]
    # This is just to set up the test
conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
all_status = conn.get_all_instance_status(instance_ids=[instance.id])
len(all_status).should.equal(1)
all_status[0].id.should.equal(instance.id)
# Call get_all_instance_status with a bad id should raise an error
with pytest.raises(EC2ResponseError) as cm:
conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"])
cm.value.code.should.equal("InvalidInstanceID.NotFound")
cm.value.status.should.equal(400)
cm.value.request_id.should_not.be.none
@mock_ec2
def test_describe_instance_credit_specifications():
conn = boto3.client("ec2", region_name="us-west-1")
# We want to filter based on this one
reservation = conn.run_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
result = conn.describe_instance_credit_specifications(
InstanceIds=[reservation["Instances"][0]["InstanceId"]]
)
assert (
result["InstanceCreditSpecifications"][0]["InstanceId"]
== reservation["Instances"][0]["InstanceId"]
)
@mock_ec2
def test_describe_instance_status_with_instance_filter():
conn = boto3.client("ec2", region_name="us-west-1")
# We want to filter based on this one
reservation = conn.run_instances(ImageId=EXAMPLE_AMI_ID, MinCount=3, MaxCount=3)
instance1 = reservation["Instances"][0]
instance2 = reservation["Instances"][1]
instance3 = reservation["Instances"][2]
conn.stop_instances(InstanceIds=[instance1["InstanceId"]])
stopped_instance_ids = [instance1["InstanceId"]]
running_instance_ids = sorted([instance2["InstanceId"], instance3["InstanceId"]])
all_instance_ids = sorted(stopped_instance_ids + running_instance_ids)
# Filter instance using the state name
state_name_filter = {
"running_and_stopped": [
{"Name": "instance-state-name", "Values": ["running", "stopped"]}
],
"running": [{"Name": "instance-state-name", "Values": ["running"]}],
"stopped": [{"Name": "instance-state-name", "Values": ["stopped"]}],
}
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_name_filter["running_and_stopped"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(all_instance_ids)
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_name_filter["running"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(running_instance_ids)
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_name_filter["stopped"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(stopped_instance_ids)
# Filter instance using the state code
state_code_filter = {
"running_and_stopped": [
{"Name": "instance-state-code", "Values": ["16", "80"]}
],
"running": [{"Name": "instance-state-code", "Values": ["16"]}],
"stopped": [{"Name": "instance-state-code", "Values": ["80"]}],
}
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_code_filter["running_and_stopped"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(all_instance_ids)
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_code_filter["running"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(running_instance_ids)
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_code_filter["stopped"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(stopped_instance_ids)
@requires_boto_gte("2.32.0")
@mock_ec2_deprecated
def test_describe_instance_status_with_non_running_instances():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.stop()
instance2.terminate()
all_running_status = conn.get_all_instance_status()
all_running_status.should.have.length_of(1)
all_running_status[0].id.should.equal(instance3.id)
all_running_status[0].state_name.should.equal("running")
all_status = conn.get_all_instance_status(include_all_instances=True)
all_status.should.have.length_of(3)
status1 = next((s for s in all_status if s.id == instance1.id), None)
status1.state_name.should.equal("stopped")
status2 = next((s for s in all_status if s.id == instance2.id), None)
status2.state_name.should.equal("terminated")
status3 = next((s for s in all_status if s.id == instance3.id), None)
status3.state_name.should.equal("running")
@mock_ec2_deprecated
def test_get_instance_by_security_group():
conn = boto.connect_ec2("the_key", "the_secret")
conn.run_instances(EXAMPLE_AMI_ID)
instance = conn.get_only_instances()[0]
security_group = conn.create_security_group("test", "test")
with pytest.raises(EC2ResponseError) as ex:
conn.modify_instance_attribute(
instance.id, "groupSet", [security_group.id], dry_run=True
)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set"
)
conn.modify_instance_attribute(instance.id, "groupSet", [security_group.id])
security_group_instances = security_group.instances()
assert len(security_group_instances) == 1
assert security_group_instances[0].id == instance.id
@mock_ec2
def test_modify_delete_on_termination():
ec2_client = boto3.resource("ec2", region_name="us-west-1")
result = ec2_client.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
instance = result[0]
instance.load()
instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True)
instance.modify_attribute(
BlockDeviceMappings=[
{"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": False}}
]
)
instance.load()
instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False)
@mock_ec2
def test_create_instance_ebs_optimized():
ec2_resource = boto3.resource("ec2", region_name="eu-west-1")
instance = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1, EbsOptimized=True
)[0]
instance.load()
instance.ebs_optimized.should.be(True)
instance.modify_attribute(EbsOptimized={"Value": False})
instance.load()
instance.ebs_optimized.should.be(False)
instance = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1,
)[0]
instance.load()
instance.ebs_optimized.should.be(False)
@mock_ec2
def test_run_multiple_instances_in_same_command():
instance_count = 4
client = boto3.client("ec2", region_name="us-east-1")
client.run_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=instance_count, MaxCount=instance_count
)
reservations = client.describe_instances()["Reservations"]
reservations[0]["Instances"].should.have.length_of(instance_count)
instances = reservations[0]["Instances"]
for i in range(0, instance_count):
instances[i]["AmiLaunchIndex"].should.be(i)
@mock_ec2
def test_describe_instance_attribute():
client = boto3.client("ec2", region_name="us-east-1")
security_group_id = client.create_security_group(
GroupName="test security group", Description="this is a test security group"
)["GroupId"]
client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
SecurityGroupIds=[security_group_id],
)
instance_id = client.describe_instances()["Reservations"][0]["Instances"][0][
"InstanceId"
]
valid_instance_attributes = [
"instanceType",
"kernel",
"ramdisk",
"userData",
"disableApiTermination",
"instanceInitiatedShutdownBehavior",
"rootDeviceName",
"blockDeviceMapping",
"productCodes",
"sourceDestCheck",
"groupSet",
"ebsOptimized",
"sriovNetSupport",
]
for valid_instance_attribute in valid_instance_attributes:
response = client.describe_instance_attribute(
InstanceId=instance_id, Attribute=valid_instance_attribute
)
if valid_instance_attribute == "groupSet":
response.should.have.key("Groups")
response["Groups"].should.have.length_of(1)
response["Groups"][0]["GroupId"].should.equal(security_group_id)
elif valid_instance_attribute == "userData":
response.should.have.key("UserData")
response["UserData"].should.be.empty
invalid_instance_attributes = [
"abc",
"Kernel",
"RamDisk",
"userdata",
"iNsTaNcEtYpE",
]
for invalid_instance_attribute in invalid_instance_attributes:
with pytest.raises(ClientError) as ex:
client.describe_instance_attribute(
InstanceId=instance_id, Attribute=invalid_instance_attribute
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
message = "Value ({invalid_instance_attribute}) for parameter attribute is invalid. Unknown attribute.".format(
invalid_instance_attribute=invalid_instance_attribute
)
ex.value.response["Error"]["Message"].should.equal(message)
@mock_ec2
def test_warn_on_invalid_ami():
if settings.TEST_SERVER_MODE:
raise SkipTest("Can't capture warnings in server mode.")
ec2 = boto3.resource("ec2", "us-east-1")
with pytest.warns(
PendingDeprecationWarning,
match=r"Could not find AMI with image-id:invalid-ami.+",
):
ec2.create_instances(ImageId="invalid-ami", MinCount=1, MaxCount=1)
| apache-2.0 | -5,225,527,537,591,672,000 | 35.704879 | 155 | 0.682262 | false |
acbilson/forbidden-island | tests/test_print.py | 1 | 1905 | import sys
sys.path.append('../src')
from tiles import *
from tile import *
class Test(object):
def __init__(self):
self.board = ""
    def gen_board(self, tiles):
        # Render the diamond-shaped board as stacked name/player/status rows.
rows = [[0,1],
[2,3,4,5],
[6,7,8,9,10,11],
[12,13,14,15,16,17],
[18,19,20,21],
[22,23]]
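        # Row lengths (2, 4, 6, 6, 4, 2) trace the board's diamond shape;
        # `spaces` below left-pads each row so the tiles line up when printed.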
spaces = [' ',
' ',
'',
'',
' ',
' ']
names = [t.name for t in tiles]
players = [t.player for t in tiles]
statuses = [t.status for t in tiles]
allSegs = []
for i in range(0, len(rows)):
nameSegments = self._gen_segments(rows[i], spaces[i], ('/', '\\'), names)
playerSegments = self._gen_segments(rows[i], spaces[i], ('|', '|'), players)
statusSegments = self._gen_segments(rows[i], spaces[i], ('\\', '/'), statuses, newLine=True)
allSegs.append(''.join(nameSegments))
allSegs.append(''.join(playerSegments))
allSegs.append(''.join(statusSegments))
return ''.join(allSegs)
def _gen_segments(self, row, space, dividers, tileSegments, newLine=None):
TILE_SPACE = ' '
segments = []
segments.append(space)
        last = row[-1]
rowSegments = tileSegments[row[0]:last+1]
for i,rs in enumerate(rowSegments):
segments.append(dividers[0] + rs.value + dividers[1])
            if i != len(rowSegments) - 1:  # no separator after the last tile
segments.append(TILE_SPACE)
segments.append(space + '\n')
if newLine != None:
segments.append('\n')
return segments
if __name__ == '__main__':
tiles = Tiles()
t = Test()
board = t.gen_board(tiles.tiles)
print(board)
| gpl-3.0 | 8,462,663,968,441,839,000 | 23.74026 | 104 | 0.467192 | false |
Inboxen/website | views/inbox/delete.py | 1 | 2017 | ##
# Copyright (C) 2013 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django.views import generic
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.contrib import messages
from inboxen import models
from queue.delete.tasks import delete_inbox
from website import forms
from website.views import base
__all__ = ["InboxDeletionView"]
class InboxDeletionView(base.CommonContextMixin, base.LoginRequiredMixin, generic.DeleteView):
model = models.Inbox
success_url = reverse_lazy('user-home')
headline = _("Delete Inbox")
template_name = "inbox/delete.html"
def get_object(self, *args, **kwargs):
return self.request.user.inbox_set.get(
inbox=self.kwargs["inbox"],
domain__domain=self.kwargs["domain"]
)
    def delete(self, request, *args, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
self.object.flags.deleted = True
self.object.save()
delete_inbox.delay(self.object.id, request.user.id)
messages.success(request, _("{0}@{1} has been deleted.".format(self.object.inbox, self.object.domain.domain)))
return HttpResponseRedirect(success_url)
| agpl-3.0 | -7,981,159,622,210,445,000 | 36.351852 | 118 | 0.706495 | false |
juan-cb/django-cookie-law | setup.py | 1 | 1609 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
from itertools import chain
from glob import glob
import cookielaw
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Session',
]
package_data_globs = (
'cookielaw/templates/cookielaw/*.html',
'cookielaw/static/cookielaw/*/*',
'cookielaw/locale/*/*/*'
)
package_data = []
for f in chain(*map(glob, package_data_globs)):
package_data.append(f.split('/', 1)[1])
setup(
author='Piotr Kilczuk',
author_email='[email protected]',
name='django-cookie-law',
version='.'.join(str(v) for v in cookielaw.VERSION),
description='Helps your Django project comply with EU cookie law regulations',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='https://github.com/TyMaszWeb/django-cookie-law',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'Django>=1.2',
'django-classy-tags>=0.3.0',
],
tests_require=[
'selenium==3.0.1',
],
packages=find_packages(),
package_data={'cookielaw': package_data},
include_package_data=False,
zip_safe=False,
test_suite='runtests.main',
)
| bsd-2-clause | -8,123,308,249,982,098,000 | 27.22807 | 88 | 0.649472 | false |
sonali0901/zulip | analytics/views.py | 1 | 37020 | from __future__ import absolute_import, division
from django.conf import settings
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, last_successful_fill
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from zproject.jinja2 import render_to_response
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import logging
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Dict, List, Tuple, Optional, Callable, Type, \
    Union, Set, Text
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render_to_response('analytics/stats.html',
context=dict(realm_name = request.user.realm.name))
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
if chart_name == 'number_of_humans':
stat = COUNT_STATS['active_users:is_bot:day']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True]
elif chart_name == 'messages_sent_over_time':
stat = COUNT_STATS['messages_sent:is_bot:hour']
tables = [RealmCount, UserCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True, False]
elif chart_name == 'messages_sent_by_message_type':
stat = COUNT_STATS['messages_sent:message_type:day']
tables = [RealmCount, UserCount]
subgroups = ['public_stream', 'private_stream', 'private_message']
labels = ['Public Streams', 'Private Streams', 'PMs & Group PMs']
labels_sort_function = lambda data: sort_by_totals(data['realm'])
include_empty_subgroups = [True, True]
elif chart_name == 'messages_sent_by_client':
stat = COUNT_STATS['messages_sent:client:day']
tables = [RealmCount, UserCount]
subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
# these are further re-written by client_label_map
labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
labels_sort_function = sort_client_labels
include_empty_subgroups = [False, False]
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None and end is not None and start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
realm = user_profile.realm
if start is None:
start = realm.date_created
if end is None:
end = last_successful_fill(stat.property)
if end is None or start > end:
logging.warning("User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation time of realm) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?" % (realm.string_id, start, end))
raise JsonableError(_("No analytics data available. Please contact your server administrator."))
end_times = time_range(start, end, stat.frequency, min_length)
data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
for table, include_empty_subgroups_ in zip(tables, include_empty_subgroups):
if table == RealmCount:
data['realm'] = get_time_series_by_subgroup(
stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups_)
if table == UserCount:
data['user'] = get_time_series_by_subgroup(
stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups_)
if labels_sort_function is not None:
data['display_order'] = labels_sort_function(data)
else:
data['display_order'] = None
return json_success(data=data)
def sort_by_totals(value_arrays):
# type: (Dict[str, List[int]]) -> List[str]
totals = []
for label, values in value_arrays.items():
totals.append((label, sum(values)))
totals.sort(key=lambda label_total: label_total[1], reverse=True)
return [label for label, total in totals]
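# Illustrative example (not part of the original module): labels are ordered
# by the sum of their series, largest first.
#     sort_by_totals({'human': [1, 2, 3], 'bot': [10, 0, 0]})
#     -> ['bot', 'human']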
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data):
# type: (Dict[str, Dict[str, List[int]]]) -> List[str]
realm_order = sort_by_totals(data['realm'])
user_order = sort_by_totals(data['user'])
label_sort_values = {} # type: Dict[str, float]
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i-.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(),
key=lambda x: x[1])]
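# Illustrative reading of the ranking above (assumed data): a client ranked
# first for the user (i=0) and third for the realm gets sort value
# min(0 - .1, 2) = -0.1, so heavy personal use can outrank realm popularity.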
def table_filtered_to_id(table, key_id):
# type: (Type[BaseCount], int) -> QuerySet
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
else:
raise ValueError("Unknown table: %s" % (table,))
def client_label_map(name):
# type: (str) -> str
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipAndroid":
return "Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "New iOS app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip"):-len("Webhook")] + " webhook"
# Clients in dev environment autogenerated data start with _ so
# that it's easy to manually drop without affecting other data.
if settings.DEVELOPMENT and name.startswith("_"):
return name[1:]
return name
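# Examples derived from the branches above (shown for clarity):
#     client_label_map('ZulipGitHubWebhook') -> 'GitHub webhook'
#     client_label_map('ZulipiOS')           -> 'Old iOS app'
#     client_label_map('API: Python')        -> 'Python API'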
def rewrite_client_arrays(value_arrays):
# type: (Dict[str, List[int]]) -> Dict[str, List[int]]
mapped_arrays = {} # type: Dict[str, List[int]]
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
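# Illustrative example (assumed input): two Client rows that map to the same
# label are summed element-wise:
#     rewrite_client_arrays({'desktop app 1': [1, 2], 'desktop app 2': [3, 4]})
#     -> {'Old desktop app': [4, 6]}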
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
# type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], List[str], bool) -> Dict[str, List[int]]
if len(subgroups) != len(labels):
raise ValueError("subgroups and labels have lengths %s and %s, which are different." %
(len(subgroups), len(labels)))
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in zip(subgroups, labels):
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS['messages_sent:client:day']:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
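# Example shape (illustrative): for a cursor whose description names columns
# ('string_id', 'cnt'), a fetched row ('zulip', 42) becomes
# {'string_id': 'zulip', 'cnt': 42}.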
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
request=request
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
    #: `Any` above should really be `Union[int, datetime]`.
    #: However, the current version of `Union` does not work inside nested
    #: functions. We could use something like:
    #: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`
    #: but that long `Union` would have to be carried through all the inner
    #: functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = datetime.now(val.tzinfo) - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Any]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android',
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
title = realm_str
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
request=request
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
| apache-2.0 | 1,357,763,504,589,575,200 | 32.502262 | 123 | 0.557969 | false |
luboslenco/cyclesgame | blender/arm/utils.py | 1 | 24354 | import bpy
import json
import os
import glob
import platform
import zipfile
import re
import subprocess
import webbrowser
import numpy as np
import arm.lib.armpack
import arm.make_state as state
import arm.log as log
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
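# Minimal usage sketch (illustrative, not part of the exporter):
#     json.dumps({'verts': np.zeros(3)}, cls=NumpyEncoder)
#     -> '{"verts": [0.0, 0.0, 0.0]}'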
def write_arm(filepath, output):
if filepath.endswith('.zip'):
with zipfile.ZipFile(filepath, 'w', zipfile.ZIP_DEFLATED) as zip_file:
if bpy.data.worlds['Arm'].arm_minimize:
zip_file.writestr('data.arm', arm.lib.armpack.packb(output))
else:
zip_file.writestr('data.json', json.dumps(output, sort_keys=True, indent=4, cls=NumpyEncoder))
else:
if bpy.data.worlds['Arm'].arm_minimize:
with open(filepath, 'wb') as f:
f.write(arm.lib.armpack.packb(output))
else:
filepath_json = filepath.split('.arm')[0] + '.json'
with open(filepath_json, 'w') as f:
f.write(json.dumps(output, sort_keys=True, indent=4, cls=NumpyEncoder))
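# Note (derived from the branches above): with arm_minimize enabled the scene
# is packed as binary ('data.arm' inside a .zip, or the .arm file itself);
# otherwise a pretty-printed JSON sibling is written instead.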
def unpack_image(image, path, file_format='JPEG'):
print('Armory Info: Unpacking to ' + path)
image.filepath_raw = path
image.file_format = file_format
image.save()
def convert_image(image, path, file_format='JPEG'):
# Convert image to compatible format
print('Armory Info: Converting to ' + path)
ren = bpy.context.scene.render
orig_quality = ren.image_settings.quality
orig_file_format = ren.image_settings.file_format
orig_color_mode = ren.image_settings.color_mode
ren.image_settings.quality = 90
ren.image_settings.file_format = file_format
if file_format == 'PNG':
ren.image_settings.color_mode = 'RGBA'
image.save_render(path, scene=bpy.context.scene)
ren.image_settings.quality = orig_quality
ren.image_settings.file_format = orig_file_format
ren.image_settings.color_mode = orig_color_mode
def blend_name():
return bpy.path.basename(bpy.context.blend_data.filepath).rsplit('.')[0]
def build_dir():
return 'build_' + safestr(blend_name())
def get_fp():
wrd = bpy.data.worlds['Arm']
if wrd.arm_project_root != '':
return bpy.path.abspath(wrd.arm_project_root)
else:
s = bpy.data.filepath.split(os.path.sep)
s.pop()
return os.path.sep.join(s)
def get_fp_build():
return get_fp() + '/' + build_dir()
def get_os():
s = platform.system()
if s == 'Windows':
return 'win'
elif s == 'Darwin':
return 'mac'
else:
return 'linux'
def get_gapi():
wrd = bpy.data.worlds['Arm']
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
return getattr(item, target_to_gapi(item.arm_project_target))
if wrd.arm_runtime == 'Browser':
return 'webgl'
return arm.utils.get_player_gapi()
def get_rp():
wrd = bpy.data.worlds['Arm']
return wrd.arm_rplist[wrd.arm_rplist_index]
def bundled_sdk_path():
if get_os() == 'mac':
# SDK on MacOS is located in .app folder due to security
p = bpy.app.binary_path
if p.endswith('Contents/MacOS/blender'):
return p[:-len('Contents/MacOS/blender')] + '/armsdk/'
else:
return p[:-len('Contents/MacOS/./blender')] + '/armsdk/'
elif get_os() == 'linux':
# /blender
return bpy.app.binary_path.rsplit('/', 1)[0] + '/armsdk/'
else:
# /blender.exe
return bpy.app.binary_path.replace('\\', '/').rsplit('/', 1)[0] + '/armsdk/'
# Passed by load_post handler when armsdk is found in project folder
use_local_sdk = False
def get_sdk_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons["armory"].preferences
p = bundled_sdk_path()
if use_local_sdk:
return get_fp() + '/armsdk/'
elif os.path.exists(p) and addon_prefs.sdk_bundled:
return p
else:
return addon_prefs.sdk_path
def get_ide_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons["armory"].preferences
return '' if not hasattr(addon_prefs, 'ide_path') else addon_prefs.ide_path
def get_ffmpeg_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return addon_prefs.ffmpeg_path
def get_renderdoc_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
p = addon_prefs.renderdoc_path
if p == '' and get_os() == 'win':
pdefault = 'C:\\Program Files\\RenderDoc\\qrenderdoc.exe'
if os.path.exists(pdefault):
p = pdefault
return p
def get_player_gapi():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'opengl' if not hasattr(addon_prefs, 'player_gapi_' + get_os()) else getattr(addon_prefs, 'player_gapi_' + get_os())
def get_code_editor():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'kodestudio' if not hasattr(addon_prefs, 'code_editor') else addon_prefs.code_editor
def get_ui_scale():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 1.0 if not hasattr(addon_prefs, 'ui_scale') else addon_prefs.ui_scale
def get_khamake_threads():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 1 if not hasattr(addon_prefs, 'khamake_threads') else addon_prefs.khamake_threads
def get_compilation_server():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'compilation_server') else addon_prefs.compilation_server
def get_save_on_build():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'save_on_build') else addon_prefs.save_on_build
def get_viewport_controls():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'qwerty' if not hasattr(addon_prefs, 'viewport_controls') else addon_prefs.viewport_controls
def get_legacy_shaders():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'legacy_shaders') else addon_prefs.legacy_shaders
def get_relative_paths():
# Convert absolute paths to relative
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'relative_paths') else addon_prefs.relative_paths
def get_node_path():
if get_os() == 'win':
return get_sdk_path() + '/nodejs/node.exe'
elif get_os() == 'mac':
return get_sdk_path() + '/nodejs/node-osx'
else:
return get_sdk_path() + '/nodejs/node-linux64'
def get_kha_path():
if os.path.exists('Kha'):
return 'Kha'
return get_sdk_path() + '/Kha'
def get_haxe_path():
if get_os() == 'win':
return get_kha_path() + '/Tools/haxe/haxe.exe'
elif get_os() == 'mac':
return get_kha_path() + '/Tools/haxe/haxe-osx'
else:
return get_kha_path() + '/Tools/haxe/haxe-linux64'
def get_khamake_path():
return get_kha_path() + '/make'
def krom_paths(bin_ext=''):
sdk_path = get_sdk_path()
if arm.utils.get_os() == 'win':
krom_location = sdk_path + '/Krom'
krom_path = krom_location + '/Krom' + bin_ext + '.exe'
elif arm.utils.get_os() == 'mac':
krom_location = sdk_path + '/Krom/Krom.app/Contents/MacOS'
krom_path = krom_location + '/Krom' + bin_ext
else:
krom_location = sdk_path + '/Krom'
krom_path = krom_location + '/Krom' + bin_ext
return krom_location, krom_path
def fetch_bundled_script_names():
wrd = bpy.data.worlds['Arm']
wrd.arm_bundled_scripts_list.clear()
os.chdir(get_sdk_path() + '/armory/Sources/armory/trait')
for file in glob.glob('*.hx'):
wrd.arm_bundled_scripts_list.add().name = file.rsplit('.')[0]
script_props = {}
script_props_defaults = {}
def fetch_script_props(file):
with open(file) as f:
if '/' in file:
file = file.split('/')[-1]
if '\\' in file:
file = file.split('\\')[-1]
name = file.rsplit('.')[0]
script_props[name] = []
script_props_defaults[name] = []
lines = f.read().splitlines()
read_prop = False
for l in lines:
if not read_prop:
read_prop = l.lstrip().startswith('@prop')
if read_prop and 'var ' in l:
p = l.split('var ')[1]
valid_prop = False
# Has type
if ':' in p:
# Fetch default value
if '=' in p:
s = p.split('=')
ps = s[0].split(':')
prop = (ps[0].strip(), ps[1].split(';')[0].strip())
prop_value = s[1].split(';')[0].replace('\'', '').replace('"', '').strip()
valid_prop = True
else:
ps = p.split(':')
prop = (ps[0].strip(), ps[1].split(';')[0].strip())
prop_value = ''
valid_prop = True
# Fetch default value
elif '=' in p:
s = p.split('=')
prop = (s[0].strip(), None)
prop_value = s[1].split(';')[0].replace('\'', '').replace('"', '').strip()
valid_prop = True
# Register prop
if valid_prop:
script_props[name].append(prop)
script_props_defaults[name].append(prop_value)
read_prop = False
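# Example of what the parser above extracts (assumed Haxe trait source):
#     @prop
#     var speed: Float = 1.0;
# yields script_props[name] += [('speed', 'Float')] and
# script_props_defaults[name] += ['1.0'].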
def fetch_script_names():
if bpy.data.filepath == "":
return
wrd = bpy.data.worlds['Arm']
# Sources
wrd.arm_scripts_list.clear()
sources_path = get_fp() + '/Sources/' + safestr(wrd.arm_project_package)
if os.path.isdir(sources_path):
os.chdir(sources_path)
        # glob supports recursive search since Python 3.5, which covers the integrated Python of both Blender 2.79 and 2.8
for file in glob.glob('**/*.hx', recursive=True):
name = file.rsplit('.')[0]
            # Convert path syntax to package syntax so the trait is searchable in Blender's trait "Class" dropdown
wrd.arm_scripts_list.add().name = name.replace(os.sep, '.')
fetch_script_props(file)
# Canvas
wrd.arm_canvas_list.clear()
canvas_path = get_fp() + '/Bundled/canvas'
if os.path.isdir(canvas_path):
os.chdir(canvas_path)
for file in glob.glob('*.json'):
wrd.arm_canvas_list.add().name = file.rsplit('.')[0]
os.chdir(get_fp())
def fetch_wasm_names():
if bpy.data.filepath == "":
return
wrd = bpy.data.worlds['Arm']
# WASM modules
wrd.arm_wasm_list.clear()
sources_path = get_fp() + '/Bundled'
if os.path.isdir(sources_path):
os.chdir(sources_path)
for file in glob.glob('*.wasm'):
name = file.rsplit('.')[0]
wrd.arm_wasm_list.add().name = name
os.chdir(get_fp())
def fetch_trait_props():
for o in bpy.data.objects:
fetch_prop(o)
for s in bpy.data.scenes:
fetch_prop(s)
def fetch_prop(o):
for item in o.arm_traitlist:
if item.name not in script_props:
continue
props = script_props[item.name]
defaults = script_props_defaults[item.name]
# Remove old props
for i in range(len(item.arm_traitpropslist) - 1, -1, -1):
ip = item.arm_traitpropslist[i]
# if ip.name not in props:
if ip.name.split('(')[0] not in [p[0] for p in props]:
item.arm_traitpropslist.remove(i)
# Add new props
for i in range(0, len(props)):
p = props[i]
found = False
for ip in item.arm_traitpropslist:
if ip.name.replace(')', '').split('(')[0] == p[0]:
found = ip
break
# Not in list
if not found:
prop = item.arm_traitpropslist.add()
prop.name = p[0] + ('(' + p[1] + ')' if p[1] else '')
prop.value = defaults[i]
if found:
prop = item.arm_traitpropslist[found.name]
f = found.name.replace(')', '').split('(')
# Default value added and current value is blank (no override)
if (not found.value and defaults[i]):
prop.value = defaults[i]
# Type has changed, update displayed name
if (len(f) == 1 or (len(f) > 1 and f[1] != p[1])):
prop.name = p[0] + ('(' + p[1] + ')' if p[1] else '')
def fetch_bundled_trait_props():
# Bundled script props
for o in bpy.data.objects:
for t in o.arm_traitlist:
if t.type_prop == 'Bundled Script':
file_path = get_sdk_path() + '/armory/Sources/armory/trait/' + t.name + '.hx'
if os.path.exists(file_path):
fetch_script_props(file_path)
fetch_prop(o)
def update_trait_collections():
for col in bpy.data.collections:
if col.name.startswith('Trait|'):
bpy.data.collections.remove(col)
for o in bpy.data.objects:
for t in o.arm_traitlist:
if 'Trait|' + t.name not in bpy.data.collections:
col = bpy.data.collections.new('Trait|' + t.name)
else:
col = bpy.data.collections['Trait|' + t.name]
col.objects.link(o)
def to_hex(val):
return '#%02x%02x%02x%02x' % (int(val[3] * 255), int(val[0] * 255), int(val[1] * 255), int(val[2] * 255))
def color_to_int(val):
return (int(val[3] * 255) << 24) + (int(val[0] * 255) << 16) + (int(val[1] * 255) << 8) + int(val[2] * 255)
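# Both helpers pack an RGBA float list alpha-first (illustrative):
#     to_hex([1.0, 0.5, 0.0, 1.0])       -> '#ffff7f00'
#     color_to_int([1.0, 0.5, 0.0, 1.0]) -> 0xffff7f00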
def safesrc(s):
s = safestr(s).replace('.', '_').replace('-', '_').replace(' ', '')
if s[0].isdigit():
s = '_' + s
return s
def safestr(s):
    for c in r'[]/\;,><&*:%=+@!#^()|?':
s = s.replace(c, '_')
return ''.join([i if ord(i) < 128 else '_' for i in s])
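# Examples derived from the rules above:
#     safestr('scene#1')       -> 'scene_1'
#     safesrc('01-scene.main') -> '_01_scene_main'  (leading digit prefixed)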
def asset_name(bdata):
s = bdata.name
# Append library name if linked
if bdata.library != None:
s += '_' + bdata.library.name
return s
def asset_path(s):
return s[2:] if s[:2] == '//' else s # Remove leading '//'
def extract_filename(s):
return os.path.basename(asset_path(s))
def get_render_resolution(scene):
render = scene.render
scale = render.resolution_percentage / 100
return int(render.resolution_x * scale), int(render.resolution_y * scale)
def get_project_scene_name():
return get_active_scene().name
def get_active_scene():
if not state.is_export:
return bpy.context.scene
else:
wrd = bpy.data.worlds['Arm']
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
return item.arm_project_scene
def logic_editor_space(context_screen=None):
if context_screen == None:
context_screen = bpy.context.screen
if context_screen != None:
areas = context_screen.areas
for area in areas:
for space in area.spaces:
if space.type == 'NODE_EDITOR':
if space.node_tree != None and space.node_tree.bl_idname == 'ArmLogicTreeType':
return space
return None
def voxel_support():
# macos does not support opengl 4.5, needs metal
return state.target != 'html5' and get_os() != 'mac'
def get_cascade_size(rpdat):
cascade_size = int(rpdat.rp_shadowmap_cascade)
# Clamp to 4096 per cascade
if int(rpdat.rp_shadowmap_cascades) > 1 and cascade_size > 4096:
cascade_size = 4096
return cascade_size
def check_saved(self):
if bpy.data.filepath == "":
msg = "Save blend file first"
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
return True
def check_path(s):
    for c in r'[];><&*%=+@!#^()|?':
if c in s:
return False
for c in s:
if ord(c) > 127:
return False
return True
def check_sdkpath(self):
s = get_sdk_path()
if check_path(s) == False:
msg = "SDK path '{0}' contains special characters. Please move SDK to different path for now.".format(s)
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
else:
return True
def check_projectpath(self):
s = get_fp()
if check_path(s) == False:
msg = "Project path '{0}' contains special characters, build process may fail.".format(s)
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
else:
return True
def disp_enabled(target):
rpdat = get_rp()
if rpdat.arm_rp_displacement == 'Tessellation':
return target == 'krom' or target == 'native'
return rpdat.arm_rp_displacement != 'Off'
def is_object_animation_enabled(bobject):
# Checks if animation is present and enabled
if bobject.arm_animation_enabled == False or bobject.type == 'BONE' or bobject.type == 'ARMATURE':
return False
if bobject.animation_data and bobject.animation_data.action:
return True
return False
def is_bone_animation_enabled(bobject):
# Checks if animation is present and enabled for parented armature
if bobject.parent and bobject.parent.type == 'ARMATURE':
if bobject.parent.arm_animation_enabled == False:
return False
# Check for present actions
adata = bobject.parent.animation_data
has_actions = adata != None and adata.action != None
if not has_actions and adata != None:
if hasattr(adata, 'nla_tracks') and adata.nla_tracks != None:
for track in adata.nla_tracks:
if track.strips == None:
continue
for strip in track.strips:
if strip.action == None:
continue
has_actions = True
break
if has_actions:
break
if adata != None and has_actions:
return True
return False
def export_bone_data(bobject):
return bobject.find_armature() and is_bone_animation_enabled(bobject) and get_rp().arm_skin == 'On'
def kode_studio_mklink_win(sdk_path, ide_path):
# Fight long-path issues on Windows
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('mklink /J "%s" "%s"' % (source, target), shell=True)
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('mklink /J "%s" "%s"' % (source, target), shell=True)
def kode_studio_mklink_linux(sdk_path, ide_path):
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('ln -s "%s" "%s"' % (target, source), shell=True)
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('ln -s "%s" "%s"' % (target, source), shell=True)
def kode_studio_mklink_mac(sdk_path, ide_path):
if not os.path.exists(ide_path + '/Contents/Resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/Contents/Resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('ln -fs "%s" "%s"' % (target, source), shell=True)
if not os.path.exists(ide_path + '/Contents/Resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/Contents/Resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('ln -fs "%s" "%s"' % (target, source), shell=True)
def get_kode_path():
p = get_ide_path()
if p == '':
if get_os() == 'win':
p = get_sdk_path() + '/win32'
elif get_os() == 'mac':
p = get_sdk_path() + '/KodeStudio.app'
else:
p = get_sdk_path() + '/linux64'
return p
def get_kode_bin():
p = get_kode_path()
if get_os() == 'win':
return p + '/Kode Studio.exe'
elif get_os() == 'mac':
return p + '/Contents/MacOS/Electron'
else:
return p + '/kodestudio'
def get_vscode_bin():
p = get_kode_path()
if get_os() == 'win':
return p + '/Code.exe'
elif get_os() == 'mac':
return p + '/Contents/MacOS/Electron'
else:
return p + '/code'
def kode_studio(hx_path=None):
    project_path = arm.utils.get_fp()
    kode_bin = get_kode_bin()
    if not os.path.exists(kode_bin):
        kode_bin = get_vscode_bin()
    if os.path.exists(kode_bin) and get_code_editor() == 'kodestudio':
        if arm.utils.get_os() == 'win':
            # kode_studio_mklink_win(get_sdk_path(), get_kode_path())
            args = [kode_bin, arm.utils.get_fp()]
            if hx_path is not None:
                args.append(hx_path)
            subprocess.Popen(args)
        elif arm.utils.get_os() == 'mac':
            # kode_studio_mklink_mac(get_sdk_path(), get_kode_path())
            args = ['"' + kode_bin + '"' + ' "' + arm.utils.get_fp() + '"']
            if hx_path is not None:
                args[0] += ' "' + hx_path + '"'
            subprocess.Popen(args, shell=True)
        else:
            # kode_studio_mklink_linux(get_sdk_path(), get_kode_path())
            args = [kode_bin, arm.utils.get_fp()]
            if hx_path is not None:
                args.append(hx_path)
            subprocess.Popen(args)
    else:
        fp = hx_path if hx_path is not None else arm.utils.get_fp()
        webbrowser.open('file://' + fp)
def def_strings_to_array(strdefs):
defs = strdefs.split('_')
defs = defs[1:]
defs = ['_' + d for d in defs] # Restore _
return defs
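# Illustrative sketch (not part of the original module), kept as a comment so
# the module is unchanged; the input string is hypothetical. The helper splits
# a concatenated define string back into individual defines:
#
#   def_strings_to_array('_CSM_PCSS')  # -> ['_CSM', '_PCSS']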
def get_kha_target(target_name): # TODO: remove
if target_name == 'macos-hl':
return 'osx-hl'
elif target_name.startswith('krom'): # krom-windows
return 'krom'
elif target_name == 'custom':
return ''
return target_name
def target_to_gapi(arm_project_target):
# TODO: align target names
if arm_project_target == 'krom':
return 'arm_gapi_' + arm.utils.get_os()
elif arm_project_target == 'krom-windows':
return 'arm_gapi_win'
elif arm_project_target == 'windows-hl':
return 'arm_gapi_win'
elif arm_project_target == 'krom-linux':
return 'arm_gapi_linux'
elif arm_project_target == 'linux-hl':
return 'arm_gapi_linux'
elif arm_project_target == 'krom-macos':
return 'arm_gapi_mac'
elif arm_project_target == 'macos-hl':
return 'arm_gapi_mac'
elif arm_project_target == 'android-native-hl':
return 'arm_gapi_android'
elif arm_project_target == 'ios-hl':
return 'arm_gapi_ios'
elif arm_project_target == 'node':
return 'arm_gapi_html5'
else: # html5, custom
return 'arm_gapi_' + arm_project_target
def check_default_props():
wrd = bpy.data.worlds['Arm']
if len(wrd.arm_rplist) == 0:
wrd.arm_rplist.add()
wrd.arm_rplist_index = 0
if wrd.arm_project_name == '':
# Take blend file name
wrd.arm_project_name = arm.utils.blend_name()
def register(local_sdk=False):
global use_local_sdk
use_local_sdk = local_sdk
def unregister():
pass
| lgpl-3.0 | 2,749,772,502,997,039,600 | 35.08 | 127 | 0.581137 | false |
ozamiatin/glance | glance/common/utils.py | 1 | 26028 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 SoftLayer Technologies, Inc.
# Copyright 2015 Mirantis, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
try:
from eventlet import sleep
except ImportError:
from time import sleep
from eventlet.green import socket
import functools
import os
import platform
import re
import subprocess
import sys
import uuid
from OpenSSL import crypto
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import strutils
import six
from webob import exc
from glance.common import exception
from glance import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
FEATURE_BLACKLIST = ['content-length', 'content-type', 'x-image-meta-size']
# Whitelist of v1 API headers of form x-image-meta-xxx
IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size',
'x-image-meta-is_public', 'x-image-meta-disk_format',
'x-image-meta-container_format', 'x-image-meta-name',
'x-image-meta-status', 'x-image-meta-copy_from',
'x-image-meta-uri', 'x-image-meta-checksum',
'x-image-meta-created_at', 'x-image-meta-updated_at',
'x-image-meta-deleted_at', 'x-image-meta-min_ram',
'x-image-meta-min_disk', 'x-image-meta-owner',
'x-image-meta-store', 'x-image-meta-id',
'x-image-meta-protected', 'x-image-meta-deleted',
'x-image-meta-virtual_size']
GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD'
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size)
if chunk:
yield chunk
else:
break
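# Illustrative usage sketch (not part of the original module), kept as a
# comment so nothing executes on import; 'image.raw' is a hypothetical file:
#
#   with open('image.raw', 'rb') as fp:
#       for chunk in chunkiter(fp, chunk_size=4096):
#           pass  # each chunk is at most 4096 bytes long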
def cooperative_iter(iter):
"""
Return an iterator which schedules after each
iteration. This can prevent eventlet thread starvation.
:param iter: an iterator to wrap
"""
try:
for chunk in iter:
sleep(0)
yield chunk
except Exception as err:
with excutils.save_and_reraise_exception():
msg = _LE("Error: cooperative_iter exception %s") % err
LOG.error(msg)
def cooperative_read(fd):
"""
Wrap a file descriptor's read with a partial function which schedules
after each read. This can prevent eventlet thread starvation.
:param fd: a file descriptor to wrap
"""
def readfn(*args):
result = fd.read(*args)
sleep(0)
return result
return readfn
MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit
class CooperativeReader(object):
"""
An eventlet thread friendly class for reading in image data.
When accessing data either through the iterator or the read method
we perform a sleep to allow a co-operative yield. When there is more than
one image being uploaded/downloaded this prevents eventlet thread
starvation, ie allows all threads to be scheduled periodically rather than
having the same thread be continuously active.
"""
def __init__(self, fd):
"""
:param fd: Underlying image file object
"""
self.fd = fd
self.iterator = None
# NOTE(markwash): if the underlying supports read(), overwrite the
# default iterator-based implementation with cooperative_read which
# is more straightforward
if hasattr(fd, 'read'):
self.read = cooperative_read(fd)
else:
self.iterator = None
self.buffer = ''
self.position = 0
def read(self, length=None):
"""Return the requested amount of bytes, fetching the next chunk of
the underlying iterator when needed.
This is replaced with cooperative_read in __init__ if the underlying
fd already supports read().
"""
if length is None:
if len(self.buffer) - self.position > 0:
# if no length specified but some data exists in buffer,
# return that data and clear the buffer
result = self.buffer[self.position:]
self.buffer = ''
self.position = 0
return str(result)
else:
# otherwise read the next chunk from the underlying iterator
# and return it as a whole. Reset the buffer, as subsequent
# calls may specify the length
try:
if self.iterator is None:
self.iterator = self.__iter__()
                    return next(self.iterator)
except StopIteration:
return ''
finally:
self.buffer = ''
self.position = 0
else:
result = bytearray()
while len(result) < length:
if self.position < len(self.buffer):
to_read = length - len(result)
chunk = self.buffer[self.position:self.position + to_read]
result.extend(chunk)
# This check is here to prevent potential OOM issues if
# this code is called with unreasonably high values of read
# size. Currently it is only called from the HTTP clients
# of Glance backend stores, which use httplib for data
# streaming, which has readsize hardcoded to 8K, so this
                    # check should never fire. Regardless, it is still worth
                    # making the check, as the code may be reused somewhere
                    # else.
if len(result) >= MAX_COOP_READER_BUFFER_SIZE:
raise exception.LimitExceeded()
self.position += len(chunk)
else:
try:
if self.iterator is None:
self.iterator = self.__iter__()
                        self.buffer = next(self.iterator)
self.position = 0
except StopIteration:
self.buffer = ''
self.position = 0
return str(result)
return str(result)
def __iter__(self):
return cooperative_iter(self.fd.__iter__())
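# Illustrative usage sketch (not part of the original module), commented out;
# 'image.raw' is a hypothetical file. CooperativeReader wraps either a
# readable object or a plain iterator and yields to other eventlet threads
# between reads:
#
#   reader = CooperativeReader(open('image.raw', 'rb'))
#   first_block = reader.read(8192)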
class LimitingReader(object):
"""
Reader designed to fail when reading image data past the configured
allowable amount.
"""
def __init__(self, data, limit):
"""
:param data: Underlying image data object
:param limit: maximum number of bytes the reader should allow
"""
self.data = data
self.limit = limit
self.bytes_read = 0
def __iter__(self):
for chunk in self.data:
self.bytes_read += len(chunk)
if self.bytes_read > self.limit:
raise exception.ImageSizeLimitExceeded()
else:
yield chunk
def read(self, i):
result = self.data.read(i)
self.bytes_read += len(result)
if self.bytes_read > self.limit:
raise exception.ImageSizeLimitExceeded()
return result
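# Illustrative usage sketch (not part of the original module), commented out:
# both the iterator and read() paths raise ImageSizeLimitExceeded as soon as
# more than 'limit' bytes have been consumed.
#
#   limited = LimitingReader(open('image.raw', 'rb'), 1024 * 1024)
#   try:
#       for chunk in limited:
#           pass
#   except exception.ImageSizeLimitExceeded:
#       pass  # the source was larger than the 1 MiB limit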
def image_meta_to_http_headers(image_meta):
"""
Returns a set of image metadata into a dict
of HTTP headers that can be fed to either a Webob
Request object or an httplib.HTTP(S)Connection object
:param image_meta: Mapping of image metadata
"""
headers = {}
for k, v in image_meta.items():
if v is not None:
if k == 'properties':
for pk, pv in v.items():
if pv is not None:
headers["x-image-meta-property-%s"
% pk.lower()] = six.text_type(pv)
else:
headers["x-image-meta-%s" % k.lower()] = six.text_type(v)
return headers
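# Illustrative example (not part of the original module): core fields map to
# x-image-meta-* headers and entries under 'properties' map to
# x-image-meta-property-* headers, with all values coerced to text.
#
#   image_meta_to_http_headers({'name': 'cirros',
#                               'properties': {'os': 'linux'}})
#   # -> {'x-image-meta-name': u'cirros',
#   #     'x-image-meta-property-os': u'linux'}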
def get_image_meta_from_headers(response):
"""
Processes HTTP headers from a supplied response that
match the x-image-meta and x-image-meta-property and
returns a mapping of image metadata and properties
:param response: Response to process
"""
result = {}
properties = {}
if hasattr(response, 'getheaders'): # httplib.HTTPResponse
headers = response.getheaders()
else: # webob.Response
headers = response.headers.items()
for key, value in headers:
key = str(key.lower())
if key.startswith('x-image-meta-property-'):
field_name = key[len('x-image-meta-property-'):].replace('-', '_')
properties[field_name] = value or None
elif key.startswith('x-image-meta-'):
field_name = key[len('x-image-meta-'):].replace('-', '_')
if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS:
msg = _("Bad header: %(header_name)s") % {'header_name': key}
raise exc.HTTPBadRequest(msg, content_type="text/plain")
result[field_name] = value or None
result['properties'] = properties
for key, nullable in [('size', False), ('min_disk', False),
('min_ram', False), ('virtual_size', True)]:
if key in result:
try:
result[key] = int(result[key])
except ValueError:
if nullable and result[key] == str(None):
result[key] = None
else:
extra = (_("Cannot convert image %(key)s '%(value)s' "
"to an integer.")
% {'key': key, 'value': result[key]})
raise exception.InvalidParameterValue(value=result[key],
param=key,
extra_msg=extra)
            if result[key] is not None and result[key] < 0:
extra = _('Cannot be a negative value.')
raise exception.InvalidParameterValue(value=result[key],
param=key,
extra_msg=extra)
for key in ('is_public', 'deleted', 'protected'):
if key in result:
result[key] = strutils.bool_from_string(result[key])
return result
def create_mashup_dict(image_meta):
"""
Returns a dictionary-like mashup of the image core properties
and the image custom properties from given image metadata.
:param image_meta: metadata of image with core and custom properties
"""
def get_items():
for key, value in six.iteritems(image_meta):
if isinstance(value, dict):
for subkey, subvalue in six.iteritems(
create_mashup_dict(value)):
if subkey not in image_meta:
yield subkey, subvalue
else:
yield key, value
return dict(get_items())
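# Illustrative example (not part of the original module): nested dicts such
# as 'properties' are flattened into the top level, and a nested key never
# overwrites an existing top-level key. Note that the nested dict's own key
# is dropped from the result.
#
#   create_mashup_dict({'name': 'cirros',
#                       'properties': {'os': 'linux', 'name': 'ignored'}})
#   # -> {'name': 'cirros', 'os': 'linux'}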
def safe_mkdirs(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def safe_remove(path):
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
class PrettyTable(object):
"""Creates an ASCII art table for use in bin/glance
Example:
ID Name Size Hits
--- ----------------- ------------ -----
122 image 22 0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, width, just)
row_parts.append(row_part)
row_line = ' '.join(row_parts)
return row_line
@staticmethod
def _clip_and_justify(data, width, just):
# clip field to column width
clipped_data = str(data)[:width]
if just == 'r':
# right justify
justified = clipped_data.rjust(width)
else:
# left justify
justified = clipped_data.ljust(width)
return justified
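# Illustrative usage sketch (not part of the original module), commented out;
# the column widths and labels are arbitrary:
#
#   table = PrettyTable()
#   table.add_column(3, 'ID', 'r')
#   table.add_column(17, 'Name')
#   header = table.make_header()
#   row = table.make_row(122, 'image 22')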
def get_terminal_size():
def _get_terminal_size_posix():
import fcntl
import struct
import termios
height_width = None
try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HH', 0, 0)))
except Exception:
pass
if not height_width:
try:
p = subprocess.Popen(['stty', 'size'],
shell=False,
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'))
result = p.communicate()
if p.returncode == 0:
return tuple(int(x) for x in result[0].split())
except Exception:
pass
return height_width
def _get_terminal_size_win32():
try:
from ctypes import create_string_buffer
from ctypes import windll
handle = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
except Exception:
return None
if res:
import struct
unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = unpack_tmp
height = bottom - top + 1
width = right - left + 1
return (height, width)
else:
return None
def _get_terminal_size_unknownOS():
raise NotImplementedError
func = {'posix': _get_terminal_size_posix,
'win32': _get_terminal_size_win32}
height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()
if height_width is None:
raise exception.Invalid()
for i in height_width:
if not isinstance(i, int) or i <= 0:
raise exception.Invalid()
return height_width[0], height_width[1]
def mutating(func):
"""Decorator to enforce read-only logic"""
@functools.wraps(func)
def wrapped(self, req, *args, **kwargs):
if req.context.read_only:
msg = "Read-only access"
LOG.debug(msg)
raise exc.HTTPForbidden(msg, request=req,
content_type="text/plain")
return func(self, req, *args, **kwargs)
return wrapped
def setup_remote_pydev_debug(host, port):
error_msg = _LE('Error setting up the debug environment. Verify that the'
' option pydev_worker_debug_host is pointing to a valid '
'hostname or IP on which a pydev server is listening on'
' the port indicated by pydev_worker_debug_port.')
try:
try:
from pydev import pydevd
except ImportError:
import pydevd
pydevd.settrace(host,
port=port,
stdoutToServer=True,
stderrToServer=True)
return True
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(error_msg)
def validate_key_cert(key_file, cert_file):
try:
error_key_name = "private key"
error_filename = key_file
with open(key_file, 'r') as keyfile:
key_str = keyfile.read()
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)
error_key_name = "certificate"
error_filename = cert_file
with open(cert_file, 'r') as certfile:
cert_str = certfile.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
except IOError as ioe:
raise RuntimeError(_("There is a problem with your %(error_key_name)s "
"%(error_filename)s. Please verify it."
" Error: %(ioe)s") %
{'error_key_name': error_key_name,
'error_filename': error_filename,
'ioe': ioe})
except crypto.Error as ce:
raise RuntimeError(_("There is a problem with your %(error_key_name)s "
"%(error_filename)s. Please verify it. OpenSSL"
" error: %(ce)s") %
{'error_key_name': error_key_name,
'error_filename': error_filename,
'ce': ce})
try:
data = str(uuid.uuid4())
digest = CONF.digest_algorithm
if digest == 'sha1':
LOG.warn('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
' state that the SHA-1 is not suitable for'
' general-purpose digital signature applications (as'
' specified in FIPS 186-3) that require 112 bits of'
' security. The default value is sha1 in Kilo for a'
' smooth upgrade process, and it will be updated'
' with sha256 in next release(L).')
out = crypto.sign(key, data, digest)
crypto.verify(cert, out, data, digest)
except crypto.Error as ce:
raise RuntimeError(_("There is a problem with your key pair. "
"Please verify that cert %(cert_file)s and "
"key %(key_file)s belong together. OpenSSL "
"error %(ce)s") % {'cert_file': cert_file,
'key_file': key_file,
'ce': ce})
def get_test_suite_socket():
global GLANCE_TEST_SOCKET_FD_STR
if GLANCE_TEST_SOCKET_FD_STR in os.environ:
fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR])
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
if six.PY2:
sock = socket.SocketType(_sock=sock)
sock.listen(CONF.backlog)
del os.environ[GLANCE_TEST_SOCKET_FD_STR]
os.close(fd)
return sock
return None
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
For our purposes, a UUID is a canonical form string:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
try:
return str(uuid.UUID(val)) == val
except (TypeError, ValueError, AttributeError):
return False
def is_valid_hostname(hostname):
"""Verify whether a hostname (not an FQDN) is valid."""
return re.match('^[a-zA-Z0-9-]+$', hostname) is not None
def is_valid_fqdn(fqdn):
"""Verify whether a host is a valid FQDN."""
    return re.match(r'^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', fqdn) is not None
def parse_valid_host_port(host_port):
"""
Given a "host:port" string, attempts to parse it as intelligently as
possible to determine if it is valid. This includes IPv6 [host]:port form,
IPv4 ip:port form, and hostname:port or fqdn:port form.
Invalid inputs will raise a ValueError, while valid inputs will return
a (host, port) tuple where the port will always be of type int.
"""
try:
try:
host, port = netutils.parse_host_port(host_port)
except Exception:
raise ValueError(_('Host and port "%s" is not valid.') % host_port)
if not netutils.is_valid_port(port):
raise ValueError(_('Port "%s" is not valid.') % port)
# First check for valid IPv6 and IPv4 addresses, then a generic
# hostname. Failing those, if the host includes a period, then this
# should pass a very generic FQDN check. The FQDN check for letters at
# the tail end will weed out any hilariously absurd IPv4 addresses.
if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or
is_valid_hostname(host) or is_valid_fqdn(host)):
raise ValueError(_('Host "%s" is not valid.') % host)
except Exception as ex:
raise ValueError(_('%s '
'Please specify a host:port pair, where host is an '
'IPv4 address, IPv6 address, hostname, or FQDN. If '
'using an IPv6 address, enclose it in brackets '
'separately from the port (i.e., '
'"[fe80::a:b:c]:9876").') % ex)
return (host, int(port))
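# Illustrative examples (not part of the original module), commented out:
#
#   parse_valid_host_port('127.0.0.1:9292')      # -> ('127.0.0.1', 9292)
#   parse_valid_host_port('[fe80::a:b:c]:9876')  # -> ('fe80::a:b:c', 9876)
#   parse_valid_host_port('no-port-given')       # raises ValueError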
try:
REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
# UCS-2 build case
REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
def no_4byte_params(f):
"""
Checks that no 4 byte unicode characters are allowed
in dicts' keys/values and string's parameters
"""
def wrapper(*args, **kwargs):
def _is_match(some_str):
return (isinstance(some_str, six.text_type) and
REGEX_4BYTE_UNICODE.findall(some_str) != [])
def _check_dict(data_dict):
# a dict of dicts has to be checked recursively
for key, value in six.iteritems(data_dict):
if isinstance(value, dict):
_check_dict(value)
else:
if _is_match(key):
msg = _("Property names can't contain 4 byte unicode.")
raise exception.Invalid(msg)
if _is_match(value):
msg = (_("%s can't contain 4 byte unicode characters.")
% key.title())
raise exception.Invalid(msg)
for data_dict in [arg for arg in args if isinstance(arg, dict)]:
_check_dict(data_dict)
# now check args for str values
for arg in args:
if _is_match(arg):
msg = _("Param values can't contain 4 byte unicode.")
raise exception.Invalid(msg)
# check kwargs as well, as params are passed as kwargs via
# registry calls
_check_dict(kwargs)
return f(*args, **kwargs)
return wrapper
def validate_mysql_int(*args, **kwargs):
"""
Make sure that all arguments are less than 2 ** 31 - 1.
This limitation is introduced because mysql stores INT in 4 bytes.
If the validation fails for some argument, exception.Invalid is raised with
appropriate information.
"""
max_int = (2 ** 31) - 1
for param in args:
if param > max_int:
msg = _("Value %(value)d out of range, "
"must not exceed %(max)d") % {"value": param,
"max": max_int}
raise exception.Invalid(msg)
for param_str in kwargs:
param = kwargs.get(param_str)
if param and param > max_int:
msg = _("'%(param)s' value out of range, "
"must not exceed %(max)d") % {"param": param_str,
"max": max_int}
raise exception.Invalid(msg)
def stash_conf_values():
"""
Make a copy of some of the current global CONF's settings.
Allows determining if any of these values have changed
when the config is reloaded.
"""
conf = {}
conf['bind_host'] = CONF.bind_host
conf['bind_port'] = CONF.bind_port
    conf['tcp_keepidle'] = CONF.tcp_keepidle
conf['backlog'] = CONF.backlog
conf['key_file'] = CONF.key_file
conf['cert_file'] = CONF.cert_file
return conf
| apache-2.0 | 29,378,260,714,325,270 | 34.078167 | 79 | 0.552059 | false |
keitaroyam/yamtbx | cctbx_progs/dano_vs_d.py | 1 | 1364 | """
Usage:
phenix.python dano_vs_d.py your.sca 20
"""
import iotbx.file_reader
from cctbx.array_family import flex
def run(hklin, n_bins):
for array in iotbx.file_reader.any_file(hklin).file_server.miller_arrays:
# skip if not anomalous intensity data
if not (array.is_xray_intensity_array() and array.anomalous_flag()):
print "skipping", array.info()
continue
# We assume that data is already merged
assert array.is_unique_set_under_symmetry()
# take anomalous differences
dano = array.anomalous_differences()
# process with binning
dano.setup_binner(n_bins=n_bins)
binner = dano.binner()
print "Array:", array.info()
print " dmax dmin nrefs dano"
for i_bin in binner.range_used():
# selection for this bin. sel is flex.bool object (list of True of False)
sel = binner.selection(i_bin)
# take mean of absolute value of anomalous differences in a bin
bin_mean = flex.mean(flex.abs(dano.select(sel).data()))
d_max, d_min = binner.bin_d_range(i_bin)
print "%7.2f %7.2f %6d %.2f" % (d_max, d_min, binner.count(i_bin), bin_mean)
# run()
if __name__ == "__main__":
import sys
hklin = sys.argv[1]
n_bins = int(sys.argv[2])
run(hklin, n_bins)
| bsd-3-clause | 3,833,057,630,944,875,500 | 31.47619 | 88 | 0.60044 | false |
jamespcole/home-assistant | homeassistant/components/eight_sleep/binary_sensor.py | 1 | 1832 | """Support for Eight Sleep binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import CONF_BINARY_SENSORS, DATA_EIGHT, NAME_MAP, EightSleepHeatEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['eight_sleep']
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the eight sleep binary sensor."""
if discovery_info is None:
return
name = 'Eight'
sensors = discovery_info[CONF_BINARY_SENSORS]
eight = hass.data[DATA_EIGHT]
all_sensors = []
for sensor in sensors:
all_sensors.append(EightHeatSensor(name, eight, sensor))
async_add_entities(all_sensors, True)
class EightHeatSensor(EightSleepHeatEntity, BinarySensorDevice):
"""Representation of a Eight Sleep heat-based sensor."""
def __init__(self, name, eight, sensor):
"""Initialize the sensor."""
super().__init__(eight)
self._sensor = sensor
self._mapped_name = NAME_MAP.get(self._sensor, self._sensor)
self._name = '{} {}'.format(name, self._mapped_name)
self._state = None
self._side = self._sensor.split('_')[0]
self._userid = self._eight.fetch_userid(self._side)
self._usrobj = self._eight.users[self._userid]
_LOGGER.debug("Presence Sensor: %s, Side: %s, User: %s",
self._sensor, self._side, self._userid)
@property
def name(self):
"""Return the name of the sensor, if any."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
async def async_update(self):
"""Retrieve latest state."""
self._state = self._usrobj.bed_presence
| apache-2.0 | -4,521,372,871,055,284,700 | 28.548387 | 77 | 0.622817 | false |
iw3hxn/LibrERP | purchase_order_version/models/inherit_purchase_order_line.py | 1 | 2019 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2014 Didotech srl (<http://www.didotech.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class purchase_order_line(orm.Model):
_inherit = "purchase.order.line"
_columns = {
# 'active': fields.related('order_id', 'active', type='boolean', string='Active', store=False),
'purchase_line_copy_id': fields.many2one('purchase.order.line', 'Orig version', required=False, readonly=False),
}
def copy_data(self, cr, uid, line_id, defaults=None, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
defaults = defaults or {}
if context.get('versioning', False):
defaults['purchase_line_copy_id'] = line_id
return super(purchase_order_line, self).copy_data(cr, uid, line_id, defaults, context)
def copy(self, cr, uid, line_id, default, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
default = default or {}
if context.get('versioning', False):
default['purchase_line_copy_id'] = line_id
return super(purchase_order_line, self).copy(cr, uid, line_id, default, context)
| agpl-3.0 | 2,852,159,605,738,768,400 | 43.866667 | 120 | 0.616642 | false |
Tealium/nagios | files/default/plugins/check_mongodb_backup.py | 1 | 6143 | #!/usr/bin/env python
desc = """
Checks the status of the most recent MongoDB backup or, with the --snap option,
checks that the snapshots for the most recent backup were completed.
"""
import kazoo
from kazoo.client import KazooClient
from kazoo.client import KazooState
import yaml
import argparse
import time
from datetime import datetime
from datetime import timedelta
class Status(dict):
def __init__(self, name, code, msg):
self.name = name
self.code = code
self.msg = msg
def exit(self):
print "%s - %s" % (self.name, self.msg)
raise SystemExit(self.code)
class OK(Status):
def __init__(self,msg):
super(OK,self).__init__('OK', 0, msg)
class WARNING(Status):
def __init__(self,msg):
super(WARNING,self).__init__('WARNING', 1, msg)
class CRITICAL(Status):
def __init__(self,msg):
super(CRITICAL,self).__init__('CRITICAL', 2, msg)
class UNKNOWN(Status):
def __init__(self,msg):
super(UNKNOWN,self).__init__('UNKNOWN', 3, msg)
def state_listener(state):
    # Bail out with the plugin's UNKNOWN status if the zookeeper connection degrades
    if state == KazooState.LOST:
        UNKNOWN("zookeeper connection state was lost").exit()
    elif state == KazooState.SUSPENDED:
        UNKNOWN("zookeeper connection state was suspended").exit()
    elif state == KazooState.CONNECTED:
        pass
def create_date_path(days_ago):
when = datetime.utcnow()
if days_ago:
delta = timedelta(days=days_ago)
when = when - delta
return when.strftime("/%Y/%m/%d")
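# Illustrative example (not part of the original plugin), commented out: with
# a UTC "now" of 2014-06-03, create_date_path() returns '/2014/06/03' and
# create_date_path(1) returns '/2014/06/02', matching the znode layout that
# the lookup functions below expect.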
def look4abort(zk, days_ago=None):
day_node = args.prefix.rstrip('/') + '/' + args.env.rstrip('/') + create_date_path(days_ago)
if zk.exists(day_node):
hours = zk.retry(zk.get_children, day_node)
if len(hours):
hours.sort()
abort_node = day_node + '/' + str(hours[-1]) + '/ABORT'
if zk.exists(abort_node):
excuse = zk.retry(zk.get, abort_node)
return CRITICAL("found backup abort status: %s" % excuse[0])
else:
return OK('no abort during most recent backup')
else:
# Apparently no backups yet today. Let's check yesterday.
# Let's not explore infinity though...
if days_ago: return WARNING('found no backup info for past two days')
return look4abort(zk, 1)
else:
# Apparently no backups yet today. Let's check yesterday.
# Let's not explore infinity though...
if days_ago: return WARNING('found no backup info for past two days')
return look4abort(zk, 1)
def look4snaps(zk, days_ago=None):
import boto
import boto.ec2
import boto.utils
import chef
instance_id = boto.utils.get_instance_metadata()['instance-id']
if args.region:
region_spec = args.region
else:
region_spec = boto.utils.get_instance_identity()['document']['region']
chef_api = chef.autoconfigure()
node = chef.Node(instance_id)
my_app_env = node.attributes['app_environment']
bag = chef.DataBag('aws')
item = bag[my_app_env]
key_id = str(item['aws_access_key_id'])
key_secret = str(item['aws_secret_access_key'])
region = boto.ec2.get_region(region_spec, aws_access_key_id=key_id, aws_secret_access_key=key_secret)
conn = region.connect(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
day_node = args.prefix.rstrip('/') + '/' + args.env.rstrip('/') + create_date_path(days_ago)
if zk.exists(day_node):
hours = zk.retry(zk.get_children, day_node)
if len(hours):
hours.sort()
shards_parent_node = day_node + '/' + str(hours[-1]) + '/mongodb_shard_server'
if zk.exists(shards_parent_node):
shard_list = zk.retry(zk.get_children, shards_parent_node)
if len(shard_list) > 0:
msg = ''
err = 0
for shard in shard_list:
shard_data = zk.retry(zk.get, shards_parent_node + '/' + shard)
snaps = conn.get_all_snapshots(eval(shard_data[0]))
msg = msg + ", %s [" % shard
snap_text = ''
for snap in snaps:
if snap.status == 'error': err = 1
snap_text = snap_text + ", %s (%s)" % (str(snap), snap.status)
msg = msg + snap_text.strip(', ') + ']'
if err:
return CRITICAL(msg.strip(', '))
return OK(msg.strip(', '))
# Apparently no backups yet today. Let's check yesterday.
# Let's not explore infinity though...
if days_ago: return WARNING('found no backup info for past two days')
return look4snaps(zk, 1)
if __name__ == '__main__':
gargle = argparse.ArgumentParser(prog = "check_mongodb_backup", description=desc,
usage='%(prog)s [options]',
formatter_class = argparse.RawDescriptionHelpFormatter)
gargle.add_argument('--prefix', dest="prefix", metavar="<path_prefix>", default='/backup/mongodb_cluster/',
help='ZooKeeper path prefix (default: /backup/mongodb_cluster/)')
gargle.add_argument('--cluster', dest="env", metavar="<cluster_id>", default='production',
help='MongoDB cluster name (default: production)')
gargle.add_argument('--config', dest='yaml', metavar="<config_file>",
help='ZooKeeper server list file (default: /etc/zookeeper/server_list.yml)',
default='/etc/zookeeper/server_list.yml')
gargle.add_argument('--region', metavar="<aws-region-spec>",
help='AWS region where the snapshots are stored (default: region of host instance)')
gargle.add_argument('--snaps', action='store_true',
help='check snapshots from most recent backup (default: False)')
args = gargle.parse_args()
try:
y = yaml.safe_load(open(args.yaml))
servers = ','.join("%s:%s" % (s['host'],s['port']) for s in y['zookeepers'])
zk = KazooClient(hosts=servers)
zk.start()
zk.add_listener(state_listener)
if args.snaps:
status = look4snaps(zk)
else:
status = look4abort(zk)
zk.remove_listener(state_listener)
zk.stop()
status.exit()
except Exception as e:
UNKNOWN("Error: %s" % e).exit()
| apache-2.0 | 3,951,950,523,133,966,300 | 27.178899 | 110 | 0.608335 | false |
fkie/rosrepo | src/rosrepo/util.py | 1 | 6206 | # coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import fcntl
import termios
import struct
import multiprocessing
import signal
from tempfile import mkstemp
from subprocess import Popen, PIPE
from yaml import load as yaml_load_impl, dump as yaml_dump_impl, YAMLError
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
def yaml_load(stream, Loader=SafeLoader):
return yaml_load_impl(stream, Loader=Loader)
def yaml_dump(data, stream=None, Dumper=SafeDumper, **kwargs):
return yaml_dump_impl(data, stream=stream, Dumper=Dumper, **kwargs)
class NamedTuple(object):
__slots__ = ()
def __init__(self, *args, **kwargs):
slots = self.__slots__
for k in slots:
setattr(self, k, kwargs.get(k))
if args:
for k, v in zip(slots, args):
setattr(self, k, v)
def __str__(self):
clsname = self.__class__.__name__
values = ", ".join("%s=%r" % (k, getattr(self, k)) for k in self.__slots__)
return "%s(%s)" % (clsname, values)
__repr__ = __str__
def __getitem__(self, item):
return getattr(self, self.__slots__[item])
def __setitem__(self, item, value):
return setattr(self, self.__slots__[item], value)
def __len__(self):
return len(self.__slots__)
try:
iteritems = dict.iteritems
except AttributeError:
iteritems = dict.items
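# Illustrative sketch (not part of the original module), commented out:
# subclasses only declare __slots__; keyword arguments fill the slots first,
# then positional arguments override them in slot order.
#
#   class Point(NamedTuple):
#       __slots__ = ('x', 'y')
#
#   p = Point(1, y=2)
#   (p.x, p[1], len(p))  # -> (1, 2, 2)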
class UserError(RuntimeError):
pass
def is_deprecated_package(manifest):
deprecated = next((e for e in manifest.exports if e.tagname == "deprecated"), None)
return deprecated is not None
def deprecated_package_info(manifest):
deprecated = next((e for e in manifest.exports if e.tagname == "deprecated"), None)
return deprecated.content if deprecated is not None else None
def path_has_prefix(path, prefix):
p = os.path.normpath(path)
q = os.path.normpath(prefix)
if p == q:
return True
head, tail = os.path.split(p)
while tail != "":
if head == q:
return True
head, tail = os.path.split(head)
return False
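# Illustrative examples (not part of the original module), commented out:
# the comparison works on whole path components, not string prefixes.
#
#   path_has_prefix('/a/b/c', '/a/b')  # -> True
#   path_has_prefix('/a/bc', '/a/b')   # -> False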
def has_package_path(obj, paths):
for path in paths:
if path_has_prefix(path, obj.workspace_path if hasattr(obj, "workspace_path") else obj):
return True
return False
def env_path_list_contains(path_list, path):
if path_list not in os.environ:
return False
paths = os.environ[path_list].split(os.pathsep)
return path in paths
def makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
def write_atomic(filepath, data, mode=0o644, ignore_fail=False):
try:
fd, filepath_tmp = mkstemp(prefix=os.path.basename(filepath) + ".tmp.", dir=os.path.dirname(filepath))
os.fchmod(fd, mode)
with os.fdopen(fd, "wb") as f:
f.write(data)
os.rename(filepath_tmp, filepath)
except (IOError, OSError):
if not ignore_fail:
raise
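# Illustrative usage sketch (not part of the original module), commented out:
# because the data is written to a temp file in the same directory and then
# rename()d into place, readers observe either the old or the new content,
# never a torn write. The path is hypothetical.
#
#   write_atomic('/tmp/rosrepo.cache', b'payload', mode=0o600)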
def isatty(fd):
return hasattr(fd, "isatty") and fd.isatty()
_cached_terminal_size = None
def get_terminal_size():
global _cached_terminal_size
if _cached_terminal_size is not None:
return _cached_terminal_size
try:
with open(os.ctermid(), "rb") as f:
cr = struct.unpack('hh', fcntl.ioctl(f.fileno(), termios.TIOCGWINSZ, '1234'))
except (IOError, struct.error):
raise OSError("Cannot determine terminal size")
_cached_terminal_size = int(cr[1]), int(cr[0])
return _cached_terminal_size
def find_program(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
fpath = path.strip('"')
candidate = os.path.join(fpath, fname)
if is_exe(candidate):
return candidate
return None
def getmtime(path):
return os.path.getmtime(path) if os.path.exists(path) else 0
def call_process(args, bufsize=0, stdin=None, stdout=None, stderr=None, cwd=None, env=None, input_data=None):
p = Popen(args, bufsize=bufsize, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, env=env)
if stdin == PIPE or stdout == PIPE or stderr == PIPE:
stdoutdata, stderrdata = p.communicate(input_data.encode("UTF-8") if input_data else None)
return p.returncode, stdoutdata.decode("UTF-8") if stdoutdata is not None else None, stderrdata.decode("UTF-8") if stderrdata is not None else None
else:
p.wait()
return p.returncode
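# Illustrative usage sketch (not part of the original module), commented out:
# with PIPE the helper returns (returncode, stdout, stderr); otherwise it
# returns only the return code. The git commands are hypothetical.
#
#   rc, out, err = call_process(['git', 'status'], stdout=PIPE, stderr=PIPE)
#   rc = call_process(['git', 'fetch'])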
def create_multiprocess_manager():
return multiprocessing.Manager()
def _worker_init(worker_init, worker_init_args):
signal.signal(signal.SIGINT, signal.SIG_IGN)
if worker_init is not None:
worker_init(*worker_init_args)
def run_multiprocess_workers(worker, workload, worker_init=None, worker_init_args=(), jobs=None, timeout=None):
if not workload:
return []
if timeout is None:
timeout = 999999999 # Workaround for KeyboardInterrupt
pool = multiprocessing.Pool(processes=jobs, initializer=_worker_init, initargs=(worker_init, worker_init_args))
try:
result_obj = pool.map_async(worker, workload)
pool.close()
result = result_obj.get(timeout=timeout)
return result
except:
pool.terminate()
raise
finally:
pool.join()
| apache-2.0 | 8,527,631,097,278,525,000 | 28.131455 | 155 | 0.652538 | false |
Hearen/OnceServer | pool_management/bn-xend-core/xend/BNVMAPI.py | 1 | 374814 | import traceback
import inspect
import os
import Queue
import string
import sys
import threading
import time
import xmlrpclib
import socket
import struct
import copy
import re
import XendDomain, XendDomainInfo, XendNode, XendDmesg, XendConfig
import XendLogging, XendTaskManager, XendAPIStore, XendIOController
from xen.xend.BNPoolAPI import BNPoolAPI
from xen.util.xmlrpcclient import ServerProxy
from xen.xend import uuid as genuuid
from XendLogging import log
from XendNetwork import XendNetwork
from XendError import *
from XendTask import XendTask
from xen.util import ip as getip
from xen.util import Netctl
from xen.xend import sxp
from xen.xend.XendCPUPool import XendCPUPool
from XendAuthSessions import instance as auth_manager
from xen.util.xmlrpclib2 import stringify
from xen.util import xsconstants
from xen.util.xpopen import xPopen3
from xen.xend.XendConstants import DOM_STATE_HALTED, DOM_STATE_PAUSED
from xen.xend.XendConstants import DOM_STATE_RUNNING, DOM_STATE_SUSPENDED
from xen.xend.XendConstants import DOM_STATE_SHUTDOWN, DOM_STATE_UNKNOWN
from xen.xend.XendConstants import DOM_STATE_CRASHED, HVM_PARAM_ACPI_S_STATE
from xen.xend.XendConstants import VDI_DEFAULT_STRUCT, VDI_DEFAULT_SR_TYPE, VDI_DEFAULT_DIR
from xen.xend.XendConstants import FAKE_MEDIA_PATH, FAKE_MEDIA_NAME
from xen.xend.XendConstants import CD_VBD_DEFAULT_STRUCT, DEFAULT_HA_PATH
from xen.xend.XendConstants import CACHED_CONFIG_FILE
from XendAPIConstants import *
from xen.xend.ConfigUtil import getConfigVar
GB = 1024 * 1024 * 1024
if getConfigVar('compute', 'VM', 'disk_limit'):
DISK_LIMIT = int(getConfigVar('compute', 'VM', 'disk_limit'))
else:
DISK_LIMIT = 6
if getConfigVar('compute', 'VM', 'interface_limit'):
INTERFACE_LIMIT = int(getConfigVar('compute', 'VM', 'interface_limit'))
else:
INTERFACE_LIMIT = 6
if getConfigVar('virtualization', 'DOM0', 'reserved_mem_gb'):
RESERVED_MEM = int(getConfigVar('virtualization', 'DOM0', 'reserved_mem_gb')) * GB
else:
RESERVED_MEM = 4 * GB
try:
set
except NameError:
from sets import Set as set
reload(sys)
sys.setdefaultencoding("utf-8")
DOM0_UUID = "00000000-0000-0000-0000-000000000000"
argcounts = {}
def doexec(args, inputtext=None):
"""Execute a subprocess, then return its return code, stdout and stderr"""
proc = xPopen3(args, True)
if inputtext != None:
proc.tochild.write(inputtext)
stdout = proc.fromchild
stderr = proc.childerr
rc = proc.wait()
return (rc, stdout, stderr)
# ------------------------------------------
# Utility Methods for Xen API Implementation
# ------------------------------------------
def xen_api_success(value):
"""Wraps a return value in XenAPI format."""
if value is None:
s = ''
else:
s = stringify(value)
return {"Status": "Success", "Value": s}
def xen_api_success_void():
"""Return success, but caller expects no return value."""
return xen_api_success("")
def xen_api_error(error):
"""Wraps an error value in XenAPI format."""
if type(error) == tuple:
error = list(error)
if type(error) != list:
error = [error]
if len(error) == 0:
error = ['INTERNAL_ERROR', 'Empty list given to xen_api_error']
return { "Status": "Failure",
"ErrorDescription": [str(x) for x in error] }
def xen_rpc_call(ip, method, *args):
"""wrap rpc call to a remote host"""
try:
if not ip:
return xen_api_error("Invalid ip for rpc call")
# create
proxy = ServerProxy("http://" + ip + ":9363/")
# login
response = proxy.session.login('root')
if cmp(response['Status'], 'Failure') == 0:
log.exception(response['ErrorDescription'])
return xen_api_error(response['ErrorDescription'])
session_ref = response['Value']
        # execute
method_parts = method.split('_')
method_class = method_parts[0]
method_name = '_'.join(method_parts[1:])
if method.find("host_metrics") == 0:
method_class = "host_metrics"
method_name = '_'.join(method_parts[2:])
#log.debug(method_class)
#log.debug(method_name)
if method_class.find("Async") == 0:
method_class = method_class.split(".")[1]
response = proxy.__getattr__("Async").__getattr__(method_class).__getattr__(method_name)(session_ref, *args)
else:
response = proxy.__getattr__(method_class).__getattr__(method_name)(session_ref, *args)
if cmp(response['Status'], 'Failure') == 0:
log.exception(response['ErrorDescription'])
return xen_api_error(response['ErrorDescription'])
# result
return response
except socket.error:
return xen_api_error('socket error')
def xen_api_todo():
"""Temporary method to make sure we track down all the TODOs"""
return {"Status": "Error", "ErrorDescription": XEND_ERROR_TODO}
def now():
return datetime()
def datetime(when = None):
"""Marshall the given time as a Xen-API DateTime.
    @param when: The time in question, given as seconds since the epoch, UTC.
May be None, in which case the current time is used.
"""
if when is None:
return xmlrpclib.DateTime(time.gmtime())
else:
return xmlrpclib.DateTime(time.gmtime(when))
# -----------------------------
# Bridge to Legacy XM API calls
# -----------------------------
def do_vm_func(fn_name, vm_ref, *args, **kwargs):
"""Helper wrapper func to abstract away from repetitive code.
@param fn_name: function name for XendDomain instance
@type fn_name: string
@param vm_ref: vm_ref
@type vm_ref: string
@param *args: more arguments
@type *args: tuple
"""
try:
xendom = XendDomain.instance()
fn = getattr(xendom, fn_name)
xendom.do_legacy_api_with_uuid(fn, vm_ref, *args, **kwargs)
return xen_api_success_void()
except VMBadState, exn:
return xen_api_error(['VM_BAD_POWER_STATE', vm_ref, exn.expected,
exn.actual])
# ---------------------------------------------------
# Event dispatch
# ---------------------------------------------------
EVENT_QUEUE_LENGTH = 50
event_registrations = {}
def event_register(session, reg_classes):
if session not in event_registrations:
event_registrations[session] = {
'classes' : set(),
'queue' : Queue.Queue(EVENT_QUEUE_LENGTH),
'next-id' : 1
}
if not reg_classes:
reg_classes = classes
sessionclasses = event_registrations[session]['classes']
if hasattr(sessionclasses, 'union_update'):
sessionclasses.union_update(reg_classes)
else:
sessionclasses.update(reg_classes)
def event_unregister(session, unreg_classes):
if session not in event_registrations:
return
if unreg_classes:
event_registrations[session]['classes'].intersection_update(
unreg_classes)
if len(event_registrations[session]['classes']) == 0:
del event_registrations[session]
else:
del event_registrations[session]
def event_next(session):
if session not in event_registrations:
return xen_api_error(['SESSION_NOT_REGISTERED', session])
queue = event_registrations[session]['queue']
events = [queue.get()]
try:
while True:
events.append(queue.get(False))
except Queue.Empty:
pass
return xen_api_success(events)
def _ctor_event_dispatch(xenapi, ctor, api_cls, session, args):
result = ctor(xenapi, session, *args)
if result['Status'] == 'Success':
ref = result['Value']
event_dispatch('add', api_cls, ref, '')
return result
def _dtor_event_dispatch(xenapi, dtor, api_cls, session, ref, args):
result = dtor(xenapi, session, ref, *args)
if result['Status'] == 'Success':
event_dispatch('del', api_cls, ref, '')
return result
def _setter_event_dispatch(xenapi, setter, api_cls, attr_name, session, ref,
args):
result = setter(xenapi, session, ref, *args)
if result['Status'] == 'Success':
event_dispatch('mod', api_cls, ref, attr_name)
return result
def event_dispatch(operation, api_cls, ref, attr_name):
assert operation in ['add', 'del', 'mod']
event = {
'timestamp' : now(),
'class' : api_cls,
'operation' : operation,
'ref' : ref,
'obj_uuid' : ref,
'field' : attr_name,
}
for reg in event_registrations.values():
if api_cls in reg['classes']:
event['id'] = reg['next-id']
reg['next-id'] += 1
reg['queue'].put(event)
# ---------------------------------------------------
# Python Method Decorators for input value validation
# ---------------------------------------------------
def trace(func, api_name=''):
"""Decorator to trace XMLRPC Xen API methods.
@param func: function with any parameters
@param api_name: name of the api call for debugging.
"""
if hasattr(func, 'api'):
api_name = func.api
def trace_func(self, *args, **kwargs):
log.debug('%s: %s' % (api_name, args))
return func(self, *args, **kwargs)
trace_func.api = api_name
return trace_func
def catch_typeerror(func):
"""Decorator to catch any TypeErrors and translate them into Xen-API
errors.
@param func: function with params: (self, ...)
@rtype: callable object
"""
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except TypeError, exn:
#log.exception('catch_typeerror')
if hasattr(func, 'api') and func.api in argcounts:
# Assume that if the argument count was wrong and if the
# exception was thrown inside this file, then it is due to an
# invalid call from the client, otherwise it's an internal
# error (which will be handled further up).
expected = argcounts[func.api]
actual = len(args) + len(kwargs)
if expected != actual:
tb = sys.exc_info()[2]
try:
sourcefile = traceback.extract_tb(tb)[-1][0]
if sourcefile == inspect.getsourcefile(BNVMAPI):
return xen_api_error(
['MESSAGE_PARAMETER_COUNT_MISMATCH',
func.api, expected, actual])
finally:
del tb
raise
except XendAPIError, exn:
return xen_api_error(exn.get_api_error())
return f
def session_required(func):
"""Decorator to verify if session is valid before calling method.
@param func: function with params: (self, session, ...)
@rtype: callable object
"""
def check_session(self, session, *args, **kwargs):
if auth_manager().is_session_valid(session) or cmp(session, "SessionForTest") == 0:
return func(self, session, *args, **kwargs)
else:
return xen_api_error(['SESSION_INVALID', session])
return check_session
def _is_valid_ref(ref, validator):
return type(ref) == str and validator(ref)
def _check_ref(validator, clas, func, api, session, ref, *args, **kwargs):
# if _is_valid_ref(ref, validator):
return func(api, session, ref, *args, **kwargs)
# else:
# return xen_api_error(['HANDLE_INVALID', clas, ref])
def _check_vm(validator, clas, func, api, session, ref, *args, **kwargs):
# for host_ref in BNPoolAPI._host_structs.keys():
# if BNPoolAPI._host_structs[host_ref]['VMs'].has_key(ref):
if BNPoolAPI.check_vm(ref):
return func(api, session, ref, *args, **kwargs)
return xen_api_error(['VM_NOT_FOUND', clas, ref])
def _check_console(validator, clas, func, api, session, ref, *args, **kwargs):
#if BNPoolAPI._consoles_to_VM.has_key(ref):
return func(api, session, ref, *args, **kwargs)
#else:
return xen_api_error(['HANDLE_INVALID', clas, ref])
def valid_object(class_name):
"""Decorator to verify if object is valid before calling
method.
@param func: function with params: (self, session, pif_ref)
@rtype: callable object
"""
return lambda func: \
lambda *args, **kwargs: \
_check_ref(lambda r: \
XendAPIStore.get(r, class_name) is not None,
class_name, func, *args, **kwargs)
def valid_task(func):
"""Decorator to verify if task_ref is valid before calling
method.
@param func: function with params: (self, session, task_ref)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(XendTaskManager.get_task,
'task', func, *args, **kwargs)
def valid_vm(func):
"""Decorator to verify if vm_ref is valid before calling method.
@param func: function with params: (self, session, vm_ref, ...)
@rtype: callable object
"""
return lambda * args, **kwargs: \
_check_vm(XendDomain.instance().is_valid_vm,
'VM', func, *args, **kwargs)
def valid_vbd(func):
"""Decorator to verify if vbd_ref is valid before calling method.
@param func: function with params: (self, session, vbd_ref, ...)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(lambda r: XendDomain.instance().is_valid_dev('vbd', r),
'VBD', func, *args, **kwargs)
def valid_vbd_metrics(func):
"""Decorator to verify if ref is valid before calling method.
@param func: function with params: (self, session, ref, ...)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(lambda r: XendDomain.instance().is_valid_dev('vbd', r),
'VBD_metrics', func, *args, **kwargs)
def valid_vif(func):
"""Decorator to verify if vif_ref is valid before calling method.
@param func: function with params: (self, session, vif_ref, ...)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(lambda r: XendDomain.instance().is_valid_dev('vif', r),
'VIF', func, *args, **kwargs)
def valid_vif_metrics(func):
"""Decorator to verify if ref is valid before calling method.
@param func: function with params: (self, session, ref, ...)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(lambda r: XendDomain.instance().is_valid_dev('vif', r),
'VIF_metrics', func, *args, **kwargs)
def valid_console(func):
"""Decorator to verify if console_ref is valid before calling method.
@param func: function with params: (self, session, console_ref, ...)
@rtype: callable object
"""
return lambda * args, **kwargs: \
_check_console(lambda r: XendDomain.instance().is_valid_dev('console',
r),
'console', func, *args, **kwargs)
classes = {
'session' : None,
'VM' : valid_vm,
'VBD' : valid_vbd,
'VBD_metrics' : valid_vbd_metrics,
'VIF' : valid_vif,
'VIF_metrics' : valid_vif_metrics,
'console' : valid_console,
'task' : valid_task,
}
def singleton(cls, *args, **kw):
instances = {}
def _singleton(*args, **kw):
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
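# Illustrative sketch (not part of the original module), commented out:
# decorating a class with @singleton makes every construction return the
# same shared instance.
#
#   @singleton
#   class Registry(object):
#       pass
#
#   Registry() is Registry()  # -> True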
@singleton
class BNVMAPI(object):
__decorated__ = False
__init_lock__ = threading.Lock()
__vm_clone_lock__ = threading.Lock()
__vm_change_host_lock__ = threading.Lock()
__set_passwd_lock__ = threading.Lock()
__vbd_lock__ = threading.Lock()
def __new__(cls, *args, **kwds):
""" Override __new__ to decorate the class only once.
Lock to make sure the classes are not decorated twice.
"""
cls.__init_lock__.acquire()
try:
if not cls.__decorated__:
cls._decorate()
cls.__decorated__ = True
return object.__new__(cls, *args, **kwds)
finally:
cls.__init_lock__.release()
def _decorate(cls):
""" Decorate all the object methods to have validators
and appropriate function attributes.
This should only be executed once for the duration of the
server.
"""
global_validators = [session_required, catch_typeerror]
        # Cheat methods
# -------------
# Methods that have a trivial implementation for all classes.
# 1. get_by_uuid == getting by ref, so just return uuid for
# all get_by_uuid() methods.
for api_cls in classes.keys():
# We'll let the autoplug classes implement these functions
# themselves - its much cleaner to do it in the base class
get_by_uuid = '%s_get_by_uuid' % api_cls
get_uuid = '%s_get_uuid' % api_cls
get_all_records = '%s_get_all_records' % api_cls
def _get_by_uuid(_1, _2, ref):
return xen_api_success(ref)
def _get_uuid(_1, _2, ref):
return xen_api_success(ref)
def unpack(v):
return v.get('Value')
def _get_all_records(_api_cls):
return lambda s, session: \
xen_api_success(dict([(ref, unpack(getattr(cls, '%s_get_record' % _api_cls)(s, session, ref)))\
for ref in unpack(getattr(cls, '%s_get_all' % _api_cls)(s, session))]))
setattr(cls, get_by_uuid, _get_by_uuid)
setattr(cls, get_uuid, _get_uuid)
setattr(cls, get_all_records, _get_all_records(api_cls))
# Autoplugging classes
# --------------------
# These have all of their methods grabbed out from the implementation
# class, and wrapped up to be compatible with the Xen-API.
def getter(ref, type):
return XendAPIStore.get(ref, type)
def wrap_method(name, new_f):
try:
f = getattr(cls, name)
wrapped_f = (lambda * args: new_f(f, *args))
wrapped_f.api = f.api
wrapped_f.async = f.async
setattr(cls, name, wrapped_f)
except AttributeError:
# Logged below (API call: %s not found)
pass
def setter_event_wrapper(api_cls, attr_name):
setter_name = '%s_set_%s' % (api_cls, attr_name)
wrap_method(
setter_name,
lambda setter, s, session, ref, *args:
_setter_event_dispatch(s, setter, api_cls, attr_name,
session, ref, args))
def ctor_event_wrapper(api_cls):
ctor_name = '%s_create' % api_cls
wrap_method(
ctor_name,
lambda ctor, s, session, *args:
_ctor_event_dispatch(s, ctor, api_cls, session, args))
def dtor_event_wrapper(api_cls):
dtor_name = '%s_destroy' % api_cls
wrap_method(
dtor_name,
lambda dtor, s, session, ref, *args:
_dtor_event_dispatch(s, dtor, api_cls, session, ref, args))
# Wrapping validators around XMLRPC calls
# ---------------------------------------
for api_cls, validator in classes.items():
def doit(n, takes_instance, async_support=False,
return_type=None):
n_ = n.replace('.', '_')
try:
f = getattr(cls, n_)
if n not in argcounts:
argcounts[n] = f.func_code.co_argcount - 1
validators = takes_instance and validator and \
[validator] or []
validators += global_validators
for v in validators:
f = v(f)
f.api = n
f.async = async_support
if return_type:
f.return_type = return_type
setattr(cls, n_, f)
except AttributeError:
log.warn("API call: %s not found" % n)
ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, []) \
+ cls.Base_attr_ro
rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, []) \
+ cls.Base_attr_rw
methods = getattr(cls, '%s_methods' % api_cls, []) \
+ cls.Base_methods
funcs = getattr(cls, '%s_funcs' % api_cls, []) \
+ cls.Base_funcs
# wrap validators around readable class attributes
for attr_name in ro_attrs + rw_attrs:
doit('%s.get_%s' % (api_cls, attr_name), True,
async_support=False)
# wrap validators around writable class attrributes
for attr_name in rw_attrs:
doit('%s.set_%s' % (api_cls, attr_name), True,
async_support=False)
setter_event_wrapper(api_cls, attr_name)
# wrap validators around methods
for method_name, return_type in methods:
doit('%s.%s' % (api_cls, method_name), True,
async_support=True)
# wrap validators around class functions
for func_name, return_type in funcs:
doit('%s.%s' % (api_cls, func_name), False,
async_support=True,
return_type=return_type)
ctor_event_wrapper(api_cls)
dtor_event_wrapper(api_cls)
_decorate = classmethod(_decorate)
def __init__(self, auth):
self.auth = auth
Base_attr_ro = ['uuid']
Base_attr_rw = ['name_label', 'name_description']
Base_methods = [('get_record', 'Struct')]
Base_funcs = [('get_all', 'Set'), ('get_by_uuid', None), ('get_all_records', 'Set')]
def _get_XendAPI_instance(self):
import XendAPI
return XendAPI.instance()
def _get_BNStorageAPI_instance(self):
import BNStorageAPI
return BNStorageAPI.instance()
# Xen API: Class Session
# ----------------------------------------------------------------
# NOTE: Left unwrapped by __init__
session_attr_ro = ['this_host', 'this_user', 'last_active']
session_methods = [('logout', None)]
def session_get_all(self, session):
return xen_api_success([session])
def session_login(self, username):
try:
session = auth_manager().login_unconditionally(username)
return xen_api_success(session)
except XendError, e:
return xen_api_error(['SESSION_AUTHENTICATION_FAILED'])
session_login.api = 'session.login'
def session_login_with_password(self, *args):
if not BNPoolAPI._isMaster and BNPoolAPI._inPool:
return xen_api_error(XEND_ERROR_HOST_IS_SLAVE)
if len(args) < 2:
return xen_api_error(
['MESSAGE_PARAMETER_COUNT_MISMATCH',
'session.login_with_password', 2, len(args)])
username = args[0]
password = args[1]
try:
# session = ((self.auth == AUTH_NONE and
# auth_manager().login_unconditionally(username)) or
# auth_manager().login_with_password(username, password))
session = auth_manager().login_with_password(username, password)
return xen_api_success(session)
except XendError, e:
return xen_api_error(['SESSION_AUTHENTICATION_FAILED'])
session_login_with_password.api = 'session.login_with_password'
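    # A minimal client-side sketch of the login call above, assuming xend's
    # XML-RPC server listens on port 9363 (the port used by the commented-out
    # proxy code elsewhere in this class):
    #
    #     from xmlrpclib import ServerProxy
    #     proxy = ServerProxy('http://127.0.0.1:9363')
    #     rc = proxy.session.login_with_password('root', 'password')
    #     if rc['Status'] == 'Success':
    #         session_ref = rc['Value']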
# object methods
def session_logout(self, session):
auth_manager().logout(session)
return xen_api_success_void()
def session_get_record(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
record = {'uuid' : session,
'this_host' : XendNode.instance().uuid,
'this_user' : auth_manager().get_user(session),
'last_active': now()}
return xen_api_success(record)
def session_get_uuid(self, session, self_session):
return xen_api_success(self_session)
def session_get_by_uuid(self, session, self_session):
return xen_api_success(self_session)
# attributes (ro)
def session_get_this_host(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
if not BNPoolAPI._isMaster and BNPoolAPI._inPool:
return xen_api_error(XEND_ERROR_HOST_IS_SLAVE)
return xen_api_success(XendNode.instance().uuid)
def session_get_this_user(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
user = auth_manager().get_user(session)
if user is not None:
return xen_api_success(user)
return xen_api_error(['SESSION_INVALID', session])
def session_get_last_active(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
return xen_api_success(now())
# Xen API: Class User
# ----------------------------------------------------------------
# Xen API: Class Tasks
# ----------------------------------------------------------------
task_attr_ro = ['name_label',
'name_description',
'status',
'progress',
'type',
'result',
'error_info',
'allowed_operations',
'session'
]
task_attr_rw = []
task_funcs = [('get_by_name_label', 'Set(task)'),
('cancel', None)]
def task_get_name_label(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.name_label)
def task_get_name_description(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.name_description)
def task_get_status(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.get_status())
def task_get_progress(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.progress)
def task_get_type(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.type)
def task_get_result(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.result)
def task_get_error_info(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.error_info)
def task_get_allowed_operations(self, session, task_ref):
return xen_api_success({})
def task_get_session(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.session)
def task_get_all(self, session):
tasks = XendTaskManager.get_all_tasks()
return xen_api_success(tasks)
def task_get_record(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
log.debug(task.get_record())
return xen_api_success(task.get_record())
def task_cancel(self, session, task_ref):
        return xen_api_error(['OPERATION_NOT_ALLOWED'])
# def task_get_by_name_label(self, session, name):
# return xen_api_success(XendTaskManager.get_task_by_name(name))
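    # Hedged sketch of how a client consumes the task class above: issue a
    # call asynchronously (the 'async' flag set in _decorate) and poll the
    # returned task reference. Method and status names follow the usual
    # Xen-API convention and are assumptions as far as this module goes:
    #
    #     rc = proxy.Async.VM.start(session_ref, vm_ref)
    #     task_ref = rc['Value']
    #     while proxy.task.get_status(session_ref, task_ref)['Value'] == 'pending':
    #         time.sleep(1)
    #     result = proxy.task.get_result(session_ref, task_ref)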
# Xen API: Class VM
# ----------------------------------------------------------------
VM_attr_ro = ['power_state',
'resident_on',
'consoles',
'snapshots',
'VIFs',
'VBDs',
'VTPMs',
'DPCIs',
'DSCSIs',
'media',
'fibers',
'usb_scsi',
'DSCSI_HBAs',
'tools_version',
'domid',
'is_control_domain',
'metrics',
'crash_dumps',
'cpu_pool',
'cpu_qos',
'network_qos',
'VCPUs_CPU',
'ip_addr',
'MAC',
'is_local_vm',
'vnc_location',
'available_vbd_device',
'VIF_record',
'VBD_record',
'dev2path_list',
'pid2devnum_list',
'vbd2device_list',
'config',
'record_lite',
'inner_ip',
'system_VDI',
'network_record',
]
VM_attr_rw = ['name_label',
'name_description',
'user_version',
'is_a_template',
'auto_power_on',
'snapshot_policy',
'memory_dynamic_max',
'memory_dynamic_min',
'memory_static_max',
'memory_static_min',
'VCPUs_max',
'VCPUs_at_startup',
'VCPUs_params',
'actions_after_shutdown',
'actions_after_reboot',
'actions_after_suspend',
'actions_after_crash',
'PV_bootloader',
'PV_kernel',
'PV_ramdisk',
'PV_args',
'PV_bootloader_args',
'HVM_boot_policy',
'HVM_boot_params',
'platform',
'PCI_bus',
'other_config',
'security_label',
'pool_name',
'suspend_VDI',
'suspend_SR',
'VCPUs_affinity',
'tags',
'tag',
'rate',
'all_tag',
'all_rate',
'boot_order',
'IO_rate_limit',
# 'ip_map',
'passwd',
'config',
'platform_serial',
]
VM_methods = [('clone', 'VM'),
('clone_local', 'VM'),
('clone_MAC', 'VM'),
('clone_local_MAC', 'VM'),
('start', None),
('start_on', None),
('snapshot', None),
('rollback', None),
('destroy_snapshot', 'Bool'),
('destroy_all_snapshots', 'Bool'),
('pause', None),
('unpause', None),
('clean_shutdown', None),
('clean_reboot', None),
('hard_shutdown', None),
('hard_reboot', None),
('suspend', None),
('resume', None),
('send_sysrq', None),
('set_VCPUs_number_live', None),
('add_to_HVM_boot_params', None),
('remove_from_HVM_boot_params', None),
('add_to_VCPUs_params', None),
('add_to_VCPUs_params_live', None),
('remove_from_VCPUs_params', None),
('add_to_platform', None),
('remove_from_platform', None),
('add_to_other_config', None),
('remove_from_other_config', None),
('save', None),
('set_memory_dynamic_max_live', None),
('set_memory_dynamic_min_live', None),
('send_trigger', None),
('pool_migrate', None),
('migrate', None),
('destroy', None),
('cpu_pool_migrate', None),
('destroy_local', None),
('destroy_fiber', None),
('destroy_usb_scsi', None),
('destroy_media', None),
('destroy_VIF', None),
('disable_media', None),
('enable_media', None),
('eject_media', None),
('copy_sxp_to_nfs', None),
('media_change', None),
('add_tags', None),
('check_fibers_valid', 'Map'),
('check_usb_scsi_valid', 'Map'),
('can_start','Bool'),
('init_pid2devnum_list', None),
('clear_IO_rate_limit', None),
('clear_pid2devnum_list', None),
('start_set_IO_limit', None),
('start_init_pid2dev', None),
('create_image', 'Bool'),
('send_request_via_serial', 'Bool'),
# ('del_ip_map', None),
]
VM_funcs = [('create', 'VM'),
('create_on', 'VM'),
('create_from_sxp', 'VM'),
('create_from_vmstruct', 'VM'),
('restore', None),
('get_by_name_label', 'Set(VM)'),
('get_all_and_consoles', 'Map'),
('get_lost_vm_by_label', 'Map'),
('get_lost_vm_by_date', 'Map'),
('get_record_lite', 'Set'),
('create_data_VBD', 'Bool'),
('delete_data_VBD', 'Bool'),
('create_from_template', None),
('create_on_from_template', None),
('clone_system_VDI', 'VDI'),
('create_with_VDI', None),
]
# parameters required for _create()
VM_attr_inst = [
'name_label',
'name_description',
'user_version',
'is_a_template',
'is_local_vm',
'memory_static_max',
'memory_dynamic_max',
'memory_dynamic_min',
'memory_static_min',
'VCPUs_max',
'VCPUs_at_startup',
'VCPUs_params',
'actions_after_shutdown',
'actions_after_reboot',
'actions_after_suspend',
'actions_after_crash',
'PV_bootloader',
'PV_kernel',
'PV_ramdisk',
'PV_args',
'PV_bootloader_args',
'HVM_boot_policy',
'HVM_boot_params',
'platform',
'PCI_bus',
'other_config',
'security_label']
def VM_get(self, name, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM attribute value by name.
@param name: name of VM attribute field.
@param session: session of RPC.
@param vm_ref: uuid of VM.
@return: value of field.
@rtype: dict
'''
return xen_api_success(
XendDomain.instance().get_vm_by_uuid(vm_ref).info[name])
def VM_set(self, name, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Set VM attribute value by name.
@param name: name of VM attribute field.
@param session: session of RPC.
@param vm_ref: uuid of VM.
@param value: new value of VM attribute field.
@return: True | False.
@rtype: dict
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
dominfo.info[name] = value
return self._VM_save(dominfo)
def _VM_save(self, dominfo):
'''
@author: wuyuewen
@summary: Call config save function, the struct of VM will save to disk.
@param dominfo: VM config structure.
@return: True | False.
@rtype: dict.
'''
log.debug('VM_save')
XendDomain.instance().managed_config_save(dominfo)
return xen_api_success_void()
# attributes (ro)
def VM_get_power_state(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM power state by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: power state.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_power_state(vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_power_state", vm_ref)
else:
return self._VM_get_power_state(vm_ref)
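    # The master/slave dispatch above is the recurring idiom of this class:
    # on the pool master, find the host that owns the VM; if it is this
    # host, run the local _VM_* implementation, otherwise forward the call
    # over XML-RPC to the owning host.  Schematically ('VM_do_thing' is a
    # placeholder name):
    #
    #     host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
    #     if cmp(host_ref, XendNode.instance().uuid) == 0:
    #         return self._VM_do_thing(session, vm_ref)          # local
    #     host_ip = BNPoolAPI.get_host_ip(host_ref)
    #     return xen_rpc_call(host_ip, 'VM_do_thing', vm_ref)    # remote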
def _VM_get_power_state(self, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@param vm_ref: uuid.
@return: power state.
@rtype: dict.
'''
# log.debug("in get power state")
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_power_state())
# def VM_get_power_state(self, session, vm_ref):
# #host_ref = BNPoolAPI._VM_to_Host[vm_ref]
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# return xen_api_success(dom.get_power_state())
# else:
# try:
# remote_ip = BNPoolAPI._host_structs[host_ref]['ip']
# proxy = ServerProxy('http://' + remote_ip + ':9363')
# response = proxy.session.login('root')
# if cmp(response['Status'], 'Failure') == 0:
# return xen_api_error(response['ErrorDescription'])
# session_ref = response['Value']
# return proxy.VM.get_power_state(session_ref, vm_ref)
# except socket.error:
# return xen_api_error('socket error')
def VM_get_resident_on(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM resident Host.
@param session: session of RPC.
@param vm_ref: uuid.
@return: Host uuid.
@rtype: dict.
'''
#host_ref = BNPoolAPI._VM_to_Host[vm_ref]
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
return xen_api_success(host_ref)
def VM_get_snapshots(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM snapshots by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: snapshots.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_get_vdi_snapshots(session, vdi_ref)
def _VM_get_vdi_snapshots(self, session, vdi_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get snapshots of the given system VDI.
        @param session: session of RPC.
        @param vdi_ref: the VM's system VDI uuid.
@return: snapshots.
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
return xen_api_success([])
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record do not exist>>>>>')
return xen_api_success([])
sr_type = sr_rec.get('type')
log.debug('sr type>>>>>>>>>>>>>>>%s' % sr_type)
if cmp(sr_type, 'gpfs') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_gpfs(mount_point, vdi_ref)
elif cmp(sr_type, 'mfs') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_mfs(mount_point, vdi_ref)
        elif sr_type in ('ocfs2', 'local_ocfs2'):
            mount_point = sr_rec['mount_point']
            log.debug('mount_point: %s' % mount_point)
            proxy = ServerProxy("http://127.0.0.1:10010")
            snapshots = proxy.get_snapshots_ocfs2(mount_point, vdi_ref)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
snapshots = proxy.get_snapshots(sr, vdi_ref)
log.debug("snapshots : %s " % snapshots)
return xen_api_success(snapshots)
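    # Snapshot queries are delegated to an out-of-process storage agent on
    # port 10010: for gpfs/mfs/ocfs2/local_ocfs2 SRs the agent runs locally
    # and is addressed by mount point, while for other SR types the agent on
    # the storage server (taken from other_config['location']) is addressed
    # by SR uuid.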
def VM_get_snapshot_policy(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM snapshot policy by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: snapshot policy.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_get_vdi_snapshot_policy(session, vdi_ref)
def _VM_get_vdi_snapshot_policy(self, session, vdi_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VM snapshot policy by uuid.
@param session: session of RPC.
@param vdi_ref: VM system VDI's uuid.
@return: snapshot policy.
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value', None)
if sr_rec:
location = sr_rec['other_config']['location']
sr_type = sr_rec.get('type')
            if sr_type in ('gpfs', 'mfs', 'ocfs2', 'local_ocfs2'):
proxy = ServerProxy("http://127.0.0.1:10010")
snapshot_policy = proxy.get_snapshot_policy(sr, vdi_ref)
log.debug("snapshot_policy : %s " % snapshot_policy)
else:
sr_ip = location.split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
snapshot_policy = proxy.get_snapshot_policy(sr, vdi_ref)
log.debug("snapshot_policy : %s " % snapshot_policy)
return xen_api_success(snapshot_policy)
else:
return xen_api_success(("1", "100"))
def VM_set_snapshot_policy(self, session, vm_ref, interval, maxnum):
'''
@author: wuyuewen
@summary: Set VM snapshot policy by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
        @param interval: snapshot creation interval, in days.
        @param maxnum: the maximum number of snapshots to keep.
@return: True | False.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
return self._VM_set_vdi_snapshot_policy(session, vdi_ref, interval, maxnum)
def _VM_set_vdi_snapshot_policy(self, session, vdi_ref, interval, maxnum):
'''
@author: wuyuewen
@summary: Internal method. Set VM snapshot policy by uuid.
@param session: session of RPC.
@param vdi_ref: VM system VDI's uuid.
        @param interval: snapshot creation interval, in days.
        @param maxnum: the maximum number of snapshots to keep.
@return: True | False.
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
return xen_api_success(("1", "100"))
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value', None)
if sr_rec:
sr_type = sr_rec.get('type')
            if sr_type in ('gpfs', 'mfs', 'ocfs2', 'local_ocfs2'):
proxy = ServerProxy("http://127.0.0.1:10010")
snapshot_policy = proxy.set_snapshot_policy(sr, vdi_ref, interval, maxnum)
log.debug("snapshot_policy : %s " % snapshot_policy)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
snapshot_policy = proxy.set_snapshot_policy(sr, vdi_ref, interval, maxnum)
log.debug("snapshot_policy : %s " % snapshot_policy)
return xen_api_success(snapshot_policy)
else:
return xen_api_success(("1", "100"))
def VM_get_memory_static_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM memory static max.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static max.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_static_max(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_static_max', vm_ref)
else:
return self._VM_get_memory_static_max(session, vm_ref)
def _VM_get_memory_static_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM memory static max.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static max.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_static_max())
def VM_get_memory_static_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM memory static min.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static min.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_static_min(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_static_min', vm_ref)
else:
return self._VM_get_memory_static_min(session, vm_ref)
def _VM_get_memory_static_min(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VM memory static min.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static min.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_static_min())
def VM_get_VIFs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM VIFs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VIFs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VIFs(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_VIFs", vm_ref)
else:
return self._VM_get_VIFs(session, vm_ref)
def _VM_get_VIFs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM VIFs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VIFs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vifs())
def VM_get_VBDs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM VBDs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VBDs(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_VBDs", vm_ref)
else:
return self._VM_get_VBDs(session, vm_ref)
def _VM_get_VBDs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM VBDs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vbds())
def VM_get_usb_scsi(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM usb scsi devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_usb_scsi(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_usb_scsi", vm_ref)
else:
return self._VM_get_usb_scsi(session, vm_ref)
def _VM_get_usb_scsi(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM usb scsi devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[3]) == 0:
                #log.debug('usb_scsi: %s' % vbd)
result.append(vbd)
return xen_api_success(result)
def VM_get_fibers(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM fiber devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_fibers(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_fibers", vm_ref)
else:
return self._VM_get_fibers(session, vm_ref)
def _VM_get_fibers(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM fiber devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[2]) == 0:
#log.debug('fibers: %s' % vbd)
result.append(vbd)
return xen_api_success(result)
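    # As used throughout this class, the XEN_API_VBD_TYPE indices are:
    # [0] CD (see _VM_get_media), [1] disk (see _VM_get_disks), [2] fiber
    # and [3] usb_scsi (the two filters above).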
def VM_destroy_usb_scsi(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM usb scsi device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_usb_scsi(session, vm_ref, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_usb_scsi", vm_ref, vbd_ref)
else:
return self._VM_destroy_usb_scsi(session, vm_ref, vbd_ref)
def _VM_destroy_usb_scsi(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM usb scsi device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
storage = self._get_BNStorageAPI_instance()
vdi_ref = self.VBD_get_VDI(session, vbd_ref).get('Value')
response = self.VBD_destroy(session, vbd_ref)
if vdi_ref:
storage.VDI_destroy(session, vdi_ref)
return response
def VM_destroy_fiber(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM fiber device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_fiber(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_fiber", vm_ref, vbd_ref)
else:
return self._VM_destroy_fiber(session, vbd_ref)
def _VM_destroy_fiber(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM fiber device(VBD) by vbd uuid.
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
storage = self._get_BNStorageAPI_instance()
vdi_ref = self.VBD_get_VDI(session, vbd_ref).get('Value')
response = self.VBD_destroy(session, vbd_ref)
if vdi_ref:
storage.VDI_destroy(session, vdi_ref)
return response
def VM_enable_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Enable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_enable_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_enable_media", vbd_ref)
else:
return self._VM_enable_media(session, vbd_ref)
def _VM_enable_media(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Enable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
response = self.VBD_set_bootable(session, vbd_ref, 1)
return response
def VM_disable_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Disable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_disable_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_disable_media", vbd_ref)
else:
return self._VM_disable_media(session, vbd_ref)
def _VM_disable_media(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Disable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
response = self.VBD_set_bootable(session, vbd_ref, 0)
return response
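    # Enable/disable of media is modelled purely through the VBD bootable
    # flag (1 = enabled, 0 = disabled) rather than by attaching or
    # detaching the device.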
def VM_eject_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Eject VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_eject_media(session, vm_ref, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_eject_media", vm_ref, vbd_ref)
else:
return self._VM_eject_media(session, vm_ref, vbd_ref)
def _VM_eject_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Eject VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
node = XendNode.instance()
        if not node.is_fake_media_exists():
            # The fake media helper lives in the storage API (see
            # _VM_get_media below), not on this class.
            storage = self._get_BNStorageAPI_instance()
            storage._fake_media_auto_create(session)
# if not os.path.exists(FAKE_MEDIA_PATH):
# os.system("touch %s" % FAKE_MEDIA_PATH)
response = self._VM_media_change(session, vm_ref, FAKE_MEDIA_NAME)
return response
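    # Ejecting is implemented as a media change to the fake placeholder
    # media (FAKE_MEDIA_NAME), created on demand above, which effectively
    # leaves the drive empty from the guest's point of view.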
def VM_destroy_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_media", vm_ref, vbd_ref)
else:
return self._VM_destroy_media(session, vbd_ref)
def _VM_destroy_media(self, session, vbd_ref):
'''
@author: wuyuewen
        @summary: Internal method. Destroy VM's media device(cdrom device).
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
response = self.VBD_destroy(session, vbd_ref)
return response
def VM_destroy_VIF(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Destroy VM's VIF device(network device).
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_VIF(session, vm_ref, vif_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_VIF", vm_ref, vif_ref)
else:
return self._VM_destroy_VIF(session, vm_ref, vif_ref)
def _VM_destroy_VIF(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM's VIF device(network device).
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
# self._VM_del_ip_map(session, vm_ref, vif_ref)
response = self.VIF_destroy(session, vif_ref)
return response
def VM_get_available_vbd_device(self, session, vm_ref, device_type = 'xvd'):
'''
@author: wuyuewen
        @summary: Used before creating a VBD device; returns an available device name (xvdX/hdX).
        @precondition: The available range is xvda-xvdj/hda-hdj, i.e. at most 10 devices in total.
@param session: session of RPC.
@param vm_ref: uuid
@param device_type: xvd/hd.
@return: available device name.
@rtype: dict.
@raise xen_api_error: DEVICE_OUT_OF_RANGE, NO_VBD_ERROR
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_available_vbd_device(session, vm_ref, device_type)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_available_vbd_device", vm_ref, device_type)
else:
return self._VM_get_available_vbd_device(session, vm_ref, device_type)
def _VM_get_available_vbd_device(self, session, vm_ref, device_type):
'''
@author: wuyuewen
        @summary: Internal method. Used before creating a VBD device; returns an available device name (xvdX/hdX).
        @precondition: The available range is xvda-xvdj/hda-hdj, i.e. at most 10 devices in total.
@param session: session of RPC.
@param vm_ref: uuid
@param device_type: xvd/hd.
@return: available device name.
@rtype: dict.
@raise xen_api_error: DEVICE_OUT_OF_RANGE, NO_VBD_ERROR
'''
vbds = self._VM_get_VBDs(session, vm_ref).get('Value')
if vbds:
if cmp(len(vbds), DISK_LIMIT+1) >= 0:
return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VBD'])
vbds_first_device = self.VBD_get_device(session, vbds[0]).get('Value')
if vbds_first_device.startswith('hd'):
device_list = copy.deepcopy(VBD_DEFAULT_DEVICE)
else:
device_list = copy.deepcopy(VBD_XEN_DEFAULT_DEVICE)
for vbd in vbds:
device = self.VBD_get_device(session, vbd).get('Value')
if device and device in device_list:
device_list.remove(device)
else:
continue
if device_list:
return xen_api_success(device_list[0])
else:
return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VBD'])
else:
return xen_api_error(['NO_VBD_ERROR', 'VM', vm_ref])
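    # Hedged usage sketch: pick a free device name, then feed it into a VBD
    # create struct (the struct field names here follow the VBD attributes
    # read elsewhere in this module and are otherwise assumptions):
    #
    #     device = self._VM_get_available_vbd_device(session, vm_ref,
    #                                                'xvd').get('Value')
    #     vbd_struct = {'VM': vm_ref, 'VDI': vdi_ref,
    #                   'device': device, 'type': 'Disk', 'mode': 'RW'}
    #     self.VBD_create(session, vbd_struct)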
def VM_get_media(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@return: VBD
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_media(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_media", vm_ref)
else:
return self._VM_get_media(session, vm_ref)
def _VM_get_media(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@return: VBD
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = None
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "<none/>")
if cmp(vbd_type, XEN_API_VBD_TYPE[0]) == 0:
result = vbd
break
if result:
return xen_api_success(result)
else:
            # If the VM has no media device, create a fake one.  Deep-copy
            # the module-level default struct so the shared template is not
            # mutated across calls.
            vbd_struct = copy.deepcopy(CD_VBD_DEFAULT_STRUCT)
vbd_struct["VM"] = vm_ref
node = XendNode.instance()
if not node.is_fake_media_exists():
vdi = storage._fake_media_auto_create(session).get('Value')
else:
vdi = storage._VDI_get_by_name_label(session, FAKE_MEDIA_NAME).get("Value")
vbd_struct["VDI"] = vdi
return self.VBD_create(session, vbd_struct)
def _VM_get_disks(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[1]) == 0:
result.append(vbd)
return xen_api_success(result)
def VM_media_change(self, session, vm_ref, vdi_name):
'''
@author: wuyuewen
@summary: Change VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vdi_name: VDI's name label.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID, INTERNAL_ERROR
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_media_change(session, vm_ref, vdi_name)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_media_change", vm_ref, vdi_name)
else:
return self._VM_media_change(session, vm_ref, vdi_name)
def _VM_media_change(self, session, vm_ref, vdi_name):
'''
@author: wuyuewen
@summary: Internal method. Change VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vdi_name: VDI's name label.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID, INTERNAL_ERROR
'''
vbd_ref = self._VM_get_media(session, vm_ref).get('Value')
xendom = XendDomain.instance()
xennode = XendNode.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
        if not vm:
            # No VM owns this VBD reference: create a fresh media device and
            # continue with the new reference (this assumes VBD_create
            # returns the new VBD uuid in 'Value').
            log.debug("No media, create one.")
            vbd_struct = copy.deepcopy(CD_VBD_DEFAULT_STRUCT)
            vbd_struct["VM"] = vm_ref
            vbd_ref = self.VBD_create(session, vbd_struct).get('Value')
            vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
            if not vm:
                return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
cur_vbd_struct = vm.get_dev_xenapi_config('vbd', vbd_ref)
        # Check that the VBD is a media (CD) device.
if not cur_vbd_struct:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['type'] != XEN_API_VBD_TYPE[0]: # Not CD
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['mode'] != 'RO': # Not read only
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
vdi_uuid = xennode.get_vdi_by_name_label(vdi_name)
new_vdi = xennode.get_vdi_by_uuid(vdi_uuid)
if not new_vdi:
return xen_api_error(['HANDLE_INVALID', 'VDI', vdi_name])
new_vdi_image = new_vdi.get_location()
valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
new_vbd_struct = {}
for k in cur_vbd_struct.keys():
if k in valid_vbd_keys:
new_vbd_struct[k] = cur_vbd_struct[k]
new_vbd_struct['VDI'] = vdi_uuid
try:
XendTask.log_progress(0, 100,
vm.change_vdi_of_vbd,
new_vbd_struct, new_vdi_image)
except XendError, e:
log.exception("Error in VBD_media_change")
# if str(e).endswith("VmError: Device"):
# log.debug("No media create new...")
# log.debug(new_vbd_struct)
# self.VBD_create(session, new_vbd_struct)
return xen_api_error(['INTERNAL_ERROR', str(e)])
# return xen_api_success_void()
return xen_api_success_void()
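    # _VM_media_change above proceeds in four steps: locate (or create) the
    # VM's CD VBD, validate that it really is a read-only CD device, resolve
    # the target VDI by name label, then rebuild the VBD struct with the new
    # VDI and apply it live via change_vdi_of_vbd.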
def VM_get_VTPMs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vtpms())
def VM_get_consoles(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's console device(VNC device).
@param session: session of RPC.
@param vm_ref: uuid
@return: console
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_consoles(vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_consoles", vm_ref)
else:
return self._VM_get_consoles(vm_ref)
def _VM_get_consoles(self, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's console device(VNC device).
@param vm_ref: uuid
@return: console
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_consoles())
def VM_get_DPCIs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dpcis())
def VM_get_DSCSIs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dscsis())
def VM_get_DSCSI_HBAs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dscsi_HBAs())
def VM_get_tools_version(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return dom.get_tools_version()
def VM_get_metrics(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_metrics())
#frank
def VM_get_cpu_qos(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_cpu_qos())
#frank
def VM_get_network_qos(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_network_qos())
def VM_get_VCPUs_max(self, _, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's max VCPUs.
@param _: session of RPC.
@param vm_ref: uuid
@return: VCPUs num
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_max(_, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_max', vm_ref)
else:
return self._VM_get_VCPUs_max(_, vm_ref)
def _VM_get_VCPUs_max(self, _, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's max VCPUs.
@param _: session of RPC.
@param vm_ref: uuid
@return: VCPUs num
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.info['VCPUs_max'])
def VM_get_VCPUs_at_startup(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_todo()
def VM_get_VCPUs_CPU(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VCPUs' bounding CPUs.
@param session: session of RPC.
@param vm_ref: uuid
@return: VCPUs-CPUs dict.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_CPU(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_CPU', vm_ref)
else:
return self._VM_get_VCPUs_CPU(session, vm_ref)
def _VM_get_VCPUs_CPU(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VCPUs' bounding CPUs.
@param session: session of RPC.
@param vm_ref: uuid
@return: VCPUs-CPUs dict.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getVCPUsCPU())
def VM_get_ip_addr(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's ip address.
        @precondition: VM-tools must be installed in the VM.
@param session: session of RPC.
@param vm_ref: uuid
@return: IPv4 address.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_ip_addr(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_ip_addr', vm_ref)
else:
return self._VM_get_ip_addr(session, vm_ref)
def _VM_get_ip_addr(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's ip address.
        @precondition: VM-tools must be installed in the VM.
@param session: session of RPC.
@param vm_ref: uuid
@return: IPv4 address.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getDomainIp())
def VM_get_MAC(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's MAC address.
@precondition: has a VIF device.
@param session: session of RPC.
@param vm_ref: uuid
@return: MAC address.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_MAC(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_MAC', vm_ref)
else:
return self._VM_get_MAC(session, vm_ref)
def _VM_get_MAC(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's MAC address.
@precondition: has a VIF device.
@param session: session of RPC.
@param vm_ref: uuid
@return: MAC address.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getDomainMAC())
def VM_get_vnc_location(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Get VM's VNC location.
@precondition: has a console device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VNC location.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_vnc_location(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_vnc_location', vm_ref)
else:
return self._VM_get_vnc_location(session, vm_ref)
def _VM_get_vnc_location(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VM's VNC location.
@precondition: has a console device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VNC location.
@rtype: dict.
'''
        xendom = XendDomain.instance()
dom = xendom.get_vm_by_uuid(vm_ref)
# consoles = dom.get_consoles()
# vnc_location = "0"
# for console in consoles:
# location = xendom.get_dev_property_by_uuid('console', console, 'location')
# log.debug("vm %s console %s location %s" % (vm_ref, console, location))
# if location.find(".") != -1:
# vnc_location = location
vnc_location = dom.get_console_port()
log.debug('VM(%s) get vnc location (%s)' % (vm_ref, vnc_location))
return xen_api_success(vnc_location)
# attributes (rw)
def VM_get_name_label(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's name label.
@param session: session of RPC.
@param vm_ref: uuid
@return: name label.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_name_label(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_name_label', vm_ref)
else:
return self._VM_get_name_label(session, vm_ref)
def _VM_get_name_label(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's name label.
@param session: session of RPC.
@param vm_ref: uuid
@return: name label.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.getName())
def VM_get_name_description(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's name description.
@param session: session of RPC.
@param vm_ref: uuid
@return: name description.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_name_description(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_name_description', vm_ref)
else:
return self._VM_get_name_description(session, vm_ref)
def _VM_get_name_description(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's name description.
@param session: session of RPC.
@param vm_ref: uuid
@return: name description.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.getNameDescription())
def VM_get_user_version(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_todo()
def VM_get_is_a_template(self, session, ref):
'''
@author: wuyuewen
        @summary: Check whether the VM is a template.
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_a_template(session, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_is_a_template', ref)
else:
return self._VM_get_is_a_template(session, ref)
def _VM_get_is_a_template(self, session, ref):
'''
@author: wuyuewen
        @summary: Internal method. Check whether the VM is a template.
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
log.debug('ref:%s' % ref)
try:
return xen_api_success(XendDomain.instance().get_vm_by_uuid(ref).info['is_a_template'])
except KeyError:
return xen_api_error(['key error', ref])
def VM_get_is_local_vm(self, session, ref):
'''
@author: wuyuewen
        @summary: Check whether the VM is a local VM (its disk files are on local, non-shared storage).
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_local_vm(session, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_is_local_vm', ref)
else:
return self._VM_get_is_local_vm(session, ref)
def _VM_get_is_local_vm(self, session, ref):
'''
@author: wuyuewen
        @summary: Internal method. Check whether the VM is a local VM (its disk files are on local, non-shared storage).
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
# log.debug('ref:%s' % ref)
try:
storage = self._get_BNStorageAPI_instance()
vdis = storage._VDI_get_by_vm(session, ref).get('Value')
if vdis:
for vdi_uuid in vdis:
vdi = storage._get_VDI(vdi_uuid)
if vdi:
sharable = vdi.sharable
if not sharable:
return xen_api_success(not sharable)
else:
log.exception('failed to get vdi by vdi_uuid: %s' % vdi_uuid)
return xen_api_success(True)
# return xen_api_error(['failed to get vdi by vdi_uuid', vdi_uuid])
return xen_api_success(not sharable)
else:
log.exception('failed to get vdi by vm: %s' % ref)
return xen_api_success(False)
# return xen_api_error(['failed to get vdi by vm',ref])
except KeyError:
return xen_api_error(['key error', ref])
except VDIError:
return xen_api_success(False)
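    # Decision summary for _VM_get_is_local_vm: the VM counts as local
    # (True) as soon as one of its VDIs is non-sharable, or a VDI lookup
    # fails; it reports False when every VDI is sharable, when the VM has
    # no VDIs at all, or on a VDIError.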
# # get inner ip of a VM
# def VM_get_inner_ip(self, session, vm_ref):
# ip_map = self.VM_get_ip_map(session, vm_ref).get('Value')
# mac2ip_list = {}
# for mac, ipmap in ip_map.items():
# inner_ip = ipmap.split('@')[0]
# mac2ip_list[mac] = inner_ip
# return xen_api_success(mac2ip_list)
# #Get mapping intranet ip address to outer net ip address.
# def VM_get_ip_map(self, session, vm_ref):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_get_ip_map(session, vm_ref)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_get_ip_map', vm_ref)
# else:
# return self._VM_get_ip_map(session, vm_ref)
#
# def _VM_get_ip_map(self, session, vm_ref):
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# return xen_api_success(dom.get_ip_map())
def VM_get_auto_power_on(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('auto_power_on', session, vm_ref)
def VM_get_memory_dynamic_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's memory dynamic max.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic max(Bytes).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_dynamic_max(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_dynamic_max', vm_ref)
else:
return self._VM_get_memory_dynamic_max(session, vm_ref)
def _VM_get_memory_dynamic_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's memory dynamic max.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic max(Bytes).
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_dynamic_max())
def VM_get_memory_dynamic_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's memory dynamic min.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic min(Bytes).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_dynamic_min(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_dynamic_min', vm_ref)
else:
return self._VM_get_memory_dynamic_min(session, vm_ref)
def _VM_get_memory_dynamic_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's memory dynamic min.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic min(Bytes).
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_dynamic_min())
def VM_get_VCPUs_params(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vcpus_params())
def VM_get_actions_after_shutdown(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_shutdown())
def VM_get_actions_after_reboot(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_reboot())
def VM_get_actions_after_suspend(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_suspend())
def VM_get_actions_after_crash(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_crash())
def VM_get_PV_bootloader(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_bootloader', session, vm_ref)
def VM_get_PV_kernel(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_kernel', session, vm_ref)
def VM_get_PV_ramdisk(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_ramdisk', session, vm_ref)
def VM_get_PV_args(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_args', session, vm_ref)
def VM_get_PV_bootloader_args(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_bootloader_args', session, vm_ref)
def VM_get_HVM_boot_policy(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('HVM_boot_policy', session, vm_ref)
def VM_get_HVM_boot_params(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('HVM_boot_params', session, vm_ref)
def VM_get_platform(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_platform())
def VM_get_PCI_bus(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return dom.get_pci_bus()
def VM_get_VCPUs_affinity(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's VCPUs available CPU affinity.
@param session: session of RPC.
@param vm_ref: uuid
@return: dict of CPU affinity.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp (host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_affinity(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_affinity', vm_ref)
else:
return self._VM_get_VCPUs_affinity(session, vm_ref)
def _VM_get_VCPUs_affinity(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's VCPUs available CPU affinity.
@param session: session of RPC.
@param vm_ref: uuid
@return: dict of CPU affinity.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getVCPUsAffinity())
def VM_set_VCPUs_affinity(self, session, vm_ref, vcpu, cpumap):
'''
@author: wuyuewen
@summary: Set VM's VCPU available CPU affinity, VCPU can used one of these CPUs.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
        @param vcpu: VCPU index; a VM with 2 VCPUs has indices 0 and 1.
        @param cpumap: a comma-separated CPU list, e.g. "0,2,4,8" binds the VCPU to CPUs 0, 2, 4 and 8.
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp (host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_affinity(session, vm_ref, vcpu, cpumap)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_affinity', vm_ref, vcpu, cpumap)
else:
return self._VM_set_VCPUs_affinity(session, vm_ref, vcpu, cpumap)
def _VM_set_VCPUs_affinity(self, session, vm_ref, vcpu, cpumap):
'''
@author: wuyuewen
@summary: Internal method. Set VM's VCPU available CPU affinity, VCPU can used one of these CPUs.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
        @param vcpu: VCPU index; a VM with 2 VCPUs has indices 0 and 1.
        @param cpumap: a comma-separated CPU list, e.g. "0,2,4,8" binds the VCPU to CPUs 0, 2, 4 and 8.
@return: True | False.
@rtype: dict.
'''
        dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
        if not dominfo:
            raise XendInvalidDomain(str(vm_ref))
        domid = dominfo.getDomid()
        vcpu = 'cpumap%d' % int(vcpu)
        if not domid or cmp(domid, -1) == 0:
self.VM_add_to_VCPUs_params(session, vm_ref, vcpu, cpumap)
else:
self.VM_add_to_VCPUs_params_live(session, vm_ref, vcpu, cpumap)
# dominfo.setVCPUsAffinity(vcpu, cpumap)
return xen_api_success_void()
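    # The affinity setter above stores the map under the VCPUs_params key
    # 'cpumap<N>': for a stopped VM (domid unset or -1) it goes through the
    # persistent VM_add_to_VCPUs_params path, while for a running VM it uses
    # the _live variant.  E.g. binding VCPU 1 to CPUs 0 and 2:
    #
    #     self._VM_set_VCPUs_affinity(session, vm_ref, 1, '0,2')
    #     # -> VCPUs_params['cpumap1'] = '0,2'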
def VM_set_PCI_bus(self, session, vm_ref, val):
'''
@deprecated: not used
'''
return self.VM_set('PCI_bus', session, vm_ref, val)
def VM_get_other_config(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's other config.
@param session: session of RPC.
@param vm_ref: uuid
@return: other config field.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_other_config(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_other_config', vm_ref)
else:
return self._VM_get_other_config(session, vm_ref)
#
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self.VM_get('other_config', session, vm_ref)
# else:
# log.debug("get other config")
# host_ip = BNPoolAPI._host_structs[host_ref]['ip']
# return xen_rpc_call(host_ip, "VM_get_other_config", vm_ref)
# add by wufan 20131016
def _VM_get_other_config(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's other config.
@param session: session of RPC.
@param vm_ref: uuid
@return: other config field.
@rtype: dict.
'''
other_config = self.VM_get('other_config', session, vm_ref).get('Value')
#log.debug('_VM_get_other_config: type%s value%s' % (type(other_config), other_config))
#if other_config :
# tag_list = other_config.get('tag',{})
# if isinstance(tag_list, str):
# self._VM_convert_other_config(session, vm_ref)
# other_config = self.VM_get('other_config', session, vm_ref).get('Value')
return xen_api_success(other_config)
# add by wufan
def _VM_convert_other_config(self, session, vm_ref):
'''
@deprecated: not used
'''
        OTHER_CFG_DICT_KEYS = ['tag', 'rate', 'burst']
convert_other_config = {}
other_config = self.VM_get('other_config', session, vm_ref).get('Value')
#log.debug('_VM_get_other_config: type%s value%s' % (type(other_config), other_config))
if other_config and isinstance(other_config, dict):
for key, value in other_config.items():
                if key in OTHER_CFG_DICT_KEYS and not isinstance(value, dict):
value = eval(value)
if isinstance(value, dict):
convert_other_config.setdefault(key,{})
for k, v in value.items():
convert_other_config[key][k] = v
else:
convert_other_config[key] = value
self._VM_set_other_config(session, vm_ref, convert_other_config)
log.debug('_VM_convert_other_config: type%s value%s' % (type(convert_other_config), convert_other_config))
return xen_api_success_void()
def VM_get_tags(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_tags(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_tags', vm_ref)
else:
return self._VM_get_tags(session, vm_ref)
def _VM_get_tags(self, session, vm_ref):
'''
@deprecated: not used
'''
try:
return self.VM_get('tags', session, vm_ref)
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
def VM_get_all_tag(self, session, vm_ref, tag_type):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_all_tag(session, vm_ref, tag_type)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_all_tag', vm_ref, tag_type)
else:
return self._VM_get_all_tag(session, vm_ref, tag_type)
def _VM_get_all_tag(self, session, vm_ref, tag_type):
'''
@deprecated: not used
'''
tag_list = {}
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
#log.debug('other_config: %s', other_config)
if other_config:
tag_list = other_config.get(tag_type,{})
log.debug('list:%s' % tag_list)
return xen_api_success(tag_list)
except Exception, exn:
log.exception(exn)
return xen_api_success(tag_list)
def VM_get_tag(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Get a VIF's tag (VLAN ID); this attribute is stored in the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@return: the VIF's tag number (VLAN ID); the default is -1 (no VLAN).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_tag(session, vm_ref, vif_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_tag', vm_ref, vif_ref)
else:
return self._VM_get_tag(session, vm_ref, vif_ref)
# original:wuyuewen
#def _VM_get_tag(self, session, vm_ref):
# try:
# other_config = self._VM_get_other_config(session, vm_ref).get('Value')
# tag = "-1"
# if other_config:
# tag = other_config.get('tag', "-1")
# return xen_api_success(tag)
# except Exception, exn:
# log.exception(exn)
# return xen_api_success(tag)
# add by wufan read from VM's other_config
def _VM_get_tag(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Internal method. Get a VIF's tag (VLAN ID); this attribute is stored in the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@return: the VIF's tag number (VLAN ID); the default is -1 (no VLAN).
@rtype: dict.
'''
tag = '-1'
eth_num = '-1'
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
device = self.VIF_get_device(session, vif_ref).get('Value')
if device != '' and device.startswith('eth'):
eth_num = device[3:]
if other_config:
tag_list = other_config.get('tag',{})
#log.debug('tag_list type:%s' % type(tag_list))
tag = tag_list.get(eth_num,'-1')
#log.debug('_VM_get_tag:%s' % tag)
return xen_api_success(tag)
except Exception, exn:
log.exception(exn)
return xen_api_success(tag)
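# Example (illustrative, values assumed): VLAN tags are kept per interface
# in other_config, keyed by the eth number parsed from the VIF device name:
#
#   other_config = {'tag': {'0': '100', '1': '-1'}}
#
# so a VIF whose device is 'eth0' resolves to tag '100' above.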
def VM_get_rate(self, session, vm_ref, param_type, vif_ref):
'''
@author: wuyuewen
@summary: Get a VIF's rate or burst limit enforced by OVS;
these attributes are stored in the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate | burst; rate is the rate (kbps) of the VIF port enforced by OVS,
burst (kbps) is the allowed traffic burst above that rate.
@param vif_ref: VIF uuid
@return: the VIF's rate (kbps).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_rate(session, vm_ref, param_type, vif_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_rate', vm_ref, param_type, vif_ref)
else:
return self._VM_get_rate(session, vm_ref, param_type, vif_ref)
def _VM_get_rate(self, session, vm_ref, param_type, vif_ref):
'''
@author: wuyuewen
@summary: Internal method. Get a VIF's rate or burst limit enforced by OVS;
these attributes are stored in the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate | burst; rate is the rate (kbps) of the VIF port enforced by OVS,
burst (kbps) is the allowed traffic burst above that rate.
@param vif_ref: VIF uuid
@return: the VIF's rate (kbps).
@rtype: dict.
'''
rate = '-1'
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
device = self.VIF_get_device(session, vif_ref).get('Value')
#log.debug('>>>>>>>>>>>>device')
#log.debug(device)
eth_num = ''
if device != '' and device.startswith('eth'):
eth_num = device[3:]
elif not device:
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
log.debug('vif_refs %s' % vif_refs)
try:
eth_num = str(vif_refs.index(vif_ref))
except:
eth_num = ''
log.debug('eth_num %s' % eth_num)
if other_config and eth_num != '':
rate_list = other_config.get(param_type,{})
log.debug('rate_list %s' % rate_list)
rate = rate_list.get(eth_num,'-1')
return xen_api_success(rate)
except Exception, exn:
log.exception(exn)
return xen_api_success(rate)
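# Example (illustrative, values assumed): rate/burst limits follow the same
# per-interface layout in other_config as VLAN tags:
#
#   other_config = {'rate': {'0': '1000'}, 'burst': {'0': '100'}}
#
# _VM_get_rate(session, vm_ref, 'rate', vif_of_eth0) then returns '1000'.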
def VM_get_domid(self, _, ref):
'''
@author: wuyuewen
@summary: Get VM's id.
@precondition: VM is running.
@param _: session of RPC.
@param ref: uuid
@return: VM's id.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_domid(_, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_domid', ref)
else:
return self._VM_get_domid(_, ref)
def _VM_get_domid(self, _, ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's id.
@precondition: VM is running.
@param _: session of RPC.
@param ref: uuid
@return: VM's id.
@rtype: dict.
'''
domid = XendDomain.instance().get_vm_by_uuid(ref).getDomid()
return xen_api_success(domid is None and -1 or domid)
def VM_get_cpu_pool(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
pool_ref = XendCPUPool.query_pool_ref(dom.get_cpu_pool())
return xen_api_success(pool_ref)
def VM_set_pool_name(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('pool_name', session, vm_ref, value)
def VM_get_is_control_domain(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Check whether the VM is dom0 (the control domain).
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_control_domain(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_is_control_domain", vm_ref)
else:
return self._VM_get_is_control_domain(session, vm_ref)
def _VM_get_is_control_domain(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check whether the VM is dom0 (the control domain).
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
'''
xd = XendDomain.instance()
return xen_api_success(xd.get_vm_by_uuid(vm_ref) == xd.privilegedDomain())
def VM_get_VIF_record(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Get a VIF record; in a Pool this method is used in place of VIF_get_record().
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@return: VIF record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VIF_get_record(session, vif_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VIF_get_record", vif_ref)
else:
return self.VIF_get_record(session, vif_ref)
def VM_get_network_record(self, session, vm_ref, vif):
'''
@author: wuyuewen
@summary: Get a network record; in a Pool this method is used in place of network_get_record().
@param session: session of RPC.
@param vm_ref: uuid
@param vif: VIF uuid
@return: network record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
xenapi = self._get_XendAPI_instance()
bridge = self._VIF_get(vif, "bridge").get('Value')
list_network = xenapi.network_get_by_name_label(session, bridge).get('Value')
if not list_network:
return xen_api_error(['NETWORK_NOT_EXISTS'])
net_ref = list_network[0]
net = XendAPIStore.get(net_ref, "network")
return xen_api_success(net.get_record())
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_network_record", vm_ref, vif)
else:
xenapi = self._get_XendAPI_instance()
bridge = self._VIF_get(vif, "bridge").get('Value')
list_network = xenapi.network_get_by_name_label(session, bridge).get('Value')
if not list_network:
return xen_api_error(['NETWORK_NOT_EXISTS'])
net_ref = list_network[0]
net = XendAPIStore.get(net_ref, "network")
return xen_api_success(net.get_record())
def VM_get_VBD_record(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Get a VBD record; in a Pool this method is used in place of VBD_get_record().
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD uuid
@return: VBD record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VBD_get_record(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VBD_get_record", vbd_ref)
else:
return self.VBD_get_record(session, vbd_ref)
def VM_get_system_VDI(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get the VDI linked to the VM's system VBD; the chain is VM -> VBD (VM's disk) -> VDI (storage management).
@precondition: VM has system VBD device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VDI.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_system_VDI(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_system_VDI", vm_ref)
else:
return self._VM_get_system_VDI(session, vm_ref)
def _VM_get_system_VDI(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get the VDI linked to the VM's system VBD; the chain is VM -> VBD (VM's disk) -> VDI (storage management).
@precondition: VM has system VBD device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VDI.
@rtype: dict.
'''
vbds = self._VM_get_VBDs(session, vm_ref).get('Value', [])
sys_vbd = ''
sys_vdi = ''
if vbds:
for vbd in vbds:
bootable = self.VBD_get_bootable(session, vbd).get('Value', False)
vbd_type = self.VBD_get_type(session, vbd).get('Value', '')
if bootable and cmp(vbd_type, 'Disk') == 0:
sys_vbd = vbd
break
if sys_vbd:
sys_vdi = self.VBD_get_VDI(session, sys_vbd).get('Value', '')
return xen_api_success(sys_vdi)
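# Example (illustrative): with two VBDs, a bootable 'Disk' (hda) and a
# non-bootable CD drive (hdc), the loop above selects hda and the call
# returns the VDI uuid behind it; an empty string means no bootable system
# disk was found.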
def VM_set_name_label(self, session, vm_ref, label):
'''
@author: wuyuewen
@summary: Set VM's name label.
@precondition: English only; <label> may contain no special characters except "_", "-" and ".".
@param session: session of RPC.
@param vm_ref: uuid
@param label: name label to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
try:
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
self._VM_set_name_label(session, vm_ref, label)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
xen_rpc_call(remote_ip, 'VM_set_name_label', vm_ref, label)
return xen_api_success_void()
else:
return self._VM_set_name_label(session, vm_ref, label)
except VmError, e:
return xen_api_error(['VM error: ', e])
def _VM_set_name_label(self, session, vm_ref, label):
'''
@author: wuyuewen
@summary: Internal method. Set VM's name label.
@precondition: English only; <label> may contain no special characters except "_", "-" and ".".
@param session: session of RPC.
@param vm_ref: uuid
@param label: name label to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.setName(label)
self._VM_save(dom)
return xen_api_success_void()
def VM_set_name_description(self, session, vm_ref, desc):
'''
@author: wuyuewen
@summary: Set VM's name description.
@precondition: English only; <desc> may contain no special characters except "_", "-" and ".".
@param session: session of RPC.
@param vm_ref: uuid
@param desc: name description to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_name_description(session, vm_ref, desc)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_name_description', vm_ref, desc)
else:
return self._VM_set_name_description(session, vm_ref, desc)
def _VM_set_name_description(self, session, vm_ref, desc):
'''
@author: wuyuewen
@summary: Internal method. Set VM's name description.
@precondition: English only; <desc> may contain no special characters except "_", "-" and ".".
@param session: session of RPC.
@param vm_ref: uuid
@param desc: name description to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.setNameDescription(desc)
self._VM_save(dom)
return xen_api_success_void()
def VM_set_user_version(self, session, vm_ref, ver):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_todo()
def VM_set_is_a_template(self, session, vm_ref, is_template):
'''
@author: wuyuewen
@summary: Convert a VM into a VM template, or a VM template back into a VM.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param is_template: True | False
@return: True | False
@rtype: dict.
@raise xen_api_error: VM_BAD_POWER_STATE
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_is_a_template(session, vm_ref, is_template)
else:
return xen_rpc_call(host_ip, 'VM_set_is_a_template', vm_ref, is_template)
else:
return self._VM_set_is_a_template(session, vm_ref, is_template)
def _VM_set_is_a_template(self, session, vm_ref, is_template):
'''
@author: wuyuewen
@summary: Internal method. Convert a VM into a VM template, or a VM template back into a VM.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param is_template: True | False
@return: True | False
@rtype: dict.
@raise xen_api_error: VM_BAD_POWER_STATE
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
return xen_api_error(
['VM_BAD_POWER_STATE', vm_ref,
XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
XendDomain.POWER_STATE_NAMES[dom._stateGet()]])
dom.set_is_a_template(is_template)
self._VM_save(dom)
return xen_api_success_void()
# #Mapping intranet ip address to outer net ip address.
# def VM_set_ip_map(self, session, vm_ref, vif):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_set_ip_map(session, vm_ref, vif)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_set_ip_map', vm_ref, vif)
# else:
# return self._VM_set_ip_map(session, vm_ref, vif)
#
# def _VM_set_ip_map(self, session, vm_ref, vif):
# mac = None
# mac_rec = self.VIF_get_MAC(session, vif)
# if mac_rec.get('Status') == 'Success':
# mac = mac_rec.get('Value')
# if mac:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# dom.set_ip_map(mac)
# return xen_api_success(self._VM_save(dom))
# else:
# log.error('Can not get MAC from vif.')
# return xen_api_error(['Get MAC from vif failed!VM:', vm_ref])
# def VM_del_ip_map(self, session, vm_ref, vif):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_del_ip_map(session, vm_ref, vif)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_del_ip_map', vm_ref, vif)
# else:
# return self._VM_del_ip_map(session, vm_ref, vif)
#
# def _VM_del_ip_map(self, session, vm_ref, vif):
# mac = None
# mac_rec = self.VIF_get_MAC(session, vif)
# if mac_rec.get('Status') == 'Success':
# mac = mac_rec.get('Value')
# if mac:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# dom.set_ip_map(mac, True)
# return xen_api_success(self._VM_save(dom))
# else:
# log.error('Can not get MAC from vif.')
# return xen_api_error(['Get MAC from vif failed!VM:', vm_ref])
def VM_set_auto_power_on(self, session, vm_ref, val):
'''
@deprecated: not used
'''
return self.VM_set('auto_power_on', session, vm_ref, val)
def VM_set_memory_dynamic_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic max.
@precondition: VM not running, memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_max(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_max', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_max(session, vm_ref, mem)
def _VM_set_memory_dynamic_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic max.
@precondition: VM not running, memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_max(int(mem))
return self._VM_save(dom)
def VM_set_memory_dynamic_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_min(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_min', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_min(session, vm_ref, mem)
def _VM_set_memory_dynamic_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_min(int(mem))
return self._VM_save(dom)
def VM_set_memory_static_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory static max.
@precondition: VM not running, memory static max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_static_max(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_static_max', vm_ref, mem)
else:
return self._VM_set_memory_static_max(session, vm_ref, mem)
def _VM_set_memory_static_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory static max.
@precondition: VM not running, memory static max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_static_max(int(mem))
return self._VM_save(dom)
def VM_set_memory_static_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory static min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_static_min(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_static_min', vm_ref, mem)
else:
return self._VM_set_memory_static_min(session, vm_ref, mem)
def _VM_set_memory_static_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory static min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_static_min(int(mem))
return self._VM_save(dom)
def VM_set_memory_dynamic_max_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic max when VM is running.
@precondition: memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_max_live(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_max_live', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_max_live(session, vm_ref, mem)
def _VM_set_memory_dynamic_max_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic max when VM is running.
@precondition: memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
log.debug('set memory_dynamic_max_live: %s bytes' % mem)
dom.set_memory_dynamic_max(int(mem))
# need to pass target as MiB
dom.setMemoryTarget(int(mem)/1024/1024)
return xen_api_success_void()
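# Example (illustrative): mem is given in bytes, so a 1 GiB live target is
#
#   VM_set_memory_dynamic_max_live(session, vm_uuid, str(1024 * 1024 * 1024))
#
# and the value handed to setMemoryTarget() is 1024 MiB.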
def VM_set_memory_dynamic_min_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic min when VM is running.
@precondition: memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_min_live(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_min_live', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_min_live(session, vm_ref, mem)
def _VM_set_memory_dynamic_min_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic min when VM is running.
@precondition: memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_min(int(mem))
# need to pass target as MiB
dom.setMemoryTarget(int(mem) / 1024 / 1024)
return xen_api_success_void()
def VM_set_VCPUs_params(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('vcpus_params', session, vm_ref, value)
def VM_add_to_VCPUs_params(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
log.debug('in VM_add_to_VCPUs_params')
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'vcpus_params' not in dom.info:
dom.info['vcpus_params'] = {}
dom.info['vcpus_params'][key] = value
return self._VM_save(dom)
def VM_add_to_VCPUs_params_live(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
self.VM_add_to_VCPUs_params(session, vm_ref, key, value)
self._VM_VCPUs_params_refresh(vm_ref)
return xen_api_success_void()
def _VM_VCPUs_params_refresh(self, vm_ref):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
#update the cpumaps
for key, value in xeninfo.info['vcpus_params'].items():
if key.startswith("cpumap"):
log.debug(key)
if len(key) == 6:
continue
vcpu = int(key[6:])
try:
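# parsing validates the cpumap string (the parsed list itself is unused);
# a malformed value raises and is logged below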
cpus = map(int, value.split(","))
xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value)
except Exception, ex:
log.exception(ex)
# need to update the scheduler params as well
if 'weight' in xeninfo.info['vcpus_params'] \
and 'cap' in xeninfo.info['vcpus_params']:
weight = xeninfo.info['vcpus_params']['weight']
xendom.domain_sched_credit_set(xeninfo.getDomid(), weight)
def VM_set_VCPUs_number_live(self, _, vm_ref, num):
'''
@author: wuyuewen
@summary: Set VM's VCPUs number when VM is running.
@precondition: num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param _: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_number_live(_, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_number_live', vm_ref, num)
else:
return self._VM_set_VCPUs_number_live(_, vm_ref, num)
def _VM_set_VCPUs_number_live(self, _, vm_ref, num):
'''
@author: wuyuewen
@summary: Internal method. Set VM's VCPUs number when VM is running.
@precondition: num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param _: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.setVCpuCount(int(num))
return xen_api_success_void()
def VM_remove_from_VCPUs_params(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'vcpus_params' in dom.info \
and key in dom.info['vcpus_params']:
del dom.info['vcpus_params'][key]
return self._VM_save(dom)
else:
return xen_api_success_void()
def VM_set_VCPUs_at_startup(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Set the number of VCPUs the VM starts with.
@todo: does not work
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_at_startup(session, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_at_startup', vm_ref, num)
else:
return self._VM_set_VCPUs_at_startup(session, vm_ref, num)
def _VM_set_VCPUs_at_startup(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Internal method. Set the number of VCPUs the VM starts with.
@todo: does not work
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
return self.VM_set('VCPUs_at_startup', session, vm_ref, num)
def VM_set_VCPUs_max(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Set VM's number of VCPUs.
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_max(session, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_max', vm_ref, num)
else:
return self._VM_set_VCPUs_max(session, vm_ref, num)
def _VM_set_VCPUs_max(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Internal method. Set VM's VCPUs number.
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
dominfo.setVCpuCount(int(num))
return xen_api_success_void()
# return self.VM_set('VCPUs_max', session, vm_ref, num)
def VM_set_actions_after_shutdown(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_NORMAL_EXIT:
return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
return self.VM_set('actions_after_shutdown', session, vm_ref, action)
def VM_set_actions_after_reboot(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_NORMAL_EXIT:
return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
return self.VM_set('actions_after_reboot', session, vm_ref, action)
def VM_set_actions_after_suspend(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_NORMAL_EXIT:
return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
return self.VM_set('actions_after_suspend', session, vm_ref, action)
def VM_set_actions_after_crash(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_CRASH_BEHAVIOUR:
return xen_api_error(['VM_ON_CRASH_BEHAVIOUR_INVALID', vm_ref])
return self.VM_set('actions_after_crash', session, vm_ref, action)
# edit by wufan
# value 'cd': boot from disk first; value 'dc': boot from CD-ROM first
# only takes effect while the VM is not running
def VM_set_boot_order(self, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Set the VM's boot order; value 'cd' boots from disk first, 'dc' boots from CD-ROM first.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param value: cd/dc
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_boot_order(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_boot_order', vm_ref, value)
else:
return self._VM_set_boot_order(session, vm_ref, value)
def _VM_set_boot_order(self, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Internal method. Set the VM's boot order; value 'cd' boots from disk first, 'dc' boots from CD-ROM first.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param value: cd/dc
@return: True | False.
@rtype: dict.
'''
log.debug('set boot order: %s' % value)
# VM_add_to_HVM_boot_params
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'HVM_boot_params' not in dom.info:
dom.info['HVM_boot_params'] = {}
dom.info['HVM_boot_params']['order'] = value
# VM_add_to_platform
plat = dom.get_platform()
plat['boot'] = value
dom.info['platform'] = plat
# VM_set_HVM_boot_policy
dom.info['HVM_boot_policy'] = 'BIOS order'
return self._VM_save(dom)
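# Example (illustrative): after _VM_set_boot_order(session, vm_ref, 'dc')
# the relevant config fields look like:
#
#   dom.info['HVM_boot_params'] = {'order': 'dc'}
#   dom.info['platform']['boot'] = 'dc'
#   dom.info['HVM_boot_policy'] = 'BIOS order'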
# get serial path on host
def VM_get_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get the host TCP endpoint of the VM's platform serial.
@param session: session of RPC.
@param vm_ref: uuid
@return: tuple (ip, port) of the serial TCP endpoint; empty tuple if none is configured.
@rtype: dict.
'''
log.debug('VM get platform serial')
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_platform_serial(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_platform_serial', vm_ref)
else:
return self._VM_get_platform_serial(session, vm_ref)
# get serial devices in platform
def _VM_get_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get the host TCP endpoint of the VM's platform serial.
@param session: session of RPC.
@param vm_ref: uuid
@return: tuple (ip, port) of the serial TCP endpoint; empty tuple if none is configured.
@rtype: dict.
'''
# get serial file path
try:
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
plat = dom.get_platform()
value = plat.get('serial')
index = value.find('tcp:127.0.0.1:')
retv = ()
if index != -1:
# the prefix is 14 characters long; the 5 digits after it are the port
port = value[index + 14:index + 19]
retv = ('127.0.0.1', port)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_error('get serial path failed')
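# Example (illustrative): with plat['serial'] set to
# 'tcp:127.0.0.1:14001,server,nowait', find() matches at index 0 and the
# five characters after the prefix give port '14001', so the method returns
# ('127.0.0.1', '14001').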
# set serial devices in platform
# eg: serial pipe:/tmp/fifotest
def VM_set_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Auto-find a valid host TCP port and set it on the VM's platform serial;
the port range is 14000-15000, see PORTS_FOR_SERIAL.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error:
'''
log.debug('VM_set_platform_serial')
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_platform_serial(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_platform_serial', vm_ref)
else:
return self._VM_set_platform_serial(session, vm_ref)
# set serial devices in platform
def _VM_set_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Auto-find a valid host TCP port and set it on the VM's platform serial;
the port range is 14000-15000, see PORTS_FOR_SERIAL.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error:
'''
# get serial file path
# save in the same path with boot vbd
try:
xennode = XendNode.instance()
sysvdi_path = xennode.get_sysvdi_path_by_vm(vm_ref)
if sysvdi_path == '':
log.debug('Invalid system vdi path in vm_ref: %s' % vm_ref)
return xen_api_error("Invalid system vdi path")
# file_name = 'pipe.out'
# SERIAL_FILE = "%s/%s" % (sysvdi_path, file_name)
# if not os.path.exists(SERIAL_FILE):
# os.system("/usr/bin/mkfifo %s" % SERIAL_FILE)
#
# serial_value = 'pipe:%s' % SERIAL_FILE
# log.debug('set serial value: %s' % serial_value)
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
avail_port = dom.get_free_port()
serial_value = 'tcp:127.0.0.1:%s,server,nowait' % avail_port
log.debug('set serial value: %s' % serial_value)
plat = dom.get_platform()
# log.debug('origin platform serial: %s' % plat['serial'])
plat['serial'] = serial_value
dom.info['platform'] = plat
return self._VM_save(dom)
except Exception, exn:
log.exception(exn)
return xen_api_error('create serial failed')
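# Example (illustrative; the port value is assumed): if get_free_port()
# returns 14001, the platform ends up with
#
#   plat['serial'] = 'tcp:127.0.0.1:14001,server,nowait'
#
# i.e. the device model listens on a host-loopback TCP server socket.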
def VM_send_request_via_serial(self, session, vm_ref, json_obj, flag):
'''
@author: wuyuewen
@summary: Send a request into VM's system use serial device.
@precondition: VM is running, has a serial device, already installed a serial Agent in VM's system.
@param session: session of RPC.
@param vm_ref: uuid
@param json_obj: serial request value use json object.
@param flag: True | False, do/don't checkout whether serial Agent is running in VM or not.
@return: True | False.
@rtype: dict.
'''
log.debug('VM send request via serial')
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_send_request_via_serial(session, vm_ref, json_obj, flag)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_send_request_via_serial', vm_ref, json_obj, flag)
else:
return self._VM_send_request_via_serial(session, vm_ref, json_obj, flag)
def _VM_send_request_via_serial(self, session, vm_ref, json_obj, flag):
'''
@author: wuyuewen
@summary: Internal method. Send a request into the VM's guest system through the serial device.
@precondition: VM is running, has a serial device, and a serial Agent is installed in the guest system.
@param session: session of RPC.
@param vm_ref: uuid
@param json_obj: the request payload as a JSON object.
@param flag: True | False, whether to check that the serial Agent is running in the VM.
@return: True | False.
@rtype: dict.
'''
try:
response = self._VM_get_platform_serial(session, vm_ref)
if cmp(response['Status'], 'Failure') == 0:
return xen_api_success(False)
address = response.get('Value')
if not address:
log.error('VM serial not correct!')
return xen_api_success(False)
(ip, port) = address
retv = Netctl.serial_opt(ip, port, json_obj, flag)
if retv:
return xen_api_success(True)
else:
return xen_api_success(False)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
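# Example (hypothetical sketch; the Agent payload format is an assumption,
# not defined here): send a ping-style request and verify the in-guest
# Agent first:
#
#   ok = api._VM_send_request_via_serial(session, vm_uuid,
#                                        '{"op": "ping"}', True)
#
# The call resolves the (ip, port) serial endpoint and hands the payload
# to Netctl.serial_opt(ip, port, json_obj, flag).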
# edit by wufan
def VM_set_HVM_boot_policy(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_HVM_boot_policy(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_HVM_boot_policy', vm_ref, value)
else:
return self._VM_set_HVM_boot_policy(session, vm_ref, value)
def _VM_set_HVM_boot_policy(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if value != "" and value != "BIOS order":
return xen_api_error(
['VALUE_NOT_SUPPORTED', 'VM.HVM_boot_policy', value,
'Xend supports only the "BIOS order" boot policy.'])
else:
return self.VM_set('HVM_boot_policy', session, vm_ref, value)
def VM_set_HVM_boot_params(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_HVM_boot_params(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_HVM_boot_params', vm_ref, value)
else:
return self._VM_set_HVM_boot_params(session, vm_ref, value)
def _VM_set_HVM_boot_params(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('HVM_boot_params', session, vm_ref, value)
def VM_add_to_HVM_boot_params(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_to_HVM_boot_params(session, vm_ref, key, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_to_HVM_boot_params', vm_ref, key, value)
else:
return self._VM_add_to_HVM_boot_params(session, vm_ref, key, value)
def _VM_add_to_HVM_boot_params(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'HVM_boot_params' not in dom.info:
dom.info['HVM_boot_params'] = {}
dom.info['HVM_boot_params'][key] = value
return self._VM_save(dom)
def VM_remove_from_HVM_boot_params(self, session, vm_ref, key):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_remove_from_HVM_boot_params(session, vm_ref, key)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_remove_from_HVM_boot_params', vm_ref, key)
else:
return self._VM_remove_from_HVM_boot_params(session, vm_ref, key)
def _VM_remove_from_HVM_boot_params(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'HVM_boot_params' in dom.info \
and key in dom.info['HVM_boot_params']:
del dom.info['HVM_boot_params'][key]
return self._VM_save(dom)
else:
return xen_api_success_void()
def VM_set_PV_bootloader(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_bootloader', session, vm_ref, value)
def VM_set_PV_kernel(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_kernel', session, vm_ref, value)
def VM_set_PV_ramdisk(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_ramdisk', session, vm_ref, value)
def VM_set_PV_args(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_args', session, vm_ref, value)
def VM_set_PV_bootloader_args(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_bootloader_args', session, vm_ref, value)
def VM_set_platform(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('platform', session, vm_ref, value)
# edit by wufan
def VM_add_to_platform(self, session, vm_ref, key, value):
'''
@author: wuyuewen
@summary: Change an attribute in the VM platform field.
@precondition: VM not running, key exists in platform field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute in VM platform field.
@param value: value to change.
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_to_platform(session, vm_ref, key, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_to_platform', vm_ref, key, value)
else:
return self._VM_add_to_platform(session, vm_ref, key, value)
def _VM_add_to_platform(self, session, vm_ref, key, value):
'''
@author: wuyuewen
@summary: Internal method. Change an attribute in the VM platform field.
@precondition: VM not running, key exists in platform field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute in VM platform field.
@param value: value to change.
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
try:
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
plat = dom.get_platform()
plat[key] = value
return self.VM_set_platform(session, vm_ref, plat)
except KeyError:
return xen_api_error(['key error', vm_ref, key])
def VM_remove_from_platform(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
plat = dom.get_platform()
if key in plat:
del plat[key]
return self.VM_set_platform(session, vm_ref, plat)
else:
return xen_api_success_void()
def VM_set_other_config(self, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Set VM other config field.
@param session: session of RPC.
@param vm_ref: uuid
@param value: a dict structure of other config.
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_other_config(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_other_config', vm_ref, value)
else:
return self._VM_set_other_config(session, vm_ref, value)
def _VM_set_other_config(self, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Internal method. Set VM other config field.
@param session: session of RPC.
@param vm_ref: uuid
@param value: a dict structure of other config.
@return: True | False.
@rtype: dict.
'''
return self.VM_set('other_config', session, vm_ref, value)
def VM_add_to_other_config(self, session, vm_ref, key, value):
'''
@author: wuyuewen
@summary: Add an attribute to the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute key.
@param value: attribute value.
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_to_other_config(session, vm_ref, key, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_to_other_config', vm_ref, key, value)
else:
return self._VM_add_to_other_config(session, vm_ref, key, value)
def _VM_add_to_other_config(self, session, vm_ref, key, value):
'''
@author: wuyuewen
@summary: Internal method. Add an attribute to the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute key.
@param value: attribute value.
@return: True | False.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom and 'other_config' in dom.info:
dom.info['other_config'][key] = value
return self._VM_save(dom)
def VM_add_tags(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_tags(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_tags', vm_ref, value)
else:
return self._VM_add_tags(session, vm_ref, value)
def _VM_add_tags(self, session, vm_ref, value):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom and 'tags' in dom.info:
dom.info['tags'].append(value)
return self._VM_save(dom)
def VM_set_tags(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_tags(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_tags', vm_ref, value)
else:
return self._VM_set_tags(session, vm_ref, value)
def _VM_set_tags(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('tags', session, vm_ref, value)
def _VM_update_rate(self, session, vm_ref, type, vif_refs):
'''
@deprecated: not used
'''
eth_list = []
for vif_ref in vif_refs:
device = self.VIF_get_device(session, vif_ref).get('Value')
if device != '' and device.startswith('eth'):
eth_num = device[3:]
eth_list.append(eth_num)
#log.debug("--------------->eth list:%s" % eth_list)
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
final_tag_list = {}
try:
other_config = self.VM_get_other_config(session, vm_ref).get('Value')
#log.debug('VM update tag')
if other_config:
tag_list = other_config.get(type, {})
if tag_list and isinstance(tag_list, dict):
for key, value in tag_list.items():
if key in eth_list:
final_tag_list[key] = value
dominfo.info['other_config'][type] = final_tag_list
self._VM_save(dominfo)
log.debug('VM_update_%s' % type)
return xen_api_success_void()
except Exception, exn:
log.exception(exn)
return xen_api_success_void()
#add by wufan
def _VM_update_tag(self, session, vm_ref, vif_refs):
'''
@deprecated: not used
'''
eth_list = []
for vif_ref in vif_refs:
device = self.VIF_get_device(session, vif_ref).get('Value')
if device != '' and device.startswith('eth'):
eth_num = device[3:]
eth_list.append(eth_num)
#log.debug("--------------->eth list:%s" % eth_list)
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
final_tag_list = {}
try:
other_config = self.VM_get_other_config(session, vm_ref).get('Value')
#log.debug('VM update tag')
if other_config:
tag_list = other_config.get('tag', {})
if tag_list and isinstance(tag_list, dict):
for key, value in tag_list.items():
if key in eth_list:
final_tag_list[key] = value
dominfo.info['other_config']['tag'] = final_tag_list
self._VM_save(dominfo)
log.debug('VM_update_tag')
return xen_api_success_void()
except Exception, exn:
log.exception(exn)
return xen_api_success_void()
#add by wufan
def VM_set_all_rate(self, session, vm_ref, param_type, tag_list=None):
'''
@author: wuyuewen
@summary: Set the rate or burst limit, enforced by OVS, for all of the VM's VIFs;
these attributes are stored in the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate | burst; rate is the rate (kbps) of the VIF port enforced by OVS,
burst (kbps) is the allowed traffic burst above that rate.
@param tag_list: dict of rate for each VIF, the structure is {eth_num : rate}, e.g. {0:1000, 1:1000}
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_all_rate(session, vm_ref, param_type, tag_list)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_all_rate', vm_ref, param_type, tag_list)
else:
return self._VM_set_all_rate(session, vm_ref, param_type, tag_list)
#add by wufan
def _VM_set_all_rate(self, session, vm_ref, type, tag_list=None):
'''
@author: wuyuewen
@summary: Internal method. Set the rate or burst limit, enforced by OVS, for all of the VM's VIFs;
these attributes are stored in the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param type: rate | burst; rate is the rate (kbps) of the VIF port enforced by OVS,
burst (kbps) is the allowed traffic burst above that rate.
@param tag_list: dict of rate per VIF, structured as {eth_num: rate}, e.g. {0: 1000, 1: 1000}
@return: True | False.
@rtype: dict.
'''
log.debug('set vm all type: %s' % type)
if tag_list is None:
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
#log.debug('dom info %s' % dominfo.info)
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
for vif_ref in vif_refs:
tag = self._VM_get_rate(session, vm_ref, type, vif_ref).get('Value')
self._VM_set_rate( session, vm_ref, type, vif_ref, tag)
self._VM_update_rate(session, vm_ref, type, vif_refs)
else:
for eth_num, tag in tag_list.items():
self._VM_set_rate_by_ethnum(session, vm_ref, type, eth_num, tag)
return xen_api_success_void()
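# Example (illustrative): apply a 1000 kbps rate limit to eth0 and eth1 in
# one call; passing tag_list skips the per-VIF lookup path above:
#
#   api._VM_set_all_rate(session, vm_uuid, 'rate', {0: '1000', 1: '1000'})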
def VM_get_dev2path_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return xen_api_success(self._VM_get_dev2path_list(session, vm_ref))
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_dev2path_list', vm_ref)
else:
return xen_api_success(self._VM_get_dev2path_list(session, vm_ref))
def _VM_get_dev2path_list(self, session, vm_ref):
'''
@summary: Internal method. Get a {device: img_path} mapping for the VM's disks,
e.g. {'hda': '/home/sr_mount/2133.vhd'}.
'''
storage = self._get_BNStorageAPI_instance()
dev2path_list = {}
vbd_refs = self._VM_get_VBDs(session, vm_ref).get('Value')
for vbd_ref in vbd_refs:
if self._VBD_get(vbd_ref, 'type').get('Value').lower() == 'disk':
dev = self._VBD_get(vbd_ref, 'device').get('Value')
# vdi_ref = self._VBD_get(vbd_ref, 'VDI').get('Value')
location = self._VBD_get(vbd_ref, 'uname').get('Value')
# location = storage._get_VDI(vdi_ref).location
dev2path_list[dev] = location
log.debug('_VM_get_dev2path_list')
log.debug(dev2path_list)
return dev2path_list
# when the VM starts, called asynchronously to find the IO pids
def VM_start_set_IO_limit(self, session, vm_ref, io_limit_list={}):
'''
@author: wuyuewen
@summary: Called when the VM starts: asynchronously locate the VM's IO processes and apply the disk IO rate limits.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return XendTask.log_progress(0, 100,
self.VM_start_init_pid2dev, session, vm_ref, io_limit_list)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_start_init_pid2dev', vm_ref, io_limit_list)
else:
return XendTask.log_progress(0, 100,
self.VM_start_init_pid2dev, session, vm_ref, io_limit_list)
# local call, called in VM_start_set_IO_limit
def VM_start_init_pid2dev(self, session, vm_ref, io_limit_list):
'''
@author: wuyuewen
@summary: Internal method. Wait for the domain id and IO pids to appear, then apply the supplied or previously stored IO rate limits.
'''
log.debug('VM_start_init_pid2dev')
max_count = 0
while max_count < 5:
max_count += 1
dom_id = self._VM_get_domid('', vm_ref).get('Value')
if dom_id and str(dom_id) != '-1':
break
time.sleep(2)
if not dom_id or str(dom_id) == '-1':
log.exception('Init pid2dev failed, invalid dom id!')
return xen_api_success_void()
max_count = 0
while max_count < 5:
max_count += 1
pid2dev_list = XendIOController.get_VM_pid2dev(dom_id)
if pid2dev_list:
break
time.sleep(2)
log.debug('get pid2dev_list:')
log.debug(pid2dev_list)
# self._VM_init_pid2devnum_list(session, vm_ref)
if io_limit_list:
for k, v in io_limit_list.items():
(type, io_unit) = k.split('_')
log.debug('Set disk io rate, type: %s %s, value: %s' % (type, io_unit, v))
self._VM_set_IO_rate_limit(session, vm_ref, type, v, io_unit)
else:
for type in ['read', 'write']:
for io_unit in ['MBps', 'iops']:
rate = self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit).get('Value')
if rate != '-1':
log.debug('Set disk io rate, type: %s %s, value: %s' % (type, io_unit, rate))
self._VM_set_IO_rate_limit(session, vm_ref, type, rate, io_unit)
return xen_api_success_void()
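# Example (illustrative): io_limit_list keys are '<type>_<io_unit>'
# strings, so limiting reads to 50 MBps and writes to 200 iops looks like:
#
#   VM_start_set_IO_limit(session, vm_uuid,
#                         {'read_MBps': '50', 'write_iops': '200'})
#
# With an empty dict, previously stored per-VM limits are re-applied.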
def VM_init_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
@summary: Get {VM_pid: (major, minor)} for each of the VM's IO processes and
cache the result in memory; called when the VM is started or migrated.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_init_pid2devnum_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_init_pid2devnum_list', vm_ref)
else:
return self._VM_init_pid2devnum_list(session, vm_ref)
def _VM_init_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
log.debug("VM_init_pid2devnum_list")
dev2path_list = self._VM_get_dev2path_list(session, vm_ref)
dom_id = self._VM_get_domid('', vm_ref).get('Value')
pid2devnum_list = XendIOController.get_VM_pid2num(dom_id, dev2path_list)
return self._VM_set_pid2devnum_list(session, vm_ref, pid2devnum_list)
#clear old pid2devnum_list before set
def _VM_set_pid2devnum_list(self, session, vm_ref, pid2devnum_list):
'''
@deprecated: not used
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('Set vm(%s) pid2devnum:' %(domname))
log.debug(pid2devnum_list)
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config']['pid2dev'] = {} #clear pid2dev_list
for pid, devnum in pid2devnum_list.items():
dominfo.info['other_config']['pid2dev'][pid] = devnum
self._VM_save(dominfo)
return xen_api_success(dominfo.info['other_config']['pid2dev'])
def VM_clear_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_clear_pid2devnum_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_clear_pid2devnum_list', vm_ref)
else:
return self._VM_clear_pid2devnum_list(session, vm_ref)
def _VM_clear_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('clear vm(%s) pid2devnum:' %(domname))
if dominfo.info.get('other_config', {}) and \
'pid2dev' in dominfo.info['other_config']:
del dominfo.info['other_config']['pid2dev']
self._VM_save(dominfo)
return xen_api_success_void()
def VM_get_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_pid2devnum_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_pid2devnum_list', vm_ref)
else:
return self._VM_get_pid2devnum_list(session, vm_ref)
def _VM_get_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
try:
pid2num_list = {}
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
if other_config:
pid2num_list = other_config.get('pid2dev',{})
# if it cannot be fetched from memory, fall back to executing the command
if not pid2num_list:
log.debug("can't get pid2devnum_list from memory, execute cmd")
pid2num_list = self._VM_init_pid2devnum_list(session, vm_ref).get('Value')
log.debug(pid2num_list)
return xen_api_success(pid2num_list)
except Exception, exn:
log.exception(exn)
return xen_api_success(pid2num_list)
def VM_get_vbd2device_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_vbd2device_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_vbd2device_list', vm_ref)
else:
return self._VM_get_vbd2device_list(session, vm_ref)
def _VM_get_vbd2device_list(self, session, vm_ref):
'''
@deprecated: not used
'''
try:
vbd2device_list = {}
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
if other_config:
vbd2device_list = other_config.get('vbd2device',{})
return xen_api_success(vbd2device_list)
except Exception, exn:
log.exception(exn)
return xen_api_success(vbd2device_list)
'''
type: read | write
flag = True: execute cgroup cmd
flag = False: just set the value in the config file
'''
def VM_set_IO_rate_limit(self, session, vm_ref, type, value, io_unit):
'''
@author: wuyuewen
@summary: Set VM disk IO rate via cgroup; can set both read and write rate (MBps or iops).
@param session: session of RPC.
@param vm_ref: uuid
@param type: read/write.
@param value: rate (MBps or iops).
@param io_unit: MBps | iops
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_IO_rate_limit(session, vm_ref, type, value, io_unit)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_IO_rate_limit', vm_ref, type, value, io_unit)
else:
return self._VM_set_IO_rate_limit(session, vm_ref, type, value, io_unit)
def _VM_set_IO_rate_limit(self, session, vm_ref, type, value, io_unit):
'''
@deprecated: not used
'''
# use /cgroup/blkio to control
try:
value = int(value)
if value >= 0:
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
tag = '%s_%s_rate' % (type, io_unit)
log.debug('Set vm(%s) %s: %s %s' % (dominfo.getName(), tag, value, io_unit))
dom_id = dominfo.getDomid()
dev2path_list = self._VM_get_dev2path_list(session, vm_ref)
pid2num_list = XendIOController.get_VM_pid2num(dom_id, dev2path_list)
XendIOController.set_VM_IO_rate_limit(pid2num_list, type, value, io_unit)
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config'][tag] = value
self._VM_save(dominfo)
# log.debug("current dominfo:>>>>>>>>>>>>")
# log.debug(dominfo.info['other_config'])
return xen_api_success_void()
elif value == -1:
tag = '%s_%s_rate' % (type, io_unit)
log.debug('%s does not have a limit value' % tag)
return xen_api_success_void()
else:
log.exception('VM set IO rate limit: value invalid')
return xen_api_error(['Value invalid'])
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
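# Minimal usage sketch (illustrative, not part of the original file): the
# same dispatch helper used above can invoke this call on a remote host;
# the ip and uuid below are hypothetical.
#
#   xen_rpc_call('10.0.0.2', 'VM_set_IO_rate_limit', vm_uuid, 'write', 50, 'MBps')
#
# which throttles that VM's disk writes to 50 MBps through /cgroup/blkio.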
'''
limit vm rate:
flag = True: save config and execute cgroup cmd
flag = False: just save the limit rate config
'''
def _VM_set_IO_rate_limit_1(self, session, vm_ref, type, value, io_unit):
'''
@author: wuyuewen
@summary: Internal method. Set VM disk IO rate via cgroup; can set both read and write rate (MBps or iops).
@param session: session of RPC.
@param vm_ref: uuid
@param type: read/write.
@param value: rate (MBps or iops).
@param io_unit: MBps | iops
@return: True | False.
@rtype: dict.
'''
# use /cgroup/blkio to control
try:
value = int(value)
if value >= 0:
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
tag = '%s_%s_rate' % (type, io_unit)
log.debug('Set vm(%s) %s: %s %s' % (domname, tag, value, io_unit))
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
XendIOController.set_VM_IO_rate_limit(pid2num_list, type, value, io_unit)
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config'][tag] = value
self._VM_save(dominfo)
# log.debug("current dominfo:>>>>>>>>>>>>")
# log.debug(dominfo.info['other_config'])
return xen_api_success_void()
else:
log.exception('VM set IO rate limit: value invalid')
return xen_api_error(['Value invalid'])
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
def VM_get_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_IO_rate_limit', vm_ref, type, io_unit)
else:
return self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit)
def _VM_get_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
rate = '-1'
tag = '%s_%s_rate' % (type, io_unit)
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
if other_config:
rate = other_config.get(tag,'-1')
return xen_api_success(rate)
except Exception, exn:
log.exception(exn)
return xen_api_success(rate)
def VM_clear_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_clear_IO_rate_limit(session, vm_ref, type, io_unit)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_clear_IO_rate_limit', vm_ref, type, io_unit)
else:
return self._VM_clear_IO_rate_limit(session, vm_ref, type, io_unit)
def _VM_clear_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
# use /cgroup/blkio to control
XendIOController.clear_VM_IO_rate_limit(pid2num_list, type, io_unit)
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
tag = '%s_%s_rate' % (type, io_unit)
log.debug('clear vm(%s) %s' %(domname, tag))
if dominfo.info.get('other_config', {}) and tag in dominfo.info['other_config']:
del dominfo.info['other_config'][tag] #clear config
self._VM_save(dominfo)
return xen_api_success_void()
def _VM_clean_IO_limit_shutdown(self, session, vm_ref):
'''
@deprecated: not used
'''
log.debug('shutdown clean: pid2dev and rate limit in cgroup file')
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
for type in ['read', 'write']:
for io_unit in ['MBps', 'iops']:
XendIOController.clear_VM_IO_rate_limit(pid2num_list, type, io_unit)
self._VM_clear_pid2devnum_list(session, vm_ref)
return xen_api_success_void()
def VM_set_rate(self, session, vm_ref, param_type, vif_ref, value):
'''
@author: wuyuewen
@summary: Set VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
burst(kbps) is the volatility overhead rate.
@param vif_ref: VIF uuid
@param value: VIF's rate(kbps)
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_rate(session, vm_ref, param_type, vif_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_rate', vm_ref, param_type, vif_ref,value)
else:
return self._VM_set_rate(session, vm_ref, param_type, vif_ref, value)
def _VM_set_rate(self, session, vm_ref, param_type, vif_ref, value):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
burst(kbps) is the volatility overhead rate.
@param vif_ref: VIF uuid
@param value: VIF's rate(kbps)
@return: True | False.
@rtype: dict.
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('Set vm(%s) %s %s:%s' %(domname, str(vif_ref), param_type, value))
device = self.VIF_get_device(session, vif_ref).get('Value')
log.debug('vif_ref:%s VM_set_%s:%s rate:%s' % (vif_ref, param_type, device, value))
template = False
eth_num = ''
if device != '' and device.startswith('eth'):
eth_num = device[3:]
elif not device:
#log.debug('dom info %s' % dominfo.info)
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
#log.debug('vif refs: %s' % vif_refs)
try:
eth_num = str(vif_refs.index(vif_ref))
template = True
#log.debug('>>>>>>>eth_num" %s' % eth_num)
except Exception:
eth_num = ''
if eth_num != '':
log.debug('eth_num : %s ' % eth_num)
try:
if not template:
dominfo.set_rate(param_type, int(eth_num), value) # ovs_cmd
#self._VM_get_other_config(session, vm_ref) # in order to convert other_config
dominfo.info.setdefault('other_config',{})
tag_list = dominfo.info['other_config'].setdefault(param_type,{})
dominfo.info['other_config'][param_type][eth_num] = value
#log.debug('other_config: %s' % value)
return self._VM_save(dominfo)
except Exception,exn:
log.debug(exn)
return xen_api_error(['device name invalid', device])
return xen_api_success_void()
def _VM_set_rate_by_ethnum(self, session, vm_ref, param_type, eth_num, value):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
burst(kbps) is the volatility overhead rate.
@param eth_num: eth_num of VIF
@param value: VIF's rate(kbps)
@return: True | False.
@rtype: dict.
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('VM_set_%s:%s rate:%s' % ( param_type, eth_num, value))
dominfo.set_rate(param_type, int(eth_num), value) # ovs_cmd
#self._VM_get_other_config(session, vm_ref) # in order to convert other_config
dominfo.info.setdefault('other_config',{})
tag_list = dominfo.info['other_config'].setdefault(param_type,{})
dominfo.info['other_config'][param_type][eth_num] = value
return self._VM_save(dominfo)
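# Illustrative sketch (hypothetical values): after setting rate/burst the
# VM's other_config holds one sub-dict per param_type, keyed by eth number:
#
#   dominfo.info['other_config'] == {'rate': {'0': '1000'},
#                                    'burst': {'0': '100'}}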
#add by wufan
def VM_set_all_tag(self, session, vm_ref, tag_list=None):
'''
@author: wuyuewen
@summary: Set all VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param tag_list: dict of tag for each VIF, the structure is {eth_num, tag_num} , e.g. {0:1, 1:2}
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_all_tag(session, vm_ref, tag_list)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_all_tag', vm_ref, tag_list)
else:
return self._VM_set_all_tag(session, vm_ref, tag_list)
#add by wufan
def _VM_set_all_tag(self, session, vm_ref, tag_list=None):
'''
@author: wuyuewen
@summary: Internal method. Set all VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param tag_list: dict of tag for each VIF, the structure is {eth_num, tag_num} , e.g. {0:1, 1:2}
@return: True | False
@rtype: dict.
'''
log.debug('set vm all tag')
if tag_list is None:
# xd = XendDomain.instance()
# dominfo = xd.get_vm_by_uuid(vm_ref)
# log.debug('dom info %s' % dominfo.info)
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
for vif_ref in vif_refs:
tag = self._VM_get_tag(session, vm_ref, vif_ref).get('Value')
#log.debug('tag:%s' % str(tag))
self._VM_set_tag( session, vm_ref, vif_ref, tag)
self._VM_update_tag(session, vm_ref, vif_refs)
else:
#tag_list is a dict
#log.debug('tag_list:%s' % tag_list)
for eth_num, tag in tag_list.items():
self._VM_set_tag_by_ethnum(session, vm_ref, eth_num, tag)
return xen_api_success_void()
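# Usage sketch (hypothetical values): set VLAN 2 on eth0 and VLAN 3 on eth1
# in one call; passing tag_list=None instead re-applies the tags already
# stored in other_config to the running VIFs:
#
#   self._VM_set_all_tag(session, vm_ref, {'0': '2', '1': '3'})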
def VM_set_tag(self, session, vm_ref, vif_ref, value, ovs=None):
'''
@author: wuyuewen
@summary: Set VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_tag(session, vm_ref, vif_ref, value, ovs)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_tag', vm_ref, vif_ref, value, ovs)
else:
return self._VM_set_tag(session, vm_ref, vif_ref, value, ovs)
#original by wuyuewen
#def _VM_set_tag(self, session, vm_ref, value):
# xd = XendDomain.instance()
# dominfo = xd.get_vm_by_uuid(vm_ref)
# domname = dominfo.getName()
# tag = self._VM_get_tag(session, vm_ref).get('Value')
# if tag:
# log.debug('Set vm(%s) vlan: %s' % (domname, value))
# dominfo.set_tag(value)
# return self._VM_add_to_other_config(session, vm_ref, "tag", value)
#add by wufan
def _VM_set_tag(self, session, vm_ref, vif_ref, value, ovs):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@return: True | False
@rtype: dict.
'''
xennode = XendNode.instance()
xenapi = self._get_XendAPI_instance()
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
if not xd.is_valid_dev("vif", vif_ref):
return xen_api_error(['VIF_NOT_EXISTS'])
device = self.VIF_get_device(session, vif_ref).get('Value')
bridge = xd.get_dev_property_by_uuid('vif', vif_ref, 'bridge')
# network_org = xd.get_dev_property_by_uuid('vif', vif_ref, 'network')
log.debug('Set vm(%s) %s vlan: %s ovs: %s bridge: %s' %(domname, str(vif_ref), value, ovs, bridge))
# log.debug('vif_ref:%s VM_set_tag:%s vlanid:%s' % (vif_ref, device, value))
eth_num = -1
if device and device.startswith('eth'):
eth_num = device[3:]
else:
vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
if vif_ref in vifs:
eth_num = vifs.index(vif_ref)
if ovs and cmp(ovs, bridge) != 0:
xennode._init_networks()
is_valid_network = xennode.is_valid_network(ovs)
if not is_valid_network:
return xen_api_error(['OVS_NOT_EXISTS'])
network_new = None
list_network_new = xenapi.network_get_by_name_label(session, ovs).get('Value')
if list_network_new:
network_new = list_network_new[0]
dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, ovs, bridge)
try:
# rc = self._VIF_set(vif_ref, 'network', network_new, network_org)
rc1 = self._VIF_set(vif_ref, 'bridge', ovs, bridge)
if not rc1:
dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, bridge, ovs)
return xen_api_error(['VIF_SET_BRIDGE_ERROR'])
except Exception, e:
dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, bridge, ovs)
raise e
else:
dominfo.set_tag(int(eth_num), value) # ovs_cmd
#self._VM_get_other_config(session, vm_ref) # in order to convert other_config
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config'].setdefault('tag',{})
dominfo.info['other_config']['tag'][eth_num] = value
self._VM_save(dominfo)
return xen_api_success_void()
def _VM_set_tag_by_ethnum(self, session, vm_ref, eth_num, value):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's tag(VLAN-ID) by eth_num, this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param eth_num: eth_num of VIF
@param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@return: True | False
@rtype: dict.
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('Set vm(%s) %s vlan:%s' %(domname, str(eth_num), value))
dominfo.set_tag(int(eth_num), value) # ovs_cmd
#self._VM_get_other_config(session, vm_ref) # in order to convert other_config
dominfo.info.setdefault('other_config',{})
tag_list = dominfo.info['other_config'].setdefault('tag',{})
dominfo.info['other_config']['tag'][eth_num] = value
return self._VM_save(dominfo)
def VM_remove_from_other_config(self, session, vm_ref, key):
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_remove_from_other_config(session, vm_ref, key)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_remove_from_other_config', vm_ref, key)
else:
return self._VM_remove_from_other_config(session, vm_ref, key)
def _VM_remove_from_other_config(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom and 'other_config' in dom.info \
and key in dom.info['other_config']:
del dom.info['other_config'][key]
return self._VM_save(dom)
else:
return xen_api_success_void()
def VM_get_crash_dumps(self, _, vm_ref):
'''
@deprecated: not used
'''
return xen_api_todo()
def verify(self, ip):
'''
@deprecated: not used
'''
try:
proxy = ServerProxy("http://" + ip + ":9363/")
response = proxy.session.login('root')
except socket.error:
return False
else:
if cmp(response['Status'], 'Failure') == 0:
return False
return True
def VM_get_suspend_VDI(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_suspend_VDI(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_suspend_VDI', vm_ref)
else:
return self._VM_get_suspend_VDI(session, vm_ref)
def _VM_get_suspend_VDI(self, session, vm_ref):
'''
@deprecated: not used
'''
xennode = XendNode.instance()
return xen_api_success(xennode.get_suspend_VDI(vm_ref))
def VM_get_suspend_SR(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_suspend_SR(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_suspend_SR', vm_ref)
else:
return self._VM_get_suspend_SR(session, vm_ref)
def _VM_get_suspend_SR(self, session, vm_ref):
'''
@deprecated: not used
'''
xennode = XendNode.instance()
return xen_api_success(xennode.get_suspend_SR(vm_ref))
# class methods
def VM_get_all_and_consoles(self, session):
'''
@deprecated: not used
'''
VM_and_consoles = {}
for d in XendDomain.instance().list('all'):
vm_uuid = d.get_uuid()
if cmp(vm_uuid, DOM0_UUID) == 0:
continue
dom = XendDomain.instance().get_vm_by_uuid(vm_uuid)
vm_consoles = []
for console in dom.get_consoles():
vm_consoles.append(console)
VM_and_consoles[vm_uuid] = vm_consoles
return xen_api_success(VM_and_consoles)
# def VM_get_all(self, session):
# refs = self._VM_get_all()
# if BNPoolAPI._isMaster:
# host_ref = XendNode.instance().uuid
# for key in BNPoolAPI.get_hosts():
# if cmp(key, host_ref) != 0:
# ip = BNPoolAPI.get_host_ip(key)
# refs += xen_rpc_call(ip, "VM_get_all")
#
# return xen_api_success(refs)
def VM_get_all(self, session):
'''
@author: wuyuewen
@summary: Get all guest VMs.
@param session: session of RPC.
@return: list of VMs uuid.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
refs = []
refs.extend(self._VM_get_all(session).get('Value'))
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
# log.debug(remote_ip)
refs.extend(xen_rpc_call(remote_ip, 'VM_get_all').get('Value'))
return xen_api_success(refs)
else:
return self._VM_get_all(session)
def _VM_get_all(self, session):
'''
@author: wuyuewen
@summary: Internal method. Get all guest VMs.
@param session: session of RPC.
@return: list of VMs uuid.
@rtype: dict.
'''
refs = [d.get_uuid() for d in XendDomain.instance().list('all')
if d.get_uuid() != DOM0_UUID]
if refs:
return xen_api_success(refs)
else:
return xen_api_success([])
def VM_get_by_name_label(self, session, label):
'''
@author: wuyuewen
@summary: Get VM by VM's name label.
@param session: session of RPC.
@param label: name label of VM
@return: VM.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
refs = []
refs.extend(self._VM_get_by_name_label(session, label)['Value'])
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
refs.extend(xen_rpc_call(remote_ip, 'VM_get_by_name_label', label)['Value'])
return xen_api_success(refs)
else:
return self._VM_get_by_name_label(session, label)
def _VM_get_by_name_label(self, session, label):
'''
@author: wuyuewen
@summary: Internal method. Get VM by VM's name label.
@param session: session of RPC.
@param label: name label of VM
@return: VM.
@rtype: dict.
'''
xendom = XendDomain.instance()
uuids = []
dom = xendom.domain_lookup_by_name_label(label)
if dom:
return xen_api_success([dom.get_uuid()])
return xen_api_success([])
def VM_get_security_label(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
label = dom.get_security_label()
return xen_api_success(label)
def VM_set_security_label(self, session, vm_ref, sec_label, old_label):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
(rc, errors, oldlabel, new_ssidref) = \
dom.set_security_label(sec_label, old_label)
if rc != xsconstants.XSERR_SUCCESS:
return xen_api_error(['SECURITY_ERROR', rc,
xsconstants.xserr2string(-rc)])
if rc == 0:
rc = new_ssidref
return xen_api_success(rc)
def VM_create_on(self, session, vm_struct, host_ref):
'''
@author: wuyuewen
@summary: A Pool range method, create a VM on a Host in Pool.
@precondition: vm_struct is legal, vm name not duplicated.
@param session: session of RPC.
@param vm_struct: dict of vm structure
@param host_ref: VM create on which Host.
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
if BNPoolAPI._isMaster:
log.debug(vm_struct)
newuuid = vm_struct.get('uuid', None)
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vm_label = vm_struct.get('nameLabel')
vms = self.VM_get_by_name_label(session, vm_label)
if vms.get('Value'):
return xen_api_error(['VM name already exists', 'VM', vm_label])
else:
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(remote_ip, 'VM_create_on', vm_struct, host_ref)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, host_ref)
return response
else:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
return response
def VM_create(self, session, vm_struct):
'''
@author: wuyuewen
@summary: A Host range method, create a VM on this Host.
@precondition: vm_struct is legal, vm name not duplicated.
@param session: session of RPC.
@param vm_struct: dict of vm structure
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
if BNPoolAPI._isMaster:
newuuid = vm_struct.get('uuid', None)
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vm_label = vm_struct.get('nameLabel')
vms = self.VM_get_by_name_label(session, vm_label)
if vms.get('Value'):
return xen_api_error(['VM name already exists', 'VM', vm_label])
else:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
return response
else:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
log.debug("new vm local uuid : %s", domuuid)
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
return response
def _VM_create(self, session, vm_struct):
'''
@author: wuyuewen
@summary: Internal method. Create a VM on this Host.
@precondition: vm_struct is legal, vm name not duplicated.
@param session: session of RPC.
@param vm_struct: dict of vm structure
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100,
xendom.create_domain, vm_struct)
return xen_api_success(domuuid)
def _VM_create_check_vm_uuid_unique(self, newuuid):
if newuuid:
return BNPoolAPI.check_vm_uuid_unique(newuuid)
else:
return True
def VM_create_from_vmstruct(self, session, vm_struct):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100,
xendom.create_domain, vm_struct)
return xen_api_success(domuuid)
def VM_create_from_sxp(self, session, path, start_it=False, update_pool_structs=True):
'''
@author: wuyuewen
@summary: Create a VM on this Host from .sxp file.
@precondition: sxp file is legal, vm name not duplicated.
@param session: session of RPC.
@param path: path of sxp file
@param start_it: Start the VM after create, if start_it=True, Host must have enough free memory.
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
# filename = '/home/share/config.sxp'
try:
sxp_obj = sxp.parse(open(path, 'r'))
sxp_obj = sxp_obj[0]
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100,
xendom.domain_new, sxp_obj)
if update_pool_structs:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
if start_it:
# try:
response = self._VM_start(session, domuuid, False, True)
if cmp(response['Status'], 'Failure') == 0:
self._VM_destroy(session, domuuid, False)
return response
# except Exception, exn:
# self._VM_destroy(session, domuuid, False)
# return xen_api_error(['VM_START_FAILED', 'VM', domuuid])
return response
else:
return xen_api_success(domuuid)
except IOError, e:
return xen_api_error(["Unable to read file: %s" % path])
except Exception, exn:
log.exception(exn)
return xen_api_error(['Create from sxp failed!'])
# finally:
# cmd = 'rm -f %s' % path
# doexec(cmd)
# return XendTask.log_progress(0, 100, do_vm_func,
# "domain_start", domuuid, False, False)
# object methods
def VM_get_record(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's record.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: VM record
@rtype: dict.
@raise xen_api_error: VM not exists
'''
#log.debug('=================vm_get_record:%s' % vm_ref)
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_record(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_record', vm_ref)
else:
return self._VM_get_record(session, vm_ref)
def _VM_get_record(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's record.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: VM record
@rtype: dict.
@raise xen_api_error: VM not exists
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
xennode = XendNode.instance()
if not xeninfo:
log.debug("can not find vm:" + vm_ref)
return xen_api_error(['HANDLE_INVALID', 'VM', vm_ref])
domid = xeninfo.getDomid()
dom_uuid = xeninfo.get_uuid()
record = {
'uuid': dom_uuid,
'power_state': xeninfo.get_power_state(),
'name_label': xeninfo.getName(),
'name_description': xeninfo.getNameDescription(),
'user_version': 1,
'is_a_template': xeninfo.info['is_a_template'],
'is_local_vm' : self._VM_get_is_local_vm(session, vm_ref).get("Value", True),
'ip_addr' : xeninfo.getDomainIp(),
'MAC' : xeninfo.getDomainMAC(),
'auto_power_on': xeninfo.info['auto_power_on'],
'resident_on': XendNode.instance().uuid,
'memory_static_min': xeninfo.get_memory_static_min(),
'memory_static_max': xeninfo.get_memory_static_max(),
'memory_dynamic_min': xeninfo.get_memory_dynamic_min(),
'memory_dynamic_max': xeninfo.get_memory_dynamic_max(),
'VCPUs_params': xeninfo.get_vcpus_params(),
'VCPUs_at_startup': xeninfo.getVCpuCount(),
'VCPUs_max': xeninfo.getVCpuCount(),
'actions_after_shutdown': xeninfo.get_on_shutdown(),
'actions_after_reboot': xeninfo.get_on_reboot(),
'actions_after_suspend': xeninfo.get_on_suspend(),
'actions_after_crash': xeninfo.get_on_crash(),
'consoles': xeninfo.get_consoles(),
'VIFs': xeninfo.get_vifs(),
'VBDs': xeninfo.get_vbds(),
'VTPMs': xeninfo.get_vtpms(),
'DPCIs': xeninfo.get_dpcis(),
'DSCSIs': xeninfo.get_dscsis(),
'DSCSI_HBAs': xeninfo.get_dscsi_HBAs(),
'PV_bootloader': xeninfo.info.get('PV_bootloader'),
'PV_kernel': xeninfo.info.get('PV_kernel'),
'PV_ramdisk': xeninfo.info.get('PV_ramdisk'),
'PV_args': xeninfo.info.get('PV_args'),
'PV_bootloader_args': xeninfo.info.get('PV_bootloader_args'),
'HVM_boot_policy': xeninfo.info.get('HVM_boot_policy'),
'HVM_boot_params': xeninfo.info.get('HVM_boot_params'),
'platform': xeninfo.get_platform(),
'PCI_bus': xeninfo.get_pci_bus(),
'tools_version': xeninfo.get_tools_version(),
'other_config': xeninfo.info.get('other_config', {}),
'tags' : xeninfo.info.get('tags', []),
'domid': domid is None and -1 or domid,
'is_control_domain': xeninfo.info['is_control_domain'],
'metrics': xeninfo.get_metrics(),
'cpu_qos': xeninfo.get_cpu_qos(),
'security_label': xeninfo.get_security_label(),
'crash_dumps': [],
'suspend_VDI' : xennode.get_suspend_VDI(dom_uuid),
'suspend_SR' : xennode.get_suspend_SR(dom_uuid),
'connected_disk_SRs' : xennode.get_connected_disk_sr(dom_uuid),
'connected_iso_SRs' : xennode.get_connected_iso_sr(dom_uuid),
'pool_name': xeninfo.info.get('pool_name'),
# 'cpu_pool' : XendCPUPool.query_pool_ref(xeninfo.get_cpu_pool()),
}
#log.debug(record)
return xen_api_success(record)
# def VM_get_record_lite(self, session, vm_ref=''):
# if BNPoolAPI._isMaster:
# hosts = self.host_get_all(session).get('Value', '')
# node = XendNode.instance()
# records = []
# if hosts:
# for host in hosts:
# if cmp(node.uuid, host) == 0:
# records.append(self._VM_get_record_lite(session))
# else:
# host_ip = BNPoolAPI.get_host_ip(host)
# records.append(xen_rpc_call(host_ip, 'VM_get_record_lite', '').get('Value', []))
# return xen_api_success(records)
# else:
# return xen_api_success(self._VM_get_record_lite(session))
def VM_get_record_lite(self, session, vm_ref=''):
'''
@deprecated: not used
'''
vms = self._VM_get_all(session).get('Value', [])
retv = []
if vms:
for vm_ref in vms:
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
# xennode = XendNode.instance()
if not xeninfo:
log.debug("can not find vm:" + vm_ref)
return xen_api_error(['HANDLE_INVALID', 'VM', vm_ref])
# domid = xeninfo.getDomid()
dom_uuid = xeninfo.get_uuid()
record_lite = {'uuid' : dom_uuid,
'power_state' : xeninfo.get_power_state(),
}
# log.debug(record_lite)
retv.append(record_lite)
return xen_api_success(retv)
def VM_clean_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Attempt to cleanly reboot the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_clean_reboot(session, vm_ref)
response = self._VM_reboot_checkout(session, vm_ref)
# self. _VM_set_all_tag(session, vm_ref)
# self._VM_set_all_rate(session, vm_ref, 'rate')
# self._VM_set_all_rate(session, vm_ref, 'burst')
# self.VM_start_set_IO_limit(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_clean_reboot", vm_ref)
return response
else:
response = self._VM_clean_reboot(session, vm_ref)
response = self._VM_reboot_checkout(session, vm_ref)
# self. _VM_set_all_tag(session, vm_ref)
# self._VM_set_all_rate(session, vm_ref, 'rate')
# self._VM_set_all_rate(session, vm_ref, 'burst')
# self.VM_start_set_IO_limit(session, vm_ref)
return response
def _VM_clean_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Attempt to cleanly reboot the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
XendTask.log_progress(0, 100, xeninfo.shutdown, "reboot")
return xen_api_success_void()
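# The checkout below polls the VM's domid once per second: on a successful
# reboot Xen assigns a new, strictly larger domid, so domid_new > domid_old
# signals completion. While the domain is briefly gone the domid reads as -1
# (or empty); if that is still the case after ~90 polls, the loop grants one
# extra grace period of 6 polls (one_more / i -= 6) before giving up.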
def _VM_reboot_checkout(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check that the reboot operation finished: a rebooted VM gets a new, larger domid.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise Exception: Timeout 90 seconds.
'''
domid_old = self.VM_get_domid(session, vm_ref)['Value']
i = 0
flag = False
one_more = True
while True:
i += 1
domid_new = self.VM_get_domid(session, vm_ref)['Value']
if cmp(int(domid_new), int(domid_old)) > 0:
log.debug('reboot finished: %s, cost time: %s' % (vm_ref, str(i)))
flag = True
break
elif not domid_new or (cmp(i, 90) > 0 and cmp(int(domid_new), -1) == 0):
if one_more:
one_more = False
i -= 6
continue
else:
log.exception('reboot timeout!')
break
else:
time.sleep(1)
continue
return xen_api_success(flag)
def VM_clean_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Attempt to cleanly shutdown the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_clean_shutdown(session,vm_ref)
response = self._VM_shutdown_checkout(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_clean_shutdown", vm_ref)
return response
else:
response = self._VM_clean_shutdown(session, vm_ref)
response = self._VM_shutdown_checkout(session, vm_ref)
return response
def _VM_clean_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Attempt to cleanly shutdown the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
is_a_template = self._VM_get_is_a_template(session, vm_ref).get('Value')
if is_a_template:
return xen_api_error(XEND_API_ERROR_VM_IS_TEMPLATE)
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
XendTask.log_progress(0, 100, xeninfo.shutdown, "poweroff")
return xen_api_success_void()
def _VM_shutdown_checkout(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check that the shutdown operation finished.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise Exception: Timeout 60 seconds.
'''
i = 0
time_out = 60
flag = False
while True:
i += 1
# ps_new = self.VM_get_power_state(session, vm_ref)['Value']
domid = self.VM_get_domid(session, vm_ref).get('Value')
# log.debug(ps_new)
if not domid or cmp (int(domid), -1) == 0:
log.debug("shutdown finished: %s, cost time: %s" % (vm_ref, str(i)))
flag = True
break
elif cmp(i, time_out) > 0:
log.exception("shutdown timeout!")
break
else:
time.sleep(1)
continue
return xen_api_success(flag)
'''
when a VM is created from a template, migrate it to the destination host;
the VM is shut down at this point (refer to VM_start_on)
'''
def VM_change_host(self, session, vm_ref, temp_ref, host_ref, path):
'''
@author: wuyuewen
@summary: When a VM is created from a template, migrate it to the destination host; refer to VM_create_on_from_template.
@precondition: VM not running
@param session: session of RPC.
@param vm_ref: VM's uuid
@param temp_ref: VM template uuid
@param host_ref: migrate VM to which host
@return: True | False
@rtype: dict.
@raise xen_api_error: CHANGE_HOST_ON_FAILED
'''
try:
log.debug("in VM_change_host: %s" % vm_ref)
if BNPoolAPI._isMaster:
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
return xen_api_success(True)
xennode = XendNode.instance()
master_uuid = xennode.uuid
h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if not h_ref:
log.exception('Get host by VM failed! BNPoolAPI update_data_struct not sync!')
h_ref = BNPoolAPI.get_host_by_vm(temp_ref)
h_ip = BNPoolAPI.get_host_ip(h_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
paths = xennode.get_ha_sr_location()
log.debug(paths)
# if cmp(paths, {}) !=0:
if paths:
for p in paths.values():
# path = os.path.join(p, CACHED_CONFIG_FILE)
path = os.path.join(p, '%s.sxp' % vm_ref)
break
else:
path = ''
log.debug('vm_migrate to ha path: %s' % path)
# else:
# return xen_api_error(['nfs_ha not mounted', NFS_HA_DEFAULT_PATH])
#copy sxp file to nfs
log.debug("<dest ip>, <host ip>: <%s>, <%s>" % (host_ip, h_ip))
xen_rpc_call(h_ip, 'VM_copy_sxp_to_nfs', vm_ref, path)
if cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) == 0:
log.debug("-----condition 1-----")
log.debug("vm dest: master, vm now: master")
response = {'Status' : 'Success', 'Value' : vm_ref}
# return xen_api_success(True)
elif cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) != 0:
log.debug("-----condition 2-----")
log.debug("vm dest: master, vm now: node")
response = self.VM_create_from_sxp(session, path, False, False)
# log.debug('create from template: %s' % response)
if cmp (response.get('Status'), 'Success') == 0:
xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
# log.debug('destroy : %s' % response)
elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) == 0:
log.debug("-----condition 3-----")
log.debug("vm dest: node, vm now: master")
log.debug("host ip (%s) path(%s)" % (host_ip, path))
response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, False, False)
if cmp (response.get('Status'), 'Success') == 0:
self._VM_destroy(session, vm_ref, False, False)
elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) != 0:
if cmp(h_ref, host_ref) == 0:
log.debug("-----condition 4-----")
log.debug("vm dest: node1, vm now: node2, node1 = node2")
response = {'Status' : 'Success', 'Value' : vm_ref}
else:
log.debug("-----condition 5-----")
log.debug("vm dest: node1, vm now: node2, node1 != node2")
response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, False, False)
if cmp (response.get('Status'), 'Success') == 0:
xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
if cmp (response.get('Status'), 'Success') == 0:
BNPoolAPI.update_data_struct('vm_start_on', vm_ref, h_ref, host_ref) # reason here is pre-fixed
log.debug("Finished change host on: %s migrate vm(%s) to %s" % (h_ip, vm_ref, host_ip))
return response
else:
path = ''
return xen_api_success(True)
except Exception, exn:
log.exception(exn)
return xen_api_error(['CHANGE_HOST_ON_FAILED,', exn])
# finally:
# if path:
# cmd = 'rm -f %s' % path
# doexec(cmd)
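# Summary of the five VM_change_host cases above (master = pool master,
# node = any other host; "now" is where the clone currently lives, "dest"
# is host_ref):
#
#   dest    now     action
#   master  master  nothing to do, return the vm_ref
#   master  node    master re-creates from the sxp, node destroys the origin
#   node    master  node re-creates from the sxp, master destroys the origin
#   node1   node1   nothing to do, return the vm_ref
#   node1   node2   node1 re-creates from the sxp, node2 destroys the origin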
'''
1. clone the VM on the same host as the template
2. migrate the VM to the destination host
3. destroy the origin VM
'''
def VM_create_on_from_template(self, session, host_ref, vm_ref, newname, config, ping=False):
'''
@author: wuyuewen
@summary: 1. Clone VM from template on the same Host.
2. Migrate VM to destination Host; if the migration succeeds, destroy the origin VM on the origin Host.
3. Start VM and set VM password; if the VM fails to start, it is destroyed.
@precondition: 1. Storage has enough space, template structure is legal. See VM_clone_MAC.
2. See VM_change_host.
3. Destination Host has enough free memory, VM has the Agent installed for password change. See VM_set_config.
@param session: session of RPC.
@param host_ref: destinate Host
@param vm_ref: VM's uuid
@param newname: name of new VM
@param config: dict type config
@param ping: True | False, whether the VM has the Agent installed.
True: the method returns after the VM has booted into the OS
False: the method returns right after the start operation is issued.
@return: True | False
@rtype: dict.
@raise xen_api_error: CHANGE_HOST_ON_FAILED, create vm from template error
'''
# self.__vm_clone_lock__.acquire()
path = None
try:
log.debug('1.vm_create from template>>>>>')
newuuid = config.get('newUuid', None)
mac_addr = config.get('MAC', None)
st1 = time.time()
paths = XendNode.instance().get_ha_sr_location()
log.debug(paths)
if not BNPoolAPI.check_vm(vm_ref):
return xen_api_error(['VM_NOT_FOUND'])
if not BNPoolAPI.check_host(host_ref):
return xen_api_error(['HOST_NOT_FOUND'])
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
if mac_addr and not self._VIF_is_mac_format_legal(mac_addr):
return xen_api_error(['MAC_INVALID'])
# if cmp(paths, {}) !=0:
if paths:
for p in paths.values():
# path = os.path.join(p, CACHED_CONFIG_FILE)
path = os.path.join(p, '%s.sxp' % vm_ref)
break
else:
return xen_api_error(['HA_DIR_NOT_FOUND'])
if not mac_addr:
log.debug('2. vm_clone >>>>>>')
response = self.VM_clone(session, vm_ref, newname, None, newuuid)
else:
log.debug('2. vm_clone_mac >>>>>>')
response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, None, newuuid)
e1 = (time.time() - st1)
log.debug('VM clone cost time :%s ' % e1)
# log.debug("rpc.VM_start():", e4)
if response.get('Status') == 'Success':
# self.__vm_change_host_lock__.acquire()
# try:
domuuid = response.get('Value')
log.debug('new VM uuid:%s' % domuuid)
# change VM host from cur to host_ref
response = self.VM_change_host(session, domuuid, vm_ref, host_ref, path)
log.debug('change host response: %s' % response)
# finally:
# self.__vm_change_host_lock__.release()
if response.get('Status') == 'Success':
log.debug('3. vm_set_config>>>>>')
response = self.VM_set_config(session, domuuid, config, ping) # when set config failed, VM will be deleted!
e2 = (time.time() - st1)
log.debug(">>>>VM_create_on_from_template<<<< Total cost: %s" % e2)
if response.get('Status') == 'Success':
return response
return xen_api_error(['create vm from template error'])
except Exception, exn:
log.exception(exn)
return xen_api_error(['create vm from template error: %s' % exn])
finally:
if path:
st1 = time.time()
cmd = 'rm -f %s' % path
doexec(cmd)
e1 = (time.time() - st1)
log.debug('remove %s cost: %s' %(path, e1))
# finally:
# self.__vm_clone_lock__.release()
def VM_create_from_template(self, session, vm_ref, newname, config):
'''
@deprecated: not used
'''
log.debug('1.vm_create from template>>>>>')
newuuid = config.get('newUuid', None)
mac_addr = config.get('MAC', None)
st1 = time.time()
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if mac_addr and not self._VIF_is_mac_format_legal(mac_addr):
return xen_api_error(['MAC_INVALID'])
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
if not mac_addr:
log.debug('2. vm_clone >>>>>>')
response = self.VM_clone(session, vm_ref, newname, None, newuuid)
else:
log.debug('2. vm_clone_mac >>>>>>')
response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, None, newuuid)
e1 = (time.time() - st1)
log.debug('VM clone cost time :%s ' % e1)
# log.debug("rpc.VM_start():", e4)
if response.get('Status') == 'Success':
domuuid = response.get('Value')
log.debug('new VM uuid:%s' % domuuid)
log.debug('3. vm_set_config>>>>>')
response = self.VM_set_config(session, domuuid, config) # when set config failed, VM will be deleted!
if response.get('Status') == 'Success':
return response
return xen_api_error(['create vm from template error'])
def VM_create_with_VDI(self, session, host_ref, vm_ref, newname, config, ping=False):
'''
@deprecated: not used
'''
# self.__vm_clone_lock__.acquire()
path = None
try:
storage = self._get_BNStorageAPI_instance()
log.debug('1.vm_create from template>>>>>')
newuuid = config.get('newUuid', None)
mac_addr = config.get('MAC', None)
if not BNPoolAPI.check_vm(vm_ref):
return xen_api_error(['VM_NOT_FOUND'])
if not BNPoolAPI.check_host(host_ref):
return xen_api_error(['HOST_NOT_FOUND'])
if mac_addr and not self._VIF_is_mac_format_legal(mac_addr):
return xen_api_error(['MAC_INVALID'])
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vdi_new_uuid = config.get('vdiUuid', None)
st1 = time.time()
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
if not newuuid:
newuuid = genuuid.gen_regularUuid()
vdi_uuid_map = {}
vdis = vdis_resp.get('Value', [])
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
if sys_vdi in vdis and vdi_new_uuid:
vdi_uuid_map[sys_vdi] = vdi_new_uuid
paths = XendNode.instance().get_ha_sr_location()
log.debug(paths)
# if cmp(paths, {}) !=0:
if paths:
for p in paths.values():
# path = os.path.join(p, CACHED_CONFIG_FILE)
path = os.path.join(p, '%s.sxp' % vm_ref)
break
else:
return xen_api_error(['HA_DIR_NOT_FOUND'])
if not mac_addr:
log.debug('2. vm_clone >>>>>>')
response = self.VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid, True)
else:
log.debug('2. vm_clone_mac >>>>>>')
response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid, True)
e1 = (time.time() - st1)
log.debug('VM clone cost time :%s ' % e1)
# log.debug("rpc.VM_start():", e4)
if response.get('Status') == 'Success':
domuuid = response.get('Value')
log.debug('new VM uuid:%s' % domuuid)
# change VM host from cur to host_ref
response = self.VM_change_host(session, domuuid, vm_ref, host_ref, path)
log.debug('change host response: %s' % response)
if response.get('Status') == 'Success':
log.debug('3. vm_set_config>>>>>')
response = self.VM_set_config(session, domuuid, config, ping) # when set config failed, VM will be deleted!
e2 = (time.time() - st1)
log.debug(">>>>VM_create_with_VDI<<<< Total cost: %s" % e2)
if response.get('Status') == 'Success':
return response
return xen_api_error(['create vm from template error'])
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
# finally:
# self.__vm_clone_lock__.release()
def VM_set_passwd(self, session, vm_ref, vm_ip, passwd, origin_passwd, vm_type):
'''
@author: wuyuewen
@summary: Set VM password over SSH. The set-password agent runs on the Host and listens on port 10086.
@precondition: Set password Agent is running, windows VM has SSH-Server installed.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vm_ip: VM's ip
@param passwd: new password
@param origin_passwd: origin password
@param vm_type: windows | linux
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_set_passwd(session, vm_ref, vm_ip, passwd, origin_passwd, vm_type)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_set_passwd", vm_ref, vm_ip, passwd, origin_passwd, vm_type)
return response
else:
response = self._VM_set_passwd(session, vm_ref, vm_ip, passwd, origin_passwd, vm_type)
return response
def _VM_set_passwd(self, session, vm_ref, vm_ip, passwd, origin_passwd, vm_type ):
'''
@author: wuyuewen
@summary: Internal method. Set VM password over SSH. The set-password agent runs on the Host and listens on port 10086.
@precondition: Set password Agent is running, windows VM has SSH-Server installed.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vm_ip: VM's ip
@param passwd: new password
@param origin_passwd: origin password
@param vm_type: windows | linux
@return: True | False
@rtype: dict.
'''
#log.debug('vm set passwd(%s) ip(%s) origin(%s) new(%s) vm_type(%s)' % (vm_ref, vm_ip, origin_passwd, passwd, vm_type))
# by henry
log.debug('vm set passwd(%s) ip(%s) origin(%s) new(%s) vm_type(%s)' % (vm_ref, vm_ip, "********", "********", vm_type))
is_on = self._test_ip(vm_ip, 3)
if not is_on:
log.debug('vm(%s) ip(%s) cannot ping, try one more time...' % (vm_ref, vm_ip))
is_on = self._test_ip(vm_ip, 3)
if not is_on:
log.debug('Finally, vm(%s) ip(%s) cannot ping' % (vm_ref, vm_ip))
return xen_api_success(False)
proxy = xmlrpclib.Server("http://127.0.0.1:10086")
flag = proxy.VM_set_passwd(vm_ip, passwd, origin_passwd, vm_type)
return xen_api_success(flag)
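# Minimal sketch of the agent protocol used above (the agent itself is not
# part of this file): it is an XML-RPC server on localhost:10086 exposing
# VM_set_passwd(vm_ip, new_passwd, origin_passwd, vm_type) and returning a
# boolean; the argument values below are hypothetical:
#
#   proxy = xmlrpclib.Server("http://127.0.0.1:10086")
#   ok = proxy.VM_set_passwd('192.168.1.10', 'new_pw', 'old_pw', 'linux')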
def VM_set_config(self, session, vm_ref, config, ping=False):
'''
@author: wuyuewen
@summary: Contains several steps:
1. set vm vcpu and memory.
2. start vm.
3. ping vm to check whether it started.
4. set password over SSH or through a serial device.
@precondition: Every step has error handling or a rollback option.
1. setting vcpu/memory fails: vm is destroyed
2. vm cannot start: vm is destroyed
3. vm cannot be pinged (vm did not get an ip): return error and keep vm for inspection
4. vm password cannot be set: return error and keep vm for inspection
@param session: session of RPC.
@param vm_ref: VM's uuid
@param config: dict type config
@param ping: True | False, whether to ping the VM after start.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM set config error, VM start and change password error.
'''
log.debug("Starting VM_set_config...")
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
log.debug('Master node...')
response = self._VM_set_config(session, vm_ref, config, ping)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_set_config", vm_ref, config, ping)
return response
else:
response = self._VM_set_config(session, vm_ref, config, ping)
return response
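# Keys read from the config dict by _VM_set_config below (defaults in
# parentheses): cpuNumber (1), memoryValue (1024, interpreted as MB),
# vlanId ('-1'), IOreadLimit (30), IOwriteLimit (100), passwd (''),
# origin_passwd (''), IP (''), type ('linux'). Note that IOreadLimit is
# parsed but only the write limit is currently applied via
# _VM_set_IO_rate_limit.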
def _VM_set_config(self, session, vm_ref, config, ping=False):
'''
@author: wuyuewen
@summary: Internal method. Contains several steps:
1. set vm vcpu and memory.
2. start vm.
3. ping vm to check whether it started.
4. set password over SSH or through a serial device.
@precondition: Every step has error handling or a rollback option.
1. setting vcpu/memory fails: vm is destroyed
2. vm cannot start: vm is destroyed
3. vm cannot be pinged (vm did not get an ip): return error and keep vm for inspection
4. vm password cannot be set: return error and keep vm for inspection
@param session: session of RPC.
@param vm_ref: VM's uuid
@param config: dict type config
@param ping: True | False, whether to ping the VM after start.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM set config error, VM start and change password error.
'''
time_log = {}
log.debug('vm set config')
MB = 1024*1024
vcpu_num = int(config.get('cpuNumber', 1))
memory_value = int(config.get('memoryValue', 1024))*MB
vlanid = config.get('vlanId', '-1')
IO_read_limit = int(config.get('IOreadLimit', 30))
IO_write_limit = int(config.get('IOwriteLimit', 100))
vm_passwd = config.get('passwd', '')
origin_passwd = config.get('origin_passwd', '')
vm_ip = config.get('IP', '')
vm_type = config.get('type', 'linux')
try:
st1 = time.time()
# 1. set cpu and memory
vcpu_max = self._VM_get_VCPUs_max('', vm_ref).get('Value')
if vcpu_num > vcpu_max:
self._VM_set_VCPUs_number_live('', vm_ref, vcpu_num)
self._VM_set_VCPUs_max(session, vm_ref, vcpu_num)
self._VM_set_VCPUs_at_startup(session, vm_ref, vcpu_num)
elif vcpu_num < vcpu_max:
self._VM_set_VCPUs_max(session, vm_ref, vcpu_num)
self._VM_set_VCPUs_number_live('', vm_ref, vcpu_num)
self._VM_set_VCPUs_at_startup(session, vm_ref, vcpu_num)
memory = int(self._VM_get_memory_static_max(session, vm_ref).get('Value'))
log.debug('memory: %s' % memory)
if memory > memory_value:
#log.debug('memory > memory_value: --> %s > %s' % (memory, memory_value))
self._VM_set_memory_dynamic_max(session, vm_ref, memory_value)
self._VM_set_memory_dynamic_min(session, vm_ref, 512*MB)
self._VM_set_memory_static_max(session, vm_ref, memory_value)
elif memory < memory_value:
#log.debug('memory < memory_value: --> %s < %s' % (memory, memory_value))
self._VM_set_memory_static_max(session, vm_ref, memory_value)
self._VM_set_memory_dynamic_max(session, vm_ref, memory_value)
self._VM_set_memory_dynamic_min(session, vm_ref, 512*MB)
#2. set vlanid
#self._VM_set_tag_by_ethnum(session, vm_ref, 0, vlanid)
#log.debug('set tag in other config:>>>>>>>>>>>>>>>>')
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
dominfo.info['other_config'].setdefault('tag',{})
dominfo.info['other_config']['tag']['0'] = vlanid
#self._VM_save(dominfo)
#3. set IO limit
self._VM_set_IO_rate_limit(session, vm_ref, 'write', IO_write_limit, 'MBps')
e1 = time.time() - st1
time_log['set config'] = e1
log.debug('4. finished setting vm(%s) vcpu, memory and io rate limit' % vm_ref)
log.debug('====set vm(%s) vcpu, memory and io rate limit cost time: %s=======' % (vm_ref, e1))
except Exception, exn:
log.error(exn)
self.VM_destroy(session, vm_ref, True)
storage = self._get_BNStorageAPI_instance()
storage.VDI_destroy(session, vm_ref)
return xen_api_error(['VM set config error'])
try:
#5. start vm
# st2 = time.time()
log.debug('5. execute start vm>>>>>>>>>>>>>>>>>>')
start_status = self._VM_start(session, vm_ref, False, True).get('Status')
if start_status == 'Failure':
self._VM_destroy(session, vm_ref, True) # start failed, vm destroy
log.debug('6. vm start failed>>>>>>>>> return')
return xen_api_error('vm(%s) start error' % vm_ref)
is_setPasswd = False
if vm_ip:
if ping:
timeout = 120
deadline = 1
st2 = time.time()
log.debug('6. start to check whether vm load OS>>>>>')
is_on = self._VM_start_checkout(vm_ip, timeout, deadline)
e2 = time.time() - st2
log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
# time_log['load os'] = e2
if not is_on:
log.debug('7. vm(%s) cannot be pinged within %s seconds' % (vm_ref, str(timeout)))
return xen_api_error('vm(%s) cannot be pinged within %s seconds' % (vm_ref, str(timeout)))
if is_on and vm_passwd and origin_passwd:
set_passwd = threading.Thread(target=self._set_passwd, name='set_passwd',\
kwargs={'session':session, 'vm_ip':vm_ip, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
'origin_passwd':origin_passwd, 'vm_type':vm_type})
set_passwd.start()
else:
check_start_and_set_passwd = threading.Thread(target=self._check_start_and_set_passwd, name='check_start_and_set_passwd',\
kwargs={'session':session, 'vm_ip':vm_ip, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
'origin_passwd':origin_passwd, 'vm_type':vm_type})
check_start_and_set_passwd.start()
else:
log.debug('Start VM and change passwd using serial.')
if ping:
timeout = 120
st2 = time.time()
log.debug('6. start to check whether vm load OS via serial>>>>>')
is_on = self._VM_start_checkout_via_serial(session, vm_ref, timeout)
e2 = time.time() - st2
log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
# time_log['load os'] = e2
if not is_on:
log.debug('7. vm(%s) cannot be pinged via serial within %s seconds' % (vm_ref, str(timeout)))
return xen_api_error('vm(%s) cannot be pinged via serial within %s seconds' % (vm_ref, str(timeout)))
if is_on and vm_passwd:
set_passwd = threading.Thread(target=self._set_passwd_via_serial, name='set_passwd_via_serial',\
kwargs={'session':session, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
'vm_type':vm_type})
set_passwd.start()
else:
check_start_and_set_passwd = threading.Thread(target=self._check_start_and_set_passwd_via_serial, name='check_start_and_set_passwd_via_serial',\
kwargs={'session':session, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
'vm_type':vm_type})
check_start_and_set_passwd.start()
# finally:
# self.__set_passwd_lock__.release()
#6. get record of VM
st4 = time.time()
VM_record = self._VM_get_record(session, vm_ref).get('Value')
if VM_record and isinstance(VM_record, dict):
VM_record['setpasswd'] = is_setPasswd
e4 = time.time() - st4
e5 = time.time() - st1
time_log['get record'] = e4
time_log['total'] = e5
log.debug('return vm record----> %s' % VM_record)
log.debug('8.vm create from template succeeded!>>>>>>>>>>')
log.debug('===vm(%s) set config cost time===' % vm_ref)
# time_log['set config'] = e1
# time_log['load os'] = e2
# time_log['set passwd'] = e3
if time_log.get('set config', ''):
log.debug('set vm vcpu, memory and io rate limit cost time: %s' % e1)
# if time_log.get('load os', ''):
# log.debug('vmstart and load OS cost time: %s' % e2)
# if time_log.get('set passwd'):
# log.debug('vm set passwd cost time: %s' % e3)
if time_log.get('get record'):
log.debug('vm get record cost time: %s' % e4)
if time_log.get('total'):
log.debug('>>>>Total time<<<<: %s' % e5)
log.debug('=====vm(%s) end=====' % (vm_ref))
return xen_api_success(VM_record)
except Exception, exn:
log.error(exn)
if isinstance(exn, VMBadState):
return xen_api_error(['VM start error, bad power state.'])
log.error('9.vm create error....shutdown and remove vm(%s)' % vm_ref)
self._VM_hard_shutdown(session, vm_ref)
self.VM_destroy(session, vm_ref, True)
storage = self._get_BNStorageAPI_instance()
storage.VDI_destroy(session, vm_ref)
return xen_api_error(['VM start and change password error'])
def _check_start_and_set_passwd(self, session, vm_ip, vm_ref, vm_passwd, origin_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
timeout = 120
deadline = 1
st2 = time.time()
log.debug('6. start to check whether vm load OS>>>>>')
is_on = self._VM_start_checkout(vm_ip, timeout, deadline)
e2 = time.time() - st2
log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
# time_log['load os'] = e2
if not is_on:
log.debug('7. vm(%s) cannot be pinged within %s tries' % (vm_ref, str(timeout)))
return xen_api_error('vm(%s) cannot be pinged within %s tries' % (vm_ref, str(timeout)))
#raise Exception, '7. vm(vm_ref) cannot ping in %s s' % (vm_ref, timeout)
if is_on and vm_passwd and origin_passwd:
# self.__set_passwd_lock__.acquire()
# try:
st3 = time.time()
is_setPasswd = self._VM_set_passwd(session, vm_ref, vm_ip, vm_passwd, origin_passwd, vm_type).get('Value', '')
log.debug("7. set passwd result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
if not is_setPasswd:
log.debug('vm(%s) set passwd failed!' % vm_ref)
e3 = time.time() - st3
log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
# time_log['set passwd'] = e3
def _check_start_and_set_passwd_via_serial(self, session, vm_ref, vm_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
timeout = 200
st2 = time.time()
log.debug('6. start to check whether vm load OS via serial>>>>>')
is_on = self._VM_start_checkout_via_serial(session, vm_ref, timeout)
e2 = time.time() - st2
log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
# time_log['load os'] = e2
if not is_on:
log.debug('7. vm(%s) cannot be pinged via serial within %s tries' % (vm_ref, str(timeout)))
return xen_api_error('vm(%s) cannot be pinged via serial within %s tries' % (vm_ref, str(timeout)))
#raise Exception, '7. vm(vm_ref) cannot ping in %s s' % (vm_ref, timeout)
if is_on and vm_passwd:
# self.__set_passwd_lock__.acquire()
# try:
# st3 = time.time()
self._set_passwd_via_serial(session, vm_ref, vm_passwd, vm_type)
# log.debug("7. set passwd via serial result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
# if not is_setPasswd:
# log.debug('vm(%s) set passwd via serial failed!' % vm_ref)
# e3 = time.time() - st3
# log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
# time_log['set passwd'] = e3
def _set_passwd(self, session, vm_ip, vm_ref, vm_passwd, origin_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
st3 = time.time()
is_setPasswd = self._VM_set_passwd(session, vm_ref, vm_ip, vm_passwd, origin_passwd, vm_type).get('Value', '')
log.debug("7. set passwd result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
if not is_setPasswd:
log.debug('vm(%s) set passwd failed!' % vm_ref)
e3 = time.time() - st3
log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
# test if ping ip return true
def _test_ip(self, ip, deadline = 1):
'''
@author: wuyuewen
@summary: Internal method.
'''
import os
import subprocess
import datetime
time1 = datetime.datetime.now()
cmd = "ping -w %s %s" % (deadline, ip)
re = subprocess.call(cmd, shell=True)
time2 = datetime.datetime.now()
t = time2 - time1
log.debug('ping %s result: %s, cost time: %s' %(ip, re, str(t)))
if re:
return False
else:
return True
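# A minimal usage sketch for _test_ip (the ip below is hypothetical):
# `ping -w 1 10.0.0.5` exits with 0 when the host answers, so
# subprocess.call() returning 0 means reachable and this helper returns True:
#   if self._test_ip('10.0.0.5', deadline=1):
#       pass # guest OS is up and answering pings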
def _set_passwd_via_serial(self, session, vm_ref, vm_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
st3 = time.time()
response = self._VM_get_platform_serial(session, vm_ref)
if cmp(response['Status'], 'Failure') == 0:
log.exception('VM_get_platform_serial failed!')
return xen_api_success(False)
address = response.get('Value')
log.debug('serial port: %s' % str(address))
if not address:
log.error('VM serial not correct!')
return xen_api_success(False)
(ip, port) = address
import json
if cmp(vm_type, 'linux') == 0:
userName = 'root'
else:
userName = 'Administrator'
json_obj = json.dumps({'requestType':'Agent.SetPassword', 'userName':userName, 'password':vm_passwd})
is_setPasswd = Netctl.serial_opt(ip, port, json_obj, False)
log.debug("7. set passwd via serial, result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
if not is_setPasswd:
log.debug('vm(%s) set passwd via serial failed!' % vm_ref)
e3 = time.time() - st3
log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
def _VM_start_checkout(self, vm_ip, timeout = 60, deadline = 1):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('VM load os checkout>>>>')
cnt = 0
while cnt < timeout:
if self._test_ip(vm_ip, deadline):
return True
# time.sleep(1)
cnt += 1
log.debug('vm not start>>>>>')
return False
def _VM_start_checkout_via_serial(self, session, vm_ref, timeout = 60):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('VM load os checkout>>>>')
response = self._VM_get_platform_serial(session, vm_ref)
if cmp(response['Status'], 'Failure') == 0:
log.exception('VM_get_platform_serial failed!')
return xen_api_success(False)
address = response.get('Value')
log.debug('serial port: %s' % str(address))
if not address:
log.error('VM serial not correct!')
return xen_api_success(False)
(ip, port) = address
import json
json_obj = json.dumps({'requestType':'Agent.Ping'})
log.debug(json_obj)
if self._test_serial(ip, port, json_obj, timeout):
return True
# cnt = 0
# while cnt < timeout:
# if self._test_serial(ip, port, json_obj):
# return True
## time.sleep(1)
# cnt += 1
log.debug('vm not start>>>>>')
return False
def _test_serial(self, ip, port, json_obj, timeout):
'''
@author: wuyuewen
@summary: Internal method.
'''
import datetime
time1 = datetime.datetime.now()
re = Netctl.serial_opt(ip, port, json_obj, False, timeout, True)
time2 = datetime.datetime.now()
t = time2 - time1
log.debug('ping %s:%s result: %s, cost time: %s' %(ip, port, re, str(t)))
return re
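# Sketch of the serial-agent protocol used above and in _set_passwd_via_serial,
# assuming the in-guest agent understands these request types (taken from the
# payloads built in this file, not from a protocol spec):
#   json.dumps({'requestType': 'Agent.Ping'})                # liveness probe
#   json.dumps({'requestType': 'Agent.SetPassword',
#               'userName': 'root', 'password': '******'})   # password reset
# As used here, Netctl.serial_opt(ip, port, payload, ...) ships the payload
# over the VM's emulated serial port and returns a truthy result on success.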
'''
generate a template from a vm:
1. vm_clone
2. set template
returns True or False
'''
def VM_create_image(self, session, vm_ref, template_name, template_uuid):
'''
@author: wuyuewen
@summary: Generate template from VM, contain several options:
1. vm_clone
2. set template
@param session: session of RPC.
@param vm_ref: VM's uuid
@param template_name: new template name.
@param template_uuid: template uuid
@return: True | False
@rtype: dict.
'''
log.debug('==========vm(%s) create template==========' % vm_ref)
result = False
try:
response = self.VM_clone(session, vm_ref, template_name, None, template_uuid)
if response.get('Status') == 'Success':
domuuid = response.get('Value')
assert domuuid == template_uuid
log.debug('new VM uuid:%s' % domuuid)
self.VM_set_is_a_template(session, template_uuid, True)
result = True
except Exception, exn:
log.exception(exn)
self.VM_destroy(session, template_uuid, True)
finally:
log.debug('============end===============')
return xen_api_success(result)
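# Hedged usage sketch (uuids hypothetical, api is a handle to this class):
# turning an existing VM into a template named 'tmpl-centos6':
#   rc = api.VM_create_image(session, vm_uuid, 'tmpl-centos6', template_uuid)
#   # rc looks like {'Status': 'Success', 'Value': True} when both the clone
#   # and the is_a_template flag succeed.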
def VM_clone(self, session, vm_ref, newname, vdi_uuid_map = None, newuuid = None, vdi_exists = False):
'''
@author: wuyuewen
@summary: Internal method. Clone VM, contain several options:
1. get origin VM's VDIs
2. clone VM
3. if clone VM success, clone VDIs
@param session: session of RPC.
@param vm_ref: origin VM's uuid
@param newname: new VM's name
@param vdi_uuid_map: origin VM's VDIs mapping to new clone VDIs
@param newuuid: new VM's uuid
@param vdi_exists: True | False, if new VDIs exist or not(create in advance).
@return: True | False
@rtype: dict.
'''
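# Shape of vdi_uuid_map, built below when the caller does not supply one
# (uuids hypothetical): every source VDI maps to the uuid its clone will get,
# and the system VDI is pinned to the new VM's uuid:
#   {'0f3a...-data': '7c1d...-data-clone', sys_vdi: newuuid}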
log.debug('in VM_clone')
storage = self._get_BNStorageAPI_instance()
if not vdi_uuid_map:
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
if not newuuid:
newuuid = genuuid.gen_regularUuid()
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vdi_uuid_map = {}
vdis = vdis_resp.get('Value', [])
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
if sys_vdi in vdis:
vdi_uuid_map[sys_vdi] = newuuid
if BNPoolAPI._isMaster:
h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
#mapping parent vdi's uuid to new one.
h_ip = BNPoolAPI.get_host_ip(h_ref)
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
return xen_rpc_call(h_ip, 'VM_clone_local', vm_ref, newname, vdi_uuid_map, newuuid)
log.debug("VM_clone, vdi map:")
log.debug(vdi_uuid_map)
if cmp(h_ref, XendNode.instance().uuid) == 0:
log.debug("clone from master")
response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
else:
log.debug("clone from slave")
response = xen_rpc_call(h_ip, 'VM_clone', vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
log.debug('New domain uuid: %s' % domuuid)
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
if not vdi_exists:
storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
# log.debug("return from async")
return response
else:
log.debug('in VM_clone local')
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
response = self.VM_clone_local(session, vm_ref, newname, vdi_uuid_map, newuuid)
else:
log.debug('in VM_clone local, else')
response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if not vdi_exists:
storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
return response
def VM_clone_local(self, session, vm_ref, newname, vdi_uuid_map=None, newuuid=None):
'''
@deprecated: not used
'''
storage = self._get_BNStorageAPI_instance()
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
if not vdi_uuid_map:
vdi_uuid_map = {}
vdis = vdis_resp.get('Value')
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
log.debug(vdi_uuid_map)
response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, XendNode.instance().uuid)
response = storage._VDI_clone(session, vdi_uuid_map, newname, vm_ref)
vdi_uuid = response.get('Value')
if vdi_uuid:
#BNPoolAPI.update_VDI_create(host_ref, sr_ref)
BNPoolAPI.update_data_struct("vdi_create", XendNode.instance().uuid, vdi_uuid)
return xen_api_success(domuuid)
def _VM_clone(self, session, vm_ref, newname, vdi_uuid_map=None, newuuid=None):
log.debug('in _VM_clone')
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100, xendom.domain_clone, vm_ref, newname,\
vdi_uuid_map, newuuid)
return xen_api_success(domuuid)
'''
when cloning a VM, the MAC address must be passed in
'''
def VM_clone_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map = None, newuuid = None, vdi_exists = False):
'''
@author: wuyuewen
@summary: Clone VM with param MAC.
@see: VM_clone
'''
log.debug('in VM_clone with MAC...')
storage = self._get_BNStorageAPI_instance()
if not vdi_uuid_map:
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
if not newuuid:
newuuid = genuuid.gen_regularUuid()
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vdi_uuid_map = {}
vdis = vdis_resp.get('Value', [])
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
if sys_vdi in vdis:
vdi_uuid_map[sys_vdi] = newuuid
if BNPoolAPI._isMaster:
h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
#mapping parent vdi's uuid to new one.
h_ip = BNPoolAPI.get_host_ip(h_ref)
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
return xen_rpc_call(h_ip, 'VM_clone_local_MAC', vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
log.debug("VM_clone, vdi map:")
log.debug(vdi_uuid_map)
# log.debug("update pool data structs before clone!!!")
# BNPoolAPI.update_data_struct("vm_clone", newuuid, h_ref)
if cmp(h_ref, XendNode.instance().uuid) == 0:
log.debug("clone from master")
response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
# domuuid = response.get('Value')
# if domuuid:
# BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
else:
log.debug("clone from slave")
response = xen_rpc_call(h_ip, 'VM_clone_MAC', vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
# domuuid = response.get('Value')
# log.debug('New domain uuid: %s' % domuuid)
# if domuuid:
# BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
if cmp(response.get('Status'), 'Success') == 0:
domuuid = response.get('Value')
if not domuuid:
log.exception('WARNING: VM_clone_MAC, domuuid not return!!!')
domuuid = newuuid
BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
else:
BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
if not vdi_exists:
storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
return response
else:
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
response = self.VM_clone_local_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
else:
log.debug('in VM_clone MAC')
log.debug("VM_clone, vdi map:")
log.debug(vdi_uuid_map)
response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if not vdi_exists:
storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
return response
def VM_clone_local_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map=None, newuuid=None):
'''
@deprecated: not used
'''
log.debug('VM_clone_local_MAC >>>>>')
storage = self._get_BNStorageAPI_instance()
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
if not vdi_uuid_map:
vdi_uuid_map = {}
vdis = vdis_resp.get('Value')
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
log.debug(vdi_uuid_map)
response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid = newuuid)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, XendNode.instance().uuid)
response = storage._VDI_clone(session, vdi_uuid_map, newname, vm_ref)
vdi_uuid = response.get('Value')
if vdi_uuid:
#BNPoolAPI.update_VDI_create(host_ref, sr_ref)
BNPoolAPI.update_data_struct("vdi_create", XendNode.instance().uuid, vdi_uuid)
return xen_api_success(domuuid)
def _VM_clone_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map=None, newuuid=None):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_clone_MAC
'''
log.debug('in _VM_clone_MAC')
xendom = XendDomain.instance()
domuuid = xendom.domain_clone_MAC(vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
# domuuid = XendTask.log_progress(0, 100, xendom.domain_clone_MAC, vm_ref, newname, mac_addr,\
# vdi_uuid_map, newuuid)
return xen_api_success(domuuid)
def VM_clone_system_VDI(self, session, vm_ref, newuuid):
'''
@author: wuyuewen
@summary: Clone VM system VDI
@param session: session of RPC.
@param vm_ref: VM's uuid
@param newuuid: new VDI uuid
@return: True | False
@rtype: dict.
@raise xen_api_error:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_clone_system_VDI(session, vm_ref, newuuid)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_clone_system_VDI', vm_ref, newuuid)
else:
return self._VM_clone_system_VDI(session, vm_ref, newuuid)
def _VM_clone_system_VDI(self, session, vm_ref, newuuid):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_clone_system_VDI
'''
try:
storage = self._get_BNStorageAPI_instance()
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value')
if sys_vdi:
vdi_uuid_map = { sys_vdi : newuuid }
new_vdi = storage.VDI_clone(session, vdi_uuid_map, newuuid, newuuid).get('Value')
if new_vdi:
return xen_api_success(new_vdi)
else:
return xen_api_error(['VM_clone_system_VDI', ' Failed'])
else:
return xen_api_error(['VM_clone_system_VDI', ' orig VDI not found!'])
except Exception, exn:
log.debug(exn)
storage.VDI_destroy(session, newuuid)
return xen_api_error(['VM_clone_system_VDI', ' Exception'])
def VM_destroy(self, session, vm_ref, del_vdi=False, del_ha_sxp=True, update_pool_structs=True):
'''
@author: wuyuewen
@summary: Destroy the specified VM. The VM is completely removed from the system.
This function can only be called when the VM is in the Halted State.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param del_vdi: True | False, destroy VM's VDIs either
@param del_ha_sxp: True | False, destroy sxp file in HA dir.
@param update_pool_structs: True | False, update_pool_structs in Xend memory structure.
@return: True | False
@rtype: dict.
@raise xen_api_error:
'''
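# Hedged usage sketch (api is a hypothetical handle to this class): a full
# teardown that also removes the disks and the HA sxp file, flags as above:
#   api.VM_destroy(session, vm_uuid, del_vdi=True, del_ha_sxp=True)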
storage = self._get_BNStorageAPI_instance()
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
log.debug("destroy local vm: %s" % vm_ref)
return xen_rpc_call(host_ip, 'VM_destroy_local', vm_ref, True)
if cmp(host_ref, XendNode.instance().uuid) == 0:
vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
response = self._VM_destroy(session, vm_ref, del_ha_sxp, update_pool_structs)
else:
vdis = xen_rpc_call(host_ip, 'VDI_get_by_vm', vm_ref).get('Value')
response = xen_rpc_call(host_ip, 'VM_destroy', vm_ref, del_vdi, del_ha_sxp, update_pool_structs)
if update_pool_structs:
BNPoolAPI.update_data_struct("vm_destroy", vm_ref)
if del_vdi and vdis:
## host_ip = BNPoolAPI.get_host_ip(XendNode.instance().uuid)
for vdi in vdis:
log.debug('destroy vdi: %s' % vdi)
storage.VDI_destroy(session, vdi)
# xen_rpc_call(host_ip, 'VDI_destroy', vdi, True)
return response
else:
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
response = self.VM_destroy_local(session, vm_ref, del_vdi)
else:
vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
response = self._VM_destroy(session, vm_ref, del_ha_sxp, update_pool_structs)
if del_vdi and vdis:
# host_ip = BNPoolAPI.get_host_ip(XendNode.instance().uuid)
for vdi in vdis:
log.debug('destroy vdi: %s' % vdi)
storage.VDI_destroy(session, vdi)
return response
def VM_destroy_local(self, session, vm_ref, del_vdi=False):
'''
@deprecated: not used
'''
storage = self._get_BNStorageAPI_instance()
vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
response = self._VM_destroy(session, vm_ref, False)
BNPoolAPI.update_data_struct("vm_destroy", vm_ref)
if del_vdi and vdis:
for vdi in vdis:
storage._VDI_destroy(session, vdi)
return response
def _VM_destroy(self, session, vm_ref, del_ha_sxp=False, update_pool_structs=True):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_destroy
'''
self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# vifs = dom.get_vifs()
# if vifs:
# for vif in dom.get_vifs():
# self._VM_del_ip_map(session, vm_ref, vif)
return XendTask.log_progress(0, 100, do_vm_func,
"domain_delete", vm_ref, del_ha_sxp, update_pool_structs)
def VM_get_lost_vm_by_label(self, session, label, exactMatch):
'''
@author: wuyuewen
@summary: In some uncommon conditions a VM is destroyed by Xend but its disks (VDIs) still exist.
This method finds such VMs via the sxp files stored in the HA dir.
@param session: session of RPC.
@param label: label(uuid or name) of VM
@param exactMatch: full match the given label
@return: list of VMs
@rtype: dict.
'''
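# Hedged usage sketch (api is a hypothetical handle to this class): per the
# parameter description above, exactMatch=True requires a full label match,
# so False presumably allows a fragment of a name or uuid:
#   lost = api.VM_get_lost_vm_by_label(session, 'centos', False).get('Value')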
if BNPoolAPI._isMaster:
all_vms = {}
all_vms = self._VM_get_lost_vm_by_label(session, label, exactMatch).get('Value')
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
response = xen_rpc_call(remote_ip, 'VM_get_lost_vm_by_label', label, exactMatch)
remote_vms = response.get('Value')
if remote_vms:
all_vms.update(remote_vms)
# log.debug(all_vms)
return xen_api_success(all_vms)
else:
return self._VM_get_lost_vm_by_label(session, label, exactMatch)
def _VM_get_lost_vm_by_label(self, session, label, exactMatch):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_get_lost_vm_by_label
'''
xendom = XendDomain.instance()
return xen_api_success(xendom.find_lost_vm_by_label(label, exactMatch))
def VM_get_lost_vm_by_date(self, session, date1, date2):
'''
@author: wuyuewen
@summary: In some uncommon conditions a VM is destroyed by Xend but its disks (VDIs) still exist.
This method finds such VMs via the sxp files stored in the HA dir.
@param session: session of RPC.
@param date1: date of start
@param date2: date of end
@return: list of VMs
@rtype: dict.
'''
if BNPoolAPI._isMaster:
all_vms = {}
now_vms = []
all_vms = self._VM_get_lost_vm_by_date(session, date1, date2).get('Value')
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
response = xen_rpc_call(remote_ip, 'VM_get_lost_vm_by_date', date1, date2)
remote_vms = response.get('Value')
if remote_vms:
all_vms.update(remote_vms)
now_vms_resp = self.VM_get_all(session)
if cmp(now_vms_resp['Status'], 'Success') == 0:
now_vms = now_vms_resp.get("Value")
if now_vms:
for i in all_vms.keys():
vm_uuid_s = re.search(r"\/(\S+)\/", i)
if i in now_vms:
del all_vms[i]
continue
# log.debug(all_vms)
return xen_api_success(all_vms)
else:
return self._VM_get_lost_vm_by_date(session, date1, date2)
def _VM_get_lost_vm_by_date(self, session, date1, date2):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_get_lost_vm_by_date
'''
xendom = XendDomain.instance()
return xen_api_success(xendom.find_lost_vm_by_date(date1, date2))
def VM_hard_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Stop executing the specified VM without attempting a clean shutdown and immediately restart the VM.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise VMBadState:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_hard_reboot(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_hard_reboot', vm_ref)
else:
return self._VM_hard_reboot(session, vm_ref)
def _VM_hard_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_hard_reboot
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref)
return XendTask.log_progress(0, 100, do_vm_func,
"domain_reset", vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Stop executing the specified VM without attempting a clean shutdown.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise VMBadState:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_hard_shutdown(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_hard_shutdown', vm_ref)
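# NOTE: both branches above return, so the wait loop below is unreachable as
# written; it is kept verbatim (it appears intended to poll until the domid
# disappears or a budget of 120 half-second sleeps runs out).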
i = 0
time_out = 120
while True:
i += 1
# ps_new = self.VM_get_power_state(session, vm_ref)['Value']
domid = self.VM_get_domid(session, vm_ref)['Value']
# log.debug(ps_new)
if not domid or cmp (int(domid), -1) == 0:
break
elif cmp(i, time_out) > 0:
break
else:
time.sleep(0.5)
continue
else:
return self._VM_hard_shutdown(session, vm_ref)
def _VM_hard_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_hard_shutdown
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref)
return XendTask.log_progress(0, 100, do_vm_func,
"domain_destroy", vm_ref)
def VM_pause(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_pause(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_pause', vm_ref)
else:
return self._VM_pause(session, vm_ref)
def _VM_pause(self, session, vm_ref):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_pause", vm_ref)
# do snapshot for system vdi of vm
def VM_snapshot(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Take a snapshot of the VM's system VDI. The sragent must be running on the Host; it listens on port 10010.
@precondition: sragent is running in host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
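# The actual snapshot work is delegated over XML-RPC to an sragent, as done in
# _VM_snapshot_vdi below, e.g. for a gpfs-backed SR:
#   proxy = ServerProxy('http://127.0.0.1:10010')
#   result = proxy.snapshot_gpfs(mount_point, vdi_ref, name)
# For SR types without a mount_point the agent on the storage server itself
# is contacted instead (http://<sr_ip>:10010).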
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_snapshot_vdi(session, vdi_ref, name)
# snapshot for vdi of vm
def _VM_snapshot_vdi(self, session, vdi_ref, name):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_snapshot
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.exception('VM_snapshot_vdi>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record(session, sr).get('Value')
if not sr_rec:
log.exception('Get SR record failed!')
return xen_api_success(False)
# log.debug("sr rec : %s" % sr_rec)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
log.debug('gpfs snapshot>>>>>')
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_gpfs(mount_point, vdi_ref, name)
elif cmp(sr_type, 'mfs') == 0:
log.debug('mfs snapshot>>>>>>')
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_mfs(mount_point, vdi_ref, name)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_ocfs2(mount_point, vdi_ref, name)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_ocfs2(mount_point, vdi_ref, name)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.snapshot(sr, vdi_ref, name)
log.debug("snapshot result : %s " % result)
return xen_api_success(result)
def VM_rollback(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Roll back a snapshot of the VM's system VDI. The sragent must be running on the Host; it listens on port 10010.
@precondition: sragent is running in host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_rollback_vdi(session, vdi_ref, name)
def _VM_rollback_vdi(self, session, vdi_ref, name):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_rollback
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_rollback_vdi>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record does not exist>>>>')
return xen_api_success(False)
# log.debug("sr rec : %s" % sr_rec)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
log.debug('rollback gpfs>>>>>')
p_location = vdi_rec['location'].split(':')[1]
index = p_location.rfind('/')
if index != -1:
file_name = p_location[index+1:]
new_location = p_location[:index+1] + name + p_location[index+1:]
snap_location = '%s/%s/.snapshots/%s/%s' %(sr_rec['location'], vdi_ref, \
name, file_name)
log.debug('=====>VM rollback :snap location %s=====' % snap_location)
log.debug('new_location: %s' % new_location)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.rollback_gpfs(snap_location, new_location, p_location)
elif cmp(sr_type, 'mfs') == 0:
log.debug('mfs rollback>>>>>>')
mfs_name = sr_rec['mfs_name']
log.debug('mfs_name: %s' % mfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.rollback_mfs(mfs_name, vdi_ref, name)
elif cmp(sr_type, 'ocfs2') == 0:
log.debug('ocfs2 rollback>>>>>>')
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.rollback_ocfs2(mount_point, vdi_ref, name)
elif cmp(sr_type, 'local_ocfs2') == 0:
log.debug('local_ocfs2 rollback>>>>>>')
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.rollback_ocfs2(mount_point, vdi_ref, name)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.rollback(sr, vdi_ref, name)
log.debug("rollback result : %s " % result)
return xen_api_success(result)
def VM_destroy_snapshot(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Destroy a snapshot of the VM's system VDI. The sragent must be running on the Host; it listens on port 10010.
@precondition: sragent is running in host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_destroy_vdi_snapshot(session, vdi_ref, name)
def VM_destroy_all_snapshots(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Destroy all snapshots of the VM's system VDI. The sragent must be running on the Host; it listens on port 10010.
@precondition: sragent is running in host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_destroy_all_vdi_snapshots(session, vdi_ref)
def _VM_destroy_all_vdi_snapshots(self, session, vdi_ref, sr = None):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_destroy_all_snapshots
'''
storage = self._get_BNStorageAPI_instance()
# fetch the VDI record unconditionally so the inUse check at the end of this
# method never sees an unbound vdi_rec when sr is passed in by the caller
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not sr:
if not vdi_rec:
log.debug('VM_destroy_all_vdi_snapshots>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record does not exist>>>>')
return xen_api_success(False)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
gpfs_name = sr_rec['gpfs_name']
log.debug('gpfs_name: %s' % gpfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.destroy_all_gpfs(gpfs_name, vdi_ref)
elif cmp(sr_type, 'mfs') == 0:
mfs_name = sr_rec['mfs_name']
log.debug('mfs_name: %s' % mfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_mfs(mfs_name, vdi_ref)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_ocfs2(mount_point, vdi_ref)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_ocfs2(mount_point, vdi_ref)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.destroy_all(sr, vdi_ref)
log.debug("destroy_snapshot result : %s " % result)
if result == True: # destroy succeeded
inUse = vdi_rec.get('inUse', True)
log.debug('vdi in use>>>>>>>>>>>>>>%s' % inUse)
if not inUse:
storage.VDI_destroy_final(session, vdi_ref, True, True)
return xen_api_success(result)
def _VM_destroy_vdi_snapshot(self, session, vdi_ref, name):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_destroy_snapshot
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_destroy_vdi_snapshot>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record does not exist>>>>')
return xen_api_success(False)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
gpfs_name = sr_rec['gpfs_name']
log.debug('gpfs_name: %s' % gpfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.destroy_gpfs(gpfs_name, vdi_ref, name)
elif cmp(sr_type, 'mfs') == 0:
mfs_name = sr_rec['mfs_name']
log.debug('mfs_name: %s' % mfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
log.debug(name)
result = proxy.destroy_mfs(mfs_name, vdi_ref, name)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_ocfs2(mount_point, vdi_ref, name)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_ocfs2(mount_point, vdi_ref, name)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.destroy(sr, vdi_ref, name)
log.debug("destroy_snapshot result : %s " % result)
# if there are no snapshots and the vdi is no longer associated with a vm
inUse = vdi_rec.get('inUse', True)
log.debug('vdi in use>>>>>>>>>>>>>>%s' % inUse)
if not inUse:
snap_num = len(self._VM_get_vdi_snapshots(session, vdi_ref).get('Value'))
if snap_num == 0:
storage.VDI_destroy_final(session, vdi_ref, True, True)
return xen_api_success(result)
def VM_resume(self, session, vm_ref, start_paused):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_resume(session, vm_ref, start_paused)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_resume', vm_ref, start_paused)
else:
return self._VM_resume(session, vm_ref, start_paused)
def _VM_resume(self, session, vm_ref, start_paused):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_resume", vm_ref,
start_paused = start_paused)
def VM_start(self, session, vm_ref, start_paused, force_start):
'''
@author: wuyuewen
@summary: Start the specified VM. This function can only be called with the VM is in the Halted State.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param start_paused: Instantiate VM in paused state if set to true.
@param force_start: Attempt to force the VM to start. If this flag is false then
the VM may fail pre-boot safety checks (e.g. if the CPU the VM
last booted on looks substantially different to the current
one).
@return: True | False
@rtype: dict.
'''
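# Hedged usage sketch (api is a hypothetical handle to this class): a plain
# start, neither paused nor forced:
#   rc = api.VM_start(session, vm_uuid, False, False)
# The master forwards the call to the host that owns the VM; a slave handles
# it locally via _VM_start.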
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_start(session, vm_ref, start_paused, force_start)
else:
return xen_rpc_call(host_ip, 'VM_start', vm_ref, start_paused, force_start)
else:
return self._VM_start(session, vm_ref, start_paused, force_start)
def _VM_start(self, session, vm_ref, start_paused, force_start):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_start
'''
if not self._VM_can_start(session, vm_ref):
return xen_api_error(['MEMORY_NOT_ENOUGH', 'VM', vm_ref])
crush_vm = self._VM_check_fibers_valid(session, vm_ref).get('Value')
if crush_vm:
return xen_api_error(['FIBER_IN_USE:', crush_vm])
crush_vm = self._VM_check_usb_scsi_valid(session, vm_ref).get('Value')
if crush_vm:
return xen_api_error(['USB_IN_USE:', crush_vm])
try:
log.debug("VM starting now....")
response = XendTask.log_progress(0, 100, do_vm_func,
"domain_start", vm_ref,
start_paused=start_paused,
force_start=force_start)
log.debug(response)
return response
except HVMRequired, exn:
log.error(exn)
return xen_api_error(['VM_HVM_REQUIRED', vm_ref])
#add by wufan
def VM_can_start(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Check specified VM can start or not, check host free memory.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict
@raise xen_api_error:
'''
return xen_api_success(self._VM_can_start(session, vm_ref))
def _VM_can_start(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_can_start
'''
host_mem_free = self._host_metrics_get_memory_free()
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
if not dominfo:
log.debug("can not find vm:" + vm_ref)
return xen_api_error(['VM_NOT_FOUND', 'VM', vm_ref])
if self._VM_get_is_a_template(session, vm_ref).get('Value'):
return xen_api_error(XEND_API_ERROR_VM_IS_TEMPLATE)
dom_mem = dominfo.get_memory_dynamic_max()
free_memory = int(host_mem_free) - int(dom_mem)
log.debug("can start: %s, memory left limit: %sG" % (str(cmp(free_memory, RESERVED_MEM) > 0), str(RESERVED_MEM/1024/1024/1024)))
log.debug("free memory: %sG" % str(free_memory/1024/1024/1024))
# by henry, dom0 memory should be greater than 4G
if cmp(free_memory, RESERVED_MEM) > 0:
return True
else:
return False
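# Worked example with hypothetical numbers: with 16 GiB free on the host and a
# guest whose dynamic-max is 4 GiB, free_memory is 12 GiB; the start is allowed
# only while free_memory stays above RESERVED_MEM (4 GiB per the note above).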
def _host_metrics_get_memory_free(self):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_metrics_get_memory_free
'''
node = XendNode.instance()
xendom = XendDomain.instance()
doms = xendom.list()
doms_mem_total = 0
for dom in doms:
if cmp(dom.get_uuid(), DOM0_UUID) == 0:
continue
dominfo = xendom.get_vm_by_uuid(dom.get_uuid())
doms_mem_total += dominfo.get_memory_dynamic_max()
# log.debug("doms memory total: " + str(doms_mem_total))
# log.debug("host memory total:" + str(node.xc.physinfo()['total_memory'] * 1024))
return node.xc.physinfo()['total_memory'] * 1024 - doms_mem_total
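# i.e. free = (host physical total via node.xc.physinfo()) - sum of dynamic-max
# over every running guest except Domain-0.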
'''
check whether vif is created and up
'''
def _VM_check_vif_up(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('check if vif up >>>>>>>>>>')
# get vm domid
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
domid = dominfo.getDomid()
vif_num = len(dominfo.get_vifs()) # get num of vifs
log.debug('vm(%s) domid(%s) has %s vifs' % (vm_ref, domid, vif_num))
for eth_num in range(vif_num):
vif_dev = 'vif%s.%s' % (domid, eth_num)
vif_emu_dev = 'vif%s.%s-emu' % (domid, eth_num)
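# NOTE: only the expected device names (vif<domid>.<n> and its -emu twin) are
# computed here; the actual "is it up" probe is not implemented in this method
# as written.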
# def _VM_check_fiber(self, session, vm_ref):
# if self._VM_check_fibers_valid(session, vm_ref).get('Value'):
# return True
# else :
# log.debug('fiber device in use')
# return False
def VM_start_on(self, session, vm_ref, host_ref, start_paused, force_start):
'''
@author: wuyuewen
@summary: Start the specified VM on specified Host. This function can only be called with the VM is in the Halted State.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param host_ref: Host's uuid
@param start_paused: Instantiate VM in paused state if set to true.
@param force_start: Attempt to force the VM to start. If this flag is false then
the VM may fail pre-boot safety checks (e.g. if the CPU the VM
last booted on looks substantially different to the current
one).
@return: True | False
@rtype: dict.
'''
# import threading
# lock = threading.Lock()
# lock.acquire()
#self.__init_lock__.acquire()
try:
log.debug("in VM_start_on: %s" % vm_ref)
if BNPoolAPI._isMaster:
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
return self.VM_start(session, vm_ref, start_paused, force_start)
xennode = XendNode.instance()
master_uuid = xennode.uuid
h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
h_ip = BNPoolAPI.get_host_ip(h_ref)
log.debug(h_ip)
host_ip = BNPoolAPI.get_host_ip(host_ref)
paths = xennode.get_ha_sr_location()
log.debug(paths)
# if cmp(paths, {}) !=0:
if paths:
for p in paths.values():
# path = os.path.join(p, CACHED_CONFIG_FILE)
path = os.path.join(p, '%s.sxp' % vm_ref)
break
else:
path = ''
log.debug('vm_start_on ha path: %s' % path)
# else:
# return xen_api_error(['nfs_ha not mounted', NFS_HA_DEFAULT_PATH])
#copy sxp file to nfs
xen_rpc_call(h_ip, 'VM_copy_sxp_to_nfs', vm_ref, path)
if cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) == 0:
log.debug("-----condition 1-----")
log.debug("vm dest: master, vm now: master")
response = self._VM_start(session, vm_ref, start_paused, force_start)
elif cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) != 0:
log.debug("-----condition 2-----")
log.debug("vm dest: master, vm now: node")
response = self.VM_create_from_sxp(session, path, True, False)
if cmp (response.get('Status'), 'Success') == 0:
xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) == 0:
log.debug("-----condition 3-----")
log.debug("vm dest: node, vm now: master")
response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, True, False)
if cmp (response.get('Status'), 'Success') == 0:
self._VM_destroy(session, vm_ref, False, False)
elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) != 0:
if cmp(h_ref, host_ref) == 0:
log.debug("-----condition 4-----")
log.debug("vm dest: node1, vm now: node2, node1 = node2")
response = self.VM_start(session, vm_ref, start_paused, force_start)
else:
log.debug("-----condition 5-----")
log.debug("vm dest: node1, vm now: node2, node1 != node2")
response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, True, False)
if cmp (response.get('Status'), 'Success') == 0:
xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
if cmp (response.get('Status'), 'Success') == 0:
BNPoolAPI.update_data_struct('vm_start_on', vm_ref, h_ref, host_ref)
log.debug("Finished start on: %s migrate vm(%s) to %s" % (h_ip, vm_ref, host_ip))
return response
else:
path = ''
return self.VM_start(session, vm_ref, start_paused, force_start)
except Exception, exn:
log.exception(exn)
return xen_api_error(['START_ON_FAILED', exn])
finally:
if path:
cmd = 'rm -f %s' % path
doexec(cmd)
def VM_copy_sxp_to_nfs(self, session, vm_ref, path):
'''
@author: wuyuewen
@summary: Internal method. Copy sxp to HA dir.
'''
XendDomain.instance().copy_sxp_to_ha(vm_ref, path)
return xen_api_success_void()
def VM_suspend(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_suspend(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_suspend', vm_ref)
else:
return self._VM_suspend(session, vm_ref)
def _VM_suspend(self, session, vm_ref):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_suspend", vm_ref)
def VM_unpause(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_unpause(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_unpause', vm_ref)
else:
return self._VM_unpause(session, vm_ref)
def _VM_unpause(self, session, vm_ref):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_unpause", vm_ref)
def VM_send_sysrq(self, _, vm_ref, req):
'''
@deprecated: not used
'''
xeninfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
if xeninfo.state == XEN_API_VM_POWER_STATE_RUNNING \
or xeninfo.state == XEN_API_VM_POWER_STATE_PAUSED:
xeninfo.send_sysrq(req)
return xen_api_success_void()
else:
return xen_api_error(
['VM_BAD_POWER_STATE', vm_ref,
XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
XendDomain.POWER_STATE_NAMES[xeninfo.state]])
def VM_send_trigger(self, _, vm_ref, trigger, vcpu):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
xendom.domain_send_trigger(xeninfo.getDomid(), trigger, vcpu)
return xen_api_success_void()
def VM_migrate(self, session, vm_ref, destination_url, live, other_config):
'''
@deprecated: not used
'''
return self._VM_migrate(session, vm_ref, destination_url, live, other_config)
def _VM_migrate(self, session, vm_ref, destination_url, live, other_config):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_pool_migrate
'''
self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
port = other_config.get("port", 0)
node = other_config.get("node", -1)
ssl = other_config.get("ssl", None)
chs = other_config.get("change_home_server", False)
xendom.domain_migrate(xeninfo.getDomid(), destination_url,
bool(live), port, node, ssl, bool(chs))
#log.debug('migrate')
# set all tag
#self.VM_set_all_tag(session, vm_ref)
return xen_api_success_void()
def VM_pool_migrate(self, session, vm_ref, dst_host_ref, other_config):
'''
@author: wuyuewen
@summary: Migrate specified VM to specified Host. IO limit settings must be read
before the migration and set back afterwards.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param dst_host_ref: destination Host's uuid
@param other_config: useless
@return: True | False
@rtype: dict.
'''
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
dst_host_ip = BNPoolAPI.get_host_ip(dst_host_ref)
tag_list = self.VM_get_all_tag(session, vm_ref, 'tag').get('Value')
rate_list = self.VM_get_all_tag(session, vm_ref, 'rate').get('Value')
burst_list = self.VM_get_all_tag(session, vm_ref, 'burst').get('Value')
io_limit_list = {}
for type in ['read', 'write']:
for io_unit in ['MBps', 'iops']:
key = "%s_%s" % (type, io_unit)
io_limit_list[key] = self.VM_get_IO_rate_limit(session, vm_ref, type, io_unit).get('Value')
if cmp(host_ref, XendNode.instance().uuid) == 0:
self._VM_migrate(session, vm_ref, dst_host_ip, True, other_config)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
xen_rpc_call(host_ip, "VM_migrate", vm_ref, dst_host_ip, True, other_config)
log.debug("Migrate VM from host: %s" % host_ip)
log.debug("Migrate VM to host: %s" % dst_host_ip)
BNPoolAPI.update_data_struct("vm_migrate", vm_ref, host_ref, dst_host_ref)
self.VM_set_all_tag(session, vm_ref, tag_list)
self.VM_set_all_rate(session, vm_ref, 'rate', rate_list)
self.VM_set_all_rate(session, vm_ref, 'burst', burst_list)
self.VM_start_set_IO_limit(session, vm_ref, io_limit_list)
return xen_api_success_void()
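# Note on the ordering in this method: tags, rate/burst settings and the four
# IO limits (read/write x MBps/iops) are captured before the migration and
# re-applied afterwards, evidently because they do not travel with the
# migrated domain.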
def VM_save(self, _, vm_ref, dest, checkpoint):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
xendom.domain_save(xeninfo.getDomid(), dest, checkpoint)
return xen_api_success_void()
def VM_restore(self, _, src, paused):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xendom.domain_restore(src, bool(paused))
return xen_api_success_void()
def VM_check_usb_scsi_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check usb scsi validity.
'''
return self._VM_check_usb_scsi_valid(session, vm_ref)
def _VM_check_usb_scsi_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check usb scsi validity.
'''
log.debug('VM_check_usb_scsi_valid')
crush_vm = None
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
#get local usb scsi device uuids of the to-be-started vm
loc_fiber_unames = []
loc_fiber_uuids = self._VM_get_usb_scsi(session, vm_ref).get('Value')
# get local usb scsi device unames of the to-be-started vm
for loc_fiber_uuid in loc_fiber_uuids:
dev_type, dev_config = dominfo.info['devices'].get(loc_fiber_uuid, (None, None))
if dev_config:
loc_fiber_uname = dev_config.get('uname')
if loc_fiber_uname:
loc_fiber_unames.append(loc_fiber_uname)
if loc_fiber_unames:
running_vms = xd.get_running_vms()
for vm in running_vms:
#if vm.info.get('domid') == dominfo.info.get('domid'):
#log.debug('check dom itself %s' % vm.info.get('domid'))
#continue
device_struct = vm.info['devices']
for uuid, config in device_struct.items():
if config[1].get('uname') in loc_fiber_unames:
vm_name = vm.info['name_label']
crush_vm = vm_name
return xen_api_success(crush_vm)
return xen_api_success(crush_vm)
def VM_check_fibers_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check fibers validity.
'''
return self._VM_check_fibers_valid(session, vm_ref)
#add by wufan
def _VM_check_fibers_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check fibers validity.
'''
log.debug('VM_check_fibers_valid')
crush_vm = None
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
#get local fiber uuid of the to_started vm
loc_fiber_unames = []
loc_fiber_uuids = self._VM_get_fibers(session, vm_ref).get('Value')
# get local fiber uname of the to_started vm
for loc_fiber_uuid in loc_fiber_uuids:
dev_type, dev_config = dominfo.info['devices'].get(loc_fiber_uuid, (None, None))
if dev_config:
loc_fiber_uname = dev_config.get('uname')
if loc_fiber_uname:
loc_fiber_unames.append(loc_fiber_uname)
if loc_fiber_unames:
running_vms = xd.get_running_vms()
for vm in running_vms:
#if vm.info.get('domid') == dominfo.info.get('domid'):
#log.debug('check dom itself %s' % vm.info.get('domid'))
#continue
device_struct = vm.info['devices']
for uuid, config in device_struct.items():
if config[1].get('uname') in loc_fiber_unames:
vm_name = vm.info['name_label']
crush_vm = vm_name
return xen_api_success(crush_vm)
return xen_api_success(crush_vm)
def VM_cpu_pool_migrate(self, session, vm_ref, cpu_pool_ref):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
domid = xeninfo.getDomid()
pool = XendAPIStore.get(cpu_pool_ref, XendCPUPool.getClass())
if pool == None:
return xen_api_error(['HANDLE_INVALID', 'cpu_pool', cpu_pool_ref])
if domid is not None:
if domid == 0:
return xen_api_error(['OPERATION_NOT_ALLOWED',
'could not move Domain-0'])
try:
XendCPUPool.move_domain(cpu_pool_ref, domid)
except Exception, ex:
return xen_api_error(['INTERNAL_ERROR',
'could not move domain'])
self.VM_set('pool_name', session, vm_ref, pool.get_name_label())
return xen_api_success_void()
def VM_create_data_VBD(self, session, vm_ref, vdi_ref, read_only=False):
'''
@author: wuyuewen
@summary: VM create data VBD and VDI.
@precondition: At most 8 data VBDs per VM.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vdi_ref: new VDI's uuid
@return: True | False
@rtype: dict.
'''
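# Sketch of the vbd_struct assembled by _VM_create_data_VBD below for a
# read-write data disk (field values taken from that method):
#   {'VM': vm_ref, 'VDI': vdi_ref, 'bootable': False,
#    'mode': 'RW', 'type': 'Disk'}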
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_create_data_VBD(session, vm_ref, vdi_ref, read_only)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_create_data_VBD', vm_ref, vdi_ref, read_only)
else:
return self._VM_create_data_VBD(session, vm_ref, vdi_ref, read_only)
def _VM_create_data_VBD(self, session, vm_ref, vdi_ref, read_only):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_create_data_VBD
'''
log.debug("=====VM_create_data_VBD=====")
if not read_only:
vbd_struct = {'VM' : vm_ref,
'VDI' : vdi_ref,
'bootable' : False,
# 'device' : self._VM_get_available_vbd_device(session, vm_ref, 'xvd').get('Value', ''),
'mode' : 'RW',
'type' : 'Disk',
}
else:
vbd_struct = {'VM' : vm_ref,
'VDI' : vdi_ref,
'bootable' : False,
# 'device' : self._VM_get_available_vbd_device(session, vm_ref, 'xvd').get('Value', ''),
'mode' : 'R',
'type' : 'Disk',
}
response = self._VBD_create(session, vbd_struct)
if cmp(response.get('Status'), 'Success') == 0:
return xen_api_success(True)
else:
return xen_api_success(False)
def VM_delete_data_VBD(self, session, vm_ref, vdi_ref):
'''
@author: wuyuewen
@summary: VM delete data VBD and VDI.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vdi_ref: uuid of the VDI whose VBD will be deleted
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_delete_data_VBD(session, vm_ref, vdi_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_delete_data_VBD', vm_ref, vdi_ref)
else:
return self._VM_delete_data_VBD(session, vm_ref, vdi_ref)
def _VM_delete_data_VBD(self, session, vm_ref, vdi_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_delete_data_VBD
'''
self.__vbd_lock__.acquire()
try:
log.debug("=====VM_delete_data_VBD=====")
log.debug('VDI ref: %s' % vdi_ref)
vdi = XendNode.instance().get_vdi_by_uuid(vdi_ref)
vbd = []
vbd_ref = ""
if vdi:
log.debug('get VBDs by VDI:')
vbd = vdi.getVBDs()
log.debug(vbd)
else:
return xen_api_success(False)
if vbd and isinstance(vbd, list):
vbd_ref = vbd[0]
else:
return xen_api_success(False)
log.debug("vbd ref: %s" % vbd_ref)
response = self.VBD_destroy(session, vbd_ref)
if cmp(response.get('Status'), 'Success') == 0:
return xen_api_success(True)
else:
return xen_api_success(False)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
finally:
self.__vbd_lock__.release()
# Xen API: Class VBD
# ----------------------------------------------------------------
VBD_attr_ro = ['VM',
'VDI',
'metrics',
'runtime_properties',
'io_read_kbs',
'io_write_kbs']
VBD_attr_rw = ['device',
'bootable',
'mode',
'type']
VBD_attr_inst = VBD_attr_rw
VBD_methods = [('media_change', None), ('destroy', None), ('destroy_on', None)]
VBD_funcs = [('create', 'VBD'),
('create_on', 'VBD')]
# object methods
def VBD_get_record(self, session, vbd_ref):
storage = self._get_BNStorageAPI_instance()
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
cfg = vm.get_dev_xenapi_config('vbd', vbd_ref)
if not cfg:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
return_cfg = {}
for k in cfg.keys():
if k in valid_vbd_keys:
return_cfg[k] = cfg[k]
return_cfg['metrics'] = vbd_ref
return_cfg['runtime_properties'] = {} #todo
return_cfg['io_read_kbs'] = vm.get_dev_property('vbd', vbd_ref, 'io_read_kbs')
return_cfg['io_write_kbs'] = vm.get_dev_property('vbd', vbd_ref, 'io_write_kbs')
if return_cfg.get('VDI'):
location = storage.VDI_get_location(session, return_cfg.get('VDI')).get('Value')
if location:
return_cfg['userdevice'] = location
# log.debug(return_cfg)
return xen_api_success(return_cfg)
def VBD_media_change(self, session, vbd_ref, new_vdi_ref):
xendom = XendDomain.instance()
xennode = XendNode.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
cur_vbd_struct = vm.get_dev_xenapi_config('vbd', vbd_ref)
if not cur_vbd_struct:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['type'] != XEN_API_VBD_TYPE[0]: # Not CD
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['mode'] != 'RO': # Not read only
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
new_vdi = xennode.get_vdi_by_uuid(new_vdi_ref)
if not new_vdi:
return xen_api_error(['HANDLE_INVALID', 'VDI', new_vdi_ref])
new_vdi_image = new_vdi.get_location()
valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
new_vbd_struct = {}
for k in cur_vbd_struct.keys():
if k in valid_vbd_keys:
new_vbd_struct[k] = cur_vbd_struct[k]
new_vbd_struct['VDI'] = new_vdi_ref
try:
XendTask.log_progress(0, 100,
vm.change_vdi_of_vbd,
new_vbd_struct, new_vdi_image)
except XendError, e:
log.exception("Error in VBD_media_change")
return xen_api_error(['INTERNAL_ERROR', str(e)])
return xen_api_success_void()
# class methods
def VBD_create_on(self, session, vbd_struct, host_ref):
storage = self._get_BNStorageAPI_instance()
# log.debug(vbd_struct)
if BNPoolAPI._isMaster:
vbd_type = vbd_struct.get('type')
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VBD_create(session, vbd_struct)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
if cmp(vbd_type, XEN_API_VBD_TYPE[0]) == 0:
vdi = vbd_struct.get('VDI')
if vdi:
log.debug(storage.VDI_get_name_label(session, vdi))
vdi_name = storage.VDI_get_name_label(session, vdi).get('Value')
if vdi_name:
remote_vdi = xen_rpc_call(remote_ip, 'VDI_get_by_name_label', vdi_name).get('Value')
if remote_vdi:
vbd_struct['VDI'] = remote_vdi
else:
return xen_api_error(['%s VDI %s not find!' % (remote_ip, vdi_name)])
else:
return xen_api_error(['Invaild VDI %s' % vdi])
else:
return xen_api_error(['vbd struct error, VDI not define.'])
return xen_rpc_call(remote_ip, 'VBD_create', vbd_struct)
else:
return self.VBD_create(session, vbd_struct)
def VBD_create(self, session, vbd_struct):
vm_ref = vbd_struct.get('VM')
if not vm_ref:
return xen_api_error(['VM_NOT_FOUND'])
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VBD_create(session, vbd_struct)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VBD_create', vbd_struct)
else:
return self._VBD_create(session, vbd_struct)
def _VBD_create(self, session, vbd_struct):
xendom = XendDomain.instance()
xennode = XendNode.instance()
if not xendom.is_valid_vm(vbd_struct['VM']):
return xen_api_error(['VM_NOT_FOUND', 'VM', vbd_struct['VM']])
dom = xendom.get_vm_by_uuid(vbd_struct['VM'])
vdi = xennode.get_vdi_by_uuid(vbd_struct['VDI'])
if not vdi:
return xen_api_error(['HANDLE_INVALID', 'VDI', vbd_struct['VDI']])
# new VBD via VDI/SR
vdi_image = vdi.get_location()
log.debug("vdi location: %s" % vdi_image)
try:
vbd_ref = XendTask.log_progress(0, 100,
dom.create_vbd_for_xenapi,
vbd_struct, vdi_image)
log.debug('VBD_create %s' % vbd_ref)
except XendError, e:
log.exception("Error in VBD_create")
return xen_api_error(['INTERNAL_ERROR', str(e)])
xendom.managed_config_save(dom)
return xen_api_success(vbd_ref)
def VBD_destroy(self, session, vbd_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
# vdi_ref = XendDomain.instance()\
# .get_dev_property_by_uuid('vbd', vbd_ref, "VDI")
# vdi = XendNode.instance().get_vdi_by_uuid(vdi_ref)
XendTask.log_progress(0, 100, vm.destroy_vbd, vbd_ref)
xendom.managed_config_save(vm)
return xen_api_success_void()
def VBD_destroy_on(self, session, vbd_ref, host_ref):
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VBD_destroy(session, vbd_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, "VBD_destroy", vbd_ref)
else:
return self.VBD_destroy(session, vbd_ref)
def _VBD_get(self, vbd_ref, prop):
return xen_api_success(
XendDomain.instance().get_dev_property_by_uuid(
'vbd', vbd_ref, prop))
# attributes (ro)
def VBD_get_metrics(self, _, vbd_ref):
return xen_api_success(vbd_ref)
def VBD_get_runtime_properties(self, _, vbd_ref):
xendom = XendDomain.instance()
dominfo = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
device = dominfo.get_dev_config_by_uuid('vbd', vbd_ref)
try:
devid = int(device['id'])
device_sxps = dominfo.getDeviceSxprs('vbd')
device_dicts = [dict(device_sxp[1][0:]) for device_sxp in device_sxps]
device_dict = [device_dict
for device_dict in device_dicts
if int(device_dict['virtual-device']) == devid][0]
return xen_api_success(device_dict)
except Exception, exn:
log.exception(exn)
return xen_api_success({})
# attributes (rw)
def VBD_get_VM(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'VM')
def VBD_get_VDI(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'VDI')
def VBD_get_device(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'device')
def VBD_get_bootable(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'bootable')
def VBD_get_mode(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'mode')
def VBD_get_type(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'type')
def VBD_set_bootable(self, session, vbd_ref, bootable):
bootable = bool(bootable)
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
vm.set_dev_property('vbd', vbd_ref, 'bootable', int(bootable))
xd.managed_config_save(vm)
return xen_api_success_void()
def VBD_set_mode(self, session, vbd_ref, mode):
if mode == 'RW':
mode = 'w'
else:
mode = 'r'
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
vm.set_dev_property('vbd', vbd_ref, 'mode', mode)
xd.managed_config_save(vm)
return xen_api_success_void()
def VBD_set_VDI(self, session, vbd_ref, VDI):
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
vm.set_dev_property('vbd', vbd_ref, 'VDI', VDI)
xd.managed_config_save(vm)
return xen_api_success_void()
def VBD_get_all(self, session):
xendom = XendDomain.instance()
vbds = [d.get_vbds() for d in XendDomain.instance().list('all')]
vbds = reduce(lambda x, y: x + y, vbds, [])
return xen_api_success(vbds)
# Xen API: Class VBD_metrics
# ----------------------------------------------------------------
VBD_metrics_attr_ro = ['io_read_kbs',
'io_write_kbs',
'last_updated']
VBD_metrics_attr_rw = []
VBD_metrics_methods = []
def VBD_metrics_get_all(self, session):
return self.VBD_get_all(session)
def VBD_metrics_get_record(self, _, ref):
vm = XendDomain.instance().get_vm_with_dev_uuid('vbd', ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD_metrics', ref])
return xen_api_success(
{ 'io_read_kbs' : vm.get_dev_property('vbd', ref, 'io_read_kbs'),
'io_write_kbs' : vm.get_dev_property('vbd', ref, 'io_write_kbs'),
'last_updated' : now()
})
def VBD_metrics_get_io_read_kbs(self, _, ref):
return self._VBD_get(ref, 'io_read_kbs')
def VBD_metrics_get_io_write_kbs(self, session, ref):
return self._VBD_get(ref, 'io_write_kbs')
def VBD_metrics_get_last_updated(self, _1, _2):
return xen_api_success(now())
# Xen API: Class VIF
# ----------------------------------------------------------------
VIF_attr_ro = ['network',
'VM',
'metrics',
'runtime_properties']
VIF_attr_rw = ['device',
'MAC',
'MTU',
'security_label',
'physical_network',
'physical_network_local',
]
VIF_attr_inst = VIF_attr_rw
VIF_methods = [('destroy', None)]
VIF_funcs = [('create', 'VIF'),
('create_on', 'VIF'),
('create_bind_to_physical_network', None)
]
# object methods
def VIF_get_record(self, session, vif_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
cfg = vm.get_dev_xenapi_config('vif', vif_ref)
if not cfg:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
valid_vif_keys = self.VIF_attr_ro + self.VIF_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
return_cfg = {}
for k in cfg.keys():
if k in valid_vif_keys:
return_cfg[k] = cfg[k]
return_cfg['metrics'] = vif_ref
return xen_api_success(return_cfg)
# class methods
def VIF_create_on(self, session, vif_struct, host_ref):
if BNPoolAPI._isMaster:
network = vif_struct.get('network')
log.debug("get network from rec: %s", network)
#if network:
# log.debug(xenapi.network_get_name_label(session, network))
# network_label = xenapi.network_get_name_label(session, network).get('Value')
# # log.debug(network_label)
#else:
# vif_struct['network'] = 'ovs0'
# log.debug("get from network : %s" % vif_struct.get('network'))
# #return xen_api_error(['network not found'])
if not network or cmp(network, 'OpaqueRef:NULL') == 0:
vif_struct['network'] = 'ovs1'
log.debug("get from network : %s" % vif_struct.get('network'))
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VIF_create(session, vif_struct)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
#remote_network = xen_rpc_call(remote_ip, 'network_get_by_name_label', network_label).get('Value')
#if remote_network:
# log.debug(remote_network[0])
# vif_struct['network'] = remote_network[0]
#else:
# return xen_api_error(['%s network not found!' % remote_ip, 'Network'])
return xen_rpc_call(remote_ip, 'VIF_create', vif_struct)
else:
network = vif_struct.get('network')
log.debug("get network from rec: %s", network)
if not network or cmp(network, 'OpaqueRef:NULL') == 0:
vif_struct['network'] = 'ovs1'
log.debug("get from network : %s" % vif_struct.get('network'))
return self.VIF_create(session, vif_struct)
def VIF_create_bind_to_physical_network(self, session, vif_struct, phy_network):
if BNPoolAPI._isMaster:
vm_ref = vif_struct.get('VM', '')
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VIF_create_bind_to_physical_network(session, vif_struct, phy_network)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_create_bind_to_physical_network', vif_struct, phy_network)
else:
return self._VIF_create_bind_to_physical_network(session, vif_struct, phy_network)
def _VIF_create_bind_to_physical_network(self, session, vif_struct, phy_network):
vm_ref = vif_struct.get('VM', '')
vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
if vifs:
if cmp(len(vifs), INTERFACE_LIMIT) >= 0:
return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VIF'])
xenapi = self._get_XendAPI_instance()
log.debug('VIF create bind to physical network')
network_refs = xenapi.network_get_all(session).get('Value')
network_names = []
for ref in network_refs:
namelabel = xenapi.network_get_name_label(session, ref).get('Value')
network_names.append(namelabel)
# log.debug(network_names)
if phy_network not in network_names:
return xen_api_error(['Network name does not exist!'] + network_names)
vif_struct['network'] = phy_network
log.debug("get from network : %s" % vif_struct.get('network'))
return self._VIF_create(session, vif_struct)
'''
Set the physical network for a VM, passing the VIF reference.
'''
def VIF_set_physical_network(self, session, vif_ref, vm_ref, phy_network):
log.debug('VIF(%s)_set_physical_network on vm(%s)' % (vif_ref, vm_ref))
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VIF_set_physical_network_local(session, vif_ref, vm_ref, phy_network)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_set_physical_network', vif_ref, vm_ref, phy_network)
else:
return self.VIF_set_physical_network_local(session, vif_ref, vm_ref, phy_network)
def VIF_set_physical_network_local(self, session, vif_ref, vm_ref, phy_network):
xenapi = self._get_XendAPI_instance()
log.debug('local method VIF(%s)_set_physical_network on vm(%s)' % (vif_ref, vm_ref))
network_refs = xenapi.network_get_all(session).get('Value')
network_names = {}
for ref in network_refs:
namelabel = xenapi.network_get_name_label(session, ref).get('Value')
network_names[namelabel] = ref
log.debug(network_names)
if phy_network not in network_names:
return xen_api_error(['Network name does not exist!'] + network_names.keys())
xendom = XendDomain.instance()
dom = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not dom:
log.debug('VIF cannot be found on the VM!')
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
# if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
# log.debug('VM(%s) is running!' % vm_ref)
# return xen_api_error(['VM is running!'])
try:
origin_network = self.VIF_get_network(session, vif_ref).get('Value')
except:
log.exception("VIF did not have Network field.")
origin_network = None
new_network = network_names[phy_network]
origin_bridge = self._VIF_get(vif_ref, 'bridge').get('Value')
# origin_bridge = xenapi.network_get_name_label(session, origin_network).get('Value')
new_bridge = phy_network
# log.debug('origin_network: %s and new_network: %s' % (origin_network, new_network))
# log.debug('origin_bridge: %s and new_bridge: %s' % (origin_bridge, new_bridge))
#must set both network and bridge, or set bridge only,
#do not set network only, set network only won't work
rc = True
rc1 = True
if origin_network and cmp(origin_network, new_network) != 0 :
rc = self._VIF_set(vif_ref, 'network', new_network, origin_network)
if cmp(origin_bridge, new_bridge) != 0:
rc1 = self._VIF_set(vif_ref, 'bridge', new_bridge, origin_bridge)
if rc == False or rc1 == False:
log.error('set vif physical network failed')
return xen_api_error(['set vif physical network failed'])
return xen_api_success_void()
def VIF_create(self, session, vif_struct):
vm_ref = vif_struct.get('VM')
if not vm_ref:
return xen_api_error(['VM_NOT_FOUND'])
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VIF_create(session, vif_struct)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_create', vif_struct)
else:
return self._VIF_create(session, vif_struct)
def _VIF_create(self, session, vif_struct):
xendom = XendDomain.instance()
mac = vif_struct.get('MAC')
vm_ref = vif_struct.get('VM')
if not xendom.is_valid_vm(vm_ref):
return xen_api_error(['VM_NOT_FOUND', 'VM', vif_struct.get('VM')])
vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
if vifs:
if cmp(len(vifs), INTERFACE_LIMIT) >= 0:
return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VIF'])
if not self._VIF_is_mac_format_legal(mac):
return xen_api_error(['MAC_INVALID'])
dom = xendom.get_vm_by_uuid(vif_struct.get('VM'))
try:
vif_ref = dom.create_vif(vif_struct)
xendom.managed_config_save(dom)
return xen_api_success(vif_ref)
except XendError, exn:
return xen_api_error(['INTERNAL_ERROR', str(exn)])
def _VIF_is_mac_format_legal(self, mac):
mac_re = re.compile("00:16:3e:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]")
if not mac:
return True
if mac and cmp(mac_re.match(mac), None) != 0:
return True
return False
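# A quick sketch of the expected behaviour (hypothetical MAC values):
# only the Xen OUI 00:16:3e is accepted, and an unset MAC passes so one
# can be generated later.
#
#   _VIF_is_mac_format_legal('00:16:3e:0a:1b:2c')   # -> True
#   _VIF_is_mac_format_legal(None)                  # -> True
#   _VIF_is_mac_format_legal('52:54:00:0a:1b:2c')   # -> False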
def VIF_destroy(self, session, vif_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
vm.destroy_vif(vif_ref)
xendom.managed_config_save(vm)
return xen_api_success_void()
def _VIF_get(self, ref, prop):
return xen_api_success(
XendDomain.instance().get_dev_property_by_uuid('vif', ref, prop))
# getters/setters
def VIF_get_metrics(self, _, vif_ref):
return xen_api_success(vif_ref)
def VIF_get_VM(self, session, vif_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
return xen_api_success(vm.get_uuid())
def VIF_get_MTU(self, session, vif_ref):
return self._VIF_get(vif_ref, 'MTU')
def VIF_get_MAC(self, session, vif_ref):
return self._VIF_get(vif_ref, 'MAC')
def VIF_get_device(self, session, vif_ref):
return self._VIF_get(vif_ref, 'device')
def VIF_get_network(self, session, vif_ref):
return self._VIF_get(vif_ref, 'network')
def VIF_get_all(self, session):
xendom = XendDomain.instance()
vifs = [d.get_vifs() for d in XendDomain.instance().list('all')]
vifs = reduce(lambda x, y: x + y, vifs, [])
return xen_api_success(vifs)
def VIF_get_runtime_properties(self, _, vif_ref):
xendom = XendDomain.instance()
dominfo = xendom.get_vm_with_dev_uuid('vif', vif_ref)
device = dominfo.get_dev_config_by_uuid('vif', vif_ref)
try:
devid = int(device['id'])
device_sxps = dominfo.getDeviceSxprs('vif')
device_dicts = [dict(device_sxp[1][1:])
for device_sxp in device_sxps]
device_dict = [device_dict
for device_dict in device_dicts
if int(device_dict['handle']) == devid][0]
return xen_api_success(device_dict)
except Exception, exn:
log.exception(exn)
return xen_api_success({})
def VIF_get_security_label(self, session, vif_ref):
return self._VIF_get(vif_ref, 'security_label')
def _VIF_set(self, ref, prop, val, old_val):
return XendDomain.instance().set_dev_property_by_uuid(
'vif', ref, prop, val, old_val)
def VIF_set_security_label(self, session, vif_ref, sec_lab, old_lab):
xendom = XendDomain.instance()
dom = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not dom:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
raise SecurityError(-xsconstants.XSERR_RESOURCE_IN_USE)
rc = self._VIF_set(vif_ref, 'security_label', sec_lab, old_lab)
if rc == False:
raise SecurityError(-xsconstants.XSERR_BAD_LABEL)
return xen_api_success(xsconstants.XSERR_SUCCESS)
# Xen API: Class VIF_metrics
# ----------------------------------------------------------------
VIF_metrics_attr_ro = ['io_read_kbs',
'io_write_kbs',
'io_total_read_kbs',
'io_total_write_kbs',
'last_updated']
VIF_metrics_attr_rw = []
VIF_metrics_methods = []
def VIF_metrics_get_all(self, session):
return self.VIF_get_all(session)
def VIF_metrics_get_record(self, _, ref):
vm = XendDomain.instance().get_vm_with_dev_uuid('vif', ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VIF_metrics', ref])
return xen_api_success(
{ 'io_read_kbs' : vm.get_dev_property('vif', ref, 'io_read_kbs'),
'io_write_kbs' : vm.get_dev_property('vif', ref, 'io_write_kbs'),
'io_total_read_kbs' : vm.get_dev_property('vif', ref, 'io_total_read_kbs'),
'io_total_write_kbs' : vm.get_dev_property('vif', ref, 'io_total_write_kbs'),
'last_updated' : now()
})
def VIF_metrics_get_io_read_kbs(self, _, ref):
return self._VIF_get(ref, 'io_read_kbs')
def VIF_metrics_get_io_write_kbs(self, session, ref):
return self._VIF_get(ref, 'io_write_kbs')
def VIF_metrics_get_io_total_read_kbs(self, _, ref):
return self._VIF_get(ref, 'io_total_read_kbs')
def VIF_metrics_get_io_total_write_kbs(self, session, ref):
return self._VIF_get(ref, 'io_total_write_kbs')
def VIF_metrics_get_last_updated(self, _1, _2):
return xen_api_success(now())
# Xen API: Class console
# ----------------------------------------------------------------
console_attr_ro = ['location', 'protocol', 'VM']
console_attr_rw = ['other_config']
console_methods = [('destroy', None)]
console_funcs = [('create', 'console'),
('create_on', 'console')]
def console_get_all(self, session):
xendom = XendDomain.instance()
# cons = list(BNPoolAPI._consoles_to_VM.keys())
cons = [d.get_consoles() for d in XendDomain.instance().list('all')]
cons = reduce(lambda x, y: x + y, cons, [])
return xen_api_success(cons)
def console_get_location(self, session, console_ref):
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_console(console_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._console_get_location(console_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "console_get_location", console_ref)
else:
return self._console_get_location(console_ref)
def _console_get_location(self, console_ref):
xendom = XendDomain.instance()
return xen_api_success(xendom.get_dev_property_by_uuid('console',
console_ref,
'location'))
def console_get_protocol(self, session, console_ref):
xendom = XendDomain.instance()
return xen_api_success(xendom.get_dev_property_by_uuid('console',
console_ref,
'protocol'))
def console_get_VM(self, session, console_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('console', console_ref)
return xen_api_success(vm.get_uuid())
def console_get_other_config(self, session, console_ref):
xendom = XendDomain.instance()
return xen_api_success(xendom.get_dev_property_by_uuid('console',
console_ref,
'other_config'))
# object methods
def _console_get_record(self, session, console_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('console', console_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'console', console_ref])
cfg = vm.get_dev_xenapi_config('console', console_ref)
log.debug(cfg)
if not cfg:
return xen_api_error(['HANDLE_INVALID', 'console', console_ref])
valid_console_keys = self.console_attr_ro + self.console_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
return_cfg = {}
for k in cfg.keys():
if k in valid_console_keys:
return_cfg[k] = cfg[k]
return xen_api_success(return_cfg)
def console_get_record(self, session, console_ref):
if BNPoolAPI._isMaster:
# try:
host_ref = BNPoolAPI.get_host_by_console(console_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._console_get_record(session, console_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'console_get_record', console_ref)
# proxy = ServerProxy('http://' + remote_ip + ':9363')
# response = proxy.session.login('root')
# if cmp(response['Status'], 'Failure') == 0:
# return xen_api_error(response['ErrorDescription'])
# session_ref = response['Value']
# return proxy.console.get_record(session_ref, console_ref)
# except KeyError:
# return xen_api_error(['key error', console_ref])
# except socket.error:
# return xen_api_error(['socket error', console_ref])
else:
return self._console_get_record(session, console_ref)
def console_create_on(self, session, console_struct, host_ref):
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.console_create(session, console_struct)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(remote_ip, 'console_create', console_struct)
if cmp (response.get('Status'), 'Success') == 0:
BNPoolAPI.update_data_struct("console_create", response.get('Value'), console_struct.get('VM'))
return response
else:
return self.console_create(session, console_struct)
def console_create(self, session, console_struct):
vm_ref = console_struct['VM']
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._console_create(session, console_struct)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'console_create', console_struct)
else:
return self._console_create(session, console_struct)
def _console_create(self, session, console_struct):
xendom = XendDomain.instance()
if not xendom.is_valid_vm(console_struct['VM']):
return xen_api_error(['HANDLE_INVALID', 'VM',
console_struct['VM']])
dom = xendom.get_vm_by_uuid(console_struct['VM'])
try:
if 'protocol' not in console_struct:
return xen_api_error(['CONSOLE_PROTOCOL_INVALID',
'No protocol specified'])
console_ref = dom.create_console(console_struct)
xendom.managed_config_save(dom)
BNPoolAPI.update_data_struct("console_create", console_ref, dom.get_uuid())
return xen_api_success(console_ref)
except XendError, exn:
return xen_api_error(['INTERNAL_ERROR', str(exn)])
def console_destroy(self, session, console_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('console', console_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'Console', console_ref])
vm.destroy_console(console_ref)
xendom.managed_config_save(vm)
return xen_api_success_void()
def console_set_other_config(self, session, console_ref, other_config):
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('console', console_ref)
vm.set_console_other_config(console_ref, other_config)
xd.managed_config_save(vm)
return xen_api_success_void()
class BNVMAPIAsyncProxy:
""" A redirector for Async.Class.function calls to XendAPI
but wraps the call for use with the XendTaskManager.
@ivar xenapi: Xen API instance
@ivar method_map: Mapping from XMLRPC method name to callable objects.
"""
method_prefix = 'Async.'
def __init__(self, xenapi):
"""Initialises the Async Proxy by making a map of all
implemented Xen API methods for use with XendTaskManager.
@param xenapi: XendAPI instance
"""
self.xenapi = xenapi
self.method_map = {}
for method_name in dir(self.xenapi):
method = getattr(self.xenapi, method_name)
if method_name[0] != '_' and hasattr(method, 'async') \
and method.async == True:
self.method_map[method.api] = method
def _dispatch(self, method, args):
"""Overridden method so that SimpleXMLRPCServer will
resolve methods through this method rather than through
inspection.
@param method: marshalled method name from XMLRPC.
@param args: marshalled arguments from XMLRPC.
"""
# Only deal with method names that start with "Async."
if not method.startswith(self.method_prefix):
return xen_api_error(['MESSAGE_METHOD_UNKNOWN', method])
# Lookup synchronous version of the method
synchronous_method_name = method[len(self.method_prefix):]
if synchronous_method_name not in self.method_map:
return xen_api_error(['MESSAGE_METHOD_UNKNOWN', method])
method = self.method_map[synchronous_method_name]
# Check that we've got enough arguments before issuing a task ID.
needed = argcounts[method.api]
if len(args) != needed:
return xen_api_error(['MESSAGE_PARAMETER_COUNT_MISMATCH',
self.method_prefix + method.api, needed,
len(args)])
# Validate the session before proceeding
session = args[0]
if not auth_manager().is_session_valid(session):
return xen_api_error(['SESSION_INVALID', session])
# create and execute the task, and return task_uuid
return_type = getattr(method, 'return_type', '<none/>')
task_uuid = XendTaskManager.create_task(method, args,
synchronous_method_name,
return_type,
synchronous_method_name,
session)
return xen_api_success(task_uuid)
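# A minimal sketch of how the dispatcher is exercised (method name and
# refs are hypothetical). The return value wraps a task UUID, which is
# polled through the task API instead of returning the operation's own
# result:
#
#   proxy = BNVMAPIAsyncProxy(api)
#   result = proxy._dispatch('Async.VM.start', [session_ref, vm_ref])
#   task_uuid = result['Value']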
def instance():
"""Singleton constructror. Use this method instead of the class constructor.
"""
global inst
try:
inst
except NameError:
inst = BNVMAPI(None)
return inst
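# Typical use of the module-level accessor (a sketch):
#
#   api = instance()            # first call constructs BNVMAPI(None)
#   assert instance() is api    # later calls reuse the same object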
| mit | -6,386,652,897,247,954,000 | 40.294742 | 164 | 0.508084 | false |
KyleKing/PiAlarm | .archive-python/modules/tm1637.py | 1 | 6795 | """Manipulate a TM1637 7-segment display."""
import math
import threading
from time import localtime, sleep
from . import config as cg
from .context import IO
IO.setwarnings(False)
IO.setmode(IO.BCM)
HexDigits = [0x3f, 0x06, 0x5b, 0x4f, 0x66, 0x6d, 0x7d,
0x07, 0x7f, 0x6f, 0x77, 0x7c, 0x39, 0x5e, 0x79, 0x71]
ADDR_AUTO = 0x40
ADDR_FIXED = 0x44
STARTADDR = 0xC0
class TM1637(object):
"""TM1637 7-Segment Display."""
def __init__(self, clk, dio, brightness=1.0):
"""Initializer."""
self.CLK = clk
self.DIO = dio
self.brightness = brightness
self.double_point = False
self.current_values = [0, 0, 0, 0]
IO.setup(self.CLK, IO.OUT)
IO.setup(self.DIO, IO.OUT)
def cleanup(self):
"""Stop updating clock, turn off display, and cleanup GPIO."""
self.stop_clock()
self.clear()
IO.cleanup()
def clear(self):
"""Clear display."""
b = self.brightness
point = self.double_point
self.brightness = 0
self.double_point = False
data = [0x7F, 0x7F, 0x7F, 0x7F]
self.show(data)
# Restore previous settings:
self.brightness = b
self.double_point = point
def show(self, data):
"""Show data on display."""
for i in range(0, 4):
self.current_values[i] = data[i]
self.start()
self.write_byte(ADDR_AUTO)
self.br()
self.write_byte(STARTADDR)
for i in range(0, 4):
self.write_byte(self.coding(data[i]))
self.br()
self.write_byte(0x88 + int(self.brightness))
self.stop()
def set_digit(self, idx, data):
"""Set 7-segment digit by index [0, 3]."""
assert not (idx < 0 or idx > 3), 'Index must be in (0,3). Args: ({},{})'.format(idx, data)
self.current_values[idx] = data
self.start()
self.write_byte(ADDR_FIXED)
self.br()
self.write_byte(STARTADDR | idx)
self.write_byte(self.coding(data))
self.br()
self.write_byte(0x88 + int(self.brightness))
self.stop()
def set_brightness(self, percent):
"""Set brightness in range 0-1."""
max_brightness = 7.0
brightness = math.ceil(max_brightness * percent)
brightness = min(max(brightness, 0), max_brightness)  # clamp to the TM1637's 0-7 range
if (self.brightness != brightness):
self.brightness = brightness
self.show(self.current_values)
def show_colon(self, on):
"""Show or hide double point divider."""
if (self.double_point != on):
self.double_point = on
self.show(self.current_values)
def write_byte(self, data):
"""Write byte to display."""
for i in range(0, 8):
IO.output(self.CLK, IO.LOW)
if (data & 0x01):
IO.output(self.DIO, IO.HIGH)
else:
IO.output(self.DIO, IO.LOW)
data = data >> 1
IO.output(self.CLK, IO.HIGH)
# Wait for ACK
IO.output(self.CLK, IO.LOW)
IO.output(self.DIO, IO.HIGH)
IO.output(self.CLK, IO.HIGH)
IO.setup(self.DIO, IO.IN)
while IO.input(self.DIO):
sleep(0.001)
if (IO.input(self.DIO)):
IO.setup(self.DIO, IO.OUT)
IO.output(self.DIO, IO.LOW)
IO.setup(self.DIO, IO.IN)
IO.setup(self.DIO, IO.OUT)
def start(self):
"""Send start signal to TM1637."""
IO.output(self.CLK, IO.HIGH)
IO.output(self.DIO, IO.HIGH)
IO.output(self.DIO, IO.LOW)
IO.output(self.CLK, IO.LOW)
def stop(self):
"""Stop clock."""
IO.output(self.CLK, IO.LOW)
IO.output(self.DIO, IO.LOW)
IO.output(self.CLK, IO.HIGH)
IO.output(self.DIO, IO.HIGH)
def br(self):
"""Terse break."""
self.stop()
self.start()
def coding(self, data):
"""Set coding of display."""
point_data = 0x80 if self.double_point else 0
return 0 if data == 0x7F else HexDigits[data] + point_data
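# For example, with the colon disabled coding(4) returns HexDigits[4],
# i.e. 0x66 (segments b, c, f, g); with double_point set the point bit is
# OR'd in, giving 0x66 + 0x80 == 0xe6. The sentinel 0x7F blanks the digit.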
def clock(self, military_time):
"""Clock thread script."""
# Based on: https://github.com/johnlr/raspberrypi-tm1637
self.show_colon(True)
while (not self.__stop_event.is_set()):
t = localtime()
hour = t.tm_hour
if not military_time:
hour = 12 if (t.tm_hour % 12) == 0 else t.tm_hour % 12
d0 = hour // 10
d1 = hour % 10
d2 = t.tm_min // 10
d3 = t.tm_min % 10
digits = [d0, d1, d2, d3]
self.show(digits)
# # Optional visual feedback of running alarm:
# print digits
# for i in tqdm(range(60 - t.tm_sec)):
for i in range(60 - t.tm_sec):
if (not self.__stop_event.is_set()):
sleep(1)
def start_clock(self, military_time=True):
"""Start clock thread."""
# Stop event based on: http://stackoverflow.com/a/6524542/3219667
self.__stop_event = threading.Event()
self.__clock_thread = threading.Thread(target=self.clock, args=(military_time,))
self.__clock_thread.daemon = True # stops w/ main thread
self.__clock_thread.start()
def stop_clock(self):
"""Stop clock thread."""
try:
print('Attempting to stop live clock')
self.__stop_event.set()
self.clear()
except AttributeError:
print('No clock to close')
if __name__ == '__main__':
"""Confirm the display operation"""
# Initialize the clock (GND, VCC=3.3V, Example Pins are DIO=20 and CLK=21)
clock = cg.get_pin('7Segment', 'clk')
digital = cg.get_pin('7Segment', 'dio')
display = TM1637(clk=clock, dio=digital, brightness=1.0)
print('clock', clock)
print('digital', digital)
display.clear()
digits = [1, 2, 3, 4]
display.show(digits)
input('1234 - Working? (Press Key)')
print('Updating one digit at a time:')
display.clear()
display.set_digit(1, 3)
sleep(0.5)
display.set_digit(2, 2)
sleep(0.5)
display.set_digit(3, 1)
sleep(0.5)
display.set_digit(0, 4)
input('4321 - (Press Key)')
print('Add double point\n')
display.show_colon(True)
sleep(0.2)
print('Brightness Off')
display.set_brightness(0)
sleep(0.5)
print('Full Brightness')
display.set_brightness(1)
sleep(0.5)
print('30% Brightness')
display.set_brightness(0.3)
sleep(0.3)
input('Start the clock?')
display.start_clock(military_time=True)
input('Stop the clock?')
display.stop_clock()
| mit | -4,063,615,821,204,735,500 | 28.16309 | 98 | 0.547903 | false |
fresskarma/tinyos-1.x | tools/python/pytos/util/MessageSnooper.py | 1 | 4759 | #!/usr/bin/python
#$Id: MessageSnooper.py,v 1.2 2005/10/27 02:23:37 kaminw Exp $
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
import sys
import pytos.Comm as Comm
import pytos.tools.Drain as Drain
import pytos.tools.Drip as Drip
import threading
def registerAllMsgs(msgs, msgQueue, connection) :
for msgName in msgs._msgNames :
msg = msgs[msgName]
connection.register( msg , msgQueue )
class MessageSnooper( object ) :
"""This module offers \"register\" and \"unregister\" functions that
take a messageHandler argument but no message type argument.
Instead, the messageHandler will receive ALL incoming messages. It
currently handles local receive, drain messages, rpc messages, and
ramSymbol messages. Any new routing protocols should be
incorporated into this module.
usage:
snooper = MessageSnooper(app)
snooper.start()
snooper.stop()
snooper.register(callbackFcn)
snooper.unregister(callbackFcn)
"""
def __init__( self , app="" ) :
self.app = app
self.listeners = []
msgQueue = Comm.MessageQueue(10)
#register the msgQueue for all message types with localComm
comm = Comm.getCommObject(self.app, self.app.motecom)
registerAllMsgs(self.app.msgs, msgQueue, comm)
#register the msgQueue for all message types with drain and unregister DrainMsg with localComm
if "AM_DRAINMSG" in self.app.enums._enums :
drains = Drain.getDrainObject(self.app)
for drain in drains:
registerAllMsgs(self.app.msgs, msgQueue, drain)
comm.unregister(self.app.msgs.DrainMsg, msgQueue)
#if rpc is imported
if self.app.__dict__.has_key("rpc") :
#make sure a drip object exists for snooping on cmds
drips = Drip.getDripObject(self.app, self.app.motecom, self.app.enums.AM_RPCCOMMANDMSG)
#register the msgQueue for all rpc response messages
for command in self.app.rpc._messages.values() :
command.register(msgQueue)
#and unregister RpcResponseMsg from drain
drains = Drain.getDrainObject(self.app, self.app.motecom, 0xfffe) #ugh... hard coded number
for drain in drains:
drain.unregister(app.msgs.RpcResponseMsg, msgQueue)
#if ram symbols is imported
if self.app.__dict__.has_key("ramSymbols") :
#register the msgQueue for all ram symbol response messages
for symbol in self.app.ramSymbols._messages.values() :
symbol.registerPeek(msgQueue)
symbol.registerPoke(msgQueue)
#and unregister from peek/poke rpc commands
self.app.RamSymbolsM.peek.unregister(msgQueue)
self.app.RamSymbolsM.poke.unregister(msgQueue)
#register the msgQueue for all message types with drip and unregister DripMsg with localComm
if "AM_DRIPMSG" in self.app.enums._enums :
drips = Drip.getDripObject(self.app)
for drip in drips:
print "actually dtrying to register dripmsgs\n"
registerAllMsgs(self.app.msgs, msgQueue, drip)
comm.unregister(self.app.msgs.DripMsg, msgQueue)
self.running = True
msgThread = threading.Thread(target=self.processMessages,
args=(msgQueue,))
msgThread.setDaemon(True)
msgThread.start()
def processMessages(self, msgQueue) :
while True :
(addr,msg) = msgQueue.get()
if self.running == True :
for listener in self.listeners :
listener.messageReceived(addr, msg)
def stop(self) :
self.running = False
def start(self) :
self.running = True
def register(self, msgHandler) :
self.listeners.append(msgHandler)
def unregister(self, msgHandler) :
self.listeners.remove(msgHandler)
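# A minimal listener sketch (hypothetical class name): any object with a
# messageReceived(addr, msg) method can be registered, since that is the
# only attribute processMessages touches.
#
# class PrintHandler( object ) :
#     def messageReceived(self, addr, msg) :
#         print addr, msg
#
# snooper = MessageSnooper(app)
# snooper.register(PrintHandler())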
| bsd-3-clause | 8,937,040,000,648,870,000 | 37.691057 | 98 | 0.709603 | false |
stankovski/AutoRest | ClientRuntimes/Python/msrestazure/msrestazure/azure_active_directory.py | 1 | 17399 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import ast
import time
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
import keyring
from oauthlib.oauth2 import BackendApplicationClient, LegacyApplicationClient
from oauthlib.oauth2.rfc6749.errors import (
InvalidGrantError,
MismatchingStateError,
OAuth2Error,
TokenExpiredError)
from requests import RequestException
import requests_oauthlib as oauth
from msrest.authentication import OAuthTokenAuthentication
from msrest.exceptions import TokenExpiredError as Expired
from msrest.exceptions import (
AuthenticationError,
raise_with_traceback)
def _build_url(uri, paths, scheme):
"""Combine URL parts.
:param str uri: The base URL.
:param list paths: List of strings that make up the URL.
:param str scheme: The URL scheme, 'http' or 'https'.
:rtype: str
:return: Combined, formatted URL.
"""
path = [str(p).strip('/') for p in paths]
combined_path = '/'.join(path)
parsed_url = urlparse(uri)
replaced = parsed_url._replace(scheme=scheme)
if combined_path:
path = '/'.join([replaced.path, combined_path])
replaced = replaced._replace(path=path)
new_url = replaced.geturl()
new_url = new_url.replace('///', '//')
return new_url
def _http(uri, *extra):
"""Convert https URL to http.
:param str uri: The base URL.
:param str extra: Additional URL paths (optional).
:rtype: str
:return: An HTTP URL.
"""
return _build_url(uri, extra, 'http')
def _https(uri, *extra):
"""Convert http URL to https.
:param str uri: The base URL.
:param str extra: Additional URL paths (optional).
:rtype: str
:return: An HTTPS URL.
"""
return _build_url(uri, extra, 'https')
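# A small illustration (hypothetical host): the helpers normalize the
# scheme and join the path fragments, so
#
#   _https('http://login.example.com', 'common', '/oauth2/token')
#
# would yield 'https://login.example.com/common/oauth2/token'.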
class AADMixin(object):
"""Mixin for Authentication object.
Provides some AAD functionality:
- State validation
- Token caching and retrieval
- Default AAD configuration
"""
_auth_endpoint = "//login.microsoftonline.com"
_china_auth_endpoint = "//login.chinacloudapi.cn"
_token_uri = "/oauth2/token"
_auth_uri = "/oauth2/authorize"
_tenant = "common"
_resource = 'https://management.core.windows.net/'
_china_resource = "https://management.core.chinacloudapi.cn/"
_keyring = "AzureAAD"
def _configure(self, **kwargs):
"""Configure authentication endpoint.
Optional kwargs may include:
- china (bool): Configure auth for China-based service,
default is 'False'.
- tenant (str): Alternative tenant, default is 'common'.
- auth_uri (str): Alternative authentication endpoint.
- token_uri (str): Alternative token retrieval endpoint.
- resource (str): Alternative authentication resource, default
is 'https://management.core.windows.net/'.
- verify (bool): Verify secure connection, default is 'True'.
- keyring (str): Name of local token cache, default is 'AzureAAD'.
"""
if kwargs.get('china'):
auth_endpoint = self._china_auth_endpoint
resource = self._china_resource
else:
auth_endpoint = self._auth_endpoint
resource = self._resource
tenant = kwargs.get('tenant', self._tenant)
self.auth_uri = kwargs.get('auth_uri', _https(
auth_endpoint, tenant, self._auth_uri))
self.token_uri = kwargs.get('token_uri', _https(
auth_endpoint, tenant, self._token_uri))
self.verify = kwargs.get('verify', True)
self.cred_store = kwargs.get('keyring', self._keyring)
self.resource = kwargs.get('resource', resource)
self.state = oauth.oauth2_session.generate_token()
self.store_key = "{}_{}".format(
self._auth_endpoint.strip('/'), self.store_key)
def _check_state(self, response):
"""Validate state returned by AAD server.
:param str response: URL returned by server redirect.
:raises: ValueError if state does not match that of the request.
:rtype: None
"""
query = parse_qs(urlparse(response).query)
if self.state not in query.get('state', []):
raise ValueError(
"State received from server does not match that of request.")
def _parse_token(self):
if self.token.get('expires_at'):
countdown = float(self.token['expires_at']) - time.time()
self.token['expires_in'] = countdown
kwargs = {}
if self.token.get('refresh_token'):
kwargs['auto_refresh_url'] = self.token_uri
kwargs['auto_refresh_kwargs'] = {'client_id': self.id,
'resource': self.resource}
kwargs['token_updater'] = self._default_token_cache
return kwargs
def _default_token_cache(self, token):
"""Store token for future sessions.
:param dict token: An authentication token.
:rtype: None
"""
self.token = token
keyring.set_password(self.cred_store, self.store_key, str(token))
def _retrieve_stored_token(self):
"""Retrieve stored token for new session.
:raises: ValueError if no cached token found.
:rtype: dict
:return: Retrieved token.
"""
token = keyring.get_password(self.cred_store, self.store_key)
if token is None:
raise ValueError("No stored token found.")
self.token = ast.literal_eval(str(token))
self.signed_session()
def signed_session(self):
"""Create token-friendly Requests session, using auto-refresh.
Used internally when a request is made.
:rtype: requests_oauthlib.OAuth2Session
:raises: TokenExpiredError if token can no longer be refreshed.
"""
kwargs = self._parse_token()
try:
new_session = oauth.OAuth2Session(
self.id,
token=self.token,
**kwargs)
return new_session
except TokenExpiredError as err:
raise_with_traceback(Expired, "", err)
def clear_cached_token(self):
"""Clear any stored tokens.
:raises: KeyError if failed to clear token.
:rtype: None
"""
try:
keyring.delete_password(self.cred_store, self.store_key)
except keyring.errors.PasswordDeleteError:
raise_with_traceback(KeyError, "Unable to clear token.")
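# The cache flow in brief (a sketch; 'creds' stands for any of the
# concrete credential classes below):
#
#   creds.set_token()                        # fetch and hold a token
#   creds._default_token_cache(creds.token)  # persist it to the keyring
#   creds.clear_cached_token()               # drop it again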
class UserPassCredentials(OAuthTokenAuthentication, AADMixin):
"""Credentials object for Headless Authentication,
i.e. AAD authentication via username and password.
Headless Auth requires an AAD login (not a Live ID) that already has
permission to access the resource e.g. an organization account, and
that 2-factor auth be disabled.
Optional kwargs may include:
- china (bool): Configure auth for China-based service,
default is 'False'.
- tenant (str): Alternative tenant, default is 'common'.
- auth_uri (str): Alternative authentication endpoint.
- token_uri (str): Alternative token retrieval endpoint.
- resource (str): Alternative authentication resource, default
is 'https://management.core.windows.net/'.
- verify (bool): Verify secure connection, default is 'True'.
- keyring (str): Name of local token cache, default is 'AzureAAD'.
- cached (bool): If true, will not attempt to collect a token,
which can then be populated later from a cached token.
:param str username: Account username.
:param str password: Account password.
:param str client_id: Client ID, if not set, Xplat Client ID
will be used.
:param str secret: Client secret, only if required by server.
"""
def __init__(self, username, password,
client_id=None, secret=None, **kwargs):
if not client_id:
# Default to Xplat Client ID.
client_id = '04b07795-8ddb-461a-bbee-02f9e1bf7b46'
super(UserPassCredentials, self).__init__(client_id, None)
self._configure(**kwargs)
self.store_key += "_{}".format(username)
self.username = username
self.password = password
self.secret = secret
self.client = LegacyApplicationClient(client_id=self.id)
if not kwargs.get('cached'):
self.set_token()
@classmethod
def retrieve_session(cls, username, client_id=None):
"""Create ServicePrincipalCredentials from a cached token if it has not
yet expired.
"""
session = cls(username, None, client_id=client_id, cached=True)
session._retrieve_stored_token()
return session
def _setup_session(self):
"""Create token-friendly Requests session.
:rtype: requests_oauthlib.OAuth2Session
"""
return oauth.OAuth2Session(client=self.client)
def set_token(self):
"""Get token using Username/Password credentials.
:raises: AuthenticationError if credentials invalid, or call fails.
"""
session = self._setup_session()
optional = {}
if self.secret:
optional['client_secret'] = self.secret
try:
token = session.fetch_token(self.token_uri, client_id=self.id,
username=self.username,
password=self.password,
resource=self.resource,
verify=self.verify,
**optional)
except (RequestException, OAuth2Error, InvalidGrantError) as err:
raise_with_traceback(AuthenticationError, "", err)
self.token = token
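# Example usage (hypothetical account; the organization account must not
# have two-factor auth enabled):
#
#   credentials = UserPassCredentials('user@contoso.onmicrosoft.com', 'password')
#   session = credentials.signed_session()
#
# A token cached by an earlier run can be restored without the password:
#
#   credentials = UserPassCredentials.retrieve_session('user@contoso.onmicrosoft.com')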
class ServicePrincipalCredentials(OAuthTokenAuthentication, AADMixin):
"""Credentials object for Service Principle Authentication.
Authenticates via a Client ID and Secret.
Optional kwargs may include:
- china (bool): Configure auth for China-based service,
default is 'False'.
- tenant (str): Alternative tenant, default is 'common'.
- auth_uri (str): Alternative authentication endpoint.
- token_uri (str): Alternative token retrieval endpoint.
- resource (str): Alternative authentication resource, default
is 'https://management.core.windows.net/'.
- verify (bool): Verify secure connection, default is 'True'.
- keyring (str): Name of local token cache, default is 'AzureAAD'.
- cached (bool): If true, will not attempt to collect a token,
which can then be populated later from a cached token.
:param str client_id: Client ID.
:param str secret: Client secret.
"""
def __init__(self, client_id, secret, **kwargs):
super(ServicePrincipalCredentials, self).__init__(client_id, None)
self._configure(**kwargs)
self.secret = secret
self.client = BackendApplicationClient(self.id)
if not kwargs.get('cached'):
self.set_token()
@classmethod
def retrieve_session(cls, client_id):
"""Create ServicePrincipalCredentials from a cached token if it has not
yet expired.
"""
session = cls(client_id, None, cached=True)
session._retrieve_stored_token()
return session
def _setup_session(self):
"""Create token-friendly Requests session.
:rtype: requests_oauthlib.OAuth2Session
"""
return oauth.OAuth2Session(self.id, client=self.client)
def set_token(self):
"""Get token using Client ID/Secret credentials.
:raises: AuthenticationError if credentials invalid, or call fails.
"""
session = self._setup_session()
try:
token = session.fetch_token(self.token_uri, client_id=self.id,
resource=self.resource,
client_secret=self.secret,
response_type="client_credentials",
verify=self.verify)
except (RequestException, OAuth2Error, InvalidGrantError) as err:
raise_with_traceback(AuthenticationError, "", err)
else:
self.token = token
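# Example usage (hypothetical IDs and secret):
#
#   credentials = ServicePrincipalCredentials(
#       '00000000-0000-0000-0000-000000000000', 'client-secret',
#       tenant='contoso.onmicrosoft.com')
#   session = credentials.signed_session()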
class InteractiveCredentials(OAuthTokenAuthentication, AADMixin):
"""Credentials object for Interactive/Web App Authentication.
Requires that an AAD Client be configured with a redirect URL.
Optional kwargs may include:
- china (bool): Configure auth for China-based service,
default is 'False'.
- tenant (str): Alternative tenant, default is 'common'.
- auth_uri (str): Alternative authentication endpoint.
- token_uri (str): Alternative token retrieval endpoint.
- resource (str): Alternative authentication resource, default
is 'https://management.core.windows.net/'.
- verify (bool): Verify secure connection, default is 'True'.
- keyring (str): Name of local token cache, default is 'AzureAAD'.
- cached (bool): Accepted for symmetry with the other credential
classes; no token is collected at construction (see get_auth_url
and set_token).
:param str client_id: Client ID.
:param str redirect: Redirect URL.
"""
def __init__(self, client_id, redirect, **kwargs):
super(InteractiveCredentials, self).__init__(client_id, None)
self._configure(**kwargs)
self.redirect = redirect
# Unlike the other credential classes, no token is fetched here:
# set_token() needs the redirected response URL, which only exists
# after the user has visited get_auth_url().
@classmethod
def retrieve_session(cls, client_id, redirect):
"""Create InteractiveCredentials from a cached token if it has not
yet expired.
"""
session = cls(client_id, redirect, cached=True)
session._retrieve_stored_token()
return session
def _setup_session(self):
"""Create token-friendly Requests session.
:rtype: requests_oauthlib.OAuth2Session
"""
return oauth.OAuth2Session(self.id,
redirect_uri=self.redirect,
state=self.state)
def get_auth_url(self, msa=False, **additional_args):
"""Get URL to web portal for authentication.
:param bool msa: Set to 'True' if authenticating with Live ID. Default
is 'False'.
:param additional_args: Any additional kwargs required for the AAD
configuration: msdn.microsoft.com/en-us/library/azure/dn645542.aspx
:rtype: Tuple
:returns: The URL for authentication (str), and state code that will
be verified in the response (str).
"""
if msa:
additional_args['domain_hint'] = 'live.com'
session = self._setup_session()
auth_url, state = session.authorization_url(self.auth_uri,
resource=self.resource,
**additional_args)
return auth_url, state
def set_token(self, response_url):
"""Get token using Authorization Code from redirected URL.
:param str response_url: The full redirected URL from successful
authentication.
:raises: AuthenticationError if credentials invalid, or call fails.
"""
self._check_state(response_url)
session = self._setup_session()
if response_url.startswith(_http(self.redirect)):
response_url = _https(response_url)
elif not response_url.startswith(_https(self.redirect)):
response_url = _https(self.redirect, response_url)
try:
token = session.fetch_token(self.token_uri,
authorization_response=response_url,
verify=self.verify)
except (InvalidGrantError, OAuth2Error,
MismatchingStateError, RequestException) as err:
raise_with_traceback(AuthenticationError, "", err)
else:
self.token = token
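# A sketch of the interactive flow (client ID and redirect URL are
# hypothetical and must match the AAD client registration):
#
#   credentials = InteractiveCredentials('client-id', 'https://localhost/return')
#   url, state = credentials.get_auth_url()
#   # ...send the user to 'url', then capture the redirected response URL...
#   credentials.set_token(response_url)
#   session = credentials.signed_session()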
| mit | 1,136,198,482,296,480,600 | 37.488938 | 79 | 0.620797 | false |