repo_name (string) | path (string) | copies (class) | size (string) | content (string) | license (class) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
t3dev/odoo | addons/website/models/res_partner.py | 1 | 2246 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import werkzeug
from odoo import api, models
def urlplus(url, params):
return werkzeug.Href(url)(params or None)
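# A usage sketch (assumed behaviour of werkzeug.Href, not taken from this file):
# urlplus('//maps.googleapis.com/maps/api/staticmap', {'zoom': 8})
# should give roughly '//maps.googleapis.com/maps/api/staticmap?zoom=8',
# i.e. Href simply appends the params dict as a query string.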
class Partner(models.Model):
_name = 'res.partner'
_inherit = ['res.partner', 'website.published.multi.mixin']
@api.multi
def google_map_img(self, zoom=8, width=298, height=298):
google_maps_api_key = self.env['website'].get_current_website().google_maps_api_key
if not google_maps_api_key:
return False
params = {
'center': '%s, %s %s, %s' % (self.street or '', self.city or '', self.zip or '', self.country_id and self.country_id.display_name or ''),
'size': "%sx%s" % (width, height),
'zoom': zoom,
'sensor': 'false',
'key': google_maps_api_key,
}
return urlplus('//maps.googleapis.com/maps/api/staticmap', params)
@api.multi
def google_map_link(self, zoom=10):
params = {
'q': '%s, %s %s, %s' % (self.street or '', self.city or '', self.zip or '', self.country_id and self.country_id.display_name or ''),
'z': zoom,
}
return urlplus('https://maps.google.com/maps', params)
@api.multi
def _get_name(self):
name = super(Partner, self)._get_name()
if self._context.get('display_website') and self.env.user.has_group('website.group_multi_website'):
if self.website_id:
name += ' [%s]' % self.website_id.name
return name
def _compute_display_name(self):
self2 = self.with_context(display_website=False)
super(Partner, self2)._compute_display_name()
# onchange uses the cache to retrieve value, we need to copy computed_value into the initial env
for record, record2 in zip(self, self2):
record.display_name = record2.display_name
@api.multi
def get_base_url(self):
"""When using multi-website, we want the user to be redirected to the
most appropriate website if possible."""
res = super(Partner, self).get_base_url()
return self.website_id and self.website_id._get_http_domain() or res
| gpl-3.0 | -4,205,987,540,885,179,000 | 36.433333 | 149 | 0.600178 | false |
google-research/google-research | value_dice/wrappers/normalize_state_wrapper.py | 1 | 1143 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper that scales and shifts observations."""
import gym
class NormalizeStateWrapper(gym.ObservationWrapper):
"""Wraps an environment to shift and scale observations.
"""
def __init__(self, env, shift, scale):
super(NormalizeStateWrapper, self).__init__(env)
self.shift = shift
self.scale = scale
def observation(self, observation):
return (observation + self.shift) * self.scale
@property
def _max_episode_steps(self):
return self.env._max_episode_steps # pylint: disable=protected-access
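# A usage sketch (hypothetical names, assuming precomputed dataset statistics):
# env = NormalizeStateWrapper(gym.make('Hopper-v2'), shift=-state_mean, scale=1.0 / state_std)
# would hand the agent (obs - state_mean) / state_std instead of the raw observation.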
| apache-2.0 | 1,763,295,217,204,775,200 | 32.617647 | 74 | 0.736658 | false |
Sirs0ri/PersonalAssistant_Interfaces | interface/__main__.py | 1 | 4508 | """Starts the main interface. To be called with 'python interface'
from the root folder. After importing it, the interface will be
started."""
import logging
import logging.handlers
import Queue
import socket
import sys
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
from twisted.internet import reactor
import logger
import basic_interface as interface
# if "--localhost" in sys.argv or "-L" in sys.argv:
# IP = "127.0.0.1"
# else:
# IP = "192.168.178.46"
logger.initialize()
LOGGER = logging.getLogger(__name__)
def wait_for_server_ip():
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(30)
# Bind the socket to the port
server_address = ('', 10000)
LOGGER.info('starting up on %s port %s',
server_address[0],
server_address[1])
interface.print_msg("Waiting for a broadcast from the server.")
sock.bind(server_address)
# expects (host, port) as arg, two brackets are on purpose
data = None
try:
LOGGER.info('waiting to receive message')
# TODO: This fails in bash if the port isn't explicitly opened
data, address = sock.recvfrom(4096)
LOGGER.info('received %d bytes from %s', len(data), address)
LOGGER.info(data)
interface.print_msg('Received %s from %s' % (data, address))
if data:
sock.sendto("I'll connect!", address)
finally:
sock.close()
if data and data.split(":")[0] == "sam.ip.broadcast":
ip, port = address[0], int(data.split(":")[1])
LOGGER.info("Used the broadcasted IP.")
interface.print_msg("Used the broadcasted IP.")
else:
ip, port = None, None
LOGGER.info("No broadcast received.")
interface.print_msg("No broadcast received.")
return ip, port
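# An illustrative run (assumed values): a server broadcasting the payload
# "sam.ip.broadcast:19113" from 192.168.178.46 to UDP port 10000 makes
# wait_for_server_ip() return ('192.168.178.46', 19113); a payload that does
# not start with "sam.ip.broadcast" yields (None, None) instead.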
class Interface(WebSocketClientProtocol):
def onConnect(self, response):
LOGGER.info("Server connected: %s", response.peer)
interface.on_connect(response)
def onOpen(self):
LOGGER.info("Connection open.")
# self.sendMessage(u"Hello, world!".encode('utf8'))
# TODO Put some kind of authentication here
interface.on_open(COMMANDS)
def sendInput():
try:
val = COMMANDS.get(timeout=0.1)
except Queue.Empty:
val = None
if val == "exit":
interface.on_exit()
self.sendClose()
elif val is not None:
self.sendMessage(val.encode('utf8'))
interface.message_sent(val)
if val == "exit_server":
interface.on_exit()
self.sendClose()
# TODO: Close when the server closes the connection
if self.state is self.STATE_OPEN:
self.factory.reactor.callLater(0.01, sendInput)
self.factory.reactor.callLater(0.01, sendInput)
def onMessage(self, payload, isBinary):
interface.on_message(payload, isBinary)
if isBinary:
LOGGER.info("Binary message received: %d", len(payload))
else:
LOGGER.info("Text message received: %s", payload.decode('utf8'))
def onClose(self, wasClean, code, reason):
LOGGER.warn("The connection has been ended.")
# self.sendClose()
if reason:
LOGGER.info(reason)
interface.on_close(wasClean, code, reason)
reactor.stop()
if __name__ == '__main__':
# TODO: Establish connection separately.
LOGGER.debug("-"*79)
LOGGER.debug("Starting Interface")
LOGGER.debug("-"*79)
interface.start()
COMMANDS = Queue.PriorityQueue()
factory = WebSocketClientFactory()
factory.protocol = Interface
if "--localhost" in sys.argv or "-L" in sys.argv:
ip, port = "127.0.0.1", 19113
LOGGER.info("Used the local IP as requested per commandline-arg.")
interface.print_msg(
"Used the local IP as requested per commandline-arg.")
else:
ip, port = wait_for_server_ip()
if ip:
reactor.connectTCP(ip, port, factory)
reactor.run()
else:
interface.on_close(False, None, "No Server found.")
| mit | -3,016,389,696,744,748,000 | 30.305556 | 76 | 0.600266 | false |
jithinbp/SEELablet-apps | seel_res/GUI/E_MISCELLANEOUS/A_Add-ons/DUST_SENSOR.py | 1 | 4615 | #!/usr/bin/python
from __future__ import print_function
from SEEL_Apps.utilitiesClass import utilitiesClass
from templates import ui_dsm501 as dsm501
import numpy as np
from PyQt4 import QtGui,QtCore
import sys,time
params = {
'image' : 'DSM501.png',
'helpfile': 'http://www.takingspace.org/make-your-own-aircasting-particle-monitor/',
'name':'Dust Sensor\nDSM501',
'hint':'''
Study the concentration of PM2.5 particles over time using a DSM501/PPD42NS sensor. Connect PIN2 of the sensor to ID1, PIN3 to 5V, PIN5 to GND
'''
}
class AppWindow(QtGui.QMainWindow, dsm501.Ui_MainWindow,utilitiesClass):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
self.I=kwargs.get('I',None)
self.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\n',' ') )
self.plot1=self.add2DPlot(self.plot_area)
labelStyle = {'color': 'rgb(255,255,255)', 'font-size': '11pt'}
self.plot1.setLabel('bottom','Time -->', units='S',**labelStyle)
self.plot1.getAxis('left').setLabel('Concentration -->>', color='#ffffff')
self.plot1.setLimits(xMin=0,yMin=0)
self.total_samples = 100
self.acquired_samples = 0
self.timegap = 10 #mS
self.sampling_time = 2000 #mS
self.timer2 = QtCore.QTimer()
self.timer2.timeout.connect(self.updateProgress)
self.timer2.start(500)
self.I.set_state(SQR1=True)
self.curve = self.addCurve(self.plot1,'Concentration')
self.resultsTable.setRowCount(self.total_samples)
self.resultsTable.setColumnCount(3)
self.resultsTable.setHorizontalHeaderLabels(['time','Occupancy %','Concentration mg/m^3'])
self.running=False
self.start_time = time.time()
self.samplingStartTime=time.time()
self.timer = self.newTimer()
#self.running=True
#self.timer.singleShot(0,self.run)
self.X=[]
self.Y=[]
def start(self):
self.X=[]
self.Y=[]
self.running = True
self.timer.singleShot(0,self.run)
def stop(self):
self.running=False
def updateProgress(self):
if not self.running:return
val = 1e5*(time.time()-self.samplingStartTime)/(self.sampling_time)
self.timeProgressBar.setValue(val)
def run(self):
if not self.running:return
self.samplingStartTime = time.time()
self.sampling_time = self.integrationBox.value()*1e3 #convert to mS
self.I.start_one_channel_LA(channel='ID1',channel_mode=1,trigger_mode=0) #every edge
if self.running: self.timer.singleShot(self.sampling_time,self.plotData)
def plotData(self):
if not self.running:return
a,b,c,d,e = self.I.get_LA_initial_states()
if a==self.I.MAX_SAMPLES/4: a = 0
tmp = self.I.fetch_long_data_from_LA(a,1)
print (a,b,c,d,e,tmp)
self.I.dchans[0].load_data(e,tmp)
#print (self.I.dchans[0].timestamps,self.I.dchans[0].initial_state)
stamps = self.I.dchans[0].timestamps
if len(stamps)>2:
if not self.I.dchans[0].initial_state:
stamps = stamps[1:] - stamps[0]
diff = np.diff(stamps)
lows = diff[::2]
highs = diff[1::2]
#print(stamps,sum(lows),sum(highs))
low_occupancy = 100*sum(lows)/stamps[-1] #Occupancy ratio
self.progressBar.setValue(low_occupancy)
concentration = 1.1*pow(low_occupancy,3)-3.8*pow(low_occupancy,2)+520*low_occupancy+0.62; #From the spec sheet curve
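# Worked example for the spec-sheet curve above (added for clarity): an
# occupancy of 10% gives 1.1*1000 - 3.8*100 + 520*10 + 0.62 = 5920.62.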
self.X.append(time.time()-self.start_time)
self.Y.append(concentration)
self.curve.setData(self.X,self.Y)
item = QtGui.QTableWidgetItem();item.setText('%s'%(time.strftime("%H:%M:%S %d-%h")));self.resultsTable.setItem(self.acquired_samples, 0, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
item = QtGui.QTableWidgetItem();item.setText('%.3f'%(low_occupancy));self.resultsTable.setItem(self.acquired_samples, 1, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
item = QtGui.QTableWidgetItem();item.setText('%.3f'%(concentration));self.resultsTable.setItem(self.acquired_samples, 2, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
self.acquired_samples +=1
if self.acquired_samples==self.total_samples:
self.total_samples = self.acquired_samples+10
self.resultsTable.setRowCount(self.total_samples)
if self.running: self.timer.singleShot(self.timegap,self.run)
def saveData(self):
self.saveDataWindow([self.curve],self.plot1)
def closeEvent(self, event):
self.timer.stop()
self.finished=True
self.running = False
def __del__(self):
self.timer.stop()
print ('bye')
if __name__ == "__main__":
from SEEL import interface
app = QtGui.QApplication(sys.argv)
myapp = AppWindow(I=interface.connect())
myapp.show()
sys.exit(app.exec_())
| gpl-3.0 | -4,322,714,388,244,980,700 | 34.5 | 212 | 0.711376 | false |
bjtrost/TCAG-WGS-CNV-workflow | functions.py | 1 | 5027 |
##########
#sort
def sort_list(x,y):
return cmp(x,y)
##########
#code to calculate reciprocal overlap
def reciprocal_overlap(s_1,e_1,s_2,e_2):
if s_2 > e_1 or s_1 > e_2:
return [0,0]
else:
#get the smaller start
if s_2 >=s_1:
o_start = s_2
else:
o_start = s_1
#get the smaller end
if e_2 >= e_1:
o_end = e_1
else:
o_end = e_2
#calculate length of call and length of overlap
s1_len = e_1 - s_1
s2_len = e_2 - s_2
o_len = o_end - o_start
if 100 * o_len / (s1_len * 1.0) < 0 or 100 * o_len / (s2_len * 1.0) < 0:
print "s_1: ", s_1, "e_1: ",e_1, "s_2:", s_2, "e_2:", e_2, "o_start:", o_start, "o_end:", o_end
print "s1_len: ", s1_len, "s2_len: ", s2_len, " o_len: ", o_len, "% s1 length overlap: ", 100 * o_len / (s1_len * 1.0), "% s2 length overlap: ", 100 * o_len / (s2_len * 1.0)
sys.exit(0)
#return the percent overlap
return [100 * o_len / (s1_len * 1.0),100 * o_len / (s2_len * 1.0)]
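#An illustrative call (added example): reciprocal_overlap(0, 100, 50, 150)
#returns [50.0, 50.0] -- the shared region 50-100 covers 50% of the first
#interval and 50% of the second.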
##########
#merge overlapping regions into cluster, note that the start and end of the cluster are trimmed
def cluster(o_data,c_data,ref_start,ref_end):
START = 0
END = 0
clusterString = ""
#for all regions
for data in o_data:
start = data[0]
end = data[1]
region = `start`+"-"+`end`+","
if START == 0 and END == 0:
START = start
END = end
clusterString += region
continue
elif start <= END:
clusterString += region
#now we have a new cluster end
if end > END:
END = end
#region doesn't overlap with the cluster
else:
if START < ref_start:
START = ref_start
if END > ref_end:
END = ref_end
c_data.append([START,END])
#start new cluster
clusterString = region
START = start
END = end
#the last cluster details
if clusterString != "":
if START < ref_start:
START = ref_start
if END > ref_end:
END = ref_end
c_data.append([START,END])
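#An illustrative call (added example): with sorted input,
#cluster([[1, 5], [3, 8], [10, 12]], out, 2, 11) appends [2, 8] and [10, 11]
#to out -- overlapping regions are merged and the cluster ends are trimmed
#to the reference boundaries 2 and 11.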
##########
#merge overlapping regions into cluster, no start and end cluster trimming
def alt_cluster(o_data,c_data):
START = 0
END = 0
clusterString = ""
#for all regions
for data in o_data:
start = data[0]
end = data[1]
region = `start`+"-"+`end`+","
if START == 0 and END == 0:
START = start
END = end
clusterString += region
continue
elif start <= END:
clusterString += region
#now we have a new cluster end
if end > END:
END = end
#region doesn't overlap with the cluster
else:
c_data.append([START,END])
#start new cluster
clusterString = region
START = start
END = end
#the last cluster details
if clusterString != "":
c_data.append([START,END])
##########
#code to calculate overlap
def overlap(s_1,e_1,s_2,e_2):
if s_2 > e_1 or s_1 > e_2:
return [0,0]
else:
#get the smaller start
if s_2 >=s_1:
o_start = s_2
else:
o_start = s_1
#get the smaller end
if e_2 >= e_1:
o_end = e_1
else:
o_end = e_2
#calculate length of call and length of overlap
s1_len = e_1 - s_1
s2_len = e_2 - s_2
o_len = o_end - o_start
if 100 * o_len / (s1_len * 1.0) < 0 or 100 * o_len / (s2_len * 1.0) < 0:
print "s_1: ", s_1, "e_1: ",e_1, "s_2:", s_2, "e_2:", e_2, "o_start:", o_start, "o_end:", o_end
print "s1_len: ", s1_len, "s2_len: ", s2_len, " o_len: ", o_len, "% s1 length overlap: ", 100 * o_len / (s1_len * 1.0), "% s2 length overlap: ", 100 * o_len / (s2_len * 1.0)
sys.exit(0)
#return the overlap boundary (start, end)
return [o_start,o_end]
##########
#find overlap between list of intervals and the region
def find_overlap(intervals,start,end):
boundaries = []
c_boundaries = []
for i in intervals:
ovlp = overlap(i[0],i[1],start,end)
if ovlp == [0,0]:
continue
else:
boundaries.append(ovlp)
boundaries.sort(sort_list)
cluster(boundaries,c_boundaries,start,end)
covered = 0
for c in c_boundaries:
covered += c[1]-c[0]+1
return (covered/((end-start+1)*1.0))*100
##########
#find overlap between list of calls and the region
def find_overlap_calls(calls,start,end):
boundaries = []
c_boundaries = []
for i in calls:
ovlp = overlap(i.get_start(),i.get_end(),start,end)
if ovlp == [0,0]:
continue
else:
boundaries.append(ovlp)
boundaries.sort(sort_list)
cluster(boundaries,c_boundaries,start,end)
covered = 0
for c in c_boundaries:
covered += c[1]-c[0]+1
return (covered/((end-start+1)*1.0))*100
| mit | -8,482,681,232,780,927,000 | 27.089385 | 179 | 0.509051 | false |
kdart/pycopia | setup.py | 1 | 8604 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
DOC = """
Master builder (custom script).
This top-level setup script helps with dealing with all sub-packages at
once. It also provides an installer for a simplify setting up developer mode.
Invoke it like a standard setup.py script. However, any names after the
operation name are taken as sub-package names that are operated on. If no
names are given then all packages are operated on.
Commands:
list -- List available subpackages. These are the names you may
optionally supply.
publish -- Put source distribution on pypi.
build -- Run setuptools build phase on named sub-packages
(or all of them).
install -- Run setuptools install phase.
install_scripts -- Only install scripts (files in bin) with a direct copy.
eggs -- Build distributable egg package.
rpms -- Build RPMs on platforms that support building RPMs.
msis -- Build Microsoft .msi on Windows.
wininst -- Build .exe installer on Windows.
develop -- Developer mode, as defined by setuptools.
develophome -- Developer mode, installing .pth and script files in
user directory.
clean -- Run setuptools clean phase.
squash -- Squash (flatten) all named sub-packages into single tree
in $PYCOPIA_SQUASH, or the user site-directory if no
$PYCOPIA_SQUASH is defined. This also removes the setuptools
runtime dependency.
Most regular setuptools commands also work. They are passed through by
default.
NOTE: The install operation requires that the sudo command be configured for
you.
"""
import sys
import os
import site
try:
import setuptools
except ImportError:
print("Pycopia requires the package named 'setuptools' to be installed.", file=sys.stderr)
try:
WEXITSTATUS = os.WEXITSTATUS
except AttributeError: # running on Windows
def WEXITSTATUS(arg):
return arg
os.environ["HOME"] = os.environ["USERPROFILE"]
RSYNCCHECK = "rsync --version >nul"
SCRIPT_DIR = os.path.join(sys.prefix, "Scripts")
else:
RSYNCCHECK = "rsync --version >/dev/null"
SCRIPT_DIR = "/usr/local/bin"
# sub-packages are listed in dependency order. A subpackage may import modules
# from other subpackages that appear earlier in this list, but not later.
PACKAGES = [
"aid",
"utils",
"core",
"CLI",
"debugger",
"process",
"net",
"SMI",
"mibs",
"SNMP",
"storage",
"audio",
"XML",
"WWW",
"QA",
"vim",
"doc",
"fepy",
]
# Where to put "squashed", or flattened target where all subpackages are
# installed into one directory, and removing "package namespace" support.
PYCOPIA_SQUASH = os.environ.get("PYCOPIA_SQUASH", site.USER_SITE)
# Where top-level scripts will be installed to when install_scripts command is used.
PYCOPIA_BIN = os.environ.get("PYCOPIA_BIN", os.path.join(os.path.expandvars("$HOME"), "bin"))
def _do_commands(name, cmds, root):
# use sudo on Linux and possibly other platforms. On Windows it's
# assumed you're running as Administrator (everybody does it...)
if root and sys.platform not in ("win32", "cli"):
sudo = "sudo "
else:
sudo = ""
cmd = "%s%s setup.py %s" % (sudo, sys.executable, " ".join(cmds))
print("========", name, "==", cmd)
rv = False
os.chdir(name)
try:
rv = WEXITSTATUS(os.system(cmd)) == 0
finally:
os.chdir("..")
print("====================== END", name, "\n")
return rv
def do_eggs(name):
return _do_commands(name, ["bdist_egg"], False)
def do_rpms(name):
return _do_commands(name, ["bdist_rpm", "--python", sys.executable], False)
def do_msis(name):
return _do_commands(name, ["bdist_msi"], False)
def do_wininst(name):
return _do_commands(name, ["bdist_wininst"], False)
# "scripts", those files in bin/, may require some special interpreter
# flags, such as -S. This prevents setuptools from functioning.
# Since Pycopia scripts are written generically there is no reason not to
# install them as-is.
# only works on Linux for now.
def _do_scripts(name, scriptdir, root=False):
if root and sys.platform not in ("win32", "cli"):
sudo = "sudo "
else:
sudo = ""
os.chdir(name)
rv = True
try:
if os.path.isdir("bin"):
if sys.platform == "darwin":
cmd = "%scp -a bin/* %s" % (sudo, scriptdir)
else:
cmd = "%scp -dR --preserve=mode bin/* %s" % (sudo, scriptdir)
print("======== SCRIPTS", name, "==", cmd)
rv = WEXITSTATUS(os.system(cmd)) == 0
finally:
os.chdir("..")
print("====================== END SCRIPTS", name)
return rv
def do_install_scripts(name):
return _do_scripts(name, PYCOPIA_BIN)
def do_develophome(name):
if not os.path.isdir(site.USER_SITE):
os.makedirs(site.USER_SITE)
rv = _do_commands(name, ["develop", "--install-dir", site.USER_SITE, "--script-dir", PYCOPIA_BIN, "-l -N"], False)
rvs = _do_scripts(name, PYCOPIA_BIN)
return rv and rvs
def do_develop(name):
rv = _do_commands(name, ["develop", "--script-dir", PYCOPIA_BIN, "-l -N"], False)
rvs = _do_scripts(name, PYCOPIA_BIN)
return rv and rvs
def do_publish(name):
return _do_commands(name, ['egg_info -RDb ""', "sdist", "register", "upload"], False)
def do_egg_info(name):
return _do_commands(name, ['egg_info'], False)
def do_install(name):
rv1 = _do_commands(name, ["install -O2", "--install-scripts", SCRIPT_DIR], True)
# Don't use the setuptools script wrapper for Pycopia scripts. This
# will overwrite the installed scripts with a direct copy.
rv2 = _do_scripts(name, SCRIPT_DIR, True)
return rv1 and rv2
def do_clean(name):
return _do_commands(name, ["clean"], False)
def do_list(name):
print(name, end=" ")
return True
# "squash" selected sub packages to a single package. Also removes
# setuptools dependency when tarballed.
def do_squash(name):
if not _check_rsync():
print("Squash requires rsync tool to be installed.")
return False
if not os.path.isdir(PYCOPIA_SQUASH):
os.makedirs(PYCOPIA_SQUASH)
os.chdir(name)
uname = os.uname()
bin_dir = os.path.join("build", "lib.%s-%s-%s" % (uname[0].lower(), uname[4], sys.version[:3]))
# e.g: build/lib.linux-x86_64-2.5/pycopia
print("======== SQUASH", name, "to", PYCOPIA_SQUASH)
try:
if WEXITSTATUS(os.system("%s setup.py build" % (sys.executable,))) != 0:
return False
for pydir in ("build/lib", bin_dir):
if os.path.isdir(pydir):
cmd = "rsync -azvu %s/ %s" % (pydir, PYCOPIA_SQUASH)
if WEXITSTATUS(os.system(cmd)) != 0:
return False
finally:
os.chdir("..")
_null_init(PYCOPIA_SQUASH)
print("====================== END", name, "squashed into", PYCOPIA_SQUASH, "\n")
return True
def _null_init(directory):
open(os.path.join(directory, "pycopia", "__init__.py"), "w").close()
def _check_rsync():
return WEXITSTATUS(os.system(RSYNCCHECK)) == 0
def do_generic(name):
pass
def get_svn_revision():
import subprocess
from xml.etree import ElementTree
info = ElementTree.fromstring(subprocess.check_output("svn info --xml".split()))
rev = info.find("entry").attrib["revision"]
return int(rev)
def main(argv):
try:
cmd = argv[1]
except IndexError:
print(DOC)
return 1
# mainrev = get_svn_revision()
# os.environ["PYCOPIA_REVISION"] = str(mainrev)
try:
method = globals()["do_" + cmd]
except KeyError:
def method(name):
return _do_commands(name, [cmd], False)
for name in (argv[2:] or PACKAGES):
if not method(name):
break
print()
return 0
sys.exit(main(sys.argv))
| apache-2.0 | -4,479,907,580,095,837,000 | 31.714829 | 118 | 0.630288 | false |
sa2ajj/DistroTracker | pts/mail/migrations/0001_initial.py | 1 | 7292 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CommandConfirmation'
db.create_table(u'mail_commandconfirmation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('confirmation_key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('commands', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'mail', ['CommandConfirmation'])
# Adding model 'BounceStats'
db.create_table(u'mail_bouncestats', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email_user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.EmailUser'])),
('mails_sent', self.gf('django.db.models.fields.IntegerField')(default=0)),
('mails_bounced', self.gf('django.db.models.fields.IntegerField')(default=0)),
('date', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal(u'mail', ['BounceStats'])
# Adding unique constraint on 'BounceStats', fields ['email_user', 'date']
db.create_unique(u'mail_bouncestats', ['email_user_id', 'date'])
def backwards(self, orm):
# Removing unique constraint on 'BounceStats', fields ['email_user', 'date']
db.delete_unique(u'mail_bouncestats', ['email_user_id', 'date'])
# Deleting model 'CommandConfirmation'
db.delete_table(u'mail_commandconfirmation')
# Deleting model 'BounceStats'
db.delete_table(u'mail_bouncestats')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.emailuser': {
'Meta': {'object_name': 'EmailUser'},
'default_keywords': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Keyword']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_email': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['django_email_accounts.UserEmail']", 'unique': 'True'})
},
u'core.keyword': {
'Meta': {'object_name': 'Keyword'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'django_email_accounts.user': {
'Meta': {'object_name': 'User'},
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'main_email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'django_email_accounts.useremail': {
'Meta': {'object_name': 'UserEmail'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '244'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'emails'", 'null': 'True', 'to': u"orm['django_email_accounts.User']"})
},
u'mail.bouncestats': {
'Meta': {'ordering': "[u'-date']", 'unique_together': "((u'email_user', u'date'),)", 'object_name': 'BounceStats'},
'date': ('django.db.models.fields.DateField', [], {}),
'email_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.EmailUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mails_bounced': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mails_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'mail.commandconfirmation': {
'Meta': {'object_name': 'CommandConfirmation'},
'commands': ('django.db.models.fields.TextField', [], {}),
'confirmation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['mail'] | gpl-2.0 | 8,308,571,386,329,360,000 | 62.417391 | 187 | 0.566786 | false |
jamescooke/water-pouring-python | water/game.py | 1 | 7431 | import copy
from functools import reduce
from .cup import Cup
or_reduction = lambda x, y: x or y
and_reduction = lambda x, y: x and y
class Game(object):
cups = None
parent = None # Game that created this one
children = None
def __init__(self, sizes=None, parent=None):
"""
Set up a game with cups.
Improvements
* Just pass Cups instead of int configs for Cups
* Put default cup config somewhere other than init
>>> g = Game()
>>> len(g.cups)
3
>>> g.parent is None
True
>>> g.children
[]
>>> h = Game(sizes=[(5, 5), (5, 0)], parent=g)
>>> len(h.cups)
2
>>> h.parent is g
True
"""
self.cups = []
if sizes is None:
# Set up cups with default sizes
sizes = [(3, 0), (5, 0), (8, 8)]
for cap, cont in sizes:
self.cups.append(Cup(cap=cap, cont=cont))
# Save a pointer to the parent
self.parent = parent
# Children starts empty
self.children = []
def is_goal(self):
"""
There is a Cup in the Game that has the goal conditions.
>>> g = Game(sizes=[(4, 4)])
>>> g.is_goal()
True
>>> h = Game()
>>> h.is_goal()
False
"""
return reduce(
or_reduction,
[cup.is_goal() for cup in self.cups]
)
def __eq__(self, g):
"""
Games have same number of Cups and all Cups are equal.
:pre: Game has at least one cup.
>>> g = Game(sizes=[(3, 0), (5, 5)])
1. A game with fewer or more cups is not equal, even if the common cups are equal.
>>> g == Game(sizes=[(3, 0)])
False
>>> g == Game(sizes=[(3, 0), (5, 5), (1, 1)])
False
2. Same num of cups means checking cups.
>>> g == Game(sizes=[(3, 1), (5, 4)])
False
3. Equal is equal.
>>> g == Game(sizes=[(3, 0), (5, 5)])
True
"""
return (
len(self.cups) == len(g.cups)
and reduce(
and_reduction,
[cup == g.cups[pos] for pos, cup in enumerate(self.cups)]
)
)
def net_has_game(self, g):
"""
Game's network of games contains this game.
"""
return self.top_parent().has_game(g)
def top_parent(self):
"""
Returns the top parent for a game, the parent state that has no parent.
"""
return self if self.parent is None else self.parent.top_parent()
def has_game(self, g):
"""
Passed Game ``g`` is in this Game's tree of Games
>>> from unittest.mock import Mock
>>> g = Game(sizes=[(3, 0), (5, 5)])
1. If the game being seached for matches, then True
>>> g.has_game(Game(sizes=[(3, 0), (5, 5)]))
True
2. If game does not match and no child games, False
>>> g.has_game(Game(sizes=[(4, 0), (5, 5)]))
False
3. If game being search for does not match, sub games are searched
>>> s_a = Mock(name='sub Game A')
>>> s_a.has_game.return_value = False
>>> s_b = Mock(name='sub Game B')
>>> s_b.has_game.return_value = True
>>> g.children.append(s_a)
>>> g.children.append(s_b)
>>> g.has_game(Game(sizes=[(4, 0), (5, 5)]))
True
"""
return (
self == g
or (
len(self.children) > 0
and reduce(
or_reduction,
[game.has_game(g) for game in self.children]
)
)
)
def make_game(self, c_a, c_b):
"""
Create a new game state by pouring Cup at ``c_a`` into Cup at ``c_b``.
New game will have its parent set as this Game.
1. Does not care if the pour is a 'good pour', just returns the new
game. If there are no contents to pour, or no space in the
destination, then the new game will be in the same state and will
be removed by the de-duplication search.
>>> g = Game(sizes=[(3, 0), (5, 5)])
>>> h = g.make_game(0, 1)
>>> g == h
True
>>> h.parent is g
True
2. When the pour is good, then the cups' states are adjusted
accordingly. Original parent Game's cups stay the same.
>>> g = Game(sizes=[(3, 3), (5, 5), (8, 0)])
>>> h = g.make_game(0, 2)
>>> expected = Game(sizes=[(3, 0), (5, 5), (8, 3)])
>>> h == expected
True
>>> h.parent is g
True
>>> g.cups[0].contents
3
"""
new_game = copy.deepcopy(self)
new_game.parent = self
(new_game.cups[c_a],
new_game.cups[c_b]) = new_game.cups[c_a].pour_into(new_game.cups[c_b])
return new_game
def make_children(self):
"""
Do all the pours, check that new Games don't exist in the network and
for those that are new add them to this Game's children.
1. If there's just one cup, does nothing
>>> g = Game(sizes=[(4, 4)])
>>> g.make_children()
0
>>> g.children
[]
2. If a pour option creates a Game that's already in the network then
it's not added to the children.
>>> g = Game(sizes=[(3, 0), (5, 5)])
>>> g.make_children()
1
>>> expected = Game(sizes=[(3, 3), (5, 2)])
>>> g.children[0] == expected
True
3. If the Game generated by pouring is already in the network, then no
new games are generated. In this example, the only option from Game
g is to pour the 5 cup into the 3 cup, but this is the same state
as the parent h, so is ignored.
>>> h = Game(sizes=[(3, 3), (5, 2)])
>>> g = Game(sizes=[(3, 0), (5, 5)])
>>> h.children = [g]
>>> g.parent = h
>>> g.make_children()
0
"""
for c_a in range(len(self.cups)):
for c_b in range(len(self.cups)):
if c_b == c_a:
continue
new_game = self.make_game(c_a, c_b)
if not self.net_has_game(new_game):
self.children.append(new_game)
return len(self.children)
def is_solvable(self):
"""
Main function. Could be written as a one line boolean, but keeping it
like this for readability. See unittests for coverage.
"""
if self.is_goal():
self.print_trace()
return True
if self.make_children() == 0:
return False
return self.solvable_child()
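    # An illustrative expectation (assuming Cup.is_goal marks the classic target
    # amount): with the default cups [(3, 0), (5, 0), (8, 8)] this is the
    # standard water-pouring puzzle, so Game().is_solvable() should return True
    # after printing the trace of states that reaches the goal.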
def solvable_child(self):
"""
Recursively walks list of Game's children looking for a solvable one.
Wishing python was haskell ._. See unittests for coverage.
"""
for child in self.children:
if child.is_solvable():
return True
return False
def print_trace(self):
"""
Run up the stack of Games printing each one so that a history can be
outputted when success is found. See unittests for coverage.
"""
if self.parent is not None:
self.parent.print_trace()
print(self.cups)
| gpl-2.0 | -2,943,814,514,436,023,000 | 28.027344 | 79 | 0.491724 | false |
EndPointCorp/appctl | appctl/test/appctl_support/test_proc_runner.py | 1 | 4832 | #!/usr/bin/env python3
import os
import unittest
import gc
import weakref
import rospy
from appctl_support import ProcRunner
import collections
PKG = 'appctl'
NAME = 'test_proc_runner'
TEST_CMD = ['sleep', '5']
GRACE_DELAY = 0.5 # seconds
# http://stackoverflow.com/questions/568271/
def check_pid(pid):
""" Check For the existence of a unix pid. """
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
class MockSpawnHandler(object):
def __init__(self):
self.spawns = 0
def __call__(self):
self.spawns += 1
class MockBrokenSpawnHandler(object):
def __init__(self):
return
def __call__(self):
raise Exception("I was born to fail.")
class MockInvalidSpawnHandler(object):
def __init__(self):
assert not isinstance(self, collections.Callable)
class TestProcRunner(unittest.TestCase):
def setUp(self):
self.runner = ProcRunner(TEST_CMD)
def tearDown(self):
self.runner.shutdown()
if self.runner.is_alive():
self.runner.join()
def test_startup(self):
self.runner.start()
self.assertTrue(self.runner.is_alive(), 'Runner must be alive after start()')
def test_shutdown(self):
self.runner.start()
rospy.sleep(GRACE_DELAY)
pid = self.runner.proc.pid
self.assertIsNotNone(pid, 'Must get a pid after start()')
self.runner.shutdown()
self.runner.join()
self.assertFalse(check_pid(pid), 'Process must not respond to sig0 after shutdown()')
def test_kill_proc(self):
self.runner.start()
rospy.sleep(GRACE_DELAY)
pid = self.runner.proc.pid
self.assertTrue(check_pid(pid), 'Must have a pid to start with')
self.runner._kill_proc()
self.assertFalse(check_pid(pid), 'Process must be dead')
def test_respawn(self):
self.runner.start()
rospy.sleep(GRACE_DELAY)
first_pid = self.runner.proc.pid
self.runner._kill_proc()
rospy.sleep(self.runner.respawn_delay + GRACE_DELAY)
second_pid = self.runner.proc.pid
self.assertNotEqual(first_pid, second_pid, 'Must have a different pid after respawn')
self.assertTrue(check_pid(second_pid), 'Must be alive after respawn')
def test_not_respawn(self):
"""
Test the respawn flag.
respawn is True by default; this test case aims at not
respawning the managed process.
"""
self.runner = ProcRunner(TEST_CMD, respawn=False)
self.runner.start()
rospy.sleep(GRACE_DELAY)
pid = self.runner.proc.pid
self.runner._kill_proc()
rospy.sleep(self.runner.respawn_delay + GRACE_DELAY)
# after the kill, we want there to be no process left
self.assertFalse(check_pid(pid), 'Must be dead, was killed.')
# can't run more tests on the subprocess instance; it has been set to None
self.assertEqual(self.runner.proc, None)
def test_spawn_handler(self):
"""
Spawn handlers must be run on each spawn.
"""
mock_handler = MockSpawnHandler()
self.runner.add_spawn_hook(mock_handler)
self.runner.start()
rospy.sleep(GRACE_DELAY)
self.assertEqual(mock_handler.spawns, 1, 'Invalid number of spawn handler calls so far')
self.runner._kill_proc()
rospy.sleep(self.runner.respawn_delay + GRACE_DELAY)
self.assertEqual(mock_handler.spawns, 2, 'Invalid number of spawn handler calls so far')
def test_broken_spawn_handler(self):
"""
Broken spawn handlers must not wreck the thread.
"""
mock_handler = MockBrokenSpawnHandler()
self.runner.add_spawn_hook(mock_handler)
self.test_spawn_handler()
def test_invalid_spawn_handler(self):
"""
Invalid spawn handlers must raise a TypeError when added.
"""
invalid_hook = MockInvalidSpawnHandler()
with self.assertRaises(TypeError):
self.runner.add_spawn_hook(invalid_hook)
class TestProcRunnerCleanup(unittest.TestCase):
def test_cleanup(self):
runner = ProcRunner(TEST_CMD)
runner.start()
rospy.sleep(GRACE_DELAY)
runner_ref = weakref.ref(runner)
proc_ref = weakref.ref(runner.proc)
runner.shutdown()
rospy.sleep(GRACE_DELAY)
gc.collect()
self.assertIsNone(proc_ref(), 'proc must be freed on shutdown')
runner = None
gc.collect()
self.assertIsNone(runner_ref(), 'runner must be freed post-delete')
if __name__ == '__main__':
import rostest
rostest.rosrun(PKG, NAME, TestProcRunner)
rostest.rosrun(PKG, NAME, TestProcRunnerCleanup)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| apache-2.0 | 2,074,059,391,645,228,800 | 27.934132 | 96 | 0.630381 | false |
openpassword/blimey | tests/integration/openpassword/agile_keychain/test_item_manager.py | 1 | 4311 | import os
import shutil
import time
import json
from nose.tools import raises
from blimey.agile_keychain._manager._item_manager import ItemManager
from blimey.agile_keychain.data_source import AgileKeychainItem
from blimey.exceptions import ItemNotFoundException
class ItemManagerTest:
_fixture_path = os.path.join('tests', 'fixtures', 'test.agilekeychain')
_temporary_path = os.path.join('tests', 'fixtures', 'temp.agilekeychain')
_password = "somepassword"
def it_gets_items(self):
item_manager = ItemManager(self._fixture_path)
item = item_manager.get_by_id('5F7210FD2F3F460692B7083C60854A02')
assert item['uuid'] == "5F7210FD2F3F460692B7083C60854A02"
@raises(ItemNotFoundException)
def it_throws_if_requested_item_is_not_found(self):
item_manager = ItemManager(self._fixture_path)
item_manager.get_by_id('notfoundid')
# 1Password 3 changes deleted item type to system.Tombstone
# Refer to the item in the fixture for an example of this
@raises(ItemNotFoundException)
def it_throws_if_requested_item_is_of_type_tombstone(self):
item_manager = ItemManager(self._fixture_path)
item_manager.get_by_id('320BE3D1B490458F82314E1A2B99552A')
# 1Password 4+ replaces the item contents with "{}"
# Refer to the item in the fixture for an example of this
@raises(ItemNotFoundException)
def it_throws_if_requested_item_is_empty(self):
item_manager = ItemManager(self._fixture_path)
item_manager.get_by_id('CAF7A781A71E44CFBB63F9356B46A0C9')
def it_gets_all_non_null_and_non_tombstoned_items(self):
item_manager = ItemManager(self._fixture_path)
items = item_manager.get_all_items()
expected_item_uuids = [
'2E21D652E0754BD59F6B94B0323D0142',
'4A3D784D115F4279BDFCE46D0A162D57',
'5F7210FD2F3F460692B7083C60854A02',
'6371E49FEFA042EDB335421459E5B29F',
'9315F5EA8DCC4CB7BE09155DB7FCD1ED',
'97019BEBCF9E402F8F0C033474B1B85D',
'9E7673CCBB5B4AC9A7A8838835CB7E83',
'B851D6E3232842B0858BC10968632A9C',
'D05009E62D7D401CB8ACF2FE6981C031',
'ECE79F0A4BDF44CE8E7986897D84D1EC'
]
assert len(items) == len(expected_item_uuids)
for item in items:
assert item['uuid'] in expected_item_uuids
def it_saves_items(self):
self._init_default_data_dir()
item_manager = ItemManager(self._temporary_path)
item = self._get_item()
item_manager.save_item(item)
retrieved_item = item_manager.get_by_id(item['uuid'])
assert item['uuid'] == retrieved_item['uuid']
def it_sets_update_time_on_save(self):
self._init_default_data_dir()
item_manager = ItemManager(self._temporary_path)
item = self._get_item()
item_manager.save_item(item)
retrieved_item = item_manager.get_by_id(item['uuid'])
assert item['updatedAt'] > 0
assert item['updatedAt'] <= time.time()
def it_updates_contents_file_when_items_are_saved(self):
self._init_default_data_dir()
item_manager = ItemManager(self._temporary_path)
item = self._get_item()
item_manager.save_item(item)
with open(os.path.join(self._temporary_path, 'data', 'default', 'contents.js')) as file:
contents = json.load(file)
assert contents[0][0] == item['uuid']
assert contents[0][1] == item['typeName']
assert contents[0][2] == item['title']
assert contents[0][3] == item['locationKey']
assert contents[0][4] == item['folderUuid']
assert contents[0][5] == 0 # No idea what this value is
assert contents[0][6] == 'Y' # Corresponds to 'trashed'
def _get_item(self):
return AgileKeychainItem({
'uuid': 'abcdef',
'typeName': 'typename',
'title': 'Title',
'locationKey': 'locationkey',
'folderUuid': 'undefined',
'trashed': True
})
def _init_default_data_dir(self):
os.makedirs(os.path.join(self._temporary_path, 'data', 'default'))
self.teardown = self._path_clean
def _path_clean(self):
shutil.rmtree(self._temporary_path)
| mit | -2,044,052,607,398,452,500 | 34.336066 | 96 | 0.644398 | false |
LukeJFernandez/stitch-flex | app/util/validatefeeds.py | 1 | 1341 | """ Utility module for validating camera feeds. """
from __future__ import absolute_import, division, print_function
from .textformatter import TextFormatter
from .feed import CameraFeed
def view_valid_camera_feeds():
"""
Shows all valid feed views, one after another. The next feed shows when the current is closed.
"""
valid_feeds = []
TextFormatter.print_heading("Checking for valid feeds.")
try:
for index in xrange(1, 5):
if check_feed(index):
valid_feeds.append(index)
except NameError:
for index in range(1, 5):
if check_feed(index):
valid_feeds.append(index)
if len(valid_feeds) > 0:
TextFormatter.print_heading("Valid Feeds:")
for feed in valid_feeds:
show_camera_feed(feed)
else:
TextFormatter.print_info("No Valid Feeds")
def check_feed(feed_index):
"""
Checks if the provided index points to a valid camera feed.
"""
camera_feed = CameraFeed(feed_index)
return camera_feed.is_valid()
def show_camera_feed(feed_index):
"""
Shows the camera feed pointed to by the provided feed_index.
"""
camera_feed = CameraFeed(feed_index)
# Show the uncorrected feed.
camera_feed.show(False)
if __name__ == "__main__":
view_valid_camera_feeds()
| mit | 2,553,727,568,890,088,000 | 28.152174 | 98 | 0.634601 | false |
arichar6/veusz | veusz/setting/setting.py | 1 | 57973 | # Copyright (C) 2005 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
"""Module for holding setting values.
e.g.
s = Int('foo', 5)
s.get()
s.set(42)
s.fromUIText('42')
"""
from __future__ import division
import re
import sys
import numpy as N
from ..compat import cbasestr, cstr, crepr
from .. import qtall as qt4
from . import controls
from .settingdb import settingdb, uilocale, ui_floattostring, ui_stringtofloat
from .reference import ReferenceBase, Reference
from .. import utils
from .. import datasets
class OnModified(qt4.QObject):
"""onmodified is emitted from an object contained in each setting."""
onModified = qt4.pyqtSignal()
class Setting(object):
"""A class to store a value with a particular type."""
# differentiate widgets, settings and setting
nodetype = 'setting'
typename = 'setting'
# various items in class hierarchy
iswidget = False
issetting = True
issettings = False
def __init__(self, name, value, descr='', usertext='',
formatting=False, hidden=False):
"""Initialise the values.
name: setting name
value: default value and initial value
descr: description of the setting
usertext: name of setting for user
formatting: whether setting applies to formatting
hidden: hide widget from user
"""
self.readonly = False
self.parent = None
self.name = name
self.descr = descr
self.usertext = usertext
self.formatting = formatting
self.hidden = hidden
self.default = value
self.onmodified = OnModified()
self._val = self._ref = None
# calls the set function for the val property
self.val = value
def _copyHelper(self, before, after, optional):
"""Help copy an object.
before are arguments before val
after are arguments after val
optional are optional keyword arguments
"""
val = self._ref if self._ref else self._val
args = (self.name,) + before + (val,) + after
opt = optional.copy()
opt['descr'] = self.descr
opt['usertext'] = self.usertext
opt['formatting'] = self.formatting
opt['hidden'] = self.hidden
obj = self.__class__(*args, **opt)
obj.readonly = self.readonly
obj.default = self.default
return obj
def copy(self):
"""Make a setting which has its values copied from this one.
This needs to be overridden if the constructor changes
"""
return self._copyHelper((), (), {})
def get(self):
"""Get the value."""
if self._ref:
return self._ref.resolve(self).get()
else:
return self._val
def set(self, v):
"""Set the value."""
if isinstance(v, ReferenceBase):
self._val = None
self._ref = v
else:
self._val = self.normalize(v)
self._ref = None
self.onmodified.onModified.emit()
val = property(
get, set, None,
'Get or modify the value of the setting')
def isReference(self):
"""Is this a setting a reference to another object."""
return bool(self._ref)
def getReference(self):
"""Return the reference object. Raise ValueError if not a reference"""
if self._ref:
return self._ref
else:
raise ValueError("Setting is not a reference")
def getStylesheetLink(self):
"""Get text that this setting should default to linked to the
stylesheet."""
path = []
obj = self
while not obj.parent.iswidget:
path.insert(0, obj.name)
obj = obj.parent
path = ['', 'StyleSheet', obj.parent.typename] + path
return '/'.join(path)
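        # Illustrative result (hypothetical names): a setting 'color' inside a
        # 'Line' settings group of an 'xy' widget would resolve to the path
        # '/StyleSheet/xy/Line/color'.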
def linkToStylesheet(self):
"""Make this setting link to stylesheet setting, if possible."""
self.set( Reference(self.getStylesheetLink()) )
@property
def path(self):
"""Return full path of setting."""
path = []
obj = self
while obj is not None:
# logic easier to understand here
# do not add settings name for settings of widget
if not obj.iswidget and obj.parent.iswidget:
pass
else:
if obj.name == '/':
path.insert(0, '')
else:
path.insert(0, obj.name)
obj = obj.parent
return '/'.join(path)
def toUIText(self):
"""Convert the type to text to show in UI."""
return ""
def fromUIText(self, text):
"""Convert text from UI into type for setting.
Raises utils.InvalidType if cannot convert."""
return None
def saveText(self, saveall, rootname = ''):
"""Return text to restore the value of this setting."""
if (saveall or not self.isDefault()) and not self.readonly:
if self._ref:
return "SetToReference('%s%s', %s)\n" % (
rootname, self.name, crepr(self._ref.value))
else:
return "Set('%s%s', %s)\n" % (
rootname, self.name, crepr(self.val) )
else:
return ''
def setOnModified(self, fn):
"""Set the function to be called on modification (passing True)."""
self.onmodified.onModified.connect(fn)
if self._ref:
# tell references to notify us if they are modified
self._ref.setOnModified(self, fn)
def removeOnModified(self, fn):
"""Remove the function from the list of function to be called."""
self.onmodified.onModified.disconnect(fn)
def newDefault(self, value):
"""Update the default and the value."""
self.default = value
self.val = value
def isDefault(self):
"""Is the current value a default?
This also returns true if it is linked to the appropriate stylesheet
"""
if self._ref and isinstance(self.default, ReferenceBase):
return self._ref.value == self.default.value
else:
return self._val == self.default
def isDefaultLink(self):
"""Is this a link to the default stylesheet value."""
return self._ref and self._ref.value == self.getStylesheetLink()
def setSilent(self, val):
"""Set the setting, without propagating modified flags.
This shouldn't often be used, as it defeats the automatic update mechanism.
Used for temporary modifications."""
self._ref = None
self._val = self.normalize(val)
def normalize(self, val):
"""Convert external value to normalized form for storing
Raises a utils.InvalidType if this is not possible."""
return val
def makeControl(self, *args):
"""Make a qt control for editing the setting.
The control emits settingValueChanged() when the setting has
changed value."""
return None
def getDocument(self):
"""Return document."""
p = self.parent
while p:
if p.iswidget:
return p.document
p = p.parent
return None
def getWidget(self):
"""Return associated widget."""
w = self.parent
while not w.iswidget:
w = w.parent
return w
def safeEvalHelper(self, text):
"""Evaluate an expression, catching naughtiness."""
try:
comp = self.getDocument().evaluate.compileCheckedExpression(
text)
if comp is None:
raise utils.InvalidType
return float( eval(comp, self.getDocument().evaluate.context) )
except:
raise utils.InvalidType
# forward setting to another setting
class SettingBackwardCompat(Setting):
"""Forward setting requests to another setting.
This is used for backward-compatibility.
"""
typename = 'backward-compat'
def __init__(self, name, newrelpath, val, translatefn=None,
**args):
"""Point this setting to another.
newrelpath is a path relative to this setting's parent
"""
self.translatefn = translatefn
args['hidden'] = True
Setting.__init__(self, name, val, **args)
self.relpath = newrelpath
def getForward(self):
"""Get setting this setting forwards to."""
doc = self.getDocument()
return doc.resolveSettingPath(self.parent, self.relpath)
def normalize(self, val):
if self.parent is not None:
return self.getForward().normalize(val)
def toUIText(self):
return self.getForward().toUIText()
def fromUIText(self, val):
return self.getForward().fromUIText(val)
def set(self, val):
if self.parent is not None and not isinstance(val, ReferenceBase):
if self.translatefn:
val = self.translatefn(val)
self.getForward().set(val)
def isDefault(self):
return self.getForward().isDefault()
def get(self):
return self.getForward().get()
def copy(self):
return self._copyHelper(
(self.relpath,), (), {'translatefn': self.translatefn})
def makeControl(self, *args):
return None
def saveText(self, saveall, rootname = ''):
return ''
def linkToStylesheet(self):
"""Do nothing for backward compatibility settings."""
pass
# Store strings
class Str(Setting):
"""String setting."""
typename = 'str'
def normalize(self, val):
if isinstance(val, cbasestr):
return val
raise utils.InvalidType
def toUIText(self):
return self.val
def fromUIText(self, text):
return text
def makeControl(self, *args):
return controls.String(self, *args)
class Notes(Str):
"""String for making notes."""
typename = 'str-notes'
def makeControl(self, *args):
return controls.Notes(self, *args)
# Store bools
class Bool(Setting):
"""Bool setting."""
typename = 'bool'
def normalize(self, val):
if type(val) in (bool, int):
return bool(val)
raise utils.InvalidType
def toUIText(self):
return 'True' if self.val else 'False'
def fromUIText(self, text):
t = text.strip().lower()
if t in ('true', '1', 't', 'y', 'yes'):
return True
elif t in ('false', '0', 'f', 'n', 'no'):
return False
else:
raise utils.InvalidType
def makeControl(self, *args):
return controls.Bool(self, *args)
# Storing integers
class Int(Setting):
"""Integer settings."""
typename = 'int'
def __init__(self, name, value, minval=-1000000, maxval=1000000,
**args):
"""Initialise the values.
minval is minimum possible value of setting
maxval is maximum possible value of setting
"""
self.minval = minval
self.maxval = maxval
Setting.__init__(self, name, value, **args)
def copy(self):
"""Make a setting which has its values copied from this one.
This needs to be overridden if the constructor changes
"""
return self._copyHelper((), (), {'minval': self.minval,
'maxval': self.maxval})
def normalize(self, val):
if isinstance(val, int):
if val >= self.minval and val <= self.maxval:
return val
else:
raise utils.InvalidType('Out of range allowed')
raise utils.InvalidType
def toUIText(self):
return uilocale.toString(self.val)
def fromUIText(self, text):
i, ok = uilocale.toLongLong(text)
if not ok:
raise ValueError
if i >= self.minval and i <= self.maxval:
return i
else:
raise utils.InvalidType('Out of range allowed')
def makeControl(self, *args):
return controls.Int(self, *args)
def _finiteRangeFloat(f, minval=-1e300, maxval=1e300):
"""Return a finite float in range or raise exception otherwise."""
f = float(f)
if not N.isfinite(f):
raise utils.InvalidType('Finite values only allowed')
if f < minval or f > maxval:
raise utils.InvalidType('Out of range allowed')
return f
# for storing floats
class Float(Setting):
"""Float settings."""
typename = 'float'
def __init__(self, name, value, minval=-1e200, maxval=1e200,
**args):
"""Initialise the values.
minval is minimum possible value of setting
maxval is maximum possible value of setting
"""
self.minval = minval
self.maxval = maxval
Setting.__init__(self, name, value, **args)
def copy(self):
"""Make a setting which has its values copied from this one.
This needs to be overridden if the constructor changes
"""
return self._copyHelper((), (), {'minval': self.minval,
'maxval': self.maxval})
def normalize(self, val):
if isinstance(val, int) or isinstance(val, float):
return _finiteRangeFloat(
val, minval=self.minval, maxval=self.maxval)
raise utils.InvalidType
def toUIText(self):
return ui_floattostring(self.val)
def fromUIText(self, text):
try:
f = ui_stringtofloat(text)
except ValueError:
# try to evaluate
f = self.safeEvalHelper(text)
return self.normalize(f)
def makeControl(self, *args):
return controls.Edit(self, *args)
class FloatOrAuto(Float):
"""Save a float or text auto."""
typename = 'float-or-auto'
def normalize(self, val):
if type(val) in (int, float):
return _finiteRangeFloat(val, minval=self.minval, maxval=self.maxval)
elif isinstance(val, cbasestr) and val.strip().lower() == 'auto':
return 'Auto'
else:
raise utils.InvalidType
def toUIText(self):
if isinstance(self.val, cbasestr) and self.val.lower() == 'auto':
return 'Auto'
else:
return ui_floattostring(self.val)
def fromUIText(self, text):
if text.strip().lower() == 'auto':
return 'Auto'
else:
return Float.fromUIText(self, text)
def makeControl(self, *args):
return controls.Choice(self, True, ['Auto'], *args)
class IntOrAuto(Setting):
"""Save an int or text auto."""
typename = 'int-or-auto'
def normalize(self, val):
if isinstance(val, int):
return val
elif isinstance(val, cbasestr) and val.strip().lower() == 'auto':
return 'Auto'
else:
raise utils.InvalidType
def toUIText(self):
if isinstance(self.val, cbasestr) and self.val.lower() == 'auto':
return 'Auto'
else:
return uilocale.toString(self.val)
def fromUIText(self, text):
if text.strip().lower() == 'auto':
return 'Auto'
else:
i, ok = uilocale.toLongLong(text)
if not ok:
raise utils.InvalidType
return i
def makeControl(self, *args):
return controls.Choice(self, True, ['Auto'], *args)
# these are functions used by the distance setting below.
# they don't work as class methods
def _distPhys(match, painter, mult):
"""Convert a physical unit measure in multiples of points."""
return painter.pixperpt * mult * float(match.group(1))
def _idistval(val, unit):
"""Convert value to text, dropping zeros and . points on right."""
return ("%.3f" % val).rstrip('0').rstrip('.') + unit
def _distInvPhys(pixdist, painter, mult, unit):
"""Convert number of pixels into physical distance."""
return _idistval(pixdist / (mult*painter.pixperpt), unit)
def _distPerc(match, painter):
"""Convert from a percentage of maxdim."""
return painter.maxdim * 0.01 * float(match.group(1))
def _distInvPerc(pixdist, painter):
"""Convert pixel distance into percentage."""
return _idistval(pixdist * 100. / painter.maxdim, '%')
def _distFrac(match, painter):
"""Convert from a fraction a/b of maxdim."""
try:
return painter.maxdim * float(match.group(1))/float(match.group(4))
except ZeroDivisionError:
return 0.
def _distRatio(match, painter):
"""Convert from a simple 0.xx ratio of maxdim."""
# if it's greater than 1 then assume it's a point measurement
if float(match.group(1)) > 1.:
return _distPhys(match, painter, 1)
return painter.maxdim * float(match.group(1))
# regular expression to match distances
distre_expr = r'''^
[ ]* # optional whitespace
(\.?[0-9]+|[0-9]+\.[0-9]*) # a floating point number
[ ]* # whitespace
(cm|pt|mm|inch|in|"|%|| # ( unit, no unit,
(?P<slash>/) ) # or / )
(?(slash)[ ]* # if it was a slash, match any whitespace
(\.?[0-9]+|[0-9]+\.[0-9]*)) # and match following fp number
[ ]* # optional whitespace
$'''
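# Illustrative sketch (editor's addition, not part of the original module):
# examples of strings the expression above is meant to accept or reject.
# The helper is unused; it only documents the distance syntax by example.
def _example_distance_strings():
    """Return (accepted, rejected) example distance strings."""
    accepted = ['1cm', '2.5pt', '10 %', '1/3', '0.25', '1.5 in']
    rejected = ['cm1', 'one inch', '']
    return accepted, rejected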
class Distance(Setting):
"""A veusz distance measure, e.g. 1pt or 3%."""
typename = 'distance'
# match a distance
distre = re.compile(distre_expr, re.VERBOSE)
# functions to convert from unit values to points
unit_func = {
'cm': lambda match, painter:
_distPhys(match, painter, 720/25.4),
'pt': lambda match, painter:
_distPhys(match, painter, 1.),
'mm': lambda match, painter:
_distPhys(match, painter, 72/25.4),
'in': lambda match, painter:
_distPhys(match, painter, 72.),
'inch': lambda match, painter:
_distPhys(match, painter, 72.),
'"': lambda match, painter:
_distPhys(match, painter, 72.),
'%': _distPerc,
'/': _distFrac,
'': _distRatio
}
# inverse functions for converting points to units
inv_unit_func = {
'cm': lambda match, painter:
_distInvPhys(match, painter, 720/25.4, 'cm'),
'pt': lambda match, painter:
_distInvPhys(match, painter, 1., 'pt'),
'mm': lambda match, painter:
_distInvPhys(match, painter, 72/25.4, 'mm'),
'in': lambda match, painter:
_distInvPhys(match, painter, 72., 'in'),
'inch': lambda match, painter:
_distInvPhys(match, painter, 72., 'in'),
'"': lambda match, painter:
_distInvPhys(match, painter, 72., 'in'),
'%': _distInvPerc,
'/': _distInvPerc,
'': _distInvPerc
}
@classmethod
def isDist(kls, dist):
"""Is the text a valid distance measure?"""
return kls.distre.match(dist) is not None
def normalize(self, val):
if self.distre.match(val) is not None:
return val
else:
raise utils.InvalidType
def toUIText(self):
# convert decimal point to display locale
return self.val.replace('.', uilocale.decimalPoint())
def fromUIText(self, text):
# convert decimal point from display locale
text = text.replace(uilocale.decimalPoint(), '.')
if self.isDist(text):
return text
else:
raise utils.InvalidType
def makeControl(self, *args):
return controls.Distance(self, *args)
@classmethod
def convertDistance(kls, painter, dist):
'''Convert a distance to plotter units.
dist: eg 0.1 (fraction), 10% (percentage), 1/10 (fraction),
10pt, 1cm, 20mm, 1inch, 1in, 1" (size)
painter: painter to get metrics to convert physical sizes
'''
# match distance against expression
m = kls.distre.match(dist)
if m is not None:
# lookup function to call to do conversion
func = kls.unit_func[m.group(2)]
return func(m, painter)
# none of the regexps match
raise ValueError( "Cannot convert distance in form '%s'" %
dist )
def convert(self, painter):
"""Convert this setting's distance as above"""
return self.convertDistance(painter, self.val)
def convertPts(self, painter):
"""Get the distance in points."""
return self.convert(painter) / painter.pixperpt
def convertInverse(self, distpix, painter):
"""Convert distance in pixels into units of this distance.
"""
m = self.distre.match(self.val)
if m is not None:
# if it matches convert back
inversefn = self.inv_unit_func[m.group(2)]
else:
# otherwise force unit
inversefn = self.inv_unit_func['cm']
# do inverse mapping
return inversefn(distpix, painter)
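# Illustrative usage sketch (editor's assumption, not part of the original
# module): `painter` stands for any object exposing the `pixperpt` and
# `maxdim` attributes that the conversion helpers above rely on.
def _example_convert_distance(painter):
    """Show how Distance resolves different unit forms to pixels."""
    assert Distance.isDist('2.5cm') and not Distance.isDist('two cm')
    points = Distance.convertDistance(painter, '72pt')    # 72 * painter.pixperpt
    percent = Distance.convertDistance(painter, '50%')    # 0.5 * painter.maxdim
    fraction = Distance.convertDistance(painter, '1/4')   # 0.25 * painter.maxdim
    return points, percent, fraction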
class DistancePt(Distance):
"""For a distance in points."""
def makeControl(self, *args):
return controls.DistancePt(self, *args)
class DistancePhysical(Distance):
"""For physical distances (no fractional)."""
def isDist(self, val):
m = self.distre.match(val)
if m:
# disallow non-physical distances
if m.group(2) not in ('/', '', '%'):
return True
return False
def makeControl(self, *args):
return controls.Distance(self, *args, physical=True)
class DistanceOrAuto(Distance):
"""A distance or the value Auto"""
typename = 'distance-or-auto'
distre = re.compile( distre_expr + r'|^Auto$', re.VERBOSE )
def isAuto(self):
return self.val == 'Auto'
def makeControl(self, *args):
return controls.Distance(self, allowauto=True, *args)
class Choice(Setting):
"""One out of a list of strings."""
# maybe should be implemented as a dict to speed up checks
typename = 'choice'
def __init__(self, name, vallist, val, **args):
"""Setting val must be in vallist.
descriptions is an optional addon to put a tooltip on each item
in the control.
"""
assert type(vallist) in (list, tuple)
self.vallist = vallist
self.descriptions = args.get('descriptions', None)
if self.descriptions:
del args['descriptions']
Setting.__init__(self, name, val, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((self.vallist,), (), {})
def normalize(self, val):
if val in self.vallist:
return val
else:
raise utils.InvalidType
def toUIText(self):
return self.val
def fromUIText(self, text):
if text in self.vallist:
return text
else:
raise utils.InvalidType
def makeControl(self, *args):
argsv = {'descriptions': self.descriptions}
return controls.Choice(self, False, self.vallist, *args, **argsv)
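# Minimal sketch (editor's assumption): how a Choice setting restricts its
# value to a fixed list. The names used here are purely illustrative.
def _example_choice_setting():
    s = Choice('linestyle', ['solid', 'dashed', 'dotted'], 'solid')
    ok = s.normalize('dashed')        # in the list -> returned unchanged
    try:
        s.normalize('zigzag')         # not in the list -> InvalidType
        rejected = False
    except utils.InvalidType:
        rejected = True
    return ok, rejected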
class ChoiceOrMore(Setting):
"""One out of a list of strings, or anything else."""
# maybe should be implemented as a dict to speed up checks
typename = 'choice-or-more'
def __init__(self, name, vallist, val, **args):
"""Setting has val must be in vallist.
descriptions is an optional addon to put a tooltip on each item
in the control
"""
self.vallist = vallist
self.descriptions = args.get('descriptions', None)
if self.descriptions:
del args['descriptions']
Setting.__init__(self, name, val, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((self.vallist,), (), {})
def normalize(self, val):
return val
def toUIText(self):
return self.val
def fromUIText(self, text):
return text
def makeControl(self, *args):
argsv = {'descriptions': self.descriptions}
return controls.Choice(self, True, self.vallist, *args, **argsv)
class FloatChoice(ChoiceOrMore):
"""A numeric value, which can also be chosen from the list of values."""
typename = 'float-choice'
def normalize(self, val):
if isinstance(val, int) or isinstance(val, float):
return _finiteRangeFloat(val)
raise utils.InvalidType
def toUIText(self):
return ui_floattostring(self.val)
def fromUIText(self, text):
try:
f = ui_stringtofloat(text)
except ValueError:
# try to evaluate
f = self.safeEvalHelper(text)
return self.normalize(f)
def makeControl(self, *args):
argsv = {'descriptions': self.descriptions}
strings = [ui_floattostring(x) for x in self.vallist]
return controls.Choice(self, True, strings, *args, **argsv)
class FloatDict(Setting):
"""A dictionary, taking floats as values."""
typename = 'float-dict'
def normalize(self, val):
if type(val) != dict:
raise utils.InvalidType
for v in val.values():
if type(v) not in (float, int):
raise utils.InvalidType
# return copy
return dict(val)
def toUIText(self):
text = ['%s = %s' % (k, ui_floattostring(self.val[k]))
for k in sorted(self.val)]
return '\n'.join(text)
def fromUIText(self, text):
"""Do conversion from list of a=X\n values."""
out = {}
# break up into lines
for l in text.split('\n'):
l = l.strip()
if len(l) == 0:
continue
# break up using =
p = l.strip().split('=')
if len(p) != 2:
raise utils.InvalidType
try:
v = ui_stringtofloat(p[1])
except ValueError:
raise utils.InvalidType
out[ p[0].strip() ] = v
return out
def makeControl(self, *args):
return controls.MultiLine(self, *args)
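# Sketch (editor's addition): the UI text form FloatDict.fromUIText expects,
# one "name = value" pair per line. The setting name is illustrative only.
def _example_floatdict_roundtrip():
    fd = FloatDict('weights', {})
    return fd.fromUIText('alpha = 1.5\nbeta = 2')   # -> {'alpha': 1.5, 'beta': 2.0}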
class FloatList(Setting):
"""A list of float values."""
typename = 'float-list'
def normalize(self, val):
if type(val) not in (list, tuple):
raise utils.InvalidType
# horribly slow test for invalid entries
out = []
for i in val:
if type(i) not in (float, int):
raise utils.InvalidType
else:
out.append( float(i) )
return out
def toUIText(self):
"""Make a string a, b, c."""
# can't use the comma for splitting if used as a decimal point
join = ', '
if uilocale.decimalPoint() == ',':
join = '; '
return join.join( [ui_floattostring(x) for x in self.val] )
def fromUIText(self, text):
"""Convert from a, b, c or a b c."""
# don't use commas if it is the decimal separator
splitre = r'[\t\n, ]+'
if uilocale.decimalPoint() == ',':
splitre = r'[\t\n; ]+'
out = []
for x in re.split(splitre, text.strip()):
if x:
try:
out.append( ui_stringtofloat(x) )
except ValueError:
out.append( self.safeEvalHelper(x) )
return out
def makeControl(self, *args):
return controls.String(self, *args)
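# Sketch (editor's addition): FloatList accepts comma/space separated numbers
# in the UI. This example assumes a locale whose decimal separator is '.'.
def _example_floatlist_fromtext():
    fl = FloatList('ticks', [])
    return fl.fromUIText('1, 2.5, 10')   # -> [1.0, 2.5, 10.0]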
class WidgetPath(Str):
"""A setting holding a path to a widget. This is checked for validity."""
typename = 'widget-path'
def __init__(self, name, val, relativetoparent=True,
allowedwidgets = None,
**args):
"""Initialise the setting.
The widget is located relative to
parent if relativetoparent is True, otherwise this widget.
If allowedwidgets is not None, only those widgets types in the list are
allowed by this setting.
"""
Str.__init__(self, name, val, **args)
self.relativetoparent = relativetoparent
self.allowedwidgets = allowedwidgets
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (),
{'relativetoparent': self.relativetoparent,
'allowedwidgets': self.allowedwidgets})
def getReferredWidget(self, val = None):
"""Get the widget referred to. We double-check here to make sure
it's the one.
Returns None if setting is blank
utils.InvalidType is raised if there's a problem
"""
# this is a bit of a hack, so we don't have to pass a value
# for the setting (which we need to do from normalize)
if val is None:
val = self.val
if val == '':
return None
# find the widget associated with this setting
widget = self
while not widget.iswidget:
widget = widget.parent
# usually makes sense to give paths relative to a parent of a widget
if self.relativetoparent:
widget = widget.parent
# resolve the text to a widget
try:
widget = widget.document.resolveWidgetPath(widget, val)
except ValueError:
raise utils.InvalidType
# check the widget against the list of allowed types if given
if self.allowedwidgets is not None:
allowed = False
for c in self.allowedwidgets:
if isinstance(widget, c):
allowed = True
if not allowed:
raise utils.InvalidType
return widget
class Dataset(Str):
"""A setting to choose from the possible datasets."""
typename = 'dataset'
def __init__(self, name, val, dimensions=1, datatype='numeric',
**args):
"""
dimensions is the number of dimensions the dataset needs
"""
self.dimensions = dimensions
self.datatype = datatype
Setting.__init__(self, name, val, **args)
def copy(self):
"""Make a setting which has its values copied from this one."""
return self._copyHelper((), (),
{'dimensions': self.dimensions,
'datatype': self.datatype})
def makeControl(self, *args):
"""Allow user to choose between the datasets."""
return controls.Dataset(self, self.getDocument(), self.dimensions,
self.datatype, *args)
def getData(self, doc):
"""Return a list of datasets entered."""
d = doc.data.get(self.val)
if ( d is not None and
d.datatype == self.datatype and
(d.dimensions == self.dimensions or self.dimensions == 'all') ):
return d
class Strings(Setting):
"""A multiple set of strings."""
typename = 'str-multi'
def normalize(self, val):
"""Takes a tuple/list of strings:
('ds1','ds2'...)
"""
if isinstance(val, cbasestr):
return (val, )
if type(val) not in (list, tuple):
raise utils.InvalidType
# check each entry in the list is appropriate
for ds in val:
if not isinstance(ds, cbasestr):
raise utils.InvalidType
return tuple(val)
def makeControl(self, *args):
"""Allow user to choose between the datasets."""
return controls.Strings(self, self.getDocument(), *args)
class Datasets(Setting):
"""A setting to choose one or more of the possible datasets."""
typename = 'dataset-multi'
def __init__(self, name, val, dimensions=1, datatype='numeric',
**args):
"""
dimensions is the number of dimensions the dataset needs
"""
Setting.__init__(self, name, val, **args)
self.dimensions = dimensions
self.datatype = datatype
def normalize(self, val):
"""Takes a tuple/list of strings:
('ds1','ds2'...)
"""
if isinstance(val, cbasestr):
return (val, )
if type(val) not in (list, tuple):
raise utils.InvalidType
# check each entry in the list is appropriate
for ds in val:
if not isinstance(ds, cbasestr):
raise utils.InvalidType
return tuple(val)
def copy(self):
"""Make a setting which has its values copied from this one."""
return self._copyHelper((), (),
{'dimensions': self.dimensions,
'datatype': self.datatype})
def makeControl(self, *args):
"""Allow user to choose between the datasets."""
return controls.Datasets(self, self.getDocument(), self.dimensions,
self.datatype, *args)
def getData(self, doc):
"""Return a list of datasets entered."""
out = []
for name in self.val:
d = doc.data.get(name)
if ( d is not None and
d.datatype == self.datatype and
d.dimensions == self.dimensions ):
out.append(d)
return out
class DatasetExtended(Dataset):
"""Choose a dataset, give an expression or specify a list of float
values."""
typename = 'dataset-extended'
def normalize(self, val):
"""Check is a string (dataset name or expression) or a list of
floats (numbers).
"""
if isinstance(val, cbasestr):
return val
elif self.dimensions == 1:
# list of numbers only allowed for 1d datasets
if isinstance(val, float) or isinstance(val, int):
return [val]
else:
try:
return [float(x) for x in val]
except (TypeError, ValueError):
pass
raise utils.InvalidType
def toUIText(self):
if isinstance(self.val, cbasestr):
return self.val
else:
# join based on , or ; depending on decimal point
join = ', '
if uilocale.decimalPoint() == ',':
join = '; '
return join.join( [ ui_floattostring(x)
for x in self.val ] )
def fromUIText(self, text):
"""Convert from text."""
text = text.strip()
if self.dimensions > 1:
return text
# split based on , or ; depending on decimal point
splitre = r'[\t\n, ]+'
if uilocale.decimalPoint() == ',':
splitre = r'[\t\n; ]+'
out = []
for x in re.split(splitre, text):
if x:
try:
out.append( ui_stringtofloat(x) )
except ValueError:
# fail conversion, so exit with text
return text
return out
def getFloatArray(self, doc):
"""Get a numpy of values or None."""
if isinstance(self.val, cbasestr):
ds = doc.evaluate.evalDatasetExpression(
self.val, datatype=self.datatype, dimensions=self.dimensions)
if ds:
# get numpy array of values
return N.array(ds.data)
else:
# list of values
return N.array(self.val)
return None
def isDataset(self, doc):
"""Is this setting a dataset?"""
return (isinstance(self.val, cbasestr) and
doc.data.get(self.val))
def isEmpty(self):
"""Is this unset?"""
return self.val == [] or self.val == ''
def getData(self, doc):
"""Return veusz dataset"""
if isinstance(self.val, cbasestr):
return doc.evaluate.evalDatasetExpression(
self.val, datatype=self.datatype, dimensions=self.dimensions)
else:
return datasets.valsToDataset(
self.val, self.datatype, self.dimensions)
class DatasetOrStr(Dataset):
"""Choose a dataset or enter a string.
Non string datasets are converted to string arrays using this.
"""
typename = 'dataset-or-str'
def __init__(self, name, val, **args):
Dataset.__init__(self, name, val, datatype='text', **args)
def getData(self, doc, checknull=False):
"""Return either a list of strings, a single item list.
If checknull then None is returned if blank
"""
if doc:
ds = doc.data.get(self.val)
if ds and ds.dimensions == 1:
return doc.formatValsWithDatatypeToText(
ds.data, ds.displaytype)
if checknull and not self.val:
return None
else:
return [cstr(self.val)]
def makeControl(self, *args):
return controls.DatasetOrString(self, self.getDocument(), *args)
def copy(self):
"""Make a setting which has its values copied from this one."""
return self._copyHelper((), (), {})
class Color(ChoiceOrMore):
"""A color setting."""
typename = 'color'
def __init__(self, name, value, **args):
"""Initialise the color setting with the given name, default
and description."""
ChoiceOrMore.__init__(self, name, [], value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def color(self, painter, dataindex=0):
"""Return QColor from color.
painter is a Veusz Painter
dataindex is index for automatically getting colors for subdatasets.
"""
if self.val.lower() == 'auto':
# lookup widget
w = self.parent
while w is not None and not w.iswidget:
w = w.parent
if w is None:
return qt4.QColor()
# get automatic color
return painter.docColor(w.autoColor(painter, dataindex=dataindex))
else:
return painter.docColor(self.val)
def makeControl(self, *args):
return controls.Color(self, *args)
class FillStyle(Choice):
"""A setting for the different fill styles provided by Qt."""
typename = 'fill-style'
_fillstyles = [ 'solid', 'horizontal', 'vertical', 'cross',
'forward diagonals', 'backward diagonals',
'diagonal cross',
'94% dense', '88% dense', '63% dense', '50% dense',
'37% dense', '12% dense', '6% dense' ]
_fillcnvt = { 'solid': qt4.Qt.SolidPattern,
'horizontal': qt4.Qt.HorPattern,
'vertical': qt4.Qt.VerPattern,
'cross': qt4.Qt.CrossPattern,
'forward diagonals': qt4.Qt.FDiagPattern,
'backward diagonals': qt4.Qt.BDiagPattern,
'diagonal cross': qt4.Qt.DiagCrossPattern,
'94% dense': qt4.Qt.Dense1Pattern,
'88% dense': qt4.Qt.Dense2Pattern,
'63% dense': qt4.Qt.Dense3Pattern,
'50% dense': qt4.Qt.Dense4Pattern,
'37% dense': qt4.Qt.Dense5Pattern,
'12% dense': qt4.Qt.Dense6Pattern,
'6% dense': qt4.Qt.Dense7Pattern }
controls.FillStyle._fills = _fillstyles
controls.FillStyle._fillcnvt = _fillcnvt
def __init__(self, name, value, **args):
Choice.__init__(self, name, self._fillstyles, value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def qtStyle(self):
"""Return Qt ID of fill."""
return self._fillcnvt[self.val]
def makeControl(self, *args):
return controls.FillStyle(self, *args)
class LineStyle(Choice):
"""A setting choosing a particular line style."""
typename = 'line-style'
# list of allowed line styles
_linestyles = ['solid', 'dashed', 'dotted',
'dash-dot', 'dash-dot-dot', 'dotted-fine',
'dashed-fine', 'dash-dot-fine',
'dot1', 'dot2', 'dot3', 'dot4',
'dash1', 'dash2', 'dash3', 'dash4', 'dash5',
'dashdot1', 'dashdot2', 'dashdot3']
# convert from line styles to Qt constants and a custom pattern (if any)
_linecnvt = { 'solid': (qt4.Qt.SolidLine, None),
'dashed': (qt4.Qt.DashLine, None),
'dotted': (qt4.Qt.DotLine, None),
'dash-dot': (qt4.Qt.DashDotLine, None),
'dash-dot-dot': (qt4.Qt.DashDotDotLine, None),
'dotted-fine': (qt4.Qt.CustomDashLine, [2, 4]),
'dashed-fine': (qt4.Qt.CustomDashLine, [8, 4]),
'dash-dot-fine': (qt4.Qt.CustomDashLine, [8, 4, 2, 4]),
'dot1': (qt4.Qt.CustomDashLine, [0.1, 2]),
'dot2': (qt4.Qt.CustomDashLine, [0.1, 4]),
'dot3': (qt4.Qt.CustomDashLine, [0.1, 6]),
'dot4': (qt4.Qt.CustomDashLine, [0.1, 8]),
'dash1': (qt4.Qt.CustomDashLine, [4, 4]),
'dash2': (qt4.Qt.CustomDashLine, [4, 8]),
'dash3': (qt4.Qt.CustomDashLine, [8, 8]),
'dash4': (qt4.Qt.CustomDashLine, [16, 8]),
'dash5': (qt4.Qt.CustomDashLine, [16, 16]),
'dashdot1': (qt4.Qt.CustomDashLine, [0.1, 4, 4, 4]),
'dashdot2': (qt4.Qt.CustomDashLine, [0.1, 4, 8, 4]),
'dashdot3': (qt4.Qt.CustomDashLine, [0.1, 2, 4, 2]),
}
controls.LineStyle._lines = _linestyles
def __init__(self, name, default, **args):
Choice.__init__(self, name, self._linestyles, default, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def qtStyle(self):
"""Get Qt ID of chosen line style."""
return self._linecnvt[self.val]
def makeControl(self, *args):
return controls.LineStyle(self, *args)
class Axis(Str):
"""A setting to hold the name of an axis.
direction is 'horizontal', 'vertical' or 'both'
"""
typename = 'axis'
def __init__(self, name, val, direction, **args):
"""Initialise using the document, so we can get the axes later.
direction is horizontal or vertical to specify the type of axis to
show
"""
Setting.__init__(self, name, val, **args)
self.direction = direction
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (self.direction,), {})
def makeControl(self, *args):
"""Allows user to choose an axis or enter a name."""
return controls.Axis(self, self.getDocument(), self.direction, *args)
class WidgetChoice(Str):
"""Hold the name of a child widget."""
typename = 'widget-choice'
def __init__(self, name, val, widgettypes={}, **args):
"""Choose widgets from (named) type given."""
Setting.__init__(self, name, val, **args)
self.widgettypes = widgettypes
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (),
{'widgettypes': self.widgettypes})
def buildWidgetList(self, level, widget, outdict):
"""A recursive helper to build up a list of possible widgets.
This iterates over widget's children, and adds widgets as tuples
to outdict using outdict[name] = (widget, level)
Lower level widgets of the same name outweigh other widgets further down
the tree.
"""
for child in widget.children:
if child.typename in self.widgettypes:
if (child.name not in outdict) or (outdict[child.name][1]>level):
outdict[child.name] = (child, level)
else:
self.buildWidgetList(level+1, child, outdict)
def getWidgetList(self):
"""Return a dict of valid widget names and the corresponding objects."""
# find widget which contains setting
widget = self.parent
while not widget.iswidget and widget is not None:
widget = widget.parent
# get widget's parent
if widget is not None:
widget = widget.parent
# get list of widgets from recursive find
widgets = {}
if widget is not None:
self.buildWidgetList(0, widget, widgets)
# turn (object, level) pairs into object
outdict = {}
for name, val in widgets.items():
outdict[name] = val[0]
return outdict
def findWidget(self):
"""Find the image corresponding to this setting.
Returns Image object if succeeds or None if fails
"""
widgets = self.getWidgetList()
try:
return widgets[self.get()]
except KeyError:
return None
def makeControl(self, *args):
"""Allows user to choose an image widget or enter a name."""
return controls.WidgetChoice(self, self.getDocument(), *args)
class Marker(Choice):
"""Choose a marker type from one allowable."""
typename = 'marker'
def __init__(self, name, value, **args):
Choice.__init__(self, name, utils.MarkerCodes, value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def makeControl(self, *args):
return controls.Marker(self, *args)
class Arrow(Choice):
"""Choose an arrow type from one allowable."""
typename = 'arrow'
def __init__(self, name, value, **args):
Choice.__init__(self, name, utils.ArrowCodes, value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def makeControl(self, *args):
return controls.Arrow(self, *args)
class LineSet(Setting):
"""A setting which corresponds to a set of lines.
"""
typename='line-multi'
def normalize(self, val):
"""Takes a tuple/list of tuples:
[('dotted', '1pt', 'color', False), ...]
Each entry is (style, width, color, hide).
"""
if type(val) not in (list, tuple):
raise utils.InvalidType
# check each entry in the list is appropriate
for line in val:
try:
style, width, color, hide = line
except ValueError:
raise utils.InvalidType
if ( not isinstance(color, cbasestr) or
not Distance.isDist(width) or
style not in LineStyle._linestyles or
type(hide) not in (int, bool) ):
raise utils.InvalidType
return val
def makeControl(self, *args):
"""Make specialised lineset control."""
return controls.LineSet(self, *args)
def makePen(self, painter, row):
"""Make a pen for the painter using row.
If row is outside of range, then cycle
"""
if len(self.val) == 0:
return qt4.QPen(qt4.Qt.NoPen)
else:
row = row % len(self.val)
v = self.val[row]
style, width, color, hide = v
width = Distance.convertDistance(painter, width)
style, dashpattern = LineStyle._linecnvt[style]
col = painter.docColor(color)
pen = qt4.QPen(col, width, style)
if dashpattern:
pen.setDashPattern(dashpattern)
if hide:
pen.setStyle(qt4.Qt.NoPen)
return pen
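# Sketch (editor's addition): an example of the value format LineSet expects,
# matching the (style, width, color, hide) tuples unpacked in normalize() and
# makePen() above. The constant is illustrative and unused by the module.
_EXAMPLE_LINESET_VALUE = [
    ('solid', '1pt', 'black', False),
    ('dashed', '0.5pt', 'red', False),
]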
class FillSet(Setting):
"""A setting which corresponds to a set of fills.
Each fill is stored as a tuple of brush properties.
"""
typename = 'fill-multi'
def normalize(self, val):
"""Takes a tuple/list of tuples:
[('solid', 'color', False), ...]
Each entry is (style, color, hide) or the extended form
(style, color, hide, transparency, linewidth, linestyle,
spacing, backcolor, backtrans, backhide).
"""
if type(val) not in (list, tuple):
raise utils.InvalidType
# check each entry in the list is appropriate
for fill in val:
try:
style, color, hide = fill[:3]
except ValueError:
raise utils.InvalidType
if ( not isinstance(color, cbasestr) or
style not in utils.extfillstyles or
type(hide) not in (int, bool) or
len(fill) not in (3, 10) ):
raise utils.InvalidType
return val
def makeControl(self, *args):
"""Make specialised lineset control."""
return controls.FillSet(self, *args)
def returnBrushExtended(self, row):
"""Return BrushExtended for the row."""
from . import collections
s = collections.BrushExtended('tempbrush')
s.parent = self
if len(self.val) == 0:
s.hide = True
else:
v = self.val[row % len(self.val)]
s.style = v[0]
s.color = v[1]
s.hide = v[2]
if len(v) == 10:
(s.transparency, s.linewidth, s.linestyle,
s.patternspacing, s.backcolor,
s.backtransparency, s.backhide) = v[3:]
return s
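# Sketch (editor's addition): valid FillSet values are 3-tuples, or 10-tuples
# carrying the extended brush properties listed in normalize() above. The
# constant is illustrative and unused by the module.
_EXAMPLE_FILLSET_VALUE = [
    ('solid', 'black', False),
]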
class Filename(Str):
"""Represents a filename setting."""
typename = 'filename'
def makeControl(self, *args):
return controls.Filename(self, 'file', *args)
def normalize(self, val):
if sys.platform == 'win32':
val = val.replace('\\', '/')
return val
class ImageFilename(Filename):
"""Represents an image filename setting."""
typename = 'filename-image'
def makeControl(self, *args):
return controls.Filename(self, 'image', *args)
class FontFamily(Str):
"""Represents a font family."""
typename = 'font-family'
def makeControl(self, *args):
"""Make a special font combobox."""
return controls.FontFamily(self, *args)
class ErrorStyle(Choice):
"""Error bar style.
The allowed values are below in _errorstyles.
"""
typename = 'errorbar-style'
_errorstyles = (
'none',
'bar', 'barends', 'box', 'diamond', 'curve',
'barbox', 'bardiamond', 'barcurve',
'boxfill', 'diamondfill', 'curvefill',
'fillvert', 'fillhorz',
'linevert', 'linehorz',
'linevertbar', 'linehorzbar'
)
controls.ErrorStyle._errorstyles = _errorstyles
def __init__(self, name, value, **args):
Choice.__init__(self, name, self._errorstyles, value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def makeControl(self, *args):
return controls.ErrorStyle(self, *args)
class AlignHorz(Choice):
"""Alignment horizontally."""
typename = 'align-horz'
def __init__(self, name, value, **args):
Choice.__init__(self, name, ['left', 'centre', 'right'], value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
class AlignVert(Choice):
"""Alignment vertically."""
typename = 'align-vert'
def __init__(self, name, value, **args):
Choice.__init__(self, name, ['top', 'centre', 'bottom'], value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
class AlignHorzWManual(Choice):
"""Alignment horizontally."""
typename = 'align-horz-+manual'
def __init__(self, name, value, **args):
Choice.__init__(self, name, ['left', 'centre', 'right', 'manual'],
value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
class AlignVertWManual(Choice):
"""Alignment vertically."""
typename = 'align-vert-+manual'
def __init__(self, name, value, **args):
Choice.__init__(self, name, ['top', 'centre', 'bottom', 'manual'],
value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
# Bool which shows/hides other settings
class BoolSwitch(Bool):
"""Bool switching setting."""
def __init__(self, name, value, settingsfalse=[], settingstrue=[],
**args):
"""Enables/disables a set of settings if True or False
settingsfalse and settingstrue are lists of names of settings
which are hidden/shown to user
"""
self.sfalse = settingsfalse
self.strue = settingstrue
Bool.__init__(self, name, value, **args)
def makeControl(self, *args):
return controls.BoolSwitch(self, *args)
def copy(self):
return self._copyHelper((), (), {'settingsfalse': self.sfalse,
'settingstrue': self.strue})
class ChoiceSwitch(Choice):
"""Show or hide other settings based on the choice given here."""
def __init__(self, name, vallist, value, settingstrue=[], settingsfalse=[],
showfn=lambda val: True, **args):
"""Enables/disables a set of settings if True or False
settingsfalse and settingstrue are lists of names of settings
which are hidden/shown to user depending on showfn(val)."""
self.sfalse = settingsfalse
self.strue = settingstrue
self.showfn = showfn
Choice.__init__(self, name, vallist, value, **args)
def makeControl(self, *args):
return controls.ChoiceSwitch(self, False, self.vallist, *args)
def copy(self):
return self._copyHelper((self.vallist,), (),
{'settingsfalse': self.sfalse,
'settingstrue': self.strue,
'showfn': self.showfn})
class FillStyleExtended(ChoiceSwitch):
"""A setting for the different fill styles provided by Qt."""
typename = 'fill-style-ext'
_strue = ( 'linewidth', 'linestyle', 'patternspacing',
'backcolor', 'backtransparency', 'backhide' )
@staticmethod
def _ishatch(val):
"""Is this a hatching fill?"""
return not ( val == 'solid' or val.find('dense') >= 0 )
def __init__(self, name, value, **args):
ChoiceSwitch.__init__(self, name, utils.extfillstyles, value,
settingstrue=self._strue, settingsfalse=(),
showfn=self._ishatch,
**args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def makeControl(self, *args):
return controls.FillStyleExtended(self, *args)
class RotateInterval(Choice):
'''Rotate a label with intervals given.'''
def __init__(self, name, val, **args):
Choice.__init__(self, name,
('-180', '-135', '-90', '-45',
'0', '45', '90', '135', '180'),
val, **args)
def normalize(self, val):
"""Store rotate angle."""
# backward compatibility with rotate option
# False: angle 0
# True: angle 90
if val == False:
val = '0'
elif val == True:
val = '90'
return Choice.normalize(self, val)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
class Colormap(Str):
"""A setting to set the color map used in an image.
This is based on a Str rather than Choice as the list might
change later.
"""
def makeControl(self, *args):
return controls.Colormap(self, self.getDocument(), *args)
class AxisBound(FloatOrAuto):
"""Axis bound - either numeric, Auto or date."""
typename = 'axis-bound'
def makeControl(self, *args):
return controls.AxisBound(self, *args)
def toUIText(self):
"""Convert to text, taking into account mode of Axis.
Displays datetimes in date format if used
"""
try:
mode = self.parent.mode
except AttributeError:
mode = None
v = self.val
if ( not isinstance(v, cbasestr) and v is not None and
mode == 'datetime' ):
return utils.dateFloatToString(v)
return FloatOrAuto.toUIText(self)
def fromUIText(self, txt):
"""Convert from text, allowing datetimes."""
v = utils.dateStringToDate(txt)
if N.isfinite(v):
return v
else:
return FloatOrAuto.fromUIText(self, txt)
| gpl-2.0 | 3,422,023,577,333,544,000 | 29.448004 | 81 | 0.560709 | false |
johntellsall/shotglass | ex-treemap/tree.py | 1 | 1087 | import os
import sys
import matplotlib
matplotlib.use('svg')
import matplotlib.pyplot as plt
import pandas as pd
import squarify
DULL_DIRECTORIES = set(['.git'])
def count_lines(path):
return sum(1 for line in open(path))
# TODO make configurable
def walk_tree(topdir):
for root, dirs, files in os.walk(topdir):
dirs[:] = list(set(dirs) - DULL_DIRECTORIES)
for file in files:
yield os.path.join(root, file)
# TODO make configurable
def is_source(path):
return os.path.splitext(path)[-1] == '.py'
project_dir = sys.argv[1]
source_paths = list(filter(is_source, walk_tree(project_dir)))
line_counts = list(map(count_lines, source_paths))
names = list(map(os.path.basename, source_paths))
print(names)
df = pd.DataFrame({
'paths': source_paths,
'names': names})
df['line_counts'] = line_counts
# TODO zap items where line_count==0
print(line_counts)
squarify.plot(
sizes=df['line_counts'],
label=df['names'], alpha=.8)
plt.axis('off')
title = os.path.basename(project_dir).title()
plt.title(title)
plt.savefig('tree.png')
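# Usage sketch (editor's assumption): run against a project directory, e.g.
#   python tree.py ~/src/someproject
# which counts lines in the project's .py files and writes the treemap to
# tree.png in the current directory.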
| mit | 9,175,307,799,650,051,000 | 21.183673 | 62 | 0.681693 | false |
KungFuLucky7/stock_portfolios_server | venv/bin/rst2odt_prepstyles.py | 1 | 1738 | #!/Users/terrywong/stock_portfolios_server/venv/bin/python
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <[email protected]>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in list(el.attrib):  # copy keys so attributes can be deleted safely below
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
| gpl-2.0 | 5,343,714,636,521,661,000 | 24.940299 | 75 | 0.632911 | false |
unicefuganda/mics | survey/tests/ussd/test_basics_ussd.py | 1 | 67691 | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import datetime
from random import randint
import urllib2
from django.core.cache import cache
from django.test import Client
from mock import patch, MagicMock
from rapidsms.contrib.locations.models import Location, LocationType
from survey.investigator_configs import COUNTRY_PHONE_CODE
from survey.models import Investigator, Backend, Household, HouseholdHead, Batch, HouseholdMemberGroup, NumericalAnswer, \
Question, TextAnswer, QuestionOption, MultiChoiceAnswer, AnswerRule, BatchQuestionOrder, GroupCondition, Survey, \
RandomHouseHoldSelection, EnumerationArea
from survey.models.households import HouseholdMember
from survey.tests.ussd.ussd_base_test import USSDBaseTest, FakeRequest
from survey.ussd.ussd import USSD
from survey.ussd.ussd_survey import USSDSurvey
class USSDTest(USSDBaseTest):
def setUp(self):
self.client = Client()
self.ussd_params = {
'transactionId': "123344" + str(randint(1, 99999)),
'transactionTime': datetime.datetime.now().strftime('%Y%m%dT%H:%M:%S'),
'msisdn': '2567765' + str(randint(1, 99999)),
'ussdServiceCode': '130',
'ussdRequestString': '',
'response': "false"
}
self.open_survey = Survey.objects.create(name="open survey", description="open survey", has_sampling=True)
city = LocationType.objects.create(name="City")
self.mbarara = Location.objects.create(name="Mbarara", type=city)
self.ea = EnumerationArea.objects.create(name="EA2", survey=self.open_survey)
self.ea.locations.add(self.mbarara)
self.investigator = Investigator.objects.create(name="investigator name",
mobile_number=self.ussd_params['msisdn'].replace(
COUNTRY_PHONE_CODE, '', 1),
ea=self.ea,
backend=Backend.objects.create(name='something'))
self.household = Household.objects.create(investigator=self.investigator, ea=self.investigator.ea,
survey=self.open_survey, uid=0)
self.household_head = HouseholdHead.objects.create(household=self.household, surname="Surname",
date_of_birth=datetime.date(1980, 9, 1))
self.household_1 = Household.objects.create(investigator=self.investigator, ea=self.investigator.ea,
survey=self.open_survey, uid=1)
self.household_head_1 = HouseholdHead.objects.create(household=self.household_1,
surname="Name " + str(randint(1, 9999)),
date_of_birth=datetime.date(1980, 9, 1))
self.household_member = HouseholdMember.objects.create(surname="Name 2", household=self.household_1,
date_of_birth=datetime.date(2000, 2, 3))
self.batch = Batch.objects.create(order=1, name="batch test", survey=self.open_survey)
self.batch.open_for_location(self.investigator.location)
self.member_group = HouseholdMemberGroup.objects.create(name="5 to 6 years", order=0)
def test_knows_can_resume_survey_if_investigator_has_open_batches_or_is_registering_households(self):
ussd_survey = USSDSurvey(self.investigator, FakeRequest())
self.assertTrue(ussd_survey.can_resume_survey(is_registering=False))
self.assertTrue(ussd_survey.can_resume_survey(is_registering=True))
self.batch.close_for_location(self.investigator.location)
self.assertFalse(ussd_survey.can_resume_survey(is_registering=False))
self.assertTrue(ussd_survey.can_resume_survey(is_registering=True))
def test_list_household_members_after_selecting_household(self):
household_member1 = HouseholdMember.objects.create(household=self.household, surname="abcd", male=False,
date_of_birth='1989-02-02')
household_member2 = HouseholdMember.objects.create(household=self.household, surname="xyz", male=False,
date_of_birth='1989-02-02')
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household()
response_string = "responseString=%s&action=request" % USSD.MESSAGES['RETAKE_SURVEY']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
members_list = "%s\n1: %s - (respondent)*\n2: %s*\n3: %s*" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname, household_member1.surname,
household_member2.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_goes_back_to_household_list_if_investigator_selects_household_and_chooses_not_to_retake_survey(self):
HouseholdMember.objects.filter(householdhead=None).delete()
head_group = HouseholdMemberGroup.objects.create(name="General", order=1)
condition = GroupCondition.objects.create(value='HEAD', attribute="GENERAL", condition="EQUALS")
condition.groups.add(head_group)
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=head_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
self.batch.open_for_location(self.investigator.location)
self.investigator.member_answered(question_1, self.household_head, 1, self.batch)
self.investigator.member_answered(question_1, self.household_head_1, 1, self.batch)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household('2')
response_string = "responseString=%s&action=request" % USSD.MESSAGES['RETAKE_SURVEY']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('2')
households_list = "%s\n1: HH-%s-%s*\n2: HH-%s-%s*" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
response_string = "responseString=%s&action=request" % households_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
response_string = "responseString=%s&action=request" % USSD.MESSAGES['RETAKE_SURVEY']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
members_list = "%s\n1: %s - (respondent)*" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_goes_back_to_household_list_if_investigator_selects_household_with_no_members_and_chooses_retake(self):
Household.objects.all().delete()
HouseholdMember.objects.all().delete()
self.household1_without_members = Household.objects.create(investigator=self.investigator,
ea=self.investigator.ea,
survey=self.open_survey, uid=1)
self.household2_without_members = Household.objects.create(investigator=self.investigator,
ea=self.investigator.ea,
survey=self.open_survey, uid=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household('2')
response_string = "responseString=%s&action=request" % USSD.MESSAGES['RETAKE_SURVEY']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
households_list = "%s\n1: HH-%s*\n2: HH-%s*" % (USSD.MESSAGES['HOUSEHOLD_LIST'], 0, 0)
response_string = "responseString=%s&action=request" % households_list
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_restart_survey_option_yes_with_household_set(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = cache.get(session_string)
session['HOUSEHOLD'] = self.household_1
cache.set(session_string, session)
members_list = "%s\n1: %s - (respondent)*\n2: %s*" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head_1.surname, self.household_member.surname)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '1'
ussd_survey.restart_survey()
self.assertEqual(USSDSurvey.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(members_list, ussd_survey.responseString)
def test_ussd_restart_survey_option_yes_with_household_member_set(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = cache.get(session_string)
session['HOUSEHOLD'] = self.household_1
session['HOUSEHOLD_MEMBER'] = self.household_member
cache.set(session_string, session)
restart_message = "Thank you. You have completed this household. Would you like to retake this household?\n1: Yes\n2: No"
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '1'
ussd_survey.restart_survey()
self.assertEqual(USSDSurvey.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(restart_message, ussd_survey.responseString)
def test_ussd_restart_survey_option_no(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = cache.get(session_string)
session['HOUSEHOLD'] = self.household_1
session['HOUSEHOLD_MEMBER'] = self.household_member
cache.set(session_string, session)
households_list = "%s\n1: HH-%s-%s*\n2: HH-%s-%s*" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '2'
ussd_survey.restart_survey()
self.assertIsNone(ussd_survey.get_from_session('HOUSEHOLD'))
self.assertIsNone(ussd_survey.get_from_session('HOUSEHOLD_MEMBER'))
self.assertIsNone(ussd_survey.household)
self.assertEqual(USSDSurvey.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(households_list, ussd_survey.responseString)
def test_ussd_render_welcome_text_if_investigator_has_no_households(self):
household_message = "Sorry, you have no households registered."
new_investigator = Investigator.objects.create(name="new investigator",
mobile_number="001122334",
location=Location.objects.create(name="Entebbe"),
backend=Backend.objects.create(name='another'))
ussd_survey = USSDSurvey(new_investigator, FakeRequest())
ussd_survey.render_welcome_text()
self.assertEqual(USSDSurvey.ACTIONS['END'], ussd_survey.action)
self.assertEqual(household_message, ussd_survey.responseString)
def test_end_interview_if_batch_questions_answered_more_than_time_out_minutes_ago(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = {}
session['HOUSEHOLD'] = self.household_1
session['HOUSEHOLD_MEMBER'] = self.household_member
cache.set(session_string, session)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '1'
with patch.object(HouseholdMember, 'survey_completed', return_value=False):
with patch.object(HouseholdMember, 'last_question_answered', return_value=[1]):
with patch.object(HouseholdMember, 'can_retake_survey', return_value=False):
ussd_survey.end_interview(self.batch)
self.assertEqual(USSD.MESSAGES['BATCH_5_MIN_TIMEDOUT_MESSAGE'], ussd_survey.responseString)
self.assertEqual(USSD.ACTIONS['END'], ussd_survey.action)
def test_render_household_list_should_behave_like_new_request_if_no_household_selected(self):
request = FakeRequest()
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = {}
session['HOUSEHOLD'] = self.household_1
session['HOUSEHOLD_MEMBER'] = self.household_member
session['PAGE'] = '1'
cache.set(session_string, session)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.render_households_list(self.open_survey)
self.assertEqual(USSD.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(USSD.MESSAGES['HOUSEHOLD_COMPLETION_MESSAGE'], ussd_survey.responseString)
def test_end_interview_if_batch_questions_answered_within_time_out_minutes_ago(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = {}
session['HOUSEHOLD'] = self.household_1
session['PAGE'] = '1'
session['HOUSEHOLD_MEMBER'] = self.household_member
cache.set(session_string, session)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '1'
with patch.object(HouseholdMember, 'survey_completed', return_value=False):
with patch.object(HouseholdMember, 'can_retake_survey', return_value=True):
ussd_survey.end_interview(self.batch)
self.assertEqual(USSD.MESSAGES['SUCCESS_MESSAGE'], ussd_survey.responseString)
self.assertEqual(USSD.ACTIONS['END'], ussd_survey.action)
def test_ussd_render_welcome_text_if_investigator_has_households(self):
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
welcome_message = "%s\n%s: Households list" % (homepage, USSD.HOUSEHOLD_LIST_OPTION)
ussd_survey = USSDSurvey(self.investigator, FakeRequest())
ussd_survey.render_welcome_text()
self.assertEqual(USSDSurvey.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(welcome_message, ussd_survey.responseString)
def test_renders_welcome_message_if_investigator_does_not_select_option_one_or_two_from_welcome_screen(self):
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
response = self.respond('10')
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_numerical_questions(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("4")
self.assertEquals(4, NumericalAnswer.objects.get(investigator=self.investigator,
question=question_1).answer)
response_string = "responseString=%s&action=request" % question_2.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(2, NumericalAnswer.objects.get(investigator=self.investigator,
question=question_2).answer)
def test_textual_questions(self):
member_2 = HouseholdMember.objects.create(surname="Name 2", household=self.household,
date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.TEXT, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.TEXT, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("Reply one")
self.assertEquals(self.ussd_params['ussdRequestString'],
TextAnswer.objects.get(investigator=self.investigator, question=question_1).answer)
response_string = "responseString=%s&action=request" % question_2.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("Reply two")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(self.ussd_params['ussdRequestString'],
TextAnswer.objects.get(investigator=self.investigator, question=question_2).answer)
def test_multichoice_questions(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
option_1_1 = QuestionOption.objects.create(question=question_1, text="OPTION 1", order=1)
option_1_2 = QuestionOption.objects.create(question=question_1, text="OPTION 2", order=2)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.MULTICHOICE, order=2, group=self.member_group)
option_2_1 = QuestionOption.objects.create(question=question_2, text="OPTION 1", order=1)
option_2_2 = QuestionOption.objects.create(question=question_2, text="OPTION 2", order=2)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond(str(option_1_1.order))
self.assertEquals(option_1_1,
MultiChoiceAnswer.objects.get(investigator=self.investigator,
question=question_1).answer)
response_string = "responseString=%s&action=request" % question_2.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond(str(option_2_1.order))
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(option_2_1,
MultiChoiceAnswer.objects.get(investigator=self.investigator,
question=question_2).answer)
def test_multichoice_questions_pagination(self):
question = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
option_1 = QuestionOption.objects.create(question=question, text="OPTION 1", order=1)
option_2 = QuestionOption.objects.create(question=question, text="OPTION 2", order=2)
option_3 = QuestionOption.objects.create(question=question, text="OPTION 3", order=3)
option_4 = QuestionOption.objects.create(question=question, text="OPTION 4", order=4)
option_5 = QuestionOption.objects.create(question=question, text="OPTION 5", order=5)
option_6 = QuestionOption.objects.create(question=question, text="OPTION 6", order=6)
option_7 = QuestionOption.objects.create(question=question, text="OPTION 7", order=7)
back_text = Question.PREVIOUS_PAGE_TEXT
next_text = Question.NEXT_PAGE_TEXT
question_2 = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=2, group=self.member_group)
option_8 = QuestionOption.objects.create(question=question_2, text="OPTION 1", order=1)
option_9 = QuestionOption.objects.create(question=question_2, text="OPTION 2", order=2)
question.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
page_1 = "%s\n1: %s\n2: %s\n3: %s\n%s" % (question.text, option_1.text, option_2.text, option_3.text, next_text)
page_2 = "%s\n4: %s\n5: %s\n6: %s\n%s\n%s" % (
question.text, option_4.text, option_5.text, option_6.text, back_text, next_text)
page_3 = "%s\n7: %s\n%s" % (question.text, option_7.text, back_text)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("#")
response_string = "responseString=%s&action=request" % page_2
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("#")
response_string = "responseString=%s&action=request" % page_3
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("*")
response_string = "responseString=%s&action=request" % page_2
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("*")
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("#")
response_string = "responseString=%s&action=request" % page_2
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_reanswer_question(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
rule = AnswerRule.objects.create(question=question_2, action=AnswerRule.ACTIONS['REANSWER'],
condition=AnswerRule.CONDITIONS['GREATER_THAN_QUESTION'],
validate_with_question=question_1)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("5")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("10")
response_string = "responseString=%s&action=request" % ("RECONFIRM: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("5")
response_string = "responseString=%s&action=request" % ("RECONFIRM: " + question_2.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_text_invalid_answer(self):
member_2 = HouseholdMember.objects.create(surname="Name 2", household=self.household,
date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.TEXT, order=1, group=self.member_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("")
response_string = "responseString=%s&action=request" % ("INVALID ANSWER: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("something")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_numerical_invalid_answer(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("a")
response_string = "responseString=%s&action=request" % ("INVALID ANSWER: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_multichoice_invalid_answer(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
option_1 = QuestionOption.objects.create(question=question_1, text="OPTION 1", order=1)
option_2 = QuestionOption.objects.create(question=question_1, text="OPTION 2", order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
page_1 = "%s\n1: %s\n2: %s" % (question_1.text, option_1.text, option_2.text)
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("a")
response_string = "responseString=%s&action=request" % ("INVALID ANSWER: " + page_1)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("4")
response_string = "responseString=%s&action=request" % ("INVALID ANSWER: " + page_1)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_end_interview_confirmation(self):
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
AnswerRule.objects.create(question=question_1, action=AnswerRule.ACTIONS['END_INTERVIEW'],
condition=AnswerRule.CONDITIONS['EQUALS'], validate_with_value=0)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household("1")
members_list = "%s\n1: %s - (respondent)" % (USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 0)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 0)
response = self.respond("0")
response_string = "responseString=%s&action=request" % ("RECONFIRM: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 1)
self.assertEquals(0, NumericalAnswer.objects.count())
response = self.respond("0")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['HOUSEHOLD_COMPLETION_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 0)
self.assertFalse(self.household.has_pending_survey())
self.assertTrue(self.household_1.has_pending_survey())
self.assertFalse(self.investigator.completed_open_surveys())
self.set_questions_answered_to_twenty_minutes_ago()
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household("2")
members_list = "%s\n1: %s - (respondent)\n2: %s" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head_1.surname, self.household_member.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
response = self.choose_menu_to_take_survey()
households_list_1 = "%s\n1: HH-%s-%s*\n2: HH-%s-%s" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
response_string = "responseString=%s&action=request" % households_list_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household('1')
members_list = "%s\n1: %s - (respondent)*" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_end_interview_confirmation_alternative(self):
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
rule = AnswerRule.objects.create(question=question_1, action=AnswerRule.ACTIONS['END_INTERVIEW'],
condition=AnswerRule.CONDITIONS['EQUALS'], validate_with_value=0)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household()
members_list = "%s\n1: %s - (respondent)" % (USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 0)
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("0")
response_string = "responseString=%s&action=request" % ("RECONFIRM: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(0, NumericalAnswer.objects.count())
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_should_show_member_completion_message_and_choose_to_go_to_member_list(self):
member_2 = HouseholdMember.objects.create(surname="Name 2", household=self.household,
date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="Question 1?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="Question 2?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household()
members_list = "%s\n1: %s - (respondent)\n2: %s" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname, member_2.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
members_list = "%s\n1: %s - (respondent)*\n2: %s" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname, member_2.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_should_show_thank_you_message_on_completion_of_all_members_questions(self):
question_1 = Question.objects.create(text="Question 1?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="Question 2?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household()
members_list = "%s\n1: %s - (respondent)" % (USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['HOUSEHOLD_COMPLETION_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
households_list_1 = "%s\n1: HH-%s-%s*\n2: HH-%s-%s" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
response_string = "responseString=%s&action=request" % households_list_1
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_welcome_screen_should_show_message_and_options_for_registration_and_take_survey(self):
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_choosing_take_survey_should_render_household_list(self):
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
self.select_samples()
with patch.object(USSDSurvey, 'is_active', return_value=False):
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.choose_menu_to_take_survey()
households_list_1 = "%s\n1: HH-%s-%s*\n2: HH-%s-%s*" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
response_string = "responseString=%s&action=request" % households_list_1
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_choosing_registering_HH_should_set_cache(self):
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.batch.close_for_location(self.investigator.location)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
self.select_samples()
with patch.object(USSDSurvey, 'is_active', return_value=False):
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.choose_menu_to_register_household()
self.assertTrue(self.investigator.get_from_cache('IS_REGISTERING_HOUSEHOLD'))
def test_resume_should_show_welcome_text_if_open_batch_is_closed_on_session_timeout(self):
question_1 = Question.objects.create(text="Question 1?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="Question 2?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.choose_menu_to_take_survey()
response = self.select_household()
members_list = "%s\n1: %s - (respondent)" % (USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
self.batch.close_for_location(self.investigator.location)
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_new_parameter_request_empty_string(self):
self.ussd_params['transactionId'] = "123344" + str(randint(1, 99999))
self.ussd_params['response'] = 'false'
self.ussd_params['ussdRequestString'] = ''
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, "is_active", return_value=False):
response = self.client.post('/ussd', data=self.ussd_params)
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_new_parameter_request_short_code_without_application_extension(self):
self.ussd_params['transactionId'] = "123344" + str(randint(1, 99999))
self.ussd_params['response'] = 'false'
self.ussd_params['ussdRequestString'] = '*257#'
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, "is_active", return_value=False):
response = self.client.post('/ussd', data=self.ussd_params)
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_new_parameter_request_short_code_with_application_extension(self):
self.ussd_params['transactionId'] = "123344" + str(randint(1, 99999))
self.ussd_params['response'] = 'false'
self.ussd_params['ussdRequestString'] = '*153*10#'
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, "is_active", return_value=False):
response = self.client.post('/ussd', data=self.ussd_params)
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_new_parameter_request_short_code_with_application_code_set_and_application_code_posted(self):
self.ussd_params['transactionId'] = "123344" + str(randint(1, 99999))
self.ussd_params['response'] = 'false'
self.ussd_params['ussdRequestString'] = '10'
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, "is_active", return_value=False):
response = self.client.post('/ussd', data=self.ussd_params)
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_subquestion_of_different_type_from_a_multichoice_parent_question_should_not_invoke_invalid_answer(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
option_1 = QuestionOption.objects.create(question=question_1, text="OPTION 1", order=1)
option_2 = QuestionOption.objects.create(question=question_1, text="OPTION 2", order=2)
option_3 = QuestionOption.objects.create(question=question_1, text="specify", order=3)
sub_question_1 = Question.objects.create(text="some subquestion of question 1",
group=self.member_group,
answer_type=Question.TEXT, subquestion=True, parent=question_1)
sub_question_1.batches.add(self.batch)
AnswerRule.objects.create(question=question_1, action=AnswerRule.ACTIONS['ASK_SUBQUESTION'],
condition=AnswerRule.CONDITIONS['EQUALS_OPTION'], validate_with_option=option_3,
next_question=sub_question_1)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
page_1 = "%s\n1: %s\n2: %s\n3: %s" % (question_1.text, option_1.text, option_2.text, option_3.text)
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("3")
response_string = "responseString=%s&action=request" % (sub_question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertNotIn("INVALID ANSWER", urllib2.unquote(response.content))
def test_should_not_repeat_question_after_answer_has_been_given_the_answer_rule_is_not_repeat(self):
HouseholdMember.objects.create(surname="Surname", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="Question 1- with Skip logic",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
option_1 = QuestionOption.objects.create(question=question_1, text="OPTION 1", order=1)
option_2 = QuestionOption.objects.create(question=question_1, text="OPTION 2", order=2)
option_3 = QuestionOption.objects.create(question=question_1, text="specify", order=3)
question_2 = Question.objects.create(text="question 2 - skipped",
answer_type=Question.TEXT, order=2, group=self.member_group)
question_3 = Question.objects.create(text="question 3 - skipped to",
answer_type=Question.TEXT, order=3, group=self.member_group)
question_4 = Question.objects.create(text="question 4",
answer_type=Question.NUMBER, order=4, group=self.member_group)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_3, order=3)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_4, order=4)
self.batch.questions.add(question_1, question_2, question_3, question_4)
AnswerRule.objects.create(question=question_1, action=AnswerRule.ACTIONS['SKIP_TO'],
condition=AnswerRule.CONDITIONS['EQUALS_OPTION'], validate_with_option=option_3,
next_question=question_3)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
page_1 = "%s\n1: %s\n2: %s\n3: %s" % (question_1.text, option_1.text, option_2.text, option_3.text)
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("3")
response_string = "responseString=%s&action=request" % question_3.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("akampa")
response_string = "responseString=%s&action=request" % question_4.text
self.assertEquals(urllib2.unquote(response.content), response_string)
| bsd-3-clause | 1,294,537,231,428,861,400 | 58.69224 | 129 | 0.62543 | false |
thomaslima/PySpice | PySpice/Spice/Parser.py | 1 | 23694 | ####################################################################################################
#
# PySpice - A Spice Package for Python
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
"""This module implements a partial SPICE netlist parser.
It would be difficult to implement a full parser for Ngspice since the syntax is mainly contextual.
"""
####################################################################################################
import logging
####################################################################################################
from .ElementParameter import (
FlagParameter,
)
from .Netlist import ElementParameterMetaClass, NPinElement, Circuit
from .BasicElement import SubCircuitElement, BipolarJunctionTransistor
####################################################################################################
_module_logger = logging.getLogger(__name__)
####################################################################################################
class PrefixData:
##############################################
def __init__(self, prefix, classes):
self.prefix = prefix
self.classes = classes
number_of_positionals_min = 1000
number_of_positionals_max = 0
has_optionals = False
for element_class in classes:
number_of_positionals = element_class.number_of_positional_parameters
number_of_positionals_min = min(number_of_positionals_min, number_of_positionals)
number_of_positionals_max = max(number_of_positionals_max, number_of_positionals)
has_optionals = max(has_optionals, bool(element_class.optional_parameters))
self.number_of_positionals_min = number_of_positionals_min
self.number_of_positionals_max = number_of_positionals_max
self.has_optionals = has_optionals
self.multi_devices = len(classes) > 1
self.npins = prefix in ('Q', 'X') # NPinElement, Q has 3 to 4 pins
if self.npins:
self.number_of_pins = None
else:
            # other prefixes have a fixed pin count, taken from the element class
self.number_of_pins = classes[0].number_of_pins
self.has_flag = False
for element_class in classes:
for parameter in element_class.optional_parameters.values():
if isinstance(parameter, FlagParameter):
self.has_flag = True
##############################################
def __len__(self):
return len(self.classes)
##############################################
def __iter__(self):
return iter(self.classes)
##############################################
@property
def single(self):
if not self.multi_devices:
return self.classes[0]
else:
raise NameError()
####################################################################################################
_prefix_cache = {prefix:PrefixData(prefix, classes)
for prefix, classes in ElementParameterMetaClass.__classes__.items()}
# for prefix_data in sorted(_prefix_cache.values(), key=lambda x: len(x)):
# print(prefix_data.prefix,
# len(prefix_data),
# prefix_data.number_of_positionals_min, prefix_data.number_of_positionals_max,
# prefix_data.has_optionals)
# Single:
# B 0 True
# D 1 True
# F 2 False
# G 1 False
# H 2 False
# I 1 False
# J 1 True
# K 3 False
# M 1 True
# S 2 False
# V 1 False
# W 3 False
# Z 1 True
# Two:
# E 0 1 False
# L 1 2 True
# Three:
# C 1 2 True
# R 1 2 True
# NPinElement:
# Q 1 1 True
# X 1 1 False
####################################################################################################
class Token:
""" This class implements a token, in fact a line in a Spice netlist. """
##############################################
def __init__(self, line):
self._line = line
##############################################
def __repr__(self):
return "{} {}".format(self.__class__.__name__, repr(self._line))
####################################################################################################
class Comment(Token):
pass
####################################################################################################
class Title(Token):
""" This class implements a title definition. """
##############################################
def __init__(self, line):
super().__init__(line)
self._title = self._line.read_right_of('.title')
##############################################
def __str__(self):
return self._title
##############################################
def __repr__(self):
return "Title {}".format(self._title)
####################################################################################################
class Include(Token):
""" This class implements a include definition. """
##############################################
def __init__(self, line):
super().__init__(line)
self._include = self._line.read_right_of('.include')
##############################################
def __str__(self):
return self._include
##############################################
def __repr__(self):
return "Include {}".format(self._title)
####################################################################################################
class Model(Token):
""" This class implements a model definition.
Spice syntax::
.model mname type (pname1=pval1 pname2=pval2)
"""
##############################################
def __init__(self, line):
super().__init__(line)
# Fixme
parameters, dict_parameters = self._line.split_line('.model')
self._name, self._model_type = parameters[:2]
self._parameters = dict_parameters
##############################################
@property
def name(self):
""" Name of the model """
return self._name
##############################################
def __repr__(self):
return "Model {} {} {}".format(self._name, self._model_type, self._parameters)
####################################################################################################
class SubCircuit(Token):
""" This class implements a sub-circuit definition.
Spice syntax::
.SUBCKT name node1 ... param1=value1 ...
"""
##############################################
def __init__(self, line):
super().__init__(line)
# Fixme:
parameters, dict_parameters = self._line.split_line('.subckt')
self._name, self._nodes = parameters[0], parameters[1:]
self._tokens = []
##############################################
@property
def name(self):
""" Name of the sub-circuit. """
return self._name
##############################################
def __repr__(self):
text = "SubCircuit {} {}\n".format(self._name, self._nodes)
text += '\n'.join([' ' + repr(token) for token in self._tokens])
return text
##############################################
def __iter__(self):
""" Return an iterator on the tokens. """
return iter(self._tokens)
##############################################
def append(self, token):
""" Append a token to the token's list. """
self._tokens .append(token)
####################################################################################################
class Element(Token):
""" This class implements an element definition.
"{ expression }" are allowed in device line.
"""
_logger = _module_logger.getChild('Element')
##############################################
def __init__(self, line):
super().__init__(line)
line_str = str(line)
# self._logger.debug('\n' + line_str)
# Retrieve device prefix
self._prefix = line_str[0]
prefix_data = _prefix_cache[self._prefix]
# Retrieve device name
start_location = 1
stop_location = line_str.find(' ')
# Fixme: if stop_location == -1:
self._name = line_str[start_location:stop_location]
self._nodes = []
self._parameters = []
self._dict_parameters = {}
# Read nodes
if not prefix_data.npins:
number_of_pins = prefix_data.number_of_pins
if number_of_pins:
self._nodes, stop_location = self._line.read_words(stop_location, number_of_pins)
else: # Q or X
if prefix_data.prefix == 'Q':
self._nodes, stop_location = self._line.read_words(stop_location, 3)
# Fixme: optional node
else: # X
args, stop_location = self._line.split_words(stop_location, until='=')
self._nodes = args[:-1]
self._parameters.append(args[-1]) # model name
# Read positionals
number_of_positionals = prefix_data.number_of_positionals_min
if number_of_positionals and stop_location is not None: # model is optional
self._parameters, stop_location = self._line.read_words(stop_location, number_of_positionals)
if prefix_data.multi_devices and stop_location is not None:
remaining, stop_location = self._line.split_words(stop_location, until='=')
self._parameters.extend(remaining)
if prefix_data.prefix in ('V', 'I') and stop_location is not None:
# merge remaining
self._parameters[-1] += line_str[stop_location:]
# Read optionals
if prefix_data.has_optionals and stop_location is not None:
kwargs, stop_location = self._line.split_words(stop_location)
for kwarg in kwargs:
try:
key, value = kwarg.split('=')
self._dict_parameters[key] = value
except ValueError:
if kwarg in ('off',) and prefix_data.has_flag:
self._dict_parameters['off'] = True
else:
self._logger.warn(line_str)
# raise NameError("Bad element line:", line_str)
if prefix_data.multi_devices:
for element_class in prefix_data:
if len(self._parameters) == element_class.number_of_positional_parameters:
break
else:
element_class = prefix_data.single
self.factory = element_class
# Move positionals passed as kwarg
to_delete = []
for parameter in element_class.positional_parameters.values():
if parameter.key_parameter:
i = parameter.position
self._dict_parameters[parameter.attribute_name] = self._parameters[i]
to_delete.append(i)
        # delete from the highest index first so the remaining positions stay valid
        for i in sorted(to_delete, reverse=True):
            del self._parameters[i]
self._logger.debug('\n' + self.__repr__())
##############################################
@property
def name(self):
""" Name of the element """
return self._name
##############################################
def __repr__(self):
return "Element {0._prefix} {0._name} {0._nodes} {0._parameters} {0._dict_parameters}".format(self)
####################################################################################################
class Line:
""" This class implements a line in the netlist. """
##############################################
def __init__(self, text, line_range):
text = str(text)
for marker in ('$', ';', '//'):
location = text.find(marker)
if location != -1:
break
        if location != -1:
            comment = text[location:]
            text = text[:location]
        else:
            comment = ''
self._text = text
self._comment = comment
self._line_range = line_range
##############################################
def __repr__(self):
return "{0._line_range} {0._text}".format(self)
##############################################
def __str__(self):
return self._text
##############################################
def read_right_of(self, text):
return self._text[len(text):].strip()
##############################################
def read_words(self, start_location, number_of_words):
line_str = self._text
number_of_words_read = 0
words = []
while number_of_words_read < number_of_words: # and start_location < len(line_str)
stop_location = line_str.find(' ', start_location)
if stop_location == -1:
stop_location = None # read until end
word = line_str[start_location:stop_location].strip()
if word:
number_of_words_read += 1
words.append(word)
if stop_location is None: # we should stop
if number_of_words_read != number_of_words:
template = "Bad element line, looking for word {}/{}:\n"
raise NameError(template.format(number_of_words_read, number_of_words) +
line_str + '\n' +
' '*start_location + '^')
else:
if start_location < stop_location:
start_location = stop_location
else: # we have read a space
start_location += 1
return words, stop_location
##############################################
def split_words(self, start_location, until=None):
line_str = self._text
stop_location = None
if until is not None:
location = line_str.find(until, start_location)
if location != -1:
stop_location = location
location = line_str.rfind(' ', start_location, stop_location)
if location != -1:
stop_location = location
else:
raise NameError("Bad element line, missing key? " + line_str)
line_str = line_str[start_location:stop_location]
words = [x for x in line_str.split(' ') if x]
return words, stop_location
##############################################
def split_line(self, keyword):
""" Split the line according to the following pattern::
keyword parameter1 parameter2 ... key1=value1 key2=value2 ...
        Return the list of parameters and the dictionary.
"""
raw_parameters = self._text[len(keyword):].split()
parameters = []
dict_parameters = {}
for parameter in raw_parameters:
if '=' in parameter:
key, value = parameter.split('=')
dict_parameters[key.strip()] = value.strip()
else:
parameters.append(parameter)
return parameters, dict_parameters
####################################################################################################
class SpiceParser:
""" This class parse a Spice netlist file and build a syntax tree.
Public Attributes:
:attr:`circuit`
:attr:`models`
:attr:`subcircuits`
"""
_logger = _module_logger.getChild('SpiceParser')
##############################################
def __init__(self, path=None, source=None):
# Fixme: empty source
if path is not None:
with open(str(path), 'r') as f:
raw_lines = f.readlines()
elif source is not None:
raw_lines = source.split('\n') # Fixme: other os
else:
raise ValueError
lines = self._merge_lines(raw_lines)
self._title = None
self._tokens = self._parse(lines)
self._find_sections()
##############################################
def _merge_lines(self, raw_lines):
"""Merge broken lines and return a new list of lines.
A line starting with "+" continues the preceding line.
"""
# Fixme: better using lines[-1] ?
lines = []
current_line = ''
current_line_index = None
for line_index, line in enumerate(raw_lines):
if line.startswith('+'):
current_line += ' ' + line[1:].strip()
else:
if current_line:
lines.append(Line(current_line, slice(current_line_index, line_index)))
current_line = line.strip()
current_line_index = line_index
if current_line:
lines.append(Line(current_line, slice(current_line_index, len(raw_lines))))
return lines
##############################################
def _parse(self, lines):
""" Parse the lines and return a list of tokens. """
tokens = []
sub_circuit = None
scope = tokens
for line in lines:
# print repr(line)
text = str(line)
lower_case_text = text.lower() # !
if text.startswith('*'):
scope.append(Comment(line))
elif lower_case_text.startswith('.'):
lower_case_text = lower_case_text[1:]
if lower_case_text.startswith('subckt'):
sub_circuit = SubCircuit(line)
tokens.append(sub_circuit)
scope = sub_circuit
elif lower_case_text.startswith('ends'):
sub_circuit = None
scope = tokens
elif lower_case_text.startswith('title'):
self._title = Title(line)
scope.append(self._title)
elif lower_case_text.startswith('end'):
pass
elif lower_case_text.startswith('model'):
model = Model(line)
scope.append(model)
elif lower_case_text.startswith('include'):
scope.append(Include(line))
else:
# options param ...
# .global
# .lib filename libname
# .param
# .func .csparam .temp .if
# { expr } are allowed in .model lines and in device lines.
self._logger.warn(line)
else:
element = Element(line)
scope.append(element)
return tokens
##############################################
def _find_sections(self):
""" Look for model, sub-circuit and circuit definitions in the token list. """
self.circuit = None
self.subcircuits = []
self.models = []
for token in self._tokens:
if isinstance(token, Title):
if self.circuit is None:
self.circuit = token
else:
raise NameError("More than one title")
elif isinstance(token, SubCircuit):
self.subcircuits.append(token)
elif isinstance(token, Model):
self.models.append(token)
##############################################
def is_only_subcircuit(self):
return bool(not self.circuit and self.subcircuits)
##############################################
def is_only_model(self):
return bool(not self.circuit and not self.subcircuits and self.models)
##############################################
def build_circuit(self, ground=0):
ground = str(ground)
circuit = Circuit(str(self._title))
for token in self._tokens:
if isinstance(token, Include):
circuit.include(str(token))
for token in self._tokens:
if isinstance(token, Element):
factory = getattr(circuit, token.factory.alias)
nodes = []
for node in token._nodes:
if str(node) == ground:
node = 0
nodes.append(node)
if token._prefix != 'X':
args = nodes + token._parameters
else: # != Spice
args = token._parameters + nodes
kwargs = token._dict_parameters
message = ' '.join([str(x) for x in (token._prefix, token._name, nodes,
token._parameters, token._dict_parameters)])
self._logger.debug(message)
factory(token._name, *args, **kwargs)
return circuit
##############################################
def _to_python(self, value):
try:
int_value = int(value)
value = float(value)
if int_value == value:
return str(int_value)
else:
return str(value)
except ValueError:
return "'{}'".format(value)
##############################################
def to_python_code(self, ground=0):
ground = str(ground)
# for token in self._tokens:
# if isinstance(token, Include):
# circuit.include(str(token))
if self._title:
title = self._title
else:
title = '...'
circuit = "circuit = Circuit('{}')\n".format(title)
for token in self._tokens:
if isinstance(token, Element):
nodes = []
for node in token._nodes:
if str(node) == ground:
node = 0
nodes.append(node)
if token._prefix != 'X':
args = nodes + token._parameters
else: # != Spice
args = token._parameters + nodes
args = [self._to_python(x) for x in args]
kwargs = ['{}={}'.format(key, self._to_python(value))
for key, value in token._dict_parameters.items()]
parameters = ', '.join(args + kwargs)
circuit += "circuit.{}({})\n".format(token._prefix, parameters)
return circuit
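####################################################################################################
#
# Usage sketch (illustrative; 'netlist.cir' is a placeholder path, not part of this module):
#
#   parser = SpiceParser(path='netlist.cir')
#   print(parser.models, parser.subcircuits)
#   circuit = parser.build_circuit(ground=0)
#   print(parser.to_python_code(ground=0))
#
####################################################################################################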
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 | -8,791,368,678,577,161,000 | 30.676471 | 107 | 0.447666 | false |
chromium/chromium | third_party/blink/tools/blinkpy/common/system/platform_info_unittest.py | 7 | 11345 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import platform
import sys
import unittest
from blinkpy.common.system.executive import Executive
from blinkpy.common.system.executive_mock import MockExecutive
from blinkpy.common.system.filesystem import FileSystem
from blinkpy.common.system.filesystem_mock import MockFileSystem
from blinkpy.common.system.platform_info import PlatformInfo
def fake_sys(platform_str='darwin', windows_version_tuple=None):
class FakeSysModule(object):
platform = platform_str
if windows_version_tuple:
getwindowsversion = lambda x: windows_version_tuple
return FakeSysModule()
def fake_platform(mac_version_string='10.12.3',
release_string='bar',
linux_version='trusty'):
class FakePlatformModule(object):
def mac_ver(self):
return tuple([mac_version_string, tuple(['', '', '']), 'i386'])
def linux_distribution(self):
return tuple([None, None, linux_version])
def platform(self):
return 'foo'
def release(self):
return release_string
return FakePlatformModule()
def fake_executive(output=None):
if output:
return MockExecutive(output=output)
return MockExecutive(exception=SystemError)
class TestPlatformInfo(unittest.TestCase):
def make_info(self,
sys_module=None,
platform_module=None,
filesystem_module=None,
executive=None):
return PlatformInfo(sys_module or fake_sys(), platform_module
or fake_platform(), filesystem_module
or MockFileSystem(), executive or fake_executive())
def test_real_code(self):
# This test makes sure the real (unmocked) code actually works.
info = PlatformInfo(sys, platform, FileSystem(), Executive())
self.assertNotEquals(info.os_name, '')
self.assertNotEquals(info.os_version, '')
self.assertNotEquals(info.display_name(), '')
self.assertTrue(info.is_mac() or info.is_win() or info.is_linux()
or info.is_freebsd())
self.assertIsNotNone(info.terminal_width())
if info.is_linux():
self.assertIsNotNone(info.linux_distribution())
if info.is_mac():
self.assertTrue(info.total_bytes_memory() > 0)
else:
self.assertIsNone(info.total_bytes_memory())
def test_os_name_and_wrappers(self):
info = self.make_info(fake_sys('linux2'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('linux3'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('darwin'), fake_platform('10.12.3'))
self.assertEqual(info.os_name, 'mac')
self.assertFalse(info.is_linux())
self.assertTrue(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertEqual(info.os_name, 'win')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertTrue(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('freebsd8'))
self.assertEqual(info.os_name, 'freebsd')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertTrue(info.is_freebsd())
with self.assertRaises(AssertionError):
self.make_info(fake_sys('vms'))
def test_os_version(self):
with self.assertRaises(AssertionError):
self.make_info(fake_sys('darwin'), fake_platform('10.6.3'))
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.10.0')).os_version, 'mac10.10')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.11.0')).os_version, 'mac10.11')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.12.0')).os_version, 'mac10.12')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.13.0')).os_version, 'mac10.13')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.14.0')).os_version, 'mac10.14')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.15.0')).os_version, 'mac10.15')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.16.0')).os_version, 'mac10.16')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('11.0.0')).os_version, 'mac11.0')
with self.assertRaises(AssertionError):
self.make_info(fake_sys('darwin'), fake_platform('10.20.0'))
self.assertEqual(
self.make_info(fake_sys('linux2')).os_version, 'trusty')
info = self.make_info(
fake_sys('linux2'), fake_platform(linux_version='utopic'))
self.assertEqual(info.os_version, 'trusty')
self.assertEqual(
self.make_info(
fake_sys('freebsd8'), fake_platform(
'', '8.3-PRERELEASE')).os_version, '8.3-PRERELEASE')
self.assertEqual(
self.make_info(
fake_sys('freebsd9'),
fake_platform('', '9.0-RELEASE')).os_version, '9.0-RELEASE')
with self.assertRaises(AssertionError):
self.make_info(fake_sys('win32', tuple([5, 0, 1234])))
with self.assertRaises(AssertionError):
self.make_info(fake_sys('win32', tuple([6, 1, 1234])))
self.assertEqual(
self.make_info(fake_sys('win32', tuple([10, 1, 1234]))).os_version,
'future')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([10, 0, 1234]))).os_version,
'10')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 3, 1234]))).os_version,
'8.1')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 2, 1234]))).os_version,
'8')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 1, 7601]))).os_version,
'7sp1')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 1, 7600]))).os_version,
'7sp0')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 0, 1234]))).os_version,
'vista')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([5, 1, 1234]))).os_version,
'xp')
with self.assertRaises(AssertionError):
self.make_info(
fake_sys('win32'), executive=fake_executive('5.0.1234'))
with self.assertRaises(AssertionError):
self.make_info(
fake_sys('win32'), executive=fake_executive('6.1.1234'))
def _assert_files_imply_linux_distribution(self, file_paths, distribution):
fs_module = MockFileSystem({file_path: '' for file_path in file_paths})
info = self.make_info(
sys_module=fake_sys('linux2'), filesystem_module=fs_module)
self.assertEqual(info.linux_distribution(), distribution)
def test_linux_distro_detection(self):
self._assert_files_imply_linux_distribution(['/etc/arch-release'],
'arch')
self._assert_files_imply_linux_distribution(['/etc/debian_version'],
'debian')
self._assert_files_imply_linux_distribution(['/etc/fedora-release'],
'fedora')
self._assert_files_imply_linux_distribution(
['/etc/fedora-release', '/etc/redhat-release'], 'fedora')
self._assert_files_imply_linux_distribution(['/etc/redhat-release'],
'redhat')
self._assert_files_imply_linux_distribution(['/etc/mock-release'],
'unknown')
def test_display_name(self):
info = self.make_info(fake_sys('darwin'))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('linux2'))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('freebsd9'))
self.assertNotEquals(info.display_name(), '')
def test_total_bytes_memory(self):
info = self.make_info(
fake_sys('darwin'),
fake_platform('10.12.3'),
executive=fake_executive('1234'))
self.assertEqual(info.total_bytes_memory(), 1234)
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('linux2'))
self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('freebsd9'))
self.assertIsNone(info.total_bytes_memory())
def test_unsupported_platform(self):
with self.assertRaises(AssertionError):
self.make_info(fake_sys('cygwin'))
| bsd-3-clause | -1,177,675,067,622,290,200 | 40.863469 | 79 | 0.600264 | false |
Tangxuguo/Django_SNS | osf/post/migrations/0001_initial.py | 1 | 4772 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'post_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('author', self.gf('django.db.models.fields.IntegerField')()),
('ts', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 30, 0, 0))),
('content', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.CharField')(default='New Post', max_length=100)),
('excerpt', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
('comment_status', self.gf('django.db.models.fields.IntegerField')(default=0)),
('pwd', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('lastts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('like_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('share_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('comment_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('album', self.gf('django.db.models.fields.IntegerField')(default=0)),
('cover', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
))
db.send_create_signal(u'post', ['Post'])
# Adding M2M table for field tags on 'Post'
m2m_table_name = db.shorten_name(u'post_post_tags')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'post.post'], null=False)),
('tag', models.ForeignKey(orm[u'tag.tag'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'tag_id'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'post_post')
# Removing M2M table for field tags on 'Post'
db.delete_table(db.shorten_name(u'post_post_tags'))
models = {
u'post.post': {
'Meta': {'object_name': 'Post'},
'album': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.IntegerField', [], {}),
'comment_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'comment_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('django.db.models.fields.TextField', [], {}),
'cover': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'like_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pwd': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'share_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['tag.Tag']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'New Post'", 'max_length': '100'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 30, 0, 0)'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'tag.tag': {
'Meta': {'object_name': 'Tag'},
'add_ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 30, 0, 0)'}),
'cover': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['post'] | gpl-3.0 | 580,585,524,880,129,500 | 58.6625 | 127 | 0.573764 | false |
rlucio/cinder-violin-driver-icehouse | cinder/tests/test_v6000_fcp.py | 1 | 16426 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Violin Memory tests for FCP driver
by Ryan Lucio
Senior Software Engineer
Violin Memory
Note: python documentation for unit testing can be found at
http://docs.python.org/2/library/unittest.html
Note: cinder documentation for development can be found at
http://docs.openstack.org/developer/cinder/devref/development.environment.html
"""
import mox
import unittest
# TODO(rdl): import and use test utils (cinder.tests.utils)
from cinder.db.sqlalchemy import models
from cinder.volume.drivers.violin.vxg.core.session import XGSession
from cinder.volume.drivers.violin.vxg.vshare.igroup import IGroupManager
from cinder.volume.drivers.violin.vxg.vshare.iscsi import ISCSIManager
from cinder.volume.drivers.violin.vxg.vshare.lun import LUNManager
from cinder.volume.drivers.violin.vxg.vshare.snapshot import SnapshotManager
from cinder.volume.drivers.violin.vxg.vshare.vshare import VShare
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v6000_common
from cinder.volume.drivers.violin import v6000_fcp as violin
class testV6000FC(unittest.TestCase):
"""A test class for the violin Fibrechannel driver module."""
def setUp(self):
self.m = mox.Mox()
self.m_conn = self.m.CreateMock(VShare)
self.m_conn.basic = self.m.CreateMock(XGSession)
self.m_conn.lun = self.m.CreateMock(LUNManager)
self.m_conn.iscsi = self.m.CreateMock(ISCSIManager)
self.m_conn.igroup = self.m.CreateMock(IGroupManager)
self.m_conn.snapshot = self.m.CreateMock(SnapshotManager)
self.config = mox.MockObject(conf.Configuration)
self.config.append_config_values(mox.IgnoreArg())
self.config.gateway_vip = '1.1.1.1'
self.config.gateway_mga = '2.2.2.2'
self.config.gateway_mgb = '3.3.3.3'
self.config.gateway_user = 'admin'
self.config.gateway_password = ''
self.config.volume_backend_name = 'violin'
self.config.use_igroups = False
self.config.use_thin_luns = False
self.config.san_is_local = False
self.driver = violin.V6000FCDriver(configuration=self.config)
self.driver.vmem_vip = self.m_conn
self.driver.vmem_mga = self.m_conn
self.driver.vmem_mgb = self.m_conn
self.driver.container = 'myContainer'
self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
self.stats = {}
self.driver.gateway_fc_wwns = ['wwn.21:00:00:24:ff:45:fb:22',
'wwn.21:00:00:24:ff:45:fb:23',
'wwn.21:00:00:24:ff:45:f1:be',
'wwn.21:00:00:24:ff:45:f1:bf',
'wwn.21:00:00:24:ff:45:e2:30',
'wwn.21:00:00:24:ff:45:e2:31',
'wwn.21:00:00:24:ff:45:e2:5e',
'wwn.21:00:00:24:ff:45:e2:5f']
self.volume1 = mox.MockObject(models.Volume)
self.volume1.id = '3d31af29-6d7d-443f-b451-6f0040d3c9a9'
self.volume1.size = 1
self.volume2 = mox.MockObject(models.Volume)
self.volume2.id = '4c1af784-b328-43d2-84c8-db02158b922d'
self.volume2.size = 2
self.snapshot1 = mox.MockObject(models.Snapshot)
self.snapshot1.name = 'snap-01'
self.snapshot1.snapshot_id = 'f8849c41-6d72-4f5a-8339-2cd6b52b5e5a'
self.snapshot1.volume_id = 1
self.snapshot1.volume_name = 'vol-01'
self.snapshot2 = mox.MockObject(models.Snapshot)
self.snapshot2.name = 'snap-02'
self.snapshot2.snapshot_id = '23e44fad-8840-46f1-99d3-5605a08fb289'
self.snapshot2.volume_id = 2
self.snapshot2.volume_name = 'vol-02'
def tearDown(self):
self.m.UnsetStubs()
def testCheckForSetupError(self):
self.m.StubOutWithMock(v6000_common.V6000CommonDriver, 'check_for_setup_error')
v6000_common.V6000CommonDriver.check_for_setup_error()
self.m.ReplayAll()
self.assertTrue(self.driver.check_for_setup_error() is None)
self.m.VerifyAll()
def testCheckForSetupError_NoWWNConfig(self):
'''No wwns were found during setup.'''
self.driver.gateway_fc_wwns = []
self.m.StubOutWithMock(v6000_common.V6000CommonDriver, 'check_for_setup_error')
v6000_common.V6000CommonDriver.check_for_setup_error()
self.m.ReplayAll()
self.assertRaises(v6000_common.InvalidBackendConfig,
self.driver.check_for_setup_error)
self.m.VerifyAll()
def testEnsureExport(self):
# nothing to test here
#
pass
def testCreateExport(self):
# nothing to test here
#
pass
def testRemoveExport(self):
# nothing to test here
#
pass
def testInitializeConnection(self):
lun_id = 1
vol = self.volume1
igroup = None
connector = {'host': 'h1',
'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
self.m.StubOutWithMock(self.driver, '_export_lun')
self.driver._export_lun(vol, connector, igroup).AndReturn(lun_id)
self.m_conn.basic.save_config()
self.m.ReplayAll()
props = self.driver.initialize_connection(vol, connector)
self.assertEqual(props['driver_volume_type'], "fibre_channel")
self.assertEqual(props['data']['target_discovered'], True)
self.assertEqual(props['data']['target_wwn'],
self.driver.gateway_fc_wwns)
self.assertEqual(props['data']['target_lun'], lun_id)
self.m.VerifyAll()
def testInitializeConnection_SnapshotObject(self):
lun_id = 1
igroup = None
snap = self.snapshot1
connector = {'host': 'h1',
'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
self.m.StubOutWithMock(self.driver, '_export_snapshot')
self.driver._export_snapshot(snap, connector, igroup).AndReturn(lun_id)
self.m_conn.basic.save_config()
self.m.ReplayAll()
props = self.driver.initialize_connection(snap, connector)
self.assertEqual(props['driver_volume_type'], "fibre_channel")
self.assertEqual(props['data']['target_discovered'], True)
self.assertEqual(props['data']['target_wwn'],
self.driver.gateway_fc_wwns)
self.assertEqual(props['data']['target_lun'], lun_id)
self.m.VerifyAll()
def testTerminateConnection(self):
volume = self.volume1
connector = {'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
self.m.StubOutWithMock(self.driver, '_unexport_lun')
self.driver._unexport_lun(volume)
self.m_conn.basic.save_config()
self.m.ReplayAll()
self.driver.terminate_connection(volume, connector)
self.m.VerifyAll()
def testTerminateConnection_SnapshotObject(self):
snap = self.snapshot1
connector = {'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
self.m.StubOutWithMock(self.driver, '_unexport_snapshot')
self.driver._unexport_snapshot(snap)
self.m_conn.basic.save_config()
self.m.ReplayAll()
self.driver.terminate_connection(snap, connector)
self.m.VerifyAll()
def testGetVolumeStats(self):
self.m.StubOutWithMock(self.driver, '_update_stats')
self.driver._update_stats()
self.m.ReplayAll()
self.assertEqual(self.driver.get_volume_stats(True), self.driver.stats)
self.m.VerifyAll()
def testExportLun(self):
volume = self.volume1
lun_id = '1'
igroup = 'test-igroup-1'
connector = {'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
response = {'code': 0, 'message': ''}
self.m.StubOutWithMock(self.driver.lun_tracker,
'get_lun_id_for_volume')
self.m.StubOutWithMock(self.driver, '_send_cmd_and_verify')
self.driver.lun_tracker.get_lun_id_for_volume(volume).AndReturn(lun_id)
self.driver._send_cmd_and_verify(self.m_conn.lun.export_lun,
self.driver._wait_for_exportstate,
mox.IsA(str),
[self.driver.container, volume['id'],
'all', igroup, lun_id],
[volume['id'], True]
).AndReturn(response)
self.m.ReplayAll()
self.assertEqual(self.driver._export_lun(volume, connector, igroup),
lun_id)
self.m.VerifyAll()
def testExportLun_ExportFailsWithException(self):
volume = self.volume1
lun_id = '1'
igroup = 'test-igroup-1'
connector = {'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
response = {'code': 0, 'message': ''}
exception = v6000_common.ViolinBackendErr
self.m.StubOutWithMock(self.driver.lun_tracker,
'get_lun_id_for_volume')
self.m.StubOutWithMock(self.driver, '_send_cmd_and_verify')
self.driver.lun_tracker.get_lun_id_for_volume(volume).AndReturn(lun_id)
self.driver._send_cmd_and_verify(self.m_conn.lun.export_lun,
self.driver._wait_for_exportstate,
mox.IsA(str),
[self.driver.container, volume['id'],
'all', igroup, lun_id],
[volume['id'], True]
).AndRaise(exception('failed!'))
self.m.ReplayAll()
self.assertRaises(exception, self.driver._export_lun, volume,
connector, igroup)
self.m.VerifyAll()
def testUnexportLun(self):
volume = self.volume1
response = {'code': 0, 'message': ''}
self.m.StubOutWithMock(self.driver, '_send_cmd_and_verify')
self.driver._send_cmd_and_verify(
self.m_conn.lun.unexport_lun,
self.driver._wait_for_exportstate,
mox.IsA(str),
[self.driver.container, volume['id'],
'all', 'all', 'auto'],
[volume['id'], False]).AndReturn(response)
self.m.ReplayAll()
self.assertTrue(self.driver._unexport_lun(volume) is None)
self.m.VerifyAll()
def testUnexportLun_UnexportFailsWithException(self):
volume = self.volume1
response = {'code': 0, 'message': ''}
exception = v6000_common.ViolinBackendErr
self.m.StubOutWithMock(self.driver, '_send_cmd_and_verify')
self.driver._send_cmd_and_verify(self.m_conn.lun.unexport_lun,
self.driver._wait_for_exportstate,
mox.IsA(str),
[self.driver.container, volume['id'],
'all', 'all', 'auto'],
[volume['id'], False]
).AndRaise(exception('failed!'))
self.m.ReplayAll()
self.assertRaises(exception, self.driver._unexport_lun, volume)
self.m.VerifyAll()
# TODO(rdl) missing tests
#def testExportSnapshot(self):
#def testUnExportSnapshot(self):
def testAddIgroupMember(self):
volume = self.volume1
igroup = 'test-group-1'
connector = {'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
wwpns = ['wwn.50:01:43:80:18:6b:3f:65', 'wwn.50:01:43:80:18:6b:3f:67']
response = {'code': 0, 'message': 'success'}
self.m.StubOutWithMock(self.driver, '_convert_wwns_openstack_to_vmem')
self.driver._convert_wwns_openstack_to_vmem(
connector['wwpns']).AndReturn(wwpns)
self.m_conn.igroup.add_initiators(igroup,
wwpns).AndReturn(response)
self.m.ReplayAll()
self.assertTrue(self.driver._add_igroup_member(connector, igroup)
is None)
self.m.VerifyAll()
def testUpdateStats(self):
backend_name = self.config.volume_backend_name
vendor_name = "Violin Memory, Inc."
tot_bytes = 100 * 1024 * 1024 * 1024
free_bytes = 50 * 1024 * 1024 * 1024
bn0 = '/cluster/state/master_id'
resp0 = {'/cluster/state/master_id': '1'}
bn1 = "/vshare/state/global/1/container/myContainer/total_bytes"
bn2 = "/vshare/state/global/1/container/myContainer/free_bytes"
response = {bn1: tot_bytes, bn2: free_bytes}
self.m_conn.basic.get_node_values(bn0).AndReturn(resp0)
self.m_conn.basic.get_node_values([bn1, bn2]).AndReturn(response)
self.m.ReplayAll()
self.assertTrue(self.driver._update_stats() is None)
self.assertEqual(self.driver.stats['total_capacity_gb'], 100)
self.assertEqual(self.driver.stats['free_capacity_gb'], 50)
self.assertEqual(self.driver.stats['volume_backend_name'],
backend_name)
self.assertEqual(self.driver.stats['vendor_name'], vendor_name)
self.m.VerifyAll()
def testUpdateStats_DataQueryFails(self):
backend_name = self.config.volume_backend_name
vendor_name = "Violin Memory, Inc."
bn0 = '/cluster/state/master_id'
resp0 = {'/cluster/state/master_id': '1'}
bn1 = "/vshare/state/global/1/container/myContainer/total_bytes"
bn2 = "/vshare/state/global/1/container/myContainer/free_bytes"
self.m_conn.basic.get_node_values(bn0).AndReturn(resp0)
self.m_conn.basic.get_node_values([bn1, bn2]).AndReturn({})
self.m.ReplayAll()
self.assertTrue(self.driver._update_stats() is None)
self.assertEqual(self.driver.stats['total_capacity_gb'], "unknown")
self.assertEqual(self.driver.stats['free_capacity_gb'], "unknown")
self.assertEqual(self.driver.stats['volume_backend_name'],
backend_name)
self.assertEqual(self.driver.stats['vendor_name'], vendor_name)
self.m.VerifyAll()
def testGetActiveFcTargets(self):
bn0 = '/vshare/state/global/*'
resp0 = {'/vshare/state/global/1': 1,
'/vshare/state/global/2': 2}
bn1 = '/vshare/state/global/1/target/fc/**'
resp1 = {'/vshare/state/global/1/target/fc/hba-a1/wwn':
'wwn.21:00:00:24:ff:45:fb:22'}
bn2 = '/vshare/state/global/2/target/fc/**'
resp2 = {'/vshare/state/global/2/target/fc/hba-a1/wwn':
'wwn.21:00:00:24:ff:45:e2:30'}
self.m_conn.basic.get_node_values(bn0).AndReturn(resp0)
self.m_conn.basic.get_node_values(bn2).AndReturn(resp2)
self.m_conn.basic.get_node_values(bn1).AndReturn(resp1)
result = ['21000024ff45e230', '21000024ff45fb22']
self.m.ReplayAll()
self.assertEqual(self.driver._get_active_fc_targets(), result)
self.m.VerifyAll()
def testConvertWWNsOpenstackToVMEM(self):
vmem_wwns = ['wwn.50:01:43:80:18:6b:3f:65']
openstack_wwns = ['50014380186b3f65']
result = self.driver._convert_wwns_openstack_to_vmem(openstack_wwns)
self.assertEqual(result, vmem_wwns)
def testsConvertWWNsVMEMToOpenstack(self):
vmem_wwns = ['wwn.50:01:43:80:18:6b:3f:65']
openstack_wwns = ['50014380186b3f65']
result = self.driver._convert_wwns_vmem_to_openstack(vmem_wwns)
self.assertEqual(result, openstack_wwns)
| apache-2.0 | -8,490,906,755,414,301,000 | 44.00274 | 87 | 0.602216 | false |
jjtoharia/Kaggle_Intel-MobileODT-Cervical-Cancer-Screening | jjtz_intel_funciones.py | 1 | 3570 | #!/usr/bin/env python3
# I want utf8: áéíóú
from PIL import ImageFilter, ImageStat, Image, ImageDraw
from multiprocessing import Pool, cpu_count
# conda install --channel https://conda.anaconda.org/menpo opencv3
from cv2 import imread as cv2_imread, resize as cv2_resize, INTER_AREA as cv2_INTER_AREA # http://tanbakuchi.com/posts/comparison-of-openv-interpolation-algorithms/
import time
def timefunc(f):
def f_timer(*args, **kwargs):
start = time.time()
result = f(*args, **kwargs)
end = time.time()
        print(f.__name__, ': ', '{:,.4f}'.format(end - start), ' secs.')
return(result)
return(f_timer)
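# Example usage (illustrative sketch; 'slow_sum' is a made-up function):
#
#     @timefunc
#     def slow_sum(n):
#         return(sum(range(n)))
#
#     slow_sum(10 ** 6)   # prints the function name and the elapsed time in seconds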
from datetime import datetime
def jj_datetime(): return(datetime.now().strftime('%Y-%m-%d %H:%M:%S -')) # print(jj_datetime(), xxxx)
def jj_datetime_filename(): return(datetime.now().strftime('%Y%m%d_%H%M%S'))
def jj_input_filename_suffix(n_resize_to, b_con_muestreo): return('_{:}'.format(n_resize_to) + ('_prueba' if b_con_muestreo else ''))
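# e.g. jj_input_filename_suffix(64, True) returns '_64_prueba' and
#      jj_input_filename_suffix(256, False) returns '_256'
#      ('_prueba' is the suffix used when b_con_muestreo is True, i.e. a sampled test run).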
def jj_safe_exec(ret_if_exception, function, *args):
try:
return(function(*args))
except:
return(ret_if_exception)
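# Example (illustrative): fall back to a default value instead of raising, e.g.
#
#     jj_safe_exec(0.0, lambda a, b: a / b, 1.0, 0.0)   # returns 0.0 (ZeroDivisionError is swallowed)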
def im_multi(path):
from PIL import ImageFilter, ImageStat, Image, ImageDraw
try:
im_stats_im_ = Image.open(path)
return [path, {'size': im_stats_im_.size}]
except:
print(path)
return [path, {'size': (0,0)}]
@timefunc
def im_stats(im_stats_df):
from multiprocessing import Pool, cpu_count
im_stats_d = {}
p = Pool(cpu_count() - 1)
#ret = [p.apply_async(im_multi, x) for x in im_stats_df['path']] # Y luego hay que usar ret[n].get() para sacar cada resultado!
ret = p.map(im_multi, im_stats_df['path'])
for i in range(len(ret)):
im_stats_d[ret[i][0]] = ret[i][1] # im_stats_d[ret[i].get()[0]] = ret[i].get()[1]
im_stats_df['size'] = im_stats_df['path'].map(lambda x: ' '.join(str(s) for s in im_stats_d[x]['size']))
return im_stats_df
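# Example usage (illustrative sketch; the paths are made up and a pandas
# DataFrame with a 'path' column is assumed):
#
#     import pandas as pd
#     df = pd.DataFrame({'path': ['train/Type_1/0.jpg', 'train/Type_1/1.jpg']})
#     df = im_stats(df)   # adds a 'size' column such as '3264 2448' per image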
def get_im_cv2_32(path):
img = cv2_imread(path)
    resized = cv2_resize(img, (32, 32), interpolation=cv2_INTER_AREA)  # use (64, 64) for a larger size
return [path, resized]
def get_im_cv2_64(path):
img = cv2_imread(path)
    resized = cv2_resize(img, (64, 64), interpolation=cv2_INTER_AREA)
return [path, resized]
def get_im_cv2_256(path):
img = cv2_imread(path)
    resized = cv2_resize(img, (256, 256), interpolation=cv2_INTER_AREA)
return [path, resized]
def get_im_cv2_512(path):
img = cv2_imread(path)
    resized = cv2_resize(img, (512, 512), interpolation=cv2_INTER_AREA)
return [path, resized]
def get_im_cv2_1024(path):
img = cv2_imread(path)
    resized = cv2_resize(img, (1024, 1024), interpolation=cv2_INTER_AREA)
return [path, resized]
@timefunc
def normalize_image_features(paths, resize_to = 32):
import numpy as np
imf_d = {}
p = Pool(cpu_count())
if resize_to == 256:
ret = p.map(get_im_cv2_256, paths)
elif resize_to == 64:
ret = p.map(get_im_cv2_64, paths)
elif resize_to == 512:
ret = p.map(get_im_cv2_512, paths)
elif resize_to == 1024:
ret = p.map(get_im_cv2_1024, paths)
else:
ret = p.map(get_im_cv2_32, paths)
for i in range(len(ret)):
imf_d[ret[i][0]] = ret[i][1]
ret = []
fdata = [imf_d[f] for f in paths]
fdata = np.array(fdata, dtype=np.uint8)
fdata = fdata.transpose((0, 3, 1, 2))
fdata = fdata.astype('float32') # fdata.astype('float64')
fdata = fdata / 255
return fdata
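# Example usage (illustrative sketch; the file paths are made up):
#
#     paths = ['train/Type_1/0.jpg', 'train/Type_2/1.jpg']
#     x_train = normalize_image_features(paths, resize_to=64)
#     # x_train.shape == (2, 3, 64, 64), dtype float32, values scaled to [0, 1]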
| mit | 8,852,059,636,198,243,000 | 32.95098 | 164 | 0.596914 | false |
pcapriotti/pledger | tests/test_directive.py | 1 | 1746 | from pledger.account import Account, AccountFactory
from pledger.parser import Parser
from pledger.directive import *
from pledger.ledger_processor import LedgerProcessor
import pytest
class ProcessorStub(object):
def __init__(self):
self.repo = AccountFactory()
self.account = self.repo.root()
self.included = []
def add_account_prefix(self, prefix):
self.account = self.account[prefix]
def remove_account_prefix(self):
self.account = self.account.parent
def include(self, filename):
self.included.append(filename)
@pytest.fixture
def processor(parser):
return ProcessorStub()
def test_directive_registry():
assert Directive.directives['account'] == AccountDirective
assert Directive.directives.get('non-existing-directive') is None
def test_unsupported_directive(parser):
with pytest.raises(UnsupportedDirective) as e:
parser.parse_directive("!nonexisting")
assert 'nonexisting' == str(e.value)
def test_account_directive(processor):
directive = AccountDirective("Assets")
assert processor.account.name == ""
directive.execute(processor)
assert processor.account.name == "Assets"
def test_end_account_directive(processor):
directive = EndAccountDirective()
processor.add_account_prefix("Assets")
directive.execute(processor)
assert processor.account.name == ""
def test_include_directive(processor):
directive = IncludeDirective("test.dat")
assert processor.included == []
directive.execute(processor)
assert processor.included == ["test.dat"]
def test_directive_parsing(parser):
directive = parser.parse_directive("!include test.dat")
assert directive.filename == "test.dat"
| mit | -5,265,927,107,568,799,000 | 26.28125 | 69 | 0.715922 | false |
natano/tiget | tiget/core/cmds/plugin.py | 1 | 1498 | import pkg_resources
from tiget.cmds import Cmd
from tiget.plugins import load_plugin, unload_plugin, reload_plugin, plugins
__all__ = ['Load', 'Reload', 'Unload']
class Load(Cmd):
description = 'load plugin'
def setup(self):
self.parser.add_argument('plugin_name', nargs='?')
def do(self, args):
if args.plugin_name:
try:
load_plugin(args.plugin_name)
except ImportError as e:
raise self.error(e)
else:
self.print('Available plugins:')
entry_points = pkg_resources.iter_entry_points('tiget.plugins')
names = set(ep.name for ep in entry_points)
names.update(plugins.keys())
for name in sorted(names):
loaded = name in plugins
self.print('[{}] {}'.format('*' if loaded else ' ', name))
class Reload(Cmd):
description = 'reload plugin'
def setup(self):
self.parser.add_argument('plugin_name')
def do(self, args):
try:
reload_plugin(args.plugin_name)
except KeyError:
raise self.error('no plugin "{}" loaded'.format(args.plugin_name))
class Unload(Cmd):
description = 'unload plugin'
def setup(self):
self.parser.add_argument('plugin_name')
def do(self, args):
try:
unload_plugin(args.plugin_name)
except KeyError:
raise self.error('no plugin "{}" loaded'.format(args.plugin_name))
| isc | 7,311,382,799,280,952,000 | 26.236364 | 78 | 0.577437 | false |
popazerty/beyonwiz-4.1 | lib/python/Screens/Wizard.py | 1 | 25373 | from boxbranding import getMachineBrand, getMachineName
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from enigma import eTimer, eEnv
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.config import config, ConfigText, ConfigPassword, KEY_LEFT, KEY_RIGHT, KEY_0, KEY_DELETE, KEY_BACKSPACE, KEY_ASCII, ConfigSelection, ConfigBoolean
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.Slider import Slider
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigList
from Components.Sources.List import List
class WizardSummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent)
self["text"] = StaticText("")
self.onShow.append(self.setCallback)
def setCallback(self):
self.parent.setLCDTextCallback(self.setText)
def setText(self, text):
self["text"].setText(text)
class Wizard(Screen):
instance = None
def createSummary(self):
print "WizardCreateSummary"
return WizardSummary
class parseWizard(ContentHandler):
def __init__(self, wizard):
self.isPointsElement, self.isReboundsElement = 0, 0
self.wizard = wizard
self.currContent = ""
self.lastStep = 0
def startElement(self, name, attrs):
#print "startElement", name
self.currContent = name
if name == "step":
self.lastStep += 1
if attrs.has_key('id'):
id = str(attrs.get('id'))
else:
id = ""
#print "id:", id
if attrs.has_key('nextstep'):
nextstep = str(attrs.get('nextstep'))
else:
nextstep = None
if attrs.has_key('timeout'):
timeout = int(attrs.get('timeout'))
else:
timeout = None
if attrs.has_key('timeoutaction'):
timeoutaction = str(attrs.get('timeoutaction'))
else:
timeoutaction = 'nextpage'
if attrs.has_key('timeoutstep'):
timeoutstep = str(attrs.get('timeoutstep'))
else:
timeoutstep = ''
self.wizard[self.lastStep] = {"id": id, "condition": "", "text": "", "timeout": timeout, "timeoutaction": timeoutaction, "timeoutstep": timeoutstep, "list": [], "config": {"screen": None, "args": None, "type": "" }, "code": "", "codeafter": "", "code_async": "", "codeafter_async": "", "nextstep": nextstep}
if attrs.has_key('laststep'):
self.wizard[self.lastStep]["laststep"] = str(attrs.get('laststep'))
elif name == "text":
self.wizard[self.lastStep]["text"] = str(attrs.get('value')).replace("\\n", "\n")
elif name == "displaytext":
self.wizard[self.lastStep]["displaytext"] = str(attrs.get('value')).replace("\\n", "\n")
elif name == "list":
if attrs.has_key('type'):
if attrs["type"] == "dynamic":
self.wizard[self.lastStep]["dynamiclist"] = attrs.get("source")
if attrs["type"] == "config":
self.wizard[self.lastStep]["configelement"] = attrs.get("configelement")
if attrs.has_key("evaluation"):
#print "evaluation"
self.wizard[self.lastStep]["listevaluation"] = attrs.get("evaluation")
if attrs.has_key("onselect"):
self.wizard[self.lastStep]["onselect"] = attrs.get("onselect")
elif name == "listentry":
self.wizard[self.lastStep]["list"].append((str(attrs.get('caption')), str(attrs.get('step'))))
elif name == "config":
type = str(attrs.get('type'))
self.wizard[self.lastStep]["config"]["type"] = type
if type == "ConfigList" or type == "standalone":
try:
exec "from Screens." + str(attrs.get('module')) + " import *"
except:
exec "from " + str(attrs.get('module')) + " import *"
self.wizard[self.lastStep]["config"]["screen"] = eval(str(attrs.get('screen')))
if attrs.has_key('args'):
#print "has args"
self.wizard[self.lastStep]["config"]["args"] = str(attrs.get('args'))
elif type == "dynamic":
self.wizard[self.lastStep]["config"]["source"] = str(attrs.get('source'))
if attrs.has_key('evaluation'):
self.wizard[self.lastStep]["config"]["evaluation"] = str(attrs.get('evaluation'))
elif name == "code":
self.async_code = attrs.has_key('async') and str(attrs.get('async')) == "yes"
if attrs.has_key('pos') and str(attrs.get('pos')) == "after":
self.codeafter = True
else:
self.codeafter = False
elif name == "condition":
pass
def endElement(self, name):
self.currContent = ""
if name == 'code':
if self.async_code:
if self.codeafter:
self.wizard[self.lastStep]["codeafter_async"] = self.wizard[self.lastStep]["codeafter_async"].strip()
else:
self.wizard[self.lastStep]["code_async"] = self.wizard[self.lastStep]["code_async"].strip()
else:
if self.codeafter:
self.wizard[self.lastStep]["codeafter"] = self.wizard[self.lastStep]["codeafter"].strip()
else:
self.wizard[self.lastStep]["code"] = self.wizard[self.lastStep]["code"].strip()
elif name == 'condition':
self.wizard[self.lastStep]["condition"] = self.wizard[self.lastStep]["condition"].strip()
elif name == 'step':
#print "Step number", self.lastStep, ":", self.wizard[self.lastStep]
pass
def characters(self, ch):
if self.currContent == "code":
if self.async_code:
if self.codeafter:
self.wizard[self.lastStep]["codeafter_async"] = self.wizard[self.lastStep]["codeafter_async"] + ch
else:
self.wizard[self.lastStep]["code_async"] = self.wizard[self.lastStep]["code_async"] + ch
else:
if self.codeafter:
self.wizard[self.lastStep]["codeafter"] = self.wizard[self.lastStep]["codeafter"] + ch
else:
self.wizard[self.lastStep]["code"] = self.wizard[self.lastStep]["code"] + ch
elif self.currContent == "condition":
self.wizard[self.lastStep]["condition"] = self.wizard[self.lastStep]["condition"] + ch
def __init__(self, session, showSteps = True, showStepSlider = True, showList = True, showConfig = True):
Screen.__init__(self, session)
self.isLastWizard = False # can be used to skip a "goodbye"-screen in a wizard
self.stepHistory = []
self.wizard = {}
parser = make_parser()
if not isinstance(self.xmlfile, list):
self.xmlfile = [self.xmlfile]
# print "Reading ", self.xmlfile
wizardHandler = self.parseWizard(self.wizard)
parser.setContentHandler(wizardHandler)
for xmlfile in self.xmlfile:
if xmlfile[0] != '/':
parser.parse(eEnv.resolve('${datadir}/enigma2/') + xmlfile)
else:
parser.parse(xmlfile)
self.showSteps = showSteps
self.showStepSlider = showStepSlider
self.showList = showList
self.showConfig = showConfig
self.numSteps = len(self.wizard)
self.currStep = self.getStepWithID("start") + 1
self.timeoutTimer = eTimer()
self.timeoutTimer.callback.append(self.timeoutCounterFired)
self["text"] = Label()
if showConfig:
self["config"] = ConfigList([], session = session)
if self.showSteps:
self["step"] = Label()
if self.showStepSlider:
self["stepslider"] = Slider(1, self.numSteps)
if self.showList:
self.list = []
self["list"] = List(self.list, enableWrapAround = True)
self["list"].onSelectionChanged.append(self.selChanged)
#self["list"] = MenuList(self.list, enableWrapAround = True)
self.onShown.append(self.updateValues)
self.configInstance = None
self.currentConfigIndex = None
Wizard.instance = self
self.lcdCallbacks = []
self.disableKeys = False
self["actions"] = NumberActionMap(["WizardActions", "DirectionActions", "NumberActions", "ColorActions", "SetupActions", "InputAsciiActions", "KeyboardInputActions"],
{
"gotAsciiCode": self.keyGotAscii,
"ok": self.ok,
"back": self.back,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down,
"red": self.red,
"green": self.green,
"yellow": self.yellow,
"blue":self.blue,
"deleteBackward": self.deleteBackward,
"deleteForward": self.deleteForward,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self["VirtualKB"] = NumberActionMap(["VirtualKeyboardActions"],
{
"showVirtualKeyboard": self.KeyText,
}, -2)
self["VirtualKB"].setEnabled(False)
def red(self):
# print "red"
pass
def green(self):
# print "green"
pass
def yellow(self):
# print "yellow"
pass
def blue(self):
# print "blue"
pass
def deleteForward(self):
self.resetCounter()
if self.wizard[self.currStep]["config"]["screen"] is not None:
self.configInstance.keyDelete()
elif self.wizard[self.currStep]["config"]["type"] == "dynamic":
self["config"].handleKey(KEY_DELETE)
# print "deleteForward"
def deleteBackward(self):
self.resetCounter()
if self.wizard[self.currStep]["config"]["screen"] is not None:
self.configInstance.keyBackspace()
elif self.wizard[self.currStep]["config"]["type"] == "dynamic":
self["config"].handleKey(KEY_BACKSPACE)
# print "deleteBackward"
def setLCDTextCallback(self, callback):
self.lcdCallbacks.append(callback)
def back(self):
if self.disableKeys:
return
# print "getting back..."
# print "stepHistory:", self.stepHistory
if len(self.stepHistory) > 1:
self.currStep = self.stepHistory[-2]
self.stepHistory = self.stepHistory[:-2]
else:
self.session.openWithCallback(self.exitWizardQuestion, MessageBox, (_("Are you sure you want to exit this wizard?") ) )
if self.currStep < 1:
self.currStep = 1
# print "currStep:", self.currStep
# print "new stepHistory:", self.stepHistory
self.updateValues()
# print "after updateValues stepHistory:", self.stepHistory
def exitWizardQuestion(self, ret = False):
if ret:
self.markDone()
self.exit()
def markDone(self):
pass
def exit(self):
Wizard.instance = None
self.close()
def getStepWithID(self, id):
# print "getStepWithID:", id
count = 0
for x in self.wizard.keys():
if self.wizard[x]["id"] == id:
print "result:", count
return count
count += 1
# print "result: nothing"
return 0
def finished(self, gotoStep = None, *args, **kwargs):
# print "finished"
currStep = self.currStep
if self.updateValues not in self.onShown:
self.onShown.append(self.updateValues)
if self.showConfig:
if self.wizard[currStep]["config"]["type"] == "dynamic":
eval("self." + self.wizard[currStep]["config"]["evaluation"])()
if self.showList:
if len(self.wizard[currStep]["evaluatedlist"]) > 0:
# print "current:", self["list"].current
nextStep = self["list"].current[1]
if self.wizard[currStep].has_key("listevaluation"):
exec("self." + self.wizard[self.currStep]["listevaluation"] + "('" + nextStep + "')")
elif (self.wizard[currStep].has_key("configelement")):
configelement = self.wizard[currStep]["configelement"]
element = eval(configelement)
element.value = self["list"].current[1]
element.save()
else:
self.currStep = self.getStepWithID(nextStep)
print_now = True
if (currStep == self.numSteps and self.wizard[currStep]["nextstep"] is None) or self.wizard[currStep]["id"] == "end": # wizard finished
# print "wizard finished"
self.markDone()
self.exit()
else:
self.codeafter = True
self.runCode(self.wizard[currStep]["codeafter"])
self.prevStep = currStep
self.gotoStep = gotoStep
if not self.runCode(self.wizard[currStep]["codeafter_async"]):
self.afterAsyncCode()
else:
if self.updateValues in self.onShown:
self.onShown.remove(self.updateValues)
# if print_now:
# print "Now: " + str(self.currStep)
def ok(self):
# print "OK"
if self.disableKeys:
return
currStep = self.currStep
if self.showConfig:
if self.wizard[currStep]["config"]["screen"] is not None:
# TODO: don't die, if no run() is available
# there was a try/except here, but i can't see a reason
# for this. If there is one, please do a more specific check
# and/or a comment in which situation there is no run()
if callable(getattr(self.configInstance, "runAsync", None)):
if self.updateValues in self.onShown:
self.onShown.remove(self.updateValues)
self.configInstance.runAsync(self.finished)
return
else:
self.configInstance.run()
self.finished()
def keyNumberGlobal(self, number):
if self.wizard[self.currStep]["config"]["screen"] is not None:
self.configInstance.keyNumberGlobal(number)
elif self.wizard[self.currStep]["config"]["type"] == "dynamic":
self["config"].handleKey(KEY_0 + number)
def keyGotAscii(self):
if self.wizard[self.currStep]["config"]["screen"] is not None:
self["config"].handleKey(KEY_ASCII)
elif self.wizard[self.currStep]["config"]["type"] == "dynamic":
self["config"].handleKey(KEY_ASCII)
def left(self):
self.resetCounter()
if self.wizard[self.currStep]["config"]["screen"] is not None:
self.configInstance.keyLeft()
elif self.wizard[self.currStep]["config"]["type"] == "dynamic":
self["config"].handleKey(KEY_LEFT)
# print "left"
def right(self):
self.resetCounter()
if self.wizard[self.currStep]["config"]["screen"] is not None:
self.configInstance.keyRight()
elif self.wizard[self.currStep]["config"]["type"] == "dynamic":
self["config"].handleKey(KEY_RIGHT)
# print "right"
def up(self):
self.resetCounter()
if self.showConfig and self.wizard[self.currStep]["config"]["screen"] is not None or self.wizard[self.currStep]["config"]["type"] == "dynamic":
self["config"].instance.moveSelection(self["config"].instance.moveUp)
self.handleInputHelpers()
elif self.showList and len(self.wizard[self.currStep]["evaluatedlist"]) > 0:
self["list"].selectPrevious()
if self.wizard[self.currStep].has_key("onselect"):
print "current:", self["list"].current
self.selection = self["list"].current[-1]
#self.selection = self.wizard[self.currStep]["evaluatedlist"][self["list"].l.getCurrentSelectionIndex()][1]
exec("self." + self.wizard[self.currStep]["onselect"] + "()")
# print "up"
def down(self):
self.resetCounter()
if self.showConfig and self.wizard[self.currStep]["config"]["screen"] is not None or self.wizard[self.currStep]["config"]["type"] == "dynamic":
self["config"].instance.moveSelection(self["config"].instance.moveDown)
self.handleInputHelpers()
elif self.showList and len(self.wizard[self.currStep]["evaluatedlist"]) > 0:
#self["list"].instance.moveSelection(self["list"].instance.moveDown)
self["list"].selectNext()
if self.wizard[self.currStep].has_key("onselect"):
# print "current:", self["list"].current
#self.selection = self.wizard[self.currStep]["evaluatedlist"][self["list"].l.getCurrentSelectionIndex()][1]
#exec("self." + self.wizard[self.currStep]["onselect"] + "()")
self.selection = self["list"].current[-1]
#self.selection = self.wizard[self.currStep]["evaluatedlist"][self["list"].l.getCurrentSelectionIndex()][1]
exec("self." + self.wizard[self.currStep]["onselect"] + "()")
# print "down"
def selChanged(self):
self.resetCounter()
if self.showConfig and self.wizard[self.currStep]["config"]["screen"] is not None:
self["config"].instance.moveSelection(self["config"].instance.moveUp)
elif self.showList and len(self.wizard[self.currStep]["evaluatedlist"]) > 0:
if self.wizard[self.currStep].has_key("onselect"):
self.selection = self["list"].current[-1]
print "self.selection:", self.selection
exec("self." + self.wizard[self.currStep]["onselect"] + "()")
def resetCounter(self):
self.timeoutCounter = self.wizard[self.currStep]["timeout"]
def runCode(self, code):
if code != "":
# print "code", code
exec code
return True
return False
def getTranslation(self, text):
return _(text).replace("%s %s","%s %s" % (getMachineBrand(), getMachineName()))
def updateText(self, firstset = False):
text = self.getTranslation(self.wizard[self.currStep]["text"])
if "[timeout]" in text:
text = text.replace("[timeout]", str(self.timeoutCounter))
self["text"].setText(text)
else:
if firstset:
self["text"].setText(text)
def updateValues(self):
# print "Updating values in step " + str(self.currStep)
# calling a step which doesn't exist can only happen if the condition in the last step is not fulfilled
# if a non-existing step is called, end the wizard
if self.currStep > len(self.wizard):
self.markDone()
self.exit()
return
self.timeoutTimer.stop()
if self.configInstance is not None:
# remove callbacks
self.configInstance["config"].onSelectionChanged = []
del self.configInstance["config"]
self.configInstance.doClose()
self.configInstance = None
self.condition = True
exec (self.wizard[self.currStep]["condition"])
if not self.condition:
# print "keys*******************:", self.wizard[self.currStep].keys()
if self.wizard[self.currStep].has_key("laststep"): # exit wizard, if condition of laststep doesn't hold
self.markDone()
self.exit()
return
else:
self.currStep += 1
self.updateValues()
else:
if self.wizard[self.currStep].has_key("displaytext"):
displaytext = self.getTranslation(self.wizard[self.currStep]["displaytext"])
# print "set LCD text"
for x in self.lcdCallbacks:
x(displaytext)
if len(self.stepHistory) == 0 or self.stepHistory[-1] != self.currStep:
self.stepHistory.append(self.currStep)
# print "wizard step:", self.wizard[self.currStep]
if self.showSteps:
self["step"].setText(self.getTranslation("Step ") + str(self.currStep) + "/" + str(self.numSteps))
if self.showStepSlider:
self["stepslider"].setValue(self.currStep)
if self.wizard[self.currStep]["timeout"] is not None:
self.resetCounter()
self.timeoutTimer.start(1000)
# print "wizard text", self.getTranslation(self.wizard[self.currStep]["text"])
self.updateText(firstset = True)
if self.wizard[self.currStep].has_key("displaytext"):
displaytext = self.getTranslation(self.wizard[self.currStep]["displaytext"])
# print "set LCD text"
for x in self.lcdCallbacks:
x(displaytext)
self.codeafter=False
self.runCode(self.wizard[self.currStep]["code"])
if self.runCode(self.wizard[self.currStep]["code_async"]):
if self.updateValues in self.onShown:
self.onShown.remove(self.updateValues)
else:
self.afterAsyncCode()
def afterAsyncCode(self):
if not self.updateValues in self.onShown:
self.onShown.append(self.updateValues)
if self.codeafter:
if self.wizard[self.prevStep]["nextstep"] is not None:
self.currStep = self.getStepWithID(self.wizard[self.prevStep]["nextstep"])
if self.gotoStep is not None:
self.currStep = self.getStepWithID(self.gotoStep)
self.currStep += 1
self.updateValues()
# print "Now: " + str(self.currStep)
else:
if self.showList:
# print "showing list,", self.currStep
index = 0
for renderer in self.renderer:
rootrenderer = renderer
while renderer.source is not None:
if renderer.source is self["list"]:
print "setZPosition"
rootrenderer.instance.setZPosition(1)
renderer = renderer.source
#self["list"].instance.setZPosition(1)
self.list = []
if self.wizard[self.currStep].has_key("dynamiclist"):
# print "dynamic list, calling", self.wizard[self.currStep]["dynamiclist"]
newlist = eval("self." + self.wizard[self.currStep]["dynamiclist"] + "()")
#self.wizard[self.currStep]["evaluatedlist"] = []
for entry in newlist:
#self.wizard[self.currStep]["evaluatedlist"].append(entry)
self.list.append(entry)
#del self.wizard[self.currStep]["dynamiclist"]
if (self.wizard[self.currStep].has_key("configelement")):
configelement = self.wizard[self.currStep]["configelement"]
print "configelement:", configelement
element = eval(configelement)
if isinstance(element, ConfigSelection):
for choice in element.choices.choices:
print "choice:", choice
if configelement == "config.timezone.val":
self.list.append((choice, choice))
else:
self.list.append((choice[1], choice[0]))
index = element.getIndex()
elif isinstance(element, ConfigBoolean):
self.list.append((_(element.descriptions[True]), True))
self.list.append((_(element.descriptions[False]), False))
index = 1
if element.value:
index = 0
if len(self.wizard[self.currStep]["list"]) > 0:
#self["list"].instance.setZPosition(2)
for x in self.wizard[self.currStep]["list"]:
self.list.append((self.getTranslation(x[0]), x[1]))
self.wizard[self.currStep]["evaluatedlist"] = self.list
self["list"].list = self.list
self["list"].index = index
else:
self["list"].hide()
if self.showConfig:
print "showing config"
# self["config"].instance.setZPosition(1)
if self.wizard[self.currStep]["config"]["type"] == "dynamic":
print "config type is dynamic"
self["config"].instance.setZPosition(2)
self["config"].l.setList(eval("self." + self.wizard[self.currStep]["config"]["source"])())
elif self.wizard[self.currStep]["config"]["screen"] is not None:
if self.wizard[self.currStep]["config"]["type"] == "standalone":
print "Type is standalone"
self.session.openWithCallback(self.ok, self.wizard[self.currStep]["config"]["screen"])
else:
self["config"].instance.setZPosition(2)
print "wizard screen", self.wizard[self.currStep]["config"]["screen"]
if self.wizard[self.currStep]["config"]["args"] is None:
self.configInstance = self.session.instantiateDialog(self.wizard[self.currStep]["config"]["screen"])
else:
self.configInstance = self.session.instantiateDialog(self.wizard[self.currStep]["config"]["screen"], eval(self.wizard[self.currStep]["config"]["args"]))
self["config"].l.setList(self.configInstance["config"].list)
callbacks = self.configInstance["config"].onSelectionChanged
self.configInstance["config"].destroy()
print "clearConfigList", self.configInstance["config"], self["config"]
self.configInstance["config"] = self["config"]
self.configInstance["config"].onSelectionChanged = callbacks
print "clearConfigList", self.configInstance["config"], self["config"]
else:
self["config"].l.setList([])
self.handleInputHelpers()
else:
if self.has_key("config"):
self["config"].hide()
def timeoutCounterFired(self):
self.timeoutCounter -= 1
# print "timeoutCounter:", self.timeoutCounter
if self.timeoutCounter == 0:
if self.wizard[self.currStep]["timeoutaction"] == "selectnext":
# print "selection next item"
self.down()
else:
if self.wizard[self.currStep]["timeoutaction"] == "changestep":
self.finished(gotoStep = self.wizard[self.currStep]["timeoutstep"])
self.updateText()
def handleInputHelpers(self):
if self["config"].getCurrent() is not None:
if isinstance(self["config"].getCurrent()[1], ConfigText) or isinstance(self["config"].getCurrent()[1], ConfigPassword):
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(True)
self["VKeyIcon"].boolean = True
if self.has_key("HelpWindow"):
if self["config"].getCurrent()[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
from enigma import ePoint
self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
def KeyText(self):
from Screens.VirtualKeyBoard import VirtualKeyBoard
self.currentConfigIndex = self["config"].getCurrentIndex()
self.session.openWithCallback(self.VirtualKeyBoardCallback, VirtualKeyBoard, title = self["config"].getCurrent()[0], text = self["config"].getCurrent()[1].value)
def VirtualKeyBoardCallback(self, callback = None):
if callback is not None and len(callback):
if isinstance(self["config"].getCurrent()[1], ConfigText) or isinstance(self["config"].getCurrent()[1], ConfigPassword):
if self.has_key("HelpWindow"):
if self["config"].getCurrent()[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
from enigma import ePoint
self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
self["config"].instance.moveSelectionTo(self.currentConfigIndex)
self["config"].setCurrentIndex(self.currentConfigIndex)
self["config"].getCurrent()[1].setValue(callback)
self["config"].invalidate(self["config"].getCurrent())
class WizardManager:
def __init__(self):
self.wizards = []
def registerWizard(self, wizard, precondition, priority = 0):
self.wizards.append((wizard, precondition, priority))
def getWizards(self):
# x[1] is precondition
for wizard in self.wizards:
wizard[0].isLastWizard = False
if len(self.wizards) > 0:
self.wizards[-1][0].isLastWizard = True
return [(x[2], x[0]) for x in self.wizards if x[1] == 1]
wizardManager = WizardManager()
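# Example usage (illustrative; 'MyWizard' and 'precondition' are made up):
#
#     from Screens.Wizard import wizardManager
#     wizardManager.registerWizard(MyWizard, precondition, priority=10)
#
# getWizards() later returns (priority, wizard) pairs for every registered
# wizard whose precondition equals 1, with isLastWizard set True on the last
# registered wizard.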
| gpl-2.0 | 5,259,132,894,376,810,000 | 35.143875 | 311 | 0.676664 | false |
kingmotley/SickRage | sickbeard/processTV.py | 1 | 29337 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io/
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
# from functools import wraps
import shutil
import sickbeard
from sickbeard import postProcessor
from sickbeard import db, helpers
from sickbeard import logger
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard import common
from sickbeard import failedProcessor
from sickrage.helper.common import is_sync_file, is_torrent_or_nzb_file
from sickrage.helper.encoding import ek, ss
from sickrage.helper.exceptions import EpisodePostProcessingFailedException, ex, FailedPostProcessingFailedException
from unrar2 import RarFile
from unrar2.rar_exceptions import FileOpenError
from unrar2.rar_exceptions import ArchiveHeaderBroken
from unrar2.rar_exceptions import InvalidRARArchive
from unrar2.rar_exceptions import InvalidRARArchiveUsage
from unrar2.rar_exceptions import IncorrectRARPassword
import shutil_custom
shutil.copyfile = shutil_custom.copyfile_custom
class ProcessResult(object): # pylint: disable=too-few-public-methods
def __init__(self):
self.result = True
self.output = ''
self.missedfiles = []
self.aggresult = True
def delete_folder(folder, check_empty=True):
"""
Removes a folder from the filesystem
:param folder: Path to folder to remove
:param check_empty: Boolean, check if the folder is empty before removing it, defaults to True
:return: True on success, False on failure
"""
# check if it's a folder
if not ek(os.path.isdir, folder):
return False
# check if it isn't TV_DOWNLOAD_DIR
if sickbeard.TV_DOWNLOAD_DIR:
if helpers.real_path(folder) == helpers.real_path(sickbeard.TV_DOWNLOAD_DIR):
return False
# check if it's empty folder when wanted checked
if check_empty:
check_files = ek(os.listdir, folder)
if check_files:
logger.log(u"Not deleting folder {0} found the following files: {1}".format(folder, check_files), logger.INFO)
return False
try:
logger.log(u"Deleting folder (if it's empty): {0}".format(folder))
ek(os.rmdir, folder)
except (OSError, IOError) as e:
logger.log(u"Warning: unable to delete folder: {0}: {1}".format(folder, ex(e)), logger.WARNING)
return False
else:
try:
logger.log(u"Deleting folder: " + folder)
shutil.rmtree(folder)
except (OSError, IOError) as e:
logger.log(u"Warning: unable to delete folder: {0}: {1}".format(folder, ex(e)), logger.WARNING)
return False
return True
def delete_files(processPath, notwantedFiles, result, force=False):
"""
Remove files from filesystem
:param processPath: path to process
:param notwantedFiles: files we do not want
:param result: Processor results
:param force: Boolean, force deletion, defaults to false
"""
if not result.result and force:
result.output += logHelper(u"Forcing deletion of files, even though last result was not success", logger.DEBUG)
elif not result.result:
return
# Delete all file not needed
for cur_file in notwantedFiles:
cur_file_path = ek(os.path.join, processPath, cur_file)
if not ek(os.path.isfile, cur_file_path):
            continue  # Skip if the file no longer exists (e.g. it was already removed as an associated file)
result.output += logHelper(u"Deleting file: {0}".format(cur_file), logger.DEBUG)
# check first the read-only attribute
file_attribute = ek(os.stat, cur_file_path)[0]
if not file_attribute & stat.S_IWRITE:
# File is read-only, so make it writeable
result.output += logHelper(u"Changing ReadOnly Flag for file: {0}".format(cur_file), logger.DEBUG)
try:
ek(os.chmod, cur_file_path, stat.S_IWRITE)
except OSError as e:
result.output += logHelper(u"Cannot change permissions of {0}: {1}".format(cur_file_path, ex(e)), logger.DEBUG)
try:
ek(os.remove, cur_file_path)
except OSError as e:
result.output += logHelper(u"Unable to delete file {0}: {1}".format(cur_file, e.strerror), logger.DEBUG)
def logHelper(logMessage, logLevel=logger.INFO):
logger.log(logMessage, logLevel)
return logMessage + u"\n"
# def OneRunPP():
# isRunning = [False]
#
# def decorate(func):
# @wraps(func)
# def func_wrapper(*args, **kargs):
# if isRunning[0]:
# return logHelper(u'Post processor is already running', logger.WARNING)
#
# isRunning[0] = True
# ret = func(*args, **kargs)
# isRunning[0] = False
# return ret
# return func_wrapper
# return decorate
# pylint: disable=too-many-arguments,too-many-branches,too-many-statements,too-many-locals
# @OneRunPP()
def processDir(dirName, nzbName=None, process_method=None, force=False, is_priority=None, delete_on=False, failed=False, proc_type="auto"):
"""
Scans through the files in dirName and processes whatever media files it finds
:param dirName: The folder name to look in
:param nzbName: The NZB name which resulted in this folder being downloaded
:param force: True to postprocess already postprocessed files
:param failed: Boolean for whether or not the download failed
:param proc_type: Type of postprocessing auto or manual
"""
result = ProcessResult()
# if they passed us a real dir then assume it's the one we want
if ek(os.path.isdir, dirName):
dirName = ek(os.path.realpath, dirName)
result.output += logHelper(u"Processing folder {0}".format(dirName), logger.DEBUG)
# if the client and SickRage are not on the same machine translate the directory into a network directory
elif all([sickbeard.TV_DOWNLOAD_DIR,
ek(os.path.isdir, sickbeard.TV_DOWNLOAD_DIR),
ek(os.path.normpath, dirName) == ek(os.path.normpath, sickbeard.TV_DOWNLOAD_DIR)]):
dirName = ek(os.path.join, sickbeard.TV_DOWNLOAD_DIR, ek(os.path.abspath, dirName).split(os.path.sep)[-1])
result.output += logHelper(u"Trying to use folder: {0} ".format(dirName), logger.DEBUG)
# if we didn't find a real dir then quit
if not ek(os.path.isdir, dirName):
result.output += logHelper(u"Unable to figure out what folder to process. "
u"If your downloader and SickRage aren't on the same PC "
u"make sure you fill out your TV download dir in the config.",
logger.DEBUG)
return result.output
path, dirs, files = get_path_dir_files(dirName, nzbName, proc_type)
files = [x for x in files if not is_torrent_or_nzb_file(x)]
SyncFiles = [x for x in files if is_sync_file(x)]
nzbNameOriginal = nzbName
# Don't post process if files are still being synced and option is activated
postpone = SyncFiles and sickbeard.POSTPONE_IF_SYNC_FILES
if not postpone:
result.output += logHelper(u"PostProcessing Path: {0}".format(path), logger.INFO)
result.output += logHelper(u"PostProcessing Dirs: {0}".format(str(dirs)), logger.DEBUG)
videoFiles = [x for x in files if helpers.isMediaFile(x)]
rarFiles = [x for x in files if helpers.isRarFile(x)]
rarContent = []
if rarFiles:
rarContent = unRAR(path, rarFiles, force, result)
files += rarContent
videoFiles += [x for x in rarContent if helpers.isMediaFile(x)]
videoInRar = [x for x in rarContent if helpers.isMediaFile(x)] if rarContent else []
result.output += logHelper(u"PostProcessing Files: {0}".format(files), logger.DEBUG)
result.output += logHelper(u"PostProcessing VideoFiles: {0}".format(videoFiles), logger.DEBUG)
result.output += logHelper(u"PostProcessing RarContent: {0}".format(rarContent), logger.DEBUG)
result.output += logHelper(u"PostProcessing VideoInRar: {0}".format(videoInRar), logger.DEBUG)
# If nzbName is set and there's more than one videofile in the folder, files will be lost (overwritten).
nzbName = None if len(videoFiles) >= 2 else nzbName
process_method = process_method if process_method else sickbeard.PROCESS_METHOD
result.result = True
# Don't Link media when the media is extracted from a rar in the same path
if process_method in (u'hardlink', u'symlink') and videoInRar:
process_media(path, videoInRar, nzbName, u'move', force, is_priority, result)
delete_files(path, rarContent, result)
for video in set(videoFiles) - set(videoInRar):
process_media(path, [video], nzbName, process_method, force, is_priority, result)
elif sickbeard.DELRARCONTENTS and videoInRar:
process_media(path, videoInRar, nzbName, process_method, force, is_priority, result)
delete_files(path, rarContent, result, True)
for video in set(videoFiles) - set(videoInRar):
process_media(path, [video], nzbName, process_method, force, is_priority, result)
else:
for video in videoFiles:
process_media(path, [video], nzbName, process_method, force, is_priority, result)
else:
result.output += logHelper(u"Found temporary sync files: {0} in path: {1}".format(SyncFiles, path))
result.output += logHelper(u"Skipping post processing for folder: {0}".format(path))
result.missedfiles.append(u"{0} : Syncfiles found".format(path))
# Process Video File in all TV Subdir
for curDir in [x for x in dirs if validateDir(path, x, nzbNameOriginal, failed, result)]:
result.result = True
for processPath, dirlist_, fileList in ek(os.walk, ek(os.path.join, path, curDir), topdown=False):
if not validateDir(path, processPath, nzbNameOriginal, failed, result):
continue
SyncFiles = [x for x in fileList if is_sync_file(x)]
# Don't post process if files are still being synced and option is activated
postpone = SyncFiles and sickbeard.POSTPONE_IF_SYNC_FILES
if not postpone:
videoFiles = [x for x in fileList if helpers.isMediaFile(x)]
rarFiles = [x for x in fileList if helpers.isRarFile(x)]
rarContent = []
if rarFiles:
rarContent = unRAR(processPath, rarFiles, force, result)
fileList = set(fileList + rarContent)
videoFiles += [x for x in rarContent if helpers.isMediaFile(x)]
videoInRar = [x for x in rarContent if helpers.isMediaFile(x)] if rarContent else []
notwantedFiles = [x for x in fileList if x not in videoFiles]
if notwantedFiles:
result.output += logHelper(u"Found unwanted files: {0}".format(notwantedFiles), logger.DEBUG)
# Don't Link media when the media is extracted from a rar in the same path
if process_method in (u'hardlink', u'symlink') and videoInRar:
process_media(processPath, videoInRar, nzbName, u'move', force, is_priority, result)
process_media(processPath, set(videoFiles) - set(videoInRar), nzbName, process_method, force,
is_priority, result)
delete_files(processPath, rarContent, result)
elif sickbeard.DELRARCONTENTS and videoInRar:
process_media(processPath, videoInRar, nzbName, process_method, force, is_priority, result)
process_media(processPath, set(videoFiles) - set(videoInRar), nzbName, process_method, force,
is_priority, result)
delete_files(processPath, rarContent, result, True)
else:
process_media(processPath, videoFiles, nzbName, process_method, force, is_priority, result)
# Delete all file not needed and avoid deleting files if Manual PostProcessing
if not(process_method == u"move" and result.result) or (proc_type == u"manual" and not delete_on):
continue
delete_folder(ek(os.path.join, processPath, u'@eaDir'))
delete_files(processPath, notwantedFiles, result)
if all([not sickbeard.NO_DELETE or proc_type == u"manual",
process_method == u"move",
ek(os.path.normpath, processPath) != ek(os.path.normpath, sickbeard.TV_DOWNLOAD_DIR)]):
if delete_folder(processPath, check_empty=True):
result.output += logHelper(u"Deleted folder: {0}".format(processPath), logger.DEBUG)
else:
result.output += logHelper(u"Found temporary sync files: {0} in path: {1}".format(SyncFiles, processPath))
result.output += logHelper(u"Skipping post processing for folder: {0}".format(processPath))
result.missedfiles.append(u"{0} : Syncfiles found".format(path))
if result.aggresult:
result.output += logHelper(u"Successfully processed")
if result.missedfiles:
result.output += logHelper(u"I did encounter some unprocessable items: ")
for missedfile in result.missedfiles:
result.output += logHelper(u"[{0}]".format(missedfile))
else:
result.output += logHelper(u"Problem(s) during processing, failed the following files/folders: ", logger.WARNING)
for missedfile in result.missedfiles:
result.output += logHelper(u"[{0}]".format(missedfile), logger.WARNING)
return result.output
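# processDir first handles the top-level folder itself (including the postpone-on-syncfiles
# branch above) and then walks every validated TV subfolder bottom-up (os.walk with
# topdown=False), so RAR extraction, media processing and cleanup run once per directory level.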
def validateDir(path, dirName, nzbNameOriginal, failed, result): # pylint: disable=too-many-locals,too-many-branches,too-many-return-statements
"""
Check if directory is valid for processing
:param path: Path to use
:param dirName: Directory to check
:param nzbNameOriginal: Original NZB name
:param failed: Previously failed objects
:param result: Previous results
:return: True if dir is valid for processing, False if not
"""
dirName = ss(dirName)
IGNORED_FOLDERS = [u'.AppleDouble', u'.@__thumb', u'@eaDir']
folder_name = ek(os.path.basename, dirName)
if folder_name in IGNORED_FOLDERS:
return False
result.output += logHelper(u"Processing folder " + dirName, logger.DEBUG)
if folder_name.upper().startswith(u'_FAILED_') or folder_name.upper().endswith(u'_FAILED_'):
result.output += logHelper(u"The directory name indicates it failed to extract.", logger.DEBUG)
failed = True
elif folder_name.upper().startswith(u'_UNDERSIZED_') or folder_name.upper().endswith(u'_UNDERSIZED_'):
result.output += logHelper(u"The directory name indicates that it was previously rejected for being undersized.", logger.DEBUG)
failed = True
elif folder_name.upper().startswith(u'_UNPACK') or folder_name.upper().endswith(u'_UNPACK'):
result.output += logHelper(u"The directory name indicates that this release is in the process of being unpacked.", logger.DEBUG)
result.missedfiles.append(u"{0} : Being unpacked".format(dirName))
return False
if failed:
process_failed(ek(os.path.join, path, dirName), nzbNameOriginal, result)
result.missedfiles.append(u"{0} : Failed download".format(dirName))
return False
if helpers.is_hidden_folder(ek(os.path.join, path, dirName)):
result.output += logHelper(u"Ignoring hidden folder: {0}".format(dirName), logger.DEBUG)
result.missedfiles.append(u"{0} : Hidden folder".format(dirName))
return False
# make sure the dir isn't inside a show dir
main_db_con = db.DBConnection()
sql_results = main_db_con.select("SELECT location FROM tv_shows")
for sqlShow in sql_results:
if dirName.lower().startswith(ek(os.path.realpath, sqlShow["location"]).lower() + os.sep) or \
dirName.lower() == ek(os.path.realpath, sqlShow["location"]).lower():
result.output += logHelper(
u"Cannot process an episode that's already been moved to its show dir, skipping " + dirName,
logger.WARNING)
return False
# Get the videofile list for the next checks
allFiles = []
allDirs = []
for root_, processdir, fileList in ek(os.walk, ek(os.path.join, path, dirName), topdown=False):
allDirs += processdir
allFiles += fileList
videoFiles = [x for x in allFiles if helpers.isMediaFile(x)]
allDirs.append(dirName)
# check if the dir have at least one tv video file
for video in videoFiles:
try:
NameParser().parse(video, cache_result=False)
return True
except (InvalidNameException, InvalidShowException) as error:
result.output += logHelper(u"{0}".format(error), logger.DEBUG)
for proc_dir in allDirs:
try:
NameParser().parse(proc_dir, cache_result=False)
return True
except (InvalidNameException, InvalidShowException) as error:
result.output += logHelper(u"{0}".format(error), logger.DEBUG)
if sickbeard.UNPACK:
# Search for packed release
packedFiles = [x for x in allFiles if helpers.isRarFile(x)]
for packed in packedFiles:
try:
NameParser().parse(packed, cache_result=False)
return True
except (InvalidNameException, InvalidShowException) as error:
result.output += logHelper(u"{0}".format(error), logger.DEBUG)
result.output += logHelper(u"{0} : No processable items found in folder".format(dirName), logger.DEBUG)
return False
def unRAR(path, rarFiles, force, result): # pylint: disable=too-many-branches,too-many-statements
"""
Extracts RAR files
:param path: Path to look for files in
:param rarFiles: Names of RAR files
:param force: process currently processing items
:param result: Previous results
:return: List of unpacked file names
"""
unpacked_files = []
if sickbeard.UNPACK and rarFiles:
result.output += logHelper(u"Packed Releases detected: {0}".format(rarFiles), logger.DEBUG)
for archive in rarFiles:
result.output += logHelper(u"Unpacking archive: {0}".format(archive), logger.DEBUG)
failure = None
try:
rar_handle = RarFile(ek(os.path.join, path, archive))
# Skip extraction if any file in archive has previously been extracted
skip_file = False
for file_in_archive in [ek(os.path.basename, x.filename) for x in rar_handle.infolist() if not x.isdir]:
if already_postprocessed(path, file_in_archive, force, result):
result.output += logHelper(
u"Archive file already post-processed, extraction skipped: {0}".format
(file_in_archive), logger.DEBUG)
skip_file = True
break
if skip_file:
continue
rar_handle.extract(path=path, withSubpath=False, overwrite=False)
for x in rar_handle.infolist():
if not x.isdir:
basename = ek(os.path.basename, x.filename)
if basename not in unpacked_files:
unpacked_files.append(basename)
del rar_handle
except ArchiveHeaderBroken:
failure = (u'Archive Header Broken', u'Unpacking failed because the Archive Header is Broken')
except IncorrectRARPassword:
failure = (u'Incorrect RAR Password', u'Unpacking failed because of an Incorrect Rar Password')
except FileOpenError:
failure = (u'File Open Error, check the parent folder and destination file permissions.',
u'Unpacking failed with a File Open Error (file permissions?)')
except InvalidRARArchiveUsage:
failure = (u'Invalid Rar Archive Usage', u'Unpacking Failed with Invalid Rar Archive Usage')
except InvalidRARArchive:
failure = (u'Invalid Rar Archive', u'Unpacking Failed with an Invalid Rar Archive Error')
except Exception as e:
failure = (ex(e), u'Unpacking failed for an unknown reason')
if failure is not None:
result.output += logHelper(u'Failed Unrar archive {0}: {1}'.format(archive, failure[0]), logger.ERROR)
result.missedfiles.append(u'{0} : Unpacking failed: {1}'.format(archive, failure[1]))
result.result = False
continue
result.output += logHelper(u"UnRar content: {0}".format(unpacked_files), logger.DEBUG)
return unpacked_files
def already_postprocessed(dirName, videofile, force, result): # pylint: disable=unused-argument
"""
Check if we already post processed a file
:param dirName: Directory a file resides in
:param videofile: File name
    :param force: If True, skip the check so the file can be postprocessed again
    :param result: Previous results (unused)
    :return: True if the file was already postprocessed, False if not
"""
if force:
return False
    # Avoid processing the same dir again if we use a process method other than move
main_db_con = db.DBConnection()
sql_result = main_db_con.select("SELECT release_name FROM tv_episodes WHERE release_name IN (?, ?) LIMIT 1", [dirName, videofile.rpartition('.')[0]])
if sql_result:
# result.output += logHelper(u"You're trying to post process a dir that's already been processed, skipping", logger.DEBUG)
return True
# Needed if we have downloaded the same episode @ different quality
# But we need to make sure we check the history of the episode we're going to PP, and not others
try: # if it fails to find any info (because we're doing an unparsable folder (like the TV root dir) it will throw an exception, which we want to ignore
parse_result = NameParser(dirName, tryIndexers=True).parse(dirName)
except (InvalidNameException, InvalidShowException): # ignore the exception, because we kind of expected it, but create parse_result anyway so we can perform a check on it.
parse_result = False # pylint: disable=redefined-variable-type
search_sql = "SELECT tv_episodes.indexerid, history.resource FROM tv_episodes INNER JOIN history ON history.showid=tv_episodes.showid" # This part is always the same
search_sql += " WHERE history.season=tv_episodes.season AND history.episode=tv_episodes.episode"
# If we find a showid, a season number, and one or more episode numbers then we need to use those in the query
if parse_result and parse_result.show.indexerid and parse_result.episode_numbers and parse_result.season_number:
search_sql += " AND tv_episodes.showid={0} AND tv_episodes.season={1} AND tv_episodes.episode={2}".format(
parse_result.show.indexerid, parse_result.season_number, parse_result.episode_numbers[0])
search_sql += " AND tv_episodes.status IN (" + ",".join([str(x) for x in common.Quality.DOWNLOADED]) + ")"
search_sql += " AND history.resource LIKE ? LIMIT 1"
sql_result = main_db_con.select(search_sql, ['%' + videofile])
if sql_result:
# result.output += logHelper(u"You're trying to post process a video that's already been processed, skipping", logger.DEBUG)
return True
return False
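# The two checks above are complementary: the release_name query catches the exact same
# release being handed to the post-processor twice, while the history join checks whether this
# particular video was already recorded as downloaded for its episode, which matters when the
# same episode exists at several qualities.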
def process_media(processPath, videoFiles, nzbName, process_method, force, is_priority, result): # pylint: disable=too-many-arguments
"""
Postprocess mediafiles
:param processPath: Path to postprocess in
:param videoFiles: Filenames to look for and postprocess
:param nzbName: Name of NZB file related
:param process_method: auto/manual
:param force: Postprocess currently postprocessing file
:param is_priority: Boolean, is this a priority download
:param result: Previous results
"""
processor = None
for cur_video_file in videoFiles:
cur_video_file_path = ek(os.path.join, processPath, cur_video_file)
if already_postprocessed(processPath, cur_video_file, force, result):
result.output += logHelper(u"Skipping already processed file: {0}".format(cur_video_file), logger.DEBUG)
continue
try:
processor = postProcessor.PostProcessor(cur_video_file_path, nzbName, process_method, is_priority)
result.result = processor.process()
process_fail_message = u""
except EpisodePostProcessingFailedException as e:
result.result = False
process_fail_message = ex(e)
if processor:
result.output += processor.log
if result.result:
result.output += logHelper(u"Processing succeeded for {0}".format(cur_video_file_path))
else:
result.output += logHelper(u"Processing failed for {0}: {1}".format(cur_video_file_path, process_fail_message), logger.WARNING)
result.missedfiles.append(u"{0} : Processing failed: {1}".format(cur_video_file_path, process_fail_message))
result.aggresult = False
def get_path_dir_files(dirName, nzbName, proc_type):
"""
Get files in a path
:param dirName: Directory to start in
:param nzbName: NZB file, if present
:param proc_type: auto/manual
:return: a tuple of (path,dirs,files)
"""
path = u""
dirs = []
files = []
if dirName == sickbeard.TV_DOWNLOAD_DIR and not nzbName or proc_type == u"manual": # Scheduled Post Processing Active
# Get at first all the subdir in the dirName
for path, dirs, files in ek(os.walk, dirName):
break
else:
path, dirs = ek(os.path.split, dirName) # Script Post Processing
if not (nzbName is None or nzbName.endswith(u'.nzb')) and ek(os.path.isfile, ek(os.path.join, dirName, nzbName)): # For single torrent file without Dir
dirs = []
files = [ek(os.path.join, dirName, nzbName)]
else:
dirs = [dirs]
files = []
return path, dirs, files
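# Illustrative summary of what get_path_dir_files returns (hypothetical values): a scheduled or
# manual run yields the download dir plus its immediate subfolders and files; a script-driven
# run yields the parent of dirName with dirs == [<folder name>], or dirs == [] and
# files == [<the single downloaded file>] when nzbName names a non-.nzb file inside dirName.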
def process_failed(dirName, nzbName, result):
"""Process a download that did not complete correctly"""
if sickbeard.USE_FAILED_DOWNLOADS:
processor = None
try:
processor = failedProcessor.FailedProcessor(dirName, nzbName)
result.result = processor.process()
process_fail_message = u""
except FailedPostProcessingFailedException as e:
result.result = False
process_fail_message = ex(e)
if processor:
result.output += processor.log
if sickbeard.DELETE_FAILED and result.result:
if delete_folder(dirName, check_empty=False):
result.output += logHelper(u"Deleted folder: {0}".format(dirName), logger.DEBUG)
if result.result:
result.output += logHelper(u"Failed Download Processing succeeded: ({0}, {1})".format(nzbName, dirName))
else:
result.output += logHelper(u"Failed Download Processing failed: ({0}, {1}): {2}".format(nzbName, dirName, process_fail_message), logger.WARNING)
def subtitles_enabled(video):
"""
    Parse a video filename to its show and check whether subtitles are enabled for that show
:param video: video filename to be parsed
"""
try:
parse_result = NameParser().parse(video, cache_result=True)
except (InvalidNameException, InvalidShowException):
        logger.log(u'Not enough information to parse filename into a valid show. Consider adding scene exceptions or improving naming for: {0}'.format(video), logger.WARNING)
return False
if parse_result.show.indexerid:
main_db_con = db.DBConnection()
sql_results = main_db_con.select("SELECT subtitles FROM tv_shows WHERE indexer_id = ? LIMIT 1", [parse_result.show.indexerid])
return bool(sql_results[0]["subtitles"]) if sql_results else False
else:
logger.log(u'Empty indexer ID for: {0}'.format(video), logger.WARNING)
return False
| gpl-3.0 | 4,548,903,236,749,104,000 | 43.995399 | 177 | 0.646965 | false |
Huyuwei/tvm | python/tvm/relay/op/nn/_nn.py | 1 | 26327 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend compiler related feature registration"""
from __future__ import absolute_import
import topi
from topi.util import get_const_tuple
from .. import op as reg
from ..op import OpPattern, schedule_injective
# relu
reg.register_schedule("nn.relu", schedule_injective)
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
# softmax
@reg.register_schedule("nn.softmax")
def schedule_softmax(_, outputs, target):
"""Schedule definition of softmax"""
with target:
return topi.generic.schedule_softmax(outputs)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
schedule_broadcast = schedule_injective
@reg.register_schedule("nn.log_softmax")
def schedule_log_softmax(_, outputs, target):
"""Schedule definition of log_softmax"""
with target:
return topi.generic.schedule_softmax(outputs)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
# dense
@reg.register_compute("nn.dense")
def compute_dense(attrs, inputs, out_type, target):
"""Compute definition of dense"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
return [topi.nn.dense(inputs[0], inputs[1], None, out_dtype)]
@reg.register_schedule("nn.dense")
def schedule_dense(attrs, outputs, target):
"""Schedule definition of dense"""
with target:
return topi.generic.schedule_dense(outputs)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
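# Every op below follows the same registration triple used for dense above: a compute function
# builds the TOPI expression, a schedule function lowers it for the active target, and a
# pattern tag tells the fusion pass how the op may be fused. A hypothetical op "nn.myop"
# (sketch only, not an op defined in this file) would be wired up the same way:
#
#     @reg.register_compute("nn.myop")
#     def compute_myop(attrs, inputs, out_type, target):
#         return [topi.add(inputs[0], inputs[1])]
#
#     reg.register_schedule("nn.myop", schedule_injective)
#     reg.register_pattern("nn.myop", OpPattern.ELEMWISE)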
# batch_matmul
@reg.register_compute("nn.batch_matmul")
def compute_batch_matmul(attrs, inputs, out_type, target):
"""Compute definition of batch_matmul"""
with target:
return [topi.nn.batch_matmul(inputs[0], inputs[1])]
@reg.register_schedule("nn.batch_matmul")
def schedule_batch_matmul(attrs, outputs, target):
"""Schedule definition of batch_matmul"""
with target:
return topi.generic.schedule_batch_matmul(outputs)
reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_dense
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type, target):
"""Compute definition of sparse_dense"""
return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3])]
@reg.register_schedule("nn.sparse_dense")
def schedule_sparse_dense(attrs, outputs, target):
    """Schedule definition of sparse_dense"""
with target:
return topi.generic.schedule_sparse_dense(outputs)
reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_transpose
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type, target):
"""Compute definition of sparse_transpose"""
return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2])
@reg.register_schedule("nn.sparse_transpose")
def schedule_sparse_transpose(attrs, outputs, target):
    """Schedule definition of sparse_transpose"""
with target:
return topi.generic.schedule_sparse_transpose(outputs)
reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d
def _find_conv2d_op(op):
"""Find the op with conv2d in its tag by traversing."""
if 'conv2d' in op.tag:
return op
for tensor in op.input_tensors:
op_ = _find_conv2d_op(tensor.op)
if op_ is not None:
return op_
return None
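# _find_conv2d_op walks the ops feeding an output tensor and returns the first one whose tag
# contains "conv2d"; schedule_conv2d below uses it to tell depthwise convolution apart from
# grouped convolution when groups != 1, since both reach the same schedule entry point.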
@reg.register_compute("nn.conv2d")
def compute_conv2d(attrs, inputs, out_type, target):
"""Compute definition of conv2d"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout in ["NCHW", "NHWC", "NCHW4c"]
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
def _get_out_depth():
weight_shape = get_const_tuple(inputs[1].shape)
if kernel_layout == "HWOI":
return weight_shape[2] * weight_shape[3]
return weight_shape[0] * weight_shape[1]
if groups == 1:
out = topi.nn.conv2d(
inputs[0], inputs[1], strides, padding,
dilation, layout, out_dtype)
elif layout == "NCHW" and _get_out_depth() == groups:
out = topi.nn.depthwise_conv2d_nchw(
inputs[0], inputs[1], strides, padding, dilation, out_dtype)
elif layout == "NHWC" and kernel_layout == "HWOI" and _get_out_depth() == groups:
out = topi.nn.depthwise_conv2d_nhwc(
inputs[0], inputs[1], strides, padding, dilation, out_dtype)
elif layout in ['NCHW', 'NCHW4c']:
out = topi.nn.group_conv2d_nchw(inputs[0], inputs[1], strides, padding, dilation, groups,
out_dtype)
else:
        raise ValueError("arbitrary group number is not supported for now")
return [out]
@reg.register_schedule("nn.conv2d")
def schedule_conv2d(attrs, outs, target):
"""Schedule definition of conv2d"""
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
with target:
if groups == 1 and layout == "NCHW":
return topi.generic.schedule_conv2d_nchw(outs)
if groups == 1 and layout == "NCHW4c":
return topi.generic.schedule_conv2d_nchw(outs)
if groups == 1 and layout == "NHWC":
return topi.generic.schedule_conv2d_nhwc(outs)
if groups != 1:
# collect in_channels to distinguish depthwise and group conv2d
op = _find_conv2d_op(outs[0].op)
assert op is not None
is_depthwise = 'depthwise' in op.tag
if is_depthwise:
if layout == "NCHW":
# TODO(leyuan, merrymercy, Huyuwei): fold depthwise topi into conv2d.
return topi.generic.schedule_depthwise_conv2d_nchw(outs)
if layout == "NHWC" and kernel_layout == "HWOI":
return topi.generic.schedule_depthwise_conv2d_nhwc(outs)
else:
if layout in ["NCHW", "NCHW4c"]:
return topi.generic.schedule_group_conv2d_nchw(outs)
raise ValueError("No compatible schedule")
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos):
"""Alternate the layout of conv2d"""
from ... import op
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, op)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
"""Legalize conv2d op.
Parameters
----------
attrs : tvm.attrs.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_legalize(attrs, inputs, types)
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d_transpose
@reg.register_compute("nn.conv2d_transpose")
def compute_conv2d_transpose(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout == "NCHW", "only support nchw for now"
    assert dilation == (1, 1), "dilation is not supported for now"
assert groups == 1, "only support groups == 1 for now"
out = topi.nn.conv2d_transpose_nchw(
inputs[0], inputs[1], strides, padding, out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi.nn.pad(out,
[0, 0, 0, 0], [0, 0, output_padding[0], output_padding[1]])
return [out]
@reg.register_schedule("nn.conv2d_transpose")
def schedule_conv2d_transpose(attrs, outs, target):
"""Schedule definition of conv2d_transpose"""
with target:
return topi.generic.schedule_conv2d_transpose_nchw(outs)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
reg.register_schedule("nn.bias_add", schedule_injective)
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool2d
@reg.register_schedule("nn.max_pool2d")
def schedule_max_pool2d(attrs, outs, target):
"""Schedule definition of max_pool2d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
@reg.register_schedule("nn.avg_pool2d")
def schedule_avg_pool2d(attrs, outs, target):
"""Schedule definition of avg_pool2d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d_grad
@reg.register_schedule("nn.max_pool2d_grad")
def schedule_max_pool2d_grad(attrs, outs, target):
"""Schedule definition of max_pool2d_grad"""
with target:
return topi.generic.schedule_pool_grad(outs)
reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d_grad
@reg.register_schedule("nn.avg_pool2d_grad")
def schedule_avg_pool2d_grad(attrs, outs, target):
"""Schedule definition of avg_pool2d_grad"""
with target:
return topi.generic.schedule_pool_grad(outs)
reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
@reg.register_schedule("nn.global_max_pool2d")
def schedule_global_max_pool2d(_, outs, target):
"""Schedule definition of global_max_pool2d"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
@reg.register_schedule("nn.global_avg_pool2d")
def schedule_global_avg_pool2d(_, outs, target):
"""Schedule definition of global_avg_pool2d"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_schedule("nn.leaky_relu", schedule_broadcast)
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_schedule("nn.prelu", schedule_broadcast)
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_schedule("nn.batch_flatten", schedule_broadcast)
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype, target):
"""Compute definition of lrn"""
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis,
attrs.alpha, attrs.beta, attrs.bias)]
@reg.register_schedule("nn.lrn")
def schedule_lrn(attrs, outs, target):
"""Schedule definition of lrn"""
with target:
return topi.generic.schedule_lrn(outs)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# l2_normalize
@reg.register_compute("nn.l2_normalize")
def compute_l2_normalize(attrs, inputs, out_dtype, target):
"""Compute definition of l2 normalize"""
return [topi.nn.l2_normalize(inputs[0], attrs.eps, attrs.axis)]
@reg.register_schedule("nn.l2_normalize")
def schedule_l2_normalize(attrs, outs, target):
"""Schedule definition of l2 normalize"""
with target:
return topi.generic.schedule_l2_normalize(outs)
reg.register_pattern("nn.l2_normalize", OpPattern.OUT_ELEMWISE_FUSABLE)
# upsampling
reg.register_schedule("nn.upsampling", reg.schedule_injective)
def schedule_upsampling(_, outs, target):
"""Schedule definition of upsampling"""
with target:
return topi.generic.schedule_injective(outs)
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype, target):
scale = attrs.scale
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(inputs[0], scale, layout, method, align_corners)]
# pad
reg.register_schedule("nn.pad", schedule_broadcast)
# mirror_pad
reg.register_schedule("nn.mirror_pad", schedule_broadcast)
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype, target):
pad_before, pad_after = list(zip(*attrs.pad_width))
mode = attrs.mode
out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
return [out]
# winograd related operators
@reg.register_compute("nn.contrib_conv2d_winograd_without_weight_transform")
def compute_contrib_conv2d_winograd_without_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_winograd_without_weight_transform"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.get_str("out_dtype")
tile_size = attrs.get_int("tile_size")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
    assert dilation == (1, 1), "Do not support dilation for now"
    assert groups == 1, "Do not support arbitrary group number"
out = topi.nn.conv2d_winograd_without_weight_transform(
inputs[0], inputs[1], strides, padding, dilation, data_layout,
out_dtype, tile_size)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_without_weight_transform")
def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target):
"""Schedule definition of conv2d_winograd_without_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_without_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_without_weight_transform",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of contrib_conv2d_winograd_weight_transform"""
out = topi.nn.conv2d_winograd_weight_transform(
inputs[0], attrs.get_int('tile_size'))
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_weight_transform")
def schedule_contrib_conv2d_winograd_weight_transform(attrs, outs, target):
"""Schedule definition of contrib_conv2d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform",
OpPattern.OUT_ELEMWISE_FUSABLE)
# winograd nnpack related operators
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_without_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_without_weight_transform(
attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_winograd_nnpack_without_weight_transform"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
    assert dilation == (1, 1), "Do not support dilation for now"
    assert groups == 1, "Do not support arbitrary group number"
# No bias
out = topi.nn.conv2d_winograd_nnpack_without_weight_transform(
inputs[0], inputs[1], None, strides, padding, dilation, data_layout,
out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_without_weight_transform")
def schedule_contrib_conv2d_winograd_nnpack_without_weight_transform(attrs, outs, target):
"""Schedule definition of conv2d_winograd_nnpack_without_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_without_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_without_weight_transform",
OpPattern.OPAQUE)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of contrib_conv2d_winograd_nnpack_weight_transform"""
convolution_algorithm = attrs.get_int('convolution_algorithm')
out = topi.nn.conv2d_winograd_nnpack_weight_transform(
inputs[0], convolution_algorithm, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def schedule_contrib_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
"""Schedule definition of contrib_conv2d_winograd_nnpack_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform",
OpPattern.OPAQUE)
@reg.register_compute("nn.contrib_conv2d_NCHWc")
def compute_contrib_conv2d_NCHWc(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d NCHWc"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_NCHWc")
def schedule_contrib_conv2d_NCHWc(attrs, outs, target):
"""Schedule definition of contrib_conv2d_NCHWc"""
with target:
return topi.generic.schedule_conv2d_NCHWc(outs)
reg.register_pattern("nn.contrib_conv2d_NCHWc",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_NCHWc_int8")
def compute_contrib_conv2d_NCHWc_int8(attrs, inputs, out_dtype, target):
    """Compute definition of conv2d NCHWc int8"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.conv2d_NCHWc_int8(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_NCHWc_int8")
def schedule_contrib_conv2d_NCHWc_int8(attrs, outs, target):
"""Schedule definition of contrib_conv2d_NCHWc_int8"""
with target:
return topi.generic.schedule_conv2d_NCHWc_int8(outs)
reg.register_pattern("nn.contrib_conv2d_NCHWc_int8",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_depthwise_conv2d_NCHWc")
def compute_contrib_depthwise_conv2d_NCHWc(attrs, inputs, out_dtype, target):
"""Compute definition of depthwise conv2d NCHWc"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.depthwise_conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_depthwise_conv2d_NCHWc")
def schedule_contrib_depthwise_conv2d_NCHWc(attrs, outs, target):
    """Schedule definition of contrib_depthwise_conv2d_NCHWc"""
with target:
return topi.generic.schedule_depthwise_conv2d_NCHWc(outs)
reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.deformable_conv2d")
def compute_deformable_conv2d(attrs, inputs, out_dtype, target):
"""Compute definition of deformable_conv2d"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
with target:
out = topi.nn.deformable_conv2d_nchw(inputs[0], inputs[1], inputs[2], strides, padding,
dilation, deformable_groups, groups, out_dtype)
return [out]
@reg.register_schedule("nn.deformable_conv2d")
def schedule_deformable_conv2d(attrs, outs, target):
"""Schedule definition of deformable_conv2d"""
with target:
return topi.generic.schedule_deformable_conv2d_nchw(outs)
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype, target):
"""Compute definition for bitpack"""
bits = attrs.bits
pack_axis = attrs.pack_axis
bit_axis = attrs.bit_axis
pack_type = attrs.pack_type
name = attrs.name
with target:
out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type,
name)
return [out]
@reg.register_schedule("nn.bitpack")
def schedule_bitpack(attrs, outs, target):
with target:
return topi.generic.schedule_bitpack(outs)
reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE)
@reg.register_compute("nn.bitserial_conv2d")
def compute_bitserial_conv2d(attrs, inputs, out_dtype, target):
"""Compute definition for bitserial conv2d."""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
layout = attrs.data_layout
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
if layout == 'NCHW':
with target:
out = topi.nn.bitserial_conv2d_nchw(
inputs[0], inputs[1], strides, padding, activation_bits,
weight_bits, pack_dtype, out_dtype, unipolar)
elif layout == 'NHWC':
with target:
out = topi.nn.bitserial_conv2d_nhwc(
inputs[0], inputs[1], strides, padding, activation_bits,
weight_bits, pack_dtype, out_dtype, unipolar)
else:
raise ValueError("Data layout not supported.")
return [out]
@reg.register_schedule("nn.bitserial_conv2d")
def schedule_bitserial_conv2d(attrs, outs, target):
"""Schedule definition for bitserial conv2d."""
layout = attrs.data_layout
if layout == 'NCHW':
with target:
return topi.generic.schedule_bitserial_conv2d_nchw(outs)
elif layout == 'NHWC':
with target:
return topi.generic.schedule_bitserial_conv2d_nhwc(outs)
else:
raise ValueError("Data layout not supported.")
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
"""Legalize bitserial_conv2d op.
Parameters
----------
attrs : tvm.attrs.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# bitserial_dense
@reg.register_compute("nn.bitserial_dense")
def compute_bitserial_dense(attrs, inputs, out_type, target):
"""Compute definition of bitserial_dense"""
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [
topi.nn.bitserial_dense(
inputs[0],
inputs[1],
data_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar)
]
@reg.register_schedule("nn.bitserial_dense")
def schedule_bitserial_dense(attrs, outputs, target):
"""Schedule definition of bitserial_dense"""
with target:
return topi.generic.schedule_bitserial_dense(outputs)
reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
| apache-2.0 | -8,713,760,548,054,790,000 | 34.243641 | 97 | 0.685722 | false |
adrn/tilt-shift | scripts/companion.py | 1 | 5554 | # coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os
import sys
import urllib2
import warnings
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import truncnorm, scoreatpercentile
# Neutron star distribution properties (fixed)
bounds_NS = (1.3, 2.) # Msun
mean_NS = 1.4 # Msun
stddev_NS = 0.05 # Msun
# White dwarf mass bounds
bounds_WD = (0.2, 1.44)
# Number of steps to use in numerical integration below
Nintegrate = 4096
def integrand_factor(m2, mf, m1):
""" Compute the factor multiplying p(M_2|θ) in the integral of Equation XXX in the paper """
mtot = m1 + m2
return mtot**(4/3.) * mf**(-1/3.) / m2 / np.sqrt(m2**2 - (mf*mtot**2)**(2/3.)) / 3.
def m2_func(p, mf, m1, bounds_WD, m2s):
mean_WD,stddev_WD,f_NS = p
# White dwarf companion mixture component
lower, upper = bounds_WD
dist_WD = truncnorm((lower - mean_WD) / stddev_WD, (upper - mean_WD) / stddev_WD, loc=mean_WD, scale=stddev_WD)
# Neutron star companion mixture component
lower, upper = bounds_NS
dist_NS = truncnorm((lower - mean_NS) / stddev_NS, (upper - mean_NS) / stddev_NS, loc=mean_NS, scale=stddev_NS)
p_WD = (1-f_NS) * dist_WD.pdf(m2s)
p_NS = f_NS * dist_NS.pdf(m2s)
return p_WD + p_NS
def likelihood(p, mf, m1, bounds_WD):
mean_WD,stddev_WD,f_NS = p
m2s = np.linspace(0., 2., Nintegrate)
dm2 = m2s[1] - m2s[0]
integ_fac = integrand_factor(m2s, mf, m1)
# White dwarf companion mixture component
lower, upper = bounds_WD
dist_WD = truncnorm((lower - mean_WD) / stddev_WD, (upper - mean_WD) / stddev_WD, loc=mean_WD, scale=stddev_WD)
# Neutron star companion mixture component
lower, upper = bounds_NS
dist_NS = truncnorm((lower - mean_NS) / stddev_NS, (upper - mean_NS) / stddev_NS, loc=mean_NS, scale=stddev_NS)
p_WD = (1-f_NS) * dist_WD.pdf(m2s)
p_NS = f_NS * dist_NS.pdf(m2s)
# Zero out when evaluating outside of allowed bounds (normally NaN)
integ_fac[np.isnan(integ_fac)] = 0.
p_WD[np.isnan(p_WD)] = 0.
p_NS[np.isnan(p_NS)] = 0.
# we approximate the integral using the trapezoidal rule
integrand_WD = p_WD * integ_fac
integrand_NS = p_NS * integ_fac
p_WD = dm2/2. * (integrand_WD[0] + np.sum(2*integrand_WD[1:-1], axis=0) + integrand_WD[-1])
p_NS = dm2/2. * (integrand_NS[0] + np.sum(2*integrand_NS[1:-1], axis=0) + integrand_NS[-1])
return np.vstack((p_WD, p_NS))
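# Sketch of how the returned pair is typically consumed (the parameter values here are
# hypothetical and purely illustrative; main() below uses the downloaded posterior samples):
#
#     p_WD, p_NS = likelihood((0.7, 0.2, 0.1), 0.78, 1.0, bounds_WD)[:, 0]
#     P_NS = p_NS / (p_WD + p_NS)   # probability that the companion is a neutron star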
def main(m1, mf, nsamples):
file_url = "http://files.figshare.com/1720018/posterior_samples.txt"
cache_path = "data"
local_file = os.path.join(cache_path, "posterior_samples.txt")
if not os.path.exists(cache_path):
os.mkdir(cache_path)
if not os.path.exists(local_file):
print("Posterior sample file doesn't exist locally.")
print("Downloading and caching to: {}".format(os.path.abspath(local_file)))
# download and save
f = urllib2.urlopen(file_url)
with open(local_file, 'w') as f2:
f2.write(f.read())
else:
print("Reading cached file from: {}".format(os.path.abspath(local_file)))
samples = np.genfromtxt(local_file, delimiter=',', names=True)
m2s = np.linspace(0, 2., 50)
p_m2s = np.zeros((nsamples, len(m2s)))
P_NS = np.zeros(nsamples)
for i,p in enumerate(samples[:nsamples]):
p_WD,p_NS = likelihood(p, mf, m1, bounds_WD)[:,0]
P_NS[i] = p_NS / (p_WD + p_NS)
p_m2s[i] = integrand_factor(m2s, mf, m1) * m2_func(p, mf, m1, bounds_WD, m2s)
fig,axes = plt.subplots(2,1,figsize=(10,12))
binw = 3.5*np.std(P_NS) / len(P_NS)**(1/3.)
axes[0].hist(P_NS, bins=np.arange(0.,1.+binw,binw), normed=True)
axes[0].set_xlabel(r"$P_{\rm NS}$")
axes[0].axvline(np.mean(P_NS), alpha=0.5, lw=2., color='g')
axes[0].axvline(scoreatpercentile(P_NS,16), alpha=0.5, lw=2., color='g', linestyle='dashed')
axes[0].axvline(scoreatpercentile(P_NS,84), alpha=0.5, lw=2., color='g', linestyle='dashed')
axes[0].set_xlim(0,max(P_NS)+0.05)
axes[1].errorbar(m2s, np.mean(p_m2s,axis=0), np.std(p_m2s,axis=0),
marker='o', ecolor='#666666')
# for i in np.random.randint(0,nsamples,100):
# axes[1].plot(m2s, p_m2s[i], marker=None, lw=2., color='#666666', alpha=0.25)
# axes[1].plot(m2s, np.mean(p_m2s,axis=0), marker=None, lw=2., color='k')
axes[1].set_xlabel(r"${\rm M}_2 [{\rm M}_\odot]$")
print("Mean P_NS: {:.3f}".format(np.mean(P_NS)))
print("Std. deviation P_NS: {:.3f}".format(np.std(P_NS)))
print("Median P_NS: {:.3f}".format(np.median(P_NS)))
print("16th percentile P_NS: {:.3f}".format(scoreatpercentile(P_NS,16)))
print("84th percentile P_NS: {:.3f}".format(scoreatpercentile(P_NS,84)))
plt.show()
if __name__ == '__main__':
from argparse import ArgumentParser
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument("--m1", dest="m1", default=None, required=True,
type=float, help="Mass of the primary.")
parser.add_argument("--mf", dest="mf", default=None, required=True,
type=float, help="Mass function.")
parser.add_argument("--nsamples", dest="nsamples", default=1000,
type=int, help="Number of posterior samples to use.")
args = parser.parse_args()
warnings.simplefilter("ignore", RuntimeWarning)
main(args.m1, args.mf, nsamples=args.nsamples)
| mit | 9,173,242,311,238,589,000 | 34.14557 | 115 | 0.618585 | false |
pombreda/pyamg | pyamg/gallery/demo.py | 1 | 2087 | """Basic PyAMG demo showing AMG standalone convergence versus preconditioned CG
with AMG"""
__docformat__ = "restructuredtext en"
__all__ = ['demo']
import scipy
import numpy
from pyamg.gallery import poisson
from pyamg.aggregation import smoothed_aggregation_solver
def demo():
A = poisson((100,100), format='csr') # 2D FD Poisson problem
B = None # no near-null spaces guesses for SA
b = scipy.rand(A.shape[0],1) # a random right-hand side
# Construct solver using AMG based on Smoothed Aggregation (SA) and display info
mls = smoothed_aggregation_solver(A, B=B)
print mls
# Solve Ax=b with no acceleration ('standalone' solver)
standalone_residuals = []
x = mls.solve(b, tol=1e-10, accel=None, residuals=standalone_residuals)
# Solve Ax=b with Conjugate Gradient (AMG as a preconditioner to CG)
accelerated_residuals = []
x = mls.solve(b, tol=1e-10, accel='cg', residuals=accelerated_residuals)
# Compute relative residuals
standalone_residuals = numpy.array(standalone_residuals)/standalone_residuals[0]
accelerated_residuals = numpy.array(accelerated_residuals)/accelerated_residuals[0]
# Compute (geometric) convergence factors
factor1 = standalone_residuals[-1]**(1.0/len(standalone_residuals))
factor2 = accelerated_residuals[-1]**(1.0/len(accelerated_residuals))
print " MG convergence factor: %g"%(factor1)
print "MG with CG acceleration convergence factor: %g"%(factor2)
# Plot convergence history
try:
import pylab
pylab.figure()
pylab.title('Convergence History')
pylab.xlabel('Iteration')
pylab.ylabel('Relative Residual')
pylab.semilogy(standalone_residuals, label='Standalone', linestyle='-', marker='o')
pylab.semilogy(accelerated_residuals, label='Accelerated', linestyle='-', marker='s')
pylab.legend()
pylab.show()
except ImportError:
print "\n\nNote: pylab not available on your system."
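# Typical usage is simply to import and call the function (illustrative):
#
#     >>> from pyamg.gallery.demo import demo
#     >>> demo()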
| bsd-3-clause | -2,089,875,078,849,389,000 | 39.134615 | 93 | 0.663153 | false |
karimbahgat/PyCRS | pycrs/elements/projections.py | 1 | 8280 | """
Named projection classes that can be created or parsed.
"""
def find(projname, crstype, strict=False):
"""
Search for a projection name located in this module.
Arguments:
- **projname**: The projection name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same projection).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
"""
if not strict:
projname = projname.lower().replace(" ","_")
for itemname,item in globals().items():
if itemname.startswith("_"):
continue
try:
if hasattr(item.name, crstype):
itemname = getattr(item.name, crstype)
if not strict:
itemname = itemname.lower().replace(" ","_")
if projname == itemname:
return item
except:
pass
else:
return None
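# Informal doctest-style illustration of find(), using classes defined further down:
#
#     >>> find("robin", "proj4") is Robinson
#     True
#     >>> find("Gall Stereographic", "esri_wkt").name.proj4
#     'gall'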
##+proj Projection name (see `proj -l`)
class Projection:
proj4 = "+proj"
ogc_wkt = "PROJECTION"
esri_wkt = "PROJECTION"
name = None
def __init__(self, **kwargs):
"""
A generic container for the specific projection used.
Args:
- **name**: A pycrs.projections.ProjName instance with the name given by each supported format.
"""
self.name = kwargs.get('name', self.name)
def to_proj4(self):
return "+proj=%s" %self.name.proj4
def to_ogc_wkt(self):
return 'PROJECTION["%s"]' %self.name.ogc_wkt
def to_esri_wkt(self):
return 'PROJECTION["%s"]' %self.name.esri_wkt
class ProjName:
def __init__(self, proj4="", ogc_wkt="", esri_wkt=""):
self.proj4 = proj4
self.ogc_wkt = ogc_wkt
self.esri_wkt = esri_wkt
# Specific predefined ellipsoid classes
class Robinson(Projection):
name = ProjName(
proj4 = "robin",
ogc_wkt = "Robinson",
esri_wkt = "Robinson",
)
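# A concrete instance renders its name in each convention, e.g. (illustrative):
#
#     Robinson().to_proj4()    -> '+proj=robin'
#     Robinson().to_ogc_wkt()  -> 'PROJECTION["Robinson"]'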
class UTM(Projection):
name = ProjName(
proj4 = "utm",
ogc_wkt = "Transverse_Mercator",
esri_wkt = "Transverse_Mercator",
)
class ObliqueMercator(Projection):
name = ProjName(
proj4 = "omerc",
ogc_wkt = "Hotine_Oblique_Mercator_Two_Point_Natural_Origin", #"Hotine_Oblique_Mercator"
esri_wkt = "Hotine_Oblique_Mercator_Two_Point_Natural_Origin", #"Hotine_Oblique_Mercator_Azimuth_Natural_Origin"
)
class AlbersEqualArea(Projection):
name = ProjName(
proj4 = "aea",
ogc_wkt = "Albers_Conic_Equal_Area",
esri_wkt = "Albers",
)
class CylindricalEqualArea(Projection):
name = ProjName(
proj4 = "cea",
ogc_wkt = "Cylindrical_Equal_Area",
esri_wkt = "Cylindrical_Equal_Area",
)
class EquiDistantConic(Projection):
name = ProjName(
proj4 = "eqdc",
ogc_wkt = "Equidistant_Conic",
esri_wkt = "Equidistant_Conic",
)
class EquiDistantCylindrical(Projection):
# same as equirectangular...?
name = ProjName(
proj4 = "eqc",
ogc_wkt = "Equidistant_Cylindrical",
esri_wkt = "Equidistant_Cylindrical",
)
class EquiRectangular(Projection):
# same as equidistant cylindrical
name = ProjName(
proj4 = "eqc",
ogc_wkt = "Equirectangular",
esri_wkt = "Equirectangular",
)
class TransverseMercator(Projection):
name = ProjName(
proj4 = "tmerc",
ogc_wkt = "Transverse_Mercator",
esri_wkt = "Transverse_Mercator",
)
class GallStereographic(Projection):
name = ProjName(
proj4 = "gall",
ogc_wkt = "Gall_Stereographic",
esri_wkt = "Gall_Stereographic",
)
class Gnomonic(Projection):
name = ProjName(
proj4 = "gnom",
ogc_wkt = "Gnomonic",
esri_wkt = "Gnomonic",
)
class LambertAzimuthalEqualArea(Projection):
name = ProjName(
proj4 = "laea",
ogc_wkt = "Lambert_Azimuthal_Equal_Area",
esri_wkt = "Lambert_Azimuthal_Equal_Area",
)
class MillerCylindrical(Projection):
name = ProjName(
proj4 = "mill",
ogc_wkt = "Miller_Cylindrical",
esri_wkt = "Miller_Cylindrical",
)
class Mollweide(Projection):
name = ProjName(
proj4 = "moll",
ogc_wkt = "Mollweide",
esri_wkt = "Mollweide",
)
class ObliqueStereographic(Projection):
name = ProjName(
proj4 = "sterea",
ogc_wkt = "Oblique_Stereographic",
esri_wkt = "Oblique Stereographic", #"Stereographic_North_Pole"
)
class Orthographic(Projection):
name = ProjName(
proj4 = "ortho",
ogc_wkt = "Orthographic",
esri_wkt = "Orthographic",
)
class Stereographic(Projection):
name = ProjName(
proj4 = "stere",
ogc_wkt = "Stereographic",
esri_wkt = "Stereographic",
)
class PolarStereographic(Projection):
name = ProjName(
proj4 = "stere",
ogc_wkt = "Polar_Stereographic", # could also be just stereographic
esri_wkt = "Stereographic", # but also spelled with additional _South/North_Pole, for the same projection and diff params (maybe just for humans)?...
)
class Sinusoidal(Projection):
name = ProjName(
proj4 = "sinu",
ogc_wkt = "Sinusoidal",
esri_wkt = "Sinusoidal",
)
class VanDerGrinten(Projection):
name = ProjName(
proj4 = "vandg",
ogc_wkt = "VanDerGrinten",
esri_wkt = "Van_der_Grinten_I",
)
class LambertConformalConic(Projection):
name = ProjName(
proj4 = "lcc",
        ogc_wkt = "Lambert_Conformal_Conic", # possibly has some variants
esri_wkt = "Lambert_Conformal_Conic",
)
class Krovak(Projection):
name = ProjName(
proj4 = "krovak",
ogc_wkt = "Krovak",
esri_wkt = "Krovak",
)
class NearSidedPerspective(Projection):
name = ProjName(
proj4 = "nsper",
ogc_wkt = "Near_sided_perspective",
esri_wkt = "Near_sided_perspective", # not confirmed
)
class TiltedPerspective(Projection):
name = ProjName(
proj4 = "tsper",
ogc_wkt = "Tilted_perspective",
esri_wkt = "Tilted_perspective", # not confirmed
)
class InteruptedGoodeHomolosine(Projection):
name = ProjName(
proj4 = "igh",
ogc_wkt = "Interrupted_Goodes_Homolosine",
esri_wkt = "Interrupted_Goodes_Homolosine",
)
class Larrivee(Projection):
name = ProjName(
proj4 = "larr",
ogc_wkt = "Larrivee",
esri_wkt = "Larrivee", # not confirmed
)
class LamberEqualAreaConic(Projection):
name = ProjName(
proj4 = "leac",
ogc_wkt = "Lambert_Equal_Area_Conic",
esri_wkt = "Lambert_Equal_Area_Conic", # not confirmed
)
class Mercator(Projection):
name = ProjName(
proj4 = "merc",
ogc_wkt = "Mercator", # has multiple varieties
esri_wkt = "Mercator",
)
class ObliqueCylindricalEqualArea(Projection):
name = ProjName(
proj4 = "ocea",
ogc_wkt = "Oblique_Cylindrical_Equal_Area",
esri_wkt = "Oblique_Cylindrical_Equal_Area",
)
class Polyconic(Projection):
name = ProjName(
proj4 = "poly",
ogc_wkt = "Polyconic",
esri_wkt = "Polyconic",
)
class EckertIV(Projection):
name = ProjName(
proj4 = "eck4",
ogc_wkt = "Eckert_IV",
esri_wkt = "Eckert_IV",
)
class EckertVI(Projection):
name = ProjName(
proj4 = "eck6",
ogc_wkt = "Eckert_VI",
esri_wkt = "Eckert_VI",
)
class AzimuthalEquidistant(Projection):
name = ProjName(
proj4 = "aeqd",
ogc_wkt = "Azimuthal_Equidistant",
esri_wkt = "Azimuthal_Equidistant",
)
class GeostationarySatellite(Projection):
name = ProjName(
proj4 = "geos",
ogc_wkt = "Geostationary_Satellite",
esri_wkt = "Geostationary_Satellite",
)
| mit | -6,452,796,678,563,627,000 | 24.555556 | 157 | 0.577415 | false |
ivansib/sibcoin | qa/rpc-tests/sporks.py | 1 | 3286 | #!/usr/bin/env python3
# Copyright (c) 2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from time import *
'''
sporks.py

Checks that spork values propagate between connected nodes, persist across
node restarts, and reach peers that connect after the value was changed.
'''
class SporkTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = True
self.is_network_split = False
def setup_network(self):
disable_mocktime()
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir,
["-debug", "-sporkkey=cS7h4AVtDr5sCWQZAnr3sW3MJMmkFeNi46CB1mZ39nN82PfXyWui"]))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir,
["-debug"]))
# connect only 2 first nodes at start
connect_nodes(self.nodes[0], 1)
def get_test_spork_state(self, node):
info = node.spork('active')
# use InstantSend spork for tests
return info['SPORK_2_INSTANTSEND_ENABLED']
def set_test_spork_state(self, node, state):
if state:
value = 0
else:
value = 4070908800
# use InstantSend spork for tests
node.spork('SPORK_2_INSTANTSEND_ENABLED', value)
def run_test(self):
# check test spork default state
assert(self.get_test_spork_state(self.nodes[0]))
assert(self.get_test_spork_state(self.nodes[1]))
assert(self.get_test_spork_state(self.nodes[2]))
# check spork propagation for connected nodes
self.set_test_spork_state(self.nodes[0], False)
start = time()
sent = False
while True:
if not self.get_test_spork_state(self.nodes[1]):
sent = True
break
if time() > start + 10:
break
sleep(0.1)
assert(sent)
# restart nodes to check spork persistence
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug"])
self.nodes[1] = start_node(1, self.options.tmpdir, ["-debug"])
assert(not self.get_test_spork_state(self.nodes[0]))
assert(not self.get_test_spork_state(self.nodes[1]))
# Force finish mnsync node as otherwise it will never send out headers to other peers
wait_to_sync(self.nodes[1], fast_mnsync=True)
# Generate one block to kick off masternode sync, which also starts sporks syncing for node2
self.nodes[1].generate(1)
# connect new node and check spork propagation after restoring from cache
connect_nodes(self.nodes[1], 2)
start = time()
sent = False
while True:
if not self.get_test_spork_state(self.nodes[2]):
sent = True
break
if time() > start + 10:
break
sleep(0.1)
assert(sent)
if __name__ == '__main__':
SporkTest().main()
| mit | 5,502,376,864,289,304,000 | 33.957447 | 115 | 0.58521 | false |
Kilghaz/pyd20 | battle/ki.py | 1 | 1967 | from battle.actions import *
from character import Character
class KiCharacter(Character):
def __can_attack(self, battle):
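        # Placeholder: range/attack checks are not implemented yet, so the AI
        # always chooses to keep moving toward the nearest enemy instead.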
return False
def __move_to_next_enemy(self, battle):
current_tile = battle.tile(combatant=self)
enemy_tiles = list()
for tile in battle.grid.get_tiles():
if not tile.is_empty() and not tile.has_occupation(self):
enemy_tiles.append(tile)
        if len(enemy_tiles) == 0:
return EndTurnAction()
closest_enemy = None
shortest_distance = 999
for enemy_tile in enemy_tiles:
            # calculating Manhattan (grid) distance to this enemy tile
            dx = abs(current_tile.x - enemy_tile.x)
            dy = abs(current_tile.y - enemy_tile.y)
            d_manhattan = dx + dy
            if d_manhattan < shortest_distance or closest_enemy is None:
                closest_enemy = enemy_tile
                shortest_distance = d_manhattan
shortest_move_tile = None
shortest_distance = 999
for adjacent_tile in battle.grid.get_adjacent_tiles(closest_enemy):
            # calculating Manhattan (grid) distance to this candidate tile
            dx = abs(current_tile.x - adjacent_tile.x)
            dy = abs(current_tile.y - adjacent_tile.y)
            d_manhattan = dx + dy
            if d_manhattan < shortest_distance or shortest_move_tile is None:
                shortest_move_tile = adjacent_tile
                shortest_distance = d_manhattan
if shortest_move_tile.has_occupation(self):
return EndTurnAction()
return MoveAction(self, battle.grid.path_between_tiles(current_tile, shortest_move_tile))
def __attack_next_enemy(self, battle):
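        # Placeholder: the actual attack is not implemented yet, so the turn
        # simply ends.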
return EndTurnAction()
def next_action(self, battle):
if self._action_points == 0:
return EndTurnAction()
if not self.__can_attack(battle):
return self.__move_to_next_enemy(battle)
return self.__attack_next_enemy(battle)
| gpl-2.0 | -1,008,709,219,438,071,400 | 38.34 | 97 | 0.60244 | false |
superfluidity/RDCL3D | code/toscaparser/imports.py | 1 | 14032 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidPropertyValueError
from toscaparser.common.exception import MissingRequiredFieldError
from toscaparser.common.exception import UnknownFieldError
from toscaparser.common.exception import ValidationError
from toscaparser.elements.tosca_type_validation import TypeValidation
from toscaparser.utils.gettextutils import _
import toscaparser.utils.urlutils
import toscaparser.utils.yamlparser
YAML_LOADER = toscaparser.utils.yamlparser.load_yaml
log = logging.getLogger("tosca")
class ImportsLoader(object):
IMPORTS_SECTION = (FILE, REPOSITORY, NAMESPACE_URI, NAMESPACE_PREFIX) = \
('file', 'repository', 'namespace_uri',
'namespace_prefix')
def __init__(self, importslist, path, type_definition_list=None,
tpl=None, project=None):
self.project = project
self.importslist = importslist
self.custom_defs = {}
self.nested_tosca_tpls = []
if not path and not tpl:
msg = _('Input tosca template is not provided.')
log.warning(msg)
ExceptionCollector.appendException(ValidationError(message=msg))
self.path = path
self.repositories = {}
if tpl and tpl.get('repositories'):
self.repositories = tpl.get('repositories')
self.type_definition_list = []
if type_definition_list:
if isinstance(type_definition_list, list):
self.type_definition_list = type_definition_list
else:
self.type_definition_list.append(type_definition_list)
self._validate_and_load_imports()
def get_custom_defs(self):
return self.custom_defs
def get_nested_tosca_tpls(self):
return self.nested_tosca_tpls
def _validate_and_load_imports(self):
imports_names = set()
if not self.importslist:
msg = _('"imports" keyname is defined without including '
'templates.')
log.error(msg)
ExceptionCollector.appendException(ValidationError(message=msg))
return
for import_def in self.importslist:
if isinstance(import_def, dict):
for import_name, import_uri in import_def.items():
if import_name in imports_names:
msg = (_('Duplicate import name "%s" was found.') %
import_name)
log.error(msg)
ExceptionCollector.appendException(
ValidationError(message=msg))
imports_names.add(import_name)
full_file_name, custom_type = self._load_import_template(
import_name, import_uri)
namespace_prefix = None
if isinstance(import_uri, dict):
namespace_prefix = import_uri.get(
self.NAMESPACE_PREFIX)
if custom_type:
TypeValidation(custom_type, import_def)
self._update_custom_def(custom_type, namespace_prefix)
else: # old style of imports
full_file_name, custom_type = self._load_import_template(
None, import_def)
if custom_type:
TypeValidation(
custom_type, import_def)
self._update_custom_def(custom_type, None)
self._update_nested_tosca_tpls(full_file_name, custom_type)
def _update_custom_def(self, custom_type, namespace_prefix):
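        """Merge the type definitions found in an imported template into
        self.custom_defs, prefixing the type names with the import's
        namespace prefix when one was supplied.
        """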
outer_custom_types = {}
for type_def in self.type_definition_list:
outer_custom_types = custom_type.get(type_def)
if outer_custom_types:
if type_def == "imports":
self.custom_defs.update({'imports': outer_custom_types})
else:
if namespace_prefix:
prefix_custom_types = {}
for type_def_key in outer_custom_types.keys():
namespace_prefix_to_key = (namespace_prefix +
"." + type_def_key)
prefix_custom_types[namespace_prefix_to_key] = \
outer_custom_types[type_def_key]
self.custom_defs.update(prefix_custom_types)
else:
self.custom_defs.update(outer_custom_types)
def _update_nested_tosca_tpls(self, full_file_name, custom_tpl):
if full_file_name and custom_tpl:
topo_tpl = {full_file_name: custom_tpl}
self.nested_tosca_tpls.append(topo_tpl)
def _validate_import_keys(self, import_name, import_uri_def):
if self.FILE not in import_uri_def.keys():
log.warning(_('Missing keyname "file" in import "%(name)s".')
% {'name': import_name})
ExceptionCollector.appendException(
MissingRequiredFieldError(
what='Import of template "%s"' % import_name,
required=self.FILE))
for key in import_uri_def.keys():
if key not in self.IMPORTS_SECTION:
log.warning(_('Unknown keyname "%(key)s" error in '
'imported definition "%(def)s".')
% {'key': key, 'def': import_name})
ExceptionCollector.appendException(
UnknownFieldError(
what='Import of template "%s"' % import_name,
field=key))
def _load_import_template(self, import_name, import_uri_def):
"""Handle custom types defined in imported template files
This method loads the custom type definitions referenced in "imports"
section of the TOSCA YAML template by determining whether each import
is specified via a file reference (by relative or absolute path) or a
URL reference.
Possibilities:
+----------+--------+------------------------------+
| template | import | comment |
+----------+--------+------------------------------+
| file | file | OK |
| file | URL | OK |
| preparsed| file | file must be a full path |
| preparsed| URL | OK |
| URL | file | file must be a relative path |
| URL | URL | OK |
+----------+--------+------------------------------+
"""
short_import_notation = False
if isinstance(import_uri_def, dict):
self._validate_import_keys(import_name, import_uri_def)
file_name = import_uri_def.get(self.FILE)
repository = import_uri_def.get(self.REPOSITORY)
repos = self.repositories.keys()
if repository is not None:
if repository not in repos:
ExceptionCollector.appendException(
InvalidPropertyValueError(
what=_('Repository is not found in "%s"') % repos))
else:
file_name = import_uri_def
repository = None
short_import_notation = True
if not file_name:
msg = (_('A template file name is not provided with import '
'definition "%(import_name)s".')
% {'import_name': import_name})
log.error(msg)
ExceptionCollector.appendException(ValidationError(message=msg))
return None, None
yaml_template = None
if toscaparser.utils.urlutils.UrlUtils.validate_url(file_name):
return file_name, YAML_LOADER(file_name, False)
elif not repository:
import_template = None
if self.path:
if toscaparser.utils.urlutils.UrlUtils.validate_url(self.path):
if os.path.isabs(file_name):
msg = (_('Absolute file name "%(name)s" cannot be '
'used in a URL-based input template '
'"%(template)s".')
% {'name': file_name, 'template': self.path})
log.error(msg)
ExceptionCollector.appendException(ImportError(msg))
return None, None
import_template = toscaparser.utils.urlutils.UrlUtils.\
join_url(self.path, file_name)
a_file = False
else:
a_file = True
main_a_file = os.path.isfile(self.path)
if main_a_file:
if os.path.isfile(file_name):
import_template = file_name
else:
full_path = os.path.join(
os.path.dirname(os.path.abspath(self.path)),
file_name)
if os.path.isfile(full_path):
import_template = full_path
else:
file_path = file_name.rpartition("/")
dir_path = os.path.dirname(os.path.abspath(
self.path))
if file_path[0] != '' and dir_path.endswith(
file_path[0]):
import_template = dir_path + "/" +\
file_path[2]
if not os.path.isfile(import_template):
                                        msg = (_('"%(import_template)s" is '
'not a valid file')
% {'import_template':
import_template})
log.error(msg)
                                        ExceptionCollector.appendException(
                                            ValueError(msg))
else: # template is pre-parsed
id_name, file_extension = os.path.splitext(file_name)
if self.project is not None and id_name in self.project:
a_file = False
yaml_template = self.project[id_name]
import_template = file_name
elif os.path.isabs(file_name) and os.path.isfile(file_name):
a_file = True
import_template = file_name
else:
msg = (_('Relative file name "%(name)s" cannot be used '
'in a pre-parsed input template.')
% {'name': file_name})
log.error(msg)
ExceptionCollector.appendException(ImportError(msg))
return None, None
if not import_template:
log.error(_('Import "%(name)s" is not valid.') %
{'name': import_uri_def})
ExceptionCollector.appendException(
ImportError(_('Import "%s" is not valid.') %
import_uri_def))
return None, None
if yaml_template is not None:
#print yaml_template
return None, yaml_template
else:
return import_template, YAML_LOADER(import_template, a_file)
if short_import_notation:
log.error(_('Import "%(name)s" is not valid.') % import_uri_def)
ExceptionCollector.appendException(
ImportError(_('Import "%s" is not valid.') % import_uri_def))
return None, None
full_url = ""
if repository:
if self.repositories:
for repo_name, repo_def in self.repositories.items():
if repo_name == repository:
# Remove leading, ending spaces and strip
# the last character if "/"
repo_url = ((repo_def['url']).strip()).rstrip("//")
full_url = repo_url + "/" + file_name
if not full_url:
msg = (_('referenced repository "%(n_uri)s" in import '
'definition "%(tpl)s" not found.')
% {'n_uri': repository, 'tpl': import_name})
log.error(msg)
ExceptionCollector.appendException(ImportError(msg))
return None, None
if toscaparser.utils.urlutils.UrlUtils.validate_url(full_url):
return full_url, YAML_LOADER(full_url, False)
else:
msg = (_('repository url "%(n_uri)s" is not valid in import '
'definition "%(tpl)s".')
% {'n_uri': repo_url, 'tpl': import_name})
log.error(msg)
ExceptionCollector.appendException(ImportError(msg))
| apache-2.0 | 8,705,253,170,228,588,000 | 46.087248 | 79 | 0.498788 | false |
Akrog/cinder | cinder/tests/test_ibmnas.py | 1 | 18933 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Nilesh Bhosale <[email protected]>
# Sasikanth Eda <[email protected]>
"""
Tests for the IBM NAS family (SONAS, Storwize V7000 Unified,
NAS based IBM GPFS Storage Systems).
"""
import mock
from oslo_config import cfg
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import ibmnas
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class FakeEnv(object):
fields = {}
def __setitem__(self, key, value):
self.fields[key] = value
def __getitem__(self, item):
return self.fields[item]
class IBMNASDriverTestCase(test.TestCase):
TEST_NFS_EXPORT = 'nfs-host1:/export'
TEST_SIZE_IN_GB = 1
TEST_EXTEND_SIZE_IN_GB = 2
TEST_MNT_POINT = '/mnt/nfs'
TEST_MNT_POINT_BASE = '/mnt'
TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
TEST_VOLUME_PATH = '/export/volume-123'
TEST_SNAP_PATH = '/export/snapshot-123'
def setUp(self):
super(IBMNASDriverTestCase, self).setUp()
self._driver = ibmnas.IBMNAS_NFSDriver(configuration=
conf.Configuration(None))
self._mock = mock.Mock()
self._def_flags = {'nas_ip': 'hostname',
'nas_login': 'user',
'nas_ssh_port': 22,
'nas_password': 'pass',
'nas_private_key': 'nas.key',
'ibmnas_platform_type': 'v7ku',
'nfs_shares_config': None,
'nfs_sparsed_volumes': True,
'nfs_used_ratio': 0.95,
'nfs_oversub_ratio': 1.0,
'nfs_mount_point_base':
self.TEST_MNT_POINT_BASE,
'nfs_mount_options': None}
self.context = context.get_admin_context()
self.context.user_id = 'fake'
self.context.project_id = 'fake'
def _set_flag(self, flag, value):
group = self._driver.configuration.config_group
self._driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
self._driver.configuration.local_conf.reset()
for k, v in self._def_flags.iteritems():
self._set_flag(k, v)
def test_check_for_setup_error(self):
"""Check setup with bad parameters."""
drv = self._driver
required_flags = [
'nas_ip',
'nas_login',
'nas_ssh_port']
for flag in required_flags:
self._set_flag(flag, None)
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
self._set_flag('nas_password', None)
self._set_flag('nas_private_key', None)
self.assertRaises(exception.InvalidInput,
self._driver.check_for_setup_error)
self._set_flag('ibmnas_platform_type', None)
self.assertRaises(exception.InvalidInput,
self._driver.check_for_setup_error)
self._reset_flags()
def test_get_provider_location(self):
"""Check provider location for given volume id."""
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
mock.drv._get_provider_location.return_value = self.TEST_NFS_EXPORT
self.assertEqual(self.TEST_NFS_EXPORT,
mock.drv._get_provider_location(volume['id']))
def test_get_export_path(self):
"""Check export path for the given volume."""
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
mock.drv._get_export_path.return_value = self.TEST_NFS_EXPORT.\
split(':')[1]
self.assertEqual(self.TEST_NFS_EXPORT.split(':')[1],
mock.drv._get_export_path(volume['id']))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ensure_shares_mounted')
def test_update_volume_stats(self, mock_ensure):
"""Check update volume stats."""
drv = self._driver
mock_ensure.return_value = True
fake_avail = 80 * units.Gi
fake_size = 2 * fake_avail
fake_used = 10 * units.Gi
with mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_capacity_info',
return_value=(fake_avail, fake_size, fake_used)):
stats = drv.get_volume_stats()
self.assertEqual(stats['volume_backend_name'], 'IBMNAS_NFS')
self.assertEqual(stats['storage_protocol'], 'nfs')
self.assertEqual(stats['driver_version'], '1.1.0')
self.assertEqual(stats['vendor_name'], 'IBM')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver._run_ssh')
def test_ssh_operation(self, mock_ssh):
drv = self._driver
mock_ssh.return_value = None
self.assertEqual(None, drv._ssh_operation('ssh_cmd'))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver._run_ssh')
def test_ssh_operation_exception(self, mock_ssh):
drv = self._driver
mock_ssh.side_effect = (
exception.VolumeBackendAPIException(data='Failed'))
self.assertRaises(exception.VolumeBackendAPIException,
drv._ssh_operation, 'ssh_cmd')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
@mock.patch('oslo_concurrency.processutils.execute')
    def test_create_ibmnas_snap_mount_point_provided(self, mock_execute,
                                                     mock_ssh):
"""Create ibmnas snap if mount point is provided."""
drv = self._driver
mock_ssh.return_value = True
mock_execute.return_value = True
self.assertEqual(None, drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH,
self.TEST_MNT_POINT))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
@mock.patch('oslo_concurrency.processutils.execute')
def test_create_ibmnas_snap_nas_gpfs(self, mock_execute, mock_ssh):
"""Create ibmnas snap if mount point is provided."""
drv = self._driver
drv.configuration.platform = 'gpfs-nas'
mock_ssh.return_value = True
mock_execute.return_value = True
self.assertEqual(None, drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH,
self.TEST_MNT_POINT))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
def test_create_ibmnas_snap_no_mount_point_provided(self, mock_ssh):
"""Create ibmnas snap if no mount point is provided."""
drv = self._driver
mock_ssh.return_value = True
self.assertEqual(None, drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH,
None))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
def test_create_ibmnas_snap_nas_gpfs_no_mount(self, mock_ssh):
"""Create ibmnas snap (gpfs-nas) if mount point is provided."""
drv = self._driver
drv.configuration.platform = 'gpfs-nas'
mock_ssh.return_value = True
drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH, None)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
def test_create_ibmnas_copy(self, mock_ssh):
"""Create ibmnas copy test case."""
drv = self._driver
TEST_DEST_SNAP = '/export/snapshot-123.snap'
TEST_DEST_PATH = '/export/snapshot-123'
mock_ssh.return_value = True
drv._create_ibmnas_copy(self.TEST_VOLUME_PATH,
TEST_DEST_PATH,
TEST_DEST_SNAP)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
def test_create_ibmnas_copy_nas_gpfs(self, mock_ssh):
"""Create ibmnas copy for gpfs-nas platform test case."""
drv = self._driver
TEST_DEST_SNAP = '/export/snapshot-123.snap'
TEST_DEST_PATH = '/export/snapshot-123'
drv.configuration.platform = 'gpfs-nas'
mock_ssh.return_value = True
drv._create_ibmnas_copy(self.TEST_VOLUME_PATH,
TEST_DEST_PATH,
TEST_DEST_SNAP)
@mock.patch('cinder.image.image_utils.resize_image')
def test_resize_volume_file(self, mock_size):
"""Resize volume file test case."""
drv = self._driver
mock_size.return_value = True
self.assertTrue(drv._resize_volume_file(self.TEST_LOCAL_PATH,
self.TEST_EXTEND_SIZE_IN_GB))
@mock.patch('cinder.image.image_utils.resize_image')
def test_resize_volume_exception(self, mock_size):
"""Resize volume file test case."""
drv = self._driver
mock_size.side_effect = (
exception.VolumeBackendAPIException(data='Failed'))
self.assertRaises(exception.VolumeBackendAPIException,
drv._resize_volume_file,
self.TEST_LOCAL_PATH,
self.TEST_EXTEND_SIZE_IN_GB)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_resize_volume_file')
def test_extend_volume(self, mock_resize, mock_local):
"""Extend volume to greater size test case."""
drv = self._driver
mock_local.return_value = self.TEST_LOCAL_PATH
mock_resize.return_value = True
volume = FakeEnv()
volume['name'] = 'vol-123'
drv.extend_volume(volume,
self.TEST_EXTEND_SIZE_IN_GB)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver._run_ssh')
@mock.patch('oslo_concurrency.processutils.execute')
def test_delete_snapfiles(self, mock_execute, mock_ssh):
"""Delete_snapfiles test case."""
drv = self._driver
expected = ('Parent Depth Parent inode'
'File name\n yes 0 /ibm/gpfs0/gshare/\n'
'volume-123\n EFSSG1000I The command'
'completed successfully.', '')
mock_ssh.return_value = expected
mock_execute.return_value = expected
drv._delete_snapfiles(self.TEST_VOLUME_PATH,
self.TEST_MNT_POINT)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver._run_ssh')
@mock.patch('oslo_concurrency.processutils.execute')
def test_delete_snapfiles_nas_gpfs(self, mock_execute, mock_ssh):
"""Delete_snapfiles for gpfs-nas platform test case."""
drv = self._driver
drv.configuration.platform = 'gpfs-nas'
expected = ('Parent Depth Parent inode'
'File name\n'
'------ ----- -------------'
'- ---------\n'
'yes 0\n'
'/ibm/gpfs0/gshare/volume-123', '')
mock_ssh.return_value = expected
mock_execute.return_value = expected
drv._delete_snapfiles(self.TEST_VOLUME_PATH,
self.TEST_MNT_POINT)
def test_delete_volume_no_provider_location(self):
"""Delete volume with no provider location specified."""
drv = self._driver
volume = FakeEnv()
volume['name'] = 'volume-123'
volume['provider_location'] = None
result = drv.delete_volume(volume)
self.assertIsNone(result)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_export_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_delete_snapfiles')
def test_delete_volume(self, mock_snap, mock_export):
"""Delete volume test case."""
drv = self._driver
mock_export.return_value = self.TEST_VOLUME_PATH
mock_snap.return_value = True
volume = FakeEnv()
volume['id'] = '123'
volume['name'] = '/volume-123'
volume['provider_location'] = self.TEST_VOLUME_PATH
self.assertEqual(None, drv.delete_volume(volume))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_export_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_provider_location')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_mount_point_for_share')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_create_ibmnas_snap')
def test_create_snapshot(self, mock_snap, mock_mount, mock_provider,
mock_export):
"""Create snapshot simple test case."""
drv = self._driver
mock_export.return_value = self.TEST_LOCAL_PATH
mock_provider.return_value = self.TEST_VOLUME_PATH
mock_mount.return_value = self.TEST_MNT_POINT
mock_snap.return_value = True
volume = FakeEnv()
volume['id'] = '123'
volume['name'] = 'volume-123'
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = '/volume-123'
snapshot['name'] = '/snapshot-123'
drv.create_snapshot(snapshot)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_provider_location')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_mount_point_for_share')
@mock.patch('oslo_concurrency.processutils.execute')
def test_delete_snapshot(self, mock_execute, mock_mount, mock_provider):
"""Delete snapshot simple test case."""
drv = self._driver
mock_provider.return_value = self.TEST_VOLUME_PATH
mock_mount.return_value = self.TEST_LOCAL_PATH
mock_execute.return_value = True
volume = FakeEnv()
volume['id'] = '123'
volume['provider_location'] = self.TEST_NFS_EXPORT
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = 'volume-123'
snapshot['name'] = 'snapshot-123'
drv.delete_snapshot(snapshot)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_export_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_create_ibmnas_copy')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_find_share')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_set_rw_permissions_for_owner')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_resize_volume_file')
def test_create_cloned_volume(self, mock_resize, mock_rw, mock_local,
mock_find, mock_copy, mock_export):
"""Clone volume with equal size test case."""
drv = self._driver
mock_export.return_value = self.TEST_VOLUME_PATH
mock_copy.return_value = True
mock_find.return_value = self.TEST_LOCAL_PATH
mock_local.return_value = self.TEST_LOCAL_PATH
mock_rw.return_value = True
mock_resize.return_value = True
volume_src = FakeEnv()
volume_src['id'] = '123'
volume_src['name'] = '/volume-123'
volume_src.size = self.TEST_SIZE_IN_GB
volume_dest = FakeEnv()
volume_dest['id'] = '456'
volume_dest['name'] = '/volume-456'
volume_dest['size'] = self.TEST_SIZE_IN_GB
volume_dest.size = self.TEST_SIZE_IN_GB
self.assertEqual({'provider_location': self.TEST_LOCAL_PATH},
drv.create_cloned_volume(volume_dest, volume_src))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_export_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_create_ibmnas_snap')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_find_share')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_set_rw_permissions_for_owner')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_resize_volume_file')
def test_create_volume_from_snapshot(self, mock_resize, mock_rw,
mock_local, mock_find, mock_snap,
mock_export):
"""Create volume from snapshot test case."""
drv = self._driver
mock_export.return_value = '/export'
mock_snap.return_value = self.TEST_LOCAL_PATH
mock_find.return_value = self.TEST_LOCAL_PATH
mock_local.return_value = self.TEST_VOLUME_PATH
mock_rw.return_value = True
mock_resize.return_value = True
volume = FakeEnv()
volume['id'] = '123'
volume['name'] = '/volume-123'
volume['size'] = self.TEST_SIZE_IN_GB
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = 'volume-123'
snapshot['volume_size'] = self.TEST_SIZE_IN_GB
snapshot.name = '/snapshot-123'
self.assertEqual({'provider_location': self.TEST_LOCAL_PATH},
drv.create_volume_from_snapshot(volume, snapshot))
| apache-2.0 | -5,162,997,413,893,734,000 | 37.403651 | 79 | 0.580521 | false |
openxc/openxc-python | openxc/controllers/base.py | 1 | 18242 | """Contains the abstract interface for sending commands back to a vehicle
interface.
"""
import numbers
import time
import threading
import binascii
try:
    # Python 2
    from Queue import Queue
    from Queue import Empty
except ImportError:
# Python 3
from queue import Queue
from queue import Empty
class ResponseReceiver(object):
"""All commands to a vehicle interface are asynchronous. This class is used to
wait for the response for a particular request in a thread. Before making a
request, a ResponseReceiver is created to wait for the response. All
responses received from the VI (which may or may not be in response to this
particular command) are passed to the ResponseReceiver, until it either
times out waiting or finds a matching response.
The synchronization mechanism is a multiprocessing Queue. The
ResponseReceiver blocks waiting on a new response to be added to the queue,
and the vehicle interface class puts newly received responses in the queues
of ResponseReceivers as they arrive.
"""
COMMAND_RESPONSE_TIMEOUT_S = 0.5
def __init__(self, queue, request, quit_after_first=True):
"""Construct a new ResponseReceiver.
queue - A multithreading queue that this receiver will pull potential responses from.
request - The request we are trying to match up with a response.
"""
self.diag_dict = {}
self.request = request
self.queue = queue
self.responses = []
self.running = True
self.quit_after_first = quit_after_first
def _response_matches_request(self, response):
"""Inspect the given response and return true if it's a response to this
ResponseReceiver's request.
This implementation is the base class no-op - it returns True for any
response. You probably want to override this in a subclass.
response - the response to inspect.
"""
return True
def wait_for_responses(self):
"""Block the thread and wait for the response to the given request to
arrive from the VI. If no matching response is received in
COMMAND_RESPONSE_TIMEOUT_S seconds, returns anyway.
"""
self.thread.join(self.COMMAND_RESPONSE_TIMEOUT_S)
self.running = False
return self.responses
def start(self):
self.thread = threading.Thread(target=self.handle_responses)
self.thread.start()
def handle_responses(self):
"""Block and wait for responses to this object's original request, or
until a timeout (self.COMMAND_RESPONSE_TIMEOUT_S).
This function is handy to use as the target function for a thread.
The responses received (or None if none was received before the timeout)
is stored in a list at self.responses.
"""
while self.running:
try:
response = self.queue.get(
timeout=self.COMMAND_RESPONSE_TIMEOUT_S)
if self._response_matches_request(response):
if type(self) == DiagnosticResponseReceiver:
if self._response_is_multiframe(response):
if response['id'] in self.diag_dict:
self.diag_dict[response['id']].addFrame(response)
else:
self.diag_dict[response['id']] = MultiframeDiagnosticMessage(response)
if self._return_final(response):
self.responses.append(self.diag_dict[response['id']].getResponse())
self.diag_dict.pop(response['id'])
self.responses.append(response)
if self.quit_after_first:
self.running = False
self.queue.task_done()
except Empty:
break
class MultiframeDiagnosticMessage:
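    """Reassembles a multi-frame diagnostic response.

    Each frame's hex payload has its framing header characters stripped and
    is appended to a running payload string; getResponse() then returns a
    single combined response dict. The arbitration ID is shifted back by 16
    (0x10), which appears to be the offset the VI uses for multi-frame
    responses.
    """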
def __init__(self, response):
self.id = response['id'] - 16
self.mode = response['mode']
self.bus = response['bus']
self.pid = response['pid']
self.payload = '0x' + response['payload'][8:]
def addFrame(self, response):
self.payload += response['payload'][8:]
def getResponse(self):
request = {
'timestamp': 0,
'bus': self.bus,
'id': self.id,
'mode': self.mode,
'success': True,
'pid': self.pid,
'payload': self.payload
}
return request
class CommandResponseReceiver(ResponseReceiver):
"""A receiver that matches the 'command' field in responses to the
original request.
"""
def _response_matches_request(self, response):
"""Return true if the 'command' field in the response matches the
original request.
"""
return response.get('command_response', None) == self.request['command']
class DiagnosticResponseReceiver(ResponseReceiver):
"""A receiver that matches the bus, ID, mode and PID from a
diagnostic request to an incoming response.
"""
def __init__(self, queue, request):
super(DiagnosticResponseReceiver, self).__init__(queue, request,
quit_after_first=False)
# Make sure to key off of the diagnostic request, not the command to
# create the request
self.diagnostic_request = request['request']
def _response_matches_request(self, response):
"""Return true if the response is to a diagnostic request, and the bus,
id, mode match. If the request was successful, the PID echo is also
checked.
"""
# Accept success/failure command responses
if super(DiagnosticResponseReceiver,
self)._response_matches_request(response):
return True
if ('bus' in self.diagnostic_request and
response.get('bus', None) != self.diagnostic_request['bus']):
return False
if (self.diagnostic_request['id'] != 0x7df and
response.get('id', None) != self.diagnostic_request['id']):
return False
if (response.get('success', True) and
response.get('pid', None) !=
self.diagnostic_request.get('pid', None)):
return False
return response.get('mode', None) == self.diagnostic_request['mode']
def _response_is_multiframe(self, response):
if 'frame' in response:
return True
return False
def _return_final(self, response):
if response['frame'] == -1:
return True
return False
class Controller(object):
"""A Controller is a physical vehicle interface that accepts commands to be
send back to the vehicle. This class is abstract, and implementations of the
interface must define at least the ``write_bytes`` method.
"""
def _prepare_response_receiver(self, request,
receiver_class=CommandResponseReceiver):
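        """Register a response queue with this controller and start a
        receiver thread that scans incoming responses for a match to
        ``request``. Returns the started receiver.
        """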
queue = Queue()
self.open_requests = getattr(self, 'open_requests', [])
self.open_requests.append(queue)
receiver = receiver_class(queue, request)
receiver.start()
        # Give it a brief moment to get started so we don't miss the response
time.sleep(.2)
return receiver
def complex_request(self, request, wait_for_first_response=True):
"""Send a compound command request to the interface over the normal data
channel.
request - A dict storing the request to send to the VI. It will be
serialized to the currently selected output format.
wait_for_first_response - If true, this function will block waiting for
a response from the VI and return it to the caller. Otherwise, it
will send the command and return immediately and any response will
be lost.
"""
receiver = self._prepare_response_receiver(request,
receiver_class=CommandResponseReceiver)
self._send_complex_request(request)
responses = []
if wait_for_first_response:
responses = receiver.wait_for_responses()
return responses
def _send_complex_request(self, request):
self.write_bytes(self.streamer.serialize_for_stream(request))
@classmethod
def _build_diagnostic_request(cls, id, mode, bus=None, pid=None,
frequency=None, payload=None, decoded_type=None):
request = {
'command': "diagnostic_request",
'request': {
'id': id,
'mode': mode
}
}
if bus is not None:
request['request']['bus'] = bus
request['request']['mode'] = mode
if payload is not None and len(payload) > 0:
# payload must be a bytearray
            # decode() keeps this a clean hex string on Python 3, where
            # hexlify() returns bytes rather than str
            request['request']['payload'] = (
                "0x%s" % binascii.hexlify(payload).decode('ascii'))
if pid is not None:
request['request']['pid'] = pid
if frequency is not None:
request['request']['frequency'] = frequency
if decoded_type is not None:
request['request']['decoded_type'] = decoded_type
return request
def delete_diagnostic_request(self, id, mode, bus=None, pid=None):
request = self._build_diagnostic_request(id, mode, bus, pid)
request['action'] = 'cancel'
return self._check_command_response_status(request)
def create_diagnostic_request(self, id, mode, bus=None, pid=None,
frequency=None, payload=None, wait_for_ack=True,
wait_for_first_response=False, decoded_type=None):
"""Send a new diagnostic message request to the VI
Required:
id - The message ID (arbitration ID) for the request.
mode - the diagnostic mode (or service).
Optional:
bus - The address of the CAN bus controller to send the request, either
1 or 2 for current VI hardware.
pid - The parameter ID, or PID, for the request (e.g. for a mode 1
request).
frequency - The frequency in hertz to add this as a recurring diagnostic
requests. Must be greater than 0, or None if it is a one-time
request.
payload - A bytearray to send as the request's optional payload. Only
single frame diagnostic requests are supported by the VI firmware in
the current version, so the payload has a maximum length of 6.
wait_for_ack - If True, will wait for an ACK of the command message.
wait_for_first_response - If True, this function will block waiting for
a diagnostic response to be received for the request. It will return
either after timing out or after 1 matching response is received -
there may be more responses to functional broadcast requests that
arrive after returning.
Returns a tuple of
([list of ACK responses to create request],
[list of diagnostic responses received])
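
        Example (values are illustrative):

            acks, replies = controller.create_diagnostic_request(
                0x7df, 1, bus=1, pid=0xc, wait_for_first_response=True)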
"""
request = self._build_diagnostic_request(id, mode, bus, pid,
frequency, payload, decoded_type)
diag_response_receiver = None
if wait_for_first_response:
diag_response_receiver = self._prepare_response_receiver(
request, DiagnosticResponseReceiver)
request['action'] = 'add'
ack_responses = self.complex_request(request, wait_for_ack)
diag_responses = None
if diag_response_receiver is not None:
diag_responses = diag_response_receiver.wait_for_responses()
return ack_responses, diag_responses
def _check_command_response_status(self, request):
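        """Send the request and return True only if the first response
        reports a successful status.
        """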
responses = self.complex_request(request)
return len(responses) > 0 and responses[0]['status']
def set_passthrough(self, bus, enabled):
"""Control the status of CAN message passthrough for a bus.
Returns True if the command was successful.
"""
request = {
"command": "passthrough",
"bus": bus,
"enabled": enabled
}
return self._check_command_response_status(request)
def set_payload_format(self, payload_format):
"""Set the payload format for messages sent to and from the VI.
Returns True if the command was successful.
"""
request = {
"command": "payload_format",
"format": payload_format
}
status = self._check_command_response_status(request)
# Always change the format regardless because if it was already in the
# right format, the command will have failed.
self.format = payload_format
return status
def rtc_configuration(self, unix_time):
"""Set the Unix time if RTC is supported on the device.
Returns True if the command was successful.
"""
request = {
"command": "rtc_configuration",
"unix_time": unix_time
}
status = self._check_command_response_status(request)
return status
def modem_configuration(self, host, port):
"""Set the host:port for the Cellular device to send data to.
Returns True if the command was successful.
"""
request = {
"command": "modem_configuration",
"host": host,
"port": port
}
status = self._check_command_response_status(request)
return status
def set_acceptance_filter_bypass(self, bus, bypass):
"""Control the status of CAN acceptance filter for a bus.
Returns True if the command was successful.
"""
request = {
"command": "af_bypass",
"bus": bus,
"bypass": bypass
}
return self._check_command_response_status(request)
def set_predefined_obd2_requests(self, enabled):
"""Control if pre-defined OBD2 requests should be sent.
Returns True if the command was successful.
"""
request = {
"command": "predefined_obd2",
"enabled": enabled
}
return self._check_command_response_status(request)
def _check_command_response_message(self, request):
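        """Send the request and return the 'message' field of the first
        response, or None if no response was received.
        """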
responses = self.complex_request(request)
result = None
if len(responses) > 0:
result = responses[0].get('message')
return result
def version(self):
"""Request a firmware version identifier from the VI.
"""
request = {
"command": "version"
}
return self._check_command_response_message(request)
def platform(self):
"""Request the VI platform.
"""
request = {
"command": "platform"
}
return self._check_command_response_message(request)
def sd_mount_status(self):
"""Request for SD Mount status if available.
"""
request = {
"command": "sd_mount_status"
}
responses = self.complex_request(request)
result = None
if len(responses) > 0:
result = responses[0].get('status')
return result
def device_id(self):
"""Request the unique device ID of the attached VI.
"""
request = {
"command": "device_id"
}
return self._check_command_response_message(request)
def write(self, **kwargs):
"""Serialize a raw or translated write request and send it to the VI,
following the OpenXC message format.
"""
if 'id' in kwargs and 'data' in kwargs:
result = self.write_raw(kwargs['id'], kwargs['data'],
bus=kwargs.get('bus', None),
frame_format=kwargs.get('frame_format', None))
else:
result = self.write_translated(kwargs['name'], kwargs['value'],
event=kwargs.get('event', None))
return result
def write_translated(self, name, value, event=None):
"""Send a translated write request to the VI.
"""
data = {'name': name}
if value is not None:
data['value'] = self._massage_write_value(value)
if event is not None:
            data['event'] = self._massage_write_value(event)
message = self.streamer.serialize_for_stream(data)
bytes_written = self.write_bytes(message)
assert bytes_written == len(message)
return bytes_written
def write_raw(self, id, data, bus=None, frame_format=None):
"""Send a raw write request to the VI.
"""
if not isinstance(id, numbers.Number):
try:
id = int(id, 0)
except ValueError:
raise ValueError("ID must be numerical")
data = {'id': id, 'data': data}
if bus is not None:
data['bus'] = bus
if frame_format is not None:
data['frame_format'] = frame_format
message = self.streamer.serialize_for_stream(data)
bytes_written = self.write_bytes(message)
assert bytes_written == len(message)
return bytes_written
def stop(self):
pass
def write_bytes(self, data):
"""Write the bytes in ``data`` to the controller interface."""
raise NotImplementedError("Don't use Controller directly")
@classmethod
def _massage_write_value(cls, value):
"""Convert string values from command-line arguments into first-order
Python boolean and float objects, if applicable.
"""
if not isinstance(value, numbers.Number):
if value == "true":
value = True
elif value == "false":
value = False
elif value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
try:
value = float(value)
except ValueError:
pass
return value
class ControllerError(Exception):
pass
| bsd-3-clause | -6,919,577,415,630,533,000 | 35.338645 | 102 | 0.592589 | false |
apenchev/tangowithdjango | rango/migrations/0001_initial.py | 1 | 1093 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=128)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=128)),
('url', models.URLField()),
('views', models.IntegerField(default=0)),
('category', models.ForeignKey(to='rango.Category')),
],
options={
},
bases=(models.Model,),
),
] | mit | 5,240,946,657,495,729,000 | 29.388889 | 114 | 0.508692 | false |
glimix/limix-inference | glimix_core/cov/_eye.py | 1 | 2215 | from numpy import exp, eye, log
from optimix import Function, Scalar
from .._util import format_function
class EyeCov(Function):
"""
Identity covariance function, K = s·I.
The parameter s is the scale of the matrix.
Example
-------
.. doctest::
>>> from glimix_core.cov import EyeCov
>>>
>>> cov = EyeCov(2)
>>> cov.scale = 2.5
>>> print(cov.value())
[[2.5 0. ]
[0. 2.5]]
>>> g = cov.gradient()
>>> print(g['logscale'])
[[2.5 0. ]
[0. 2.5]]
>>> cov.name = "I"
>>> print(cov)
EyeCov(dim=2): I
scale: 2.5
Parameters
----------
dim : int
Matrix dimension, d.
"""
def __init__(self, dim):
"""
Constructor.
Parameters
----------
dim : int
Matrix dimension, d.
"""
self._dim = dim
self._I = eye(dim)
self._logscale = Scalar(0.0)
Function.__init__(self, "EyeCov", logscale=self._logscale)
self._logscale.bounds = (-20.0, +10)
@property
def scale(self):
"""
Scale parameter.
"""
return exp(self._logscale)
@scale.setter
def scale(self, scale):
from numpy_sugar import epsilon
scale = max(scale, epsilon.tiny)
self._logscale.value = log(scale)
@property
def dim(self):
"""
Dimension of the matrix, d.
It corresponds to the number of rows and to the number of columns.
"""
return self._I.shape[0]
def value(self):
"""
Covariance matrix.
Returns
-------
K : ndarray
s⋅I, for scale s and a d×d identity matrix I.
"""
return self.scale * self._I
def gradient(self):
"""
Derivative of the covariance matrix over log(s), s⋅I.
Returns
-------
logscale : ndarray
s⋅I, for scale s and a d×d identity matrix I.
"""
return dict(logscale=self.value())
def __str__(self):
return format_function(self, {"dim": self._I.shape[0]}, [("scale", self.scale)])
| mit | -9,038,107,732,543,751,000 | 20.627451 | 88 | 0.485041 | false |
Suwmlee/XX-Net | Python3/lib/distutils/command/upload.py | 1 | 7515 | """
distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to a package
index).
"""
import os
import io
import platform
import hashlib
from base64 import standard_b64encode
from urllib.request import urlopen, Request, HTTPError
from urllib.parse import urlparse
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
class upload(PyPIRCCommand):
description = "upload binary package to PyPI"
user_options = PyPIRCCommand.user_options + [
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = PyPIRCCommand.boolean_options + ['sign']
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
# getting the password from the distribution
# if previously set by the register command
if not self.password and self.distribution.password:
self.password = self.distribution.password
def run(self):
if not self.distribution.dist_files:
msg = "No dist file created in earlier command"
raise DistutilsOptionError(msg)
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Makes sure the repository URL is compliant
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
if params or query or fragments:
raise AssertionError("Incompatible url %s" % self.repository)
if schema not in ('http', 'https'):
raise AssertionError("unsupported schema " + schema)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
f = open(filename,'rb')
try:
content = f.read()
finally:
f.close()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protcol_version': '1',
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': hashlib.md5(content).hexdigest(),
# additional meta-data
'metadata_version': '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
comment = ''
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
open(filename+".asc", "rb").read())
# set up the authentication
user_pass = (self.username + ":" + self.password).encode('ascii')
# The exact encoding of the authentication string is debated.
# Anyway PyPI only accepts ascii for both username or password.
auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b'\r\n--' + boundary.encode('ascii')
end_boundary = sep_boundary + b'--\r\n'
body = io.BytesIO()
for key, value in data.items():
title = '\r\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = str(value).encode('utf-8')
body.write(sep_boundary)
body.write(title.encode('utf-8'))
body.write(b"\r\n\r\n")
body.write(value)
if value and value[-1:] == b'\r':
body.write(b'\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body = body.getvalue()
msg = "Submitting %s to %s" % (filename, self.repository)
self.announce(msg, log.INFO)
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s' % boundary,
'Content-length': str(len(body)),
'Authorization': auth,
}
request = Request(self.repository, data=body,
headers=headers)
# send the data
try:
result = urlopen(request)
status = result.getcode()
reason = result.msg
except OSError as e:
self.announce(str(e), log.ERROR)
raise
except HTTPError as e:
status = e.code
reason = e.msg
if status == 200:
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
else:
msg = 'Upload failed (%s): %s' % (status, reason)
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
if self.show_response:
text = self._read_pypi_response(result)
msg = '\n'.join(('-' * 75, text, '-' * 75))
self.announce(msg, log.INFO)
| bsd-2-clause | -7,851,997,256,913,855,000 | 35.38806 | 76 | 0.534398 | false |
Ayrx/pyDropSecure | src/dropsecure.py | 1 | 1964 | #!/usr/bin/env python
import argparse
import os
import configure
from settings import APP_PATH, CONFIG_DB
import watcher
__author__ = 'Terry Chia'
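# Typical invocations (file names are only examples):
#   dropsecure.py --configure             # create a new configuration
#   dropsecure.py --start / --stop        # control the watcher daemon
#   dropsecure.py --export keys.backup    # export application keys
#   dropsecure.py --import keys.backup    # import application keys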
if __name__ == '__main__':
parser = argparse.ArgumentParser()
action = parser.add_mutually_exclusive_group(required=True)
action.add_argument('--start', help='Start the script',
action='store_true')
action.add_argument('--stop', help='Stop the script',
action='store_true')
action.add_argument('--restart', help='Restart the script',
action='store_true')
action.add_argument('--export', help='Export application keys')
action.add_argument('--import', help='Import application keys',
dest='imp')
action.add_argument('--configure', help='Configure the application',
action='store_true')
args = parser.parse_args()
if args.export is not None:
configure.export_configuration(args.export)
elif args.imp is not None:
configure.import_configuration(args.imp)
elif args.configure:
if not os.path.exists(APP_PATH):
os.makedirs(APP_PATH)
if os.path.exists(CONFIG_DB):
while 1:
choice = raw_input('Configuration file exists. '
'Do you want to overwrite? (Y/n)')
if choice == 'Y':
configure.new_configuration()
break
elif choice == 'n':
break
else:
pass
else:
configure.new_configuration()
elif args.start:
daemon = watcher.Watcher('/tmp/dropsecure.pid')
daemon.start()
elif args.stop:
daemon = watcher.Watcher('/tmp/dropsecure.pid')
daemon.stop()
elif args.restart:
daemon = watcher.Watcher('/tmp/dropsecure.pid')
daemon.restart() | mit | 4,697,290,282,911,959,000 | 28.772727 | 72 | 0.55499 | false |
AppVentus/AvTime-client | packages/wakatime/wakatime/queue.py | 1 | 3769 | # -*- coding: utf-8 -*-
"""
wakatime.queue
~~~~~~~~~~~~~~
Queue for offline time logging.
http://wakatime.com
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import traceback
from time import sleep
try:
import sqlite3
HAS_SQL = True
except ImportError:
HAS_SQL = False
log = logging.getLogger(__name__)
class Queue(object):
DB_FILE = os.path.join(os.path.expanduser('~'), '.wakatime.db')
def connect(self):
conn = sqlite3.connect(self.DB_FILE)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS action (
file text,
time real,
project text,
language text,
lines integer,
branch text,
is_write integer,
plugin text)
''')
return (conn, c)
def push(self, data, plugin):
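        """Store one action (heartbeat) in the local SQLite queue so it can
        be sent later, for example after a failed or offline API request.
        """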
if not HAS_SQL:
return
try:
conn, c = self.connect()
action = {
'file': data.get('file'),
'time': data.get('time'),
'project': data.get('project'),
'language': data.get('language'),
'lines': data.get('lines'),
'branch': data.get('branch'),
'is_write': 1 if data.get('is_write') else 0,
'plugin': plugin,
}
c.execute('INSERT INTO action VALUES (:file,:time,:project,:language,:lines,:branch,:is_write,:plugin)', action)
conn.commit()
conn.close()
except sqlite3.Error:
log.error(traceback.format_exc())
def pop(self):
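        """Remove and return one queued action as a dict, retrying a few
        times if SQLite raises an error (e.g. the database is locked).
        Returns None when the queue is empty or SQLite is unavailable.
        """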
if not HAS_SQL:
return None
tries = 3
wait = 0.1
action = None
try:
conn, c = self.connect()
except sqlite3.Error:
log.debug(traceback.format_exc())
return None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM action LIMIT 1')
row = c.fetchone()
if row is not None:
values = []
clauses = []
index = 0
for row_name in ['file', 'time', 'project', 'language', 'lines', 'branch', 'is_write']:
if row[index] is not None:
clauses.append('{0}=?'.format(row_name))
values.append(row[index])
else:
clauses.append('{0} IS NULL'.format(row_name))
index += 1
if len(values) > 0:
c.execute('DELETE FROM action WHERE {0}'.format(' AND '.join(clauses)), values)
else:
c.execute('DELETE FROM action WHERE {0}'.format(' AND '.join(clauses)))
conn.commit()
if row is not None:
action = {
'file': row[0],
'time': row[1],
'project': row[2],
'language': row[3],
'lines': row[4],
'branch': row[5],
'is_write': True if row[6] is 1 else False,
'plugin': row[7],
}
loop = False
except sqlite3.Error:
log.debug(traceback.format_exc())
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error:
log.debug(traceback.format_exc())
return action
| bsd-3-clause | -8,956,881,329,862,973,000 | 29.893443 | 124 | 0.439639 | false |
cpcloud/numba | numba/cuda/nvvmutils.py | 1 | 5407 | from __future__ import print_function, absolute_import, division
import itertools
import llvmlite.llvmpy.core as lc
from .cudadrv import nvvm
from numba import cgutils
def declare_atomic_cas_int32(lmod):
fname = '___numba_cas_hack'
fnty = lc.Type.function(lc.Type.int(32),
(lc.Type.pointer(lc.Type.int(32)), lc.Type.int(32), lc.Type.int(32)))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_add_float32(lmod):
fname = 'llvm.nvvm.atomic.load.add.f32.p0f32'
fnty = lc.Type.function(lc.Type.float(),
(lc.Type.pointer(lc.Type.float(), 0), lc.Type.float()))
return lmod.get_or_insert_function(fnty, name=fname)
def declare_atomic_add_float64(lmod):
fname = '___numba_atomic_double_add'
fnty = lc.Type.function(lc.Type.double(),
(lc.Type.pointer(lc.Type.double()), lc.Type.double()))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_max_float32(lmod):
fname = '___numba_atomic_float_max'
fnty = lc.Type.function(lc.Type.float(),
(lc.Type.pointer(lc.Type.float()), lc.Type.float()))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_max_float64(lmod):
fname = '___numba_atomic_double_max'
fnty = lc.Type.function(lc.Type.double(),
(lc.Type.pointer(lc.Type.double()), lc.Type.double()))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_min_float32(lmod):
fname = '___numba_atomic_float_min'
fnty = lc.Type.function(lc.Type.float(),
(lc.Type.pointer(lc.Type.float()), lc.Type.float()))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_min_float64(lmod):
fname = '___numba_atomic_double_min'
fnty = lc.Type.function(lc.Type.double(),
(lc.Type.pointer(lc.Type.double()), lc.Type.double()))
return lmod.get_or_insert_function(fnty, fname)
def insert_addrspace_conv(lmod, elemtype, addrspace):
addrspacename = {
nvvm.ADDRSPACE_SHARED: 'shared',
nvvm.ADDRSPACE_LOCAL: 'local',
nvvm.ADDRSPACE_CONSTANT: 'constant',
}[addrspace]
tyname = str(elemtype)
tyname = {'float': 'f32', 'double': 'f64'}.get(tyname, tyname)
s2g_name_fmt = 'llvm.nvvm.ptr.' + addrspacename + '.to.gen.p0%s.p%d%s'
s2g_name = s2g_name_fmt % (tyname, addrspace, tyname)
elem_ptr_ty = lc.Type.pointer(elemtype)
elem_ptr_ty_addrspace = lc.Type.pointer(elemtype, addrspace)
s2g_fnty = lc.Type.function(elem_ptr_ty,
[elem_ptr_ty_addrspace])
return lmod.get_or_insert_function(s2g_fnty, s2g_name)
def declare_string(builder, value):
lmod = builder.basic_block.function.module
cval = lc.Constant.stringz(value)
gl = lmod.add_global_variable(cval.type, name="_str",
addrspace=nvvm.ADDRSPACE_CONSTANT)
gl.linkage = lc.LINKAGE_INTERNAL
gl.global_constant = True
gl.initializer = cval
charty = lc.Type.int(8)
constcharptrty = lc.Type.pointer(charty, nvvm.ADDRSPACE_CONSTANT)
charptr = builder.bitcast(gl, constcharptrty)
conv = insert_addrspace_conv(lmod, charty, nvvm.ADDRSPACE_CONSTANT)
return builder.call(conv, [charptr])
def declare_vprint(lmod):
voidptrty = lc.Type.pointer(lc.Type.int(8))
# NOTE: the second argument to vprintf() points to the variable-length
# array of arguments (after the format)
vprintfty = lc.Type.function(lc.Type.int(), [voidptrty, voidptrty])
vprintf = lmod.get_or_insert_function(vprintfty, "vprintf")
return vprintf
# -----------------------------------------------------------------------------
SREG_MAPPING = {
'tid.x': 'llvm.nvvm.read.ptx.sreg.tid.x',
'tid.y': 'llvm.nvvm.read.ptx.sreg.tid.y',
'tid.z': 'llvm.nvvm.read.ptx.sreg.tid.z',
'ntid.x': 'llvm.nvvm.read.ptx.sreg.ntid.x',
'ntid.y': 'llvm.nvvm.read.ptx.sreg.ntid.y',
'ntid.z': 'llvm.nvvm.read.ptx.sreg.ntid.z',
'ctaid.x': 'llvm.nvvm.read.ptx.sreg.ctaid.x',
'ctaid.y': 'llvm.nvvm.read.ptx.sreg.ctaid.y',
'ctaid.z': 'llvm.nvvm.read.ptx.sreg.ctaid.z',
'nctaid.x': 'llvm.nvvm.read.ptx.sreg.nctaid.x',
'nctaid.y': 'llvm.nvvm.read.ptx.sreg.nctaid.y',
'nctaid.z': 'llvm.nvvm.read.ptx.sreg.nctaid.z',
'warpsize': 'llvm.nvvm.read.ptx.sreg.warpsize',
'laneid': 'llvm.nvvm.read.ptx.sreg.laneid',
}
def call_sreg(builder, name):
module = builder.module
fnty = lc.Type.function(lc.Type.int(), ())
fn = module.get_or_insert_function(fnty, name=SREG_MAPPING[name])
return builder.call(fn, ())
class SRegBuilder(object):
def __init__(self, builder):
self.builder = builder
def tid(self, xyz):
return call_sreg(self.builder, 'tid.%s' % xyz)
def ctaid(self, xyz):
return call_sreg(self.builder, 'ctaid.%s' % xyz)
def ntid(self, xyz):
return call_sreg(self.builder, 'ntid.%s' % xyz)
def nctaid(self, xyz):
return call_sreg(self.builder, 'nctaid.%s' % xyz)
def getdim(self, xyz):
tid = self.tid(xyz)
ntid = self.ntid(xyz)
nctaid = self.ctaid(xyz)
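        # global index along this axis: ctaid * ntid + tid (blockIdx * blockDim + threadIdx);
        # note the local variable holds the ctaid value despite its name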
res = self.builder.add(self.builder.mul(ntid, nctaid), tid)
return res
def get_global_id(builder, dim):
sreg = SRegBuilder(builder)
it = (sreg.getdim(xyz) for xyz in 'xyz')
seq = list(itertools.islice(it, None, dim))
if dim == 1:
return seq[0]
else:
return seq
| bsd-2-clause | 2,699,991,475,804,289,000 | 32.79375 | 80 | 0.634918 | false |
crackhopper/TFS-toolbox | tfs/core/layer/fc.py | 1 | 1671 | import tensorflow as tf
import numpy as np
from tfs.core.layer import ops as ops
from tfs.core.layer.base import Layer
import tfs.core.initializer.init_func as init
from tfs.core.util import get_arg_dict
class FullyConnect(Layer):
def __init__(self,
net,
outdim,
activation = ops.relu,
name=None,
print_names=['outdim','activation']
):
vtable = get_arg_dict(excludes=['self','net'])
super(FullyConnect,self).__init__(net,**vtable)
def _build(self):
inTensor = self._in
input_shape = inTensor.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = np.prod(input_shape.as_list()[1:])
output = tf.reshape(inTensor, [-1,dim])
else:
output, dim = (inTensor, input_shape[-1].value)
weights = self._make_variable('weights', shape=[dim, self.param.outdim],init=init.xavier())
biases = self._make_variable('biases', [self.param.outdim],init=init.constant())
output = tf.nn.xw_plus_b(output, weights, biases,name=self.name)
if self.param.activation:
output= self.param.activation(output, name=self.name)
return output
def _inverse(self):
outTensor = self._inv_in
name = 'inv_'+self.name
act = self.param.activation
if act:
outTensor = act(outTensor)
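    # approximate the inverse mapping: multiply by the transposed weights and reshape to the input shape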
weights = tf.transpose(self._variables['weights'])
inv_fc = tf.matmul(outTensor,weights)
shape = self._in.get_shape().as_list()
shape[0]=-1
inv_fc = tf.reshape(inv_fc,shape)
print('inv_fc '+str(outTensor.get_shape().as_list()) + '->' + str(inv_fc.get_shape().as_list()))
return inv_fc
| mit | 4,118,972,735,694,420,000 | 33.102041 | 100 | 0.630162 | false |
mahak/cinder | cinder/tests/unit/api/contrib/test_backup_project_attribute.py | 2 | 5070 | # Copyright (c) 2016 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from oslo_serialization import jsonutils
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import router as router_v3
from cinder.backup import api as backup_api
from cinder import context
from cinder import objects
from cinder.tests.unit.api import fakes
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
def fake_backup_get(*args, **kwargs):
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False)
bak = {
'id': fake.BACKUP_ID,
'project_id': fake.PROJECT_ID,
'user_id': fake.USER_ID,
}
return fake_backup.fake_backup_obj(ctx, **bak)
def fake_backup_get_all(*args, **kwargs):
return objects.BackupList(objects=[fake_backup_get()])
def app():
# no auth, just let environ['cinder.context'] pass through
api = router_v3.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v3'] = api
return mapper
@ddt.ddt
class BackupProjectAttributeTest(test.TestCase):
def setUp(self):
super(BackupProjectAttributeTest, self).setUp()
self.mock_object(backup_api.API, 'get', fake_backup_get)
self.mock_object(backup_api.API, 'get_all', fake_backup_get_all)
def _send_backup_request(self, ctx, detail=False,
version=mv.BACKUP_PROJECT):
req = None
if detail:
req = webob.Request.blank(('/v3/%s/backups/detail'
% fake.PROJECT_ID))
else:
req = webob.Request.blank('/v3/%s/backups/%s' % (fake.PROJECT_ID,
fake.BACKUP_ID))
req.method = 'GET'
req.environ['cinder.context'] = ctx
req.headers = mv.get_mv_header(version)
req.api_version_request = mv.get_api_version(version)
res = req.get_response(app())
if detail:
return jsonutils.loads(res.body)['backups']
return jsonutils.loads(res.body)['backup']
@ddt.data(True, False)
def test_get_backup_with_project(self, is_admin):
ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, is_admin)
bak = self._send_backup_request(ctx)
if is_admin:
self.assertEqual(fake.PROJECT_ID,
bak['os-backup-project-attr:project_id'])
else:
self.assertNotIn('os-backup-project-attr:project_id', bak)
@ddt.data(True, False)
def test_list_detail_backups_with_project(self, is_admin):
ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, is_admin)
baks = self._send_backup_request(ctx, detail=True)
if is_admin:
self.assertEqual(fake.PROJECT_ID,
baks[0]['os-backup-project-attr:project_id'])
else:
self.assertNotIn('os-backup-project-attr:project_id', baks[0])
def test_get_backup_under_allowed_api_version(self):
ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, True)
bak = self._send_backup_request(
ctx, version=mv.get_prior_version(mv.BACKUP_PROJECT))
self.assertNotIn('os-backup-project-attr:project_id', bak)
@ddt.data(True, False)
def test_get_backup_with_user_id(self, is_admin):
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin)
bak = self._send_backup_request(ctx,
version=mv.BACKUP_PROJECT_USER_ID)
if is_admin:
self.assertEqual(fake.USER_ID, bak['user_id'])
else:
self.assertNotIn('user_id', bak)
@ddt.data(True, False)
def test_list_detail_backups_with_user_id(self, is_admin):
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin)
baks = self._send_backup_request(ctx, detail=True,
version=mv.BACKUP_PROJECT_USER_ID)
if is_admin:
self.assertEqual(fake.USER_ID,
baks[0]['user_id'])
else:
self.assertNotIn('user_id', baks[0])
def test_get_backup_user_id_before_microversion_v356(self):
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
bak = self._send_backup_request(
ctx, version=mv.get_prior_version(mv.BACKUP_PROJECT_USER_ID))
self.assertNotIn('user_id', bak)
| apache-2.0 | -5,119,520,243,205,410,000 | 38 | 78 | 0.624852 | false |
raphaelrpl/portal | backend/test/recommendation_tests/recommendation_rest_tests.py | 1 | 3088 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date
from decimal import Decimal
from base import GAETestCase
from recommendation_app.model import Recommendation
from routes.recommendations import rest
from gaegraph.model import Node
from mock import Mock
from mommygae import mommy
class IndexTests(GAETestCase):
def test_success(self):
mommy.save_one(Recommendation)
mommy.save_one(Recommendation)
json_response = rest.index()
context = json_response.context
self.assertEqual(2, len(context))
recommendation_dct = context[0]
self.assertSetEqual(set(['id', 'creation', 'name']), set(recommendation_dct.iterkeys()))
self.assert_can_serialize_as_json(json_response)
class NewTests(GAETestCase):
def test_success(self):
self.assertIsNone(Recommendation.query().get())
json_response = rest.new(None, name='name_string')
db_recommendation = Recommendation.query().get()
self.assertIsNotNone(db_recommendation)
self.assertEquals('name_string', db_recommendation.name)
self.assert_can_serialize_as_json(json_response)
def test_error(self):
resp = Mock()
json_response = rest.new(resp)
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set(['name']), set(errors.keys()))
self.assert_can_serialize_as_json(json_response)
class EditTests(GAETestCase):
def test_success(self):
recommendation = mommy.save_one(Recommendation)
old_properties = recommendation.to_dict()
json_response = rest.edit(None, recommendation.key.id(), name='name_string')
db_recommendation = recommendation.key.get()
self.assertEquals('name_string', db_recommendation.name)
self.assertNotEqual(old_properties, db_recommendation.to_dict())
self.assert_can_serialize_as_json(json_response)
def test_error(self):
recommendation = mommy.save_one(Recommendation)
old_properties = recommendation.to_dict()
resp = Mock()
json_response = rest.edit(resp, recommendation.key.id())
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set(['name']), set(errors.keys()))
self.assertEqual(old_properties, recommendation.key.get().to_dict())
self.assert_can_serialize_as_json(json_response)
class DeleteTests(GAETestCase):
def test_success(self):
recommendation = mommy.save_one(Recommendation)
rest.delete(None, recommendation.key.id())
self.assertIsNone(recommendation.key.get())
def test_non_recommendation_deletion(self):
non_recommendation = mommy.save_one(Node)
response = Mock()
json_response = rest.delete(response, non_recommendation.key.id())
self.assertIsNotNone(non_recommendation.key.get())
self.assertEqual(500, response.status_code)
self.assert_can_serialize_as_json(json_response)
| mit | -3,372,677,403,984,990,000 | 38.589744 | 96 | 0.686528 | false |
hhauer/myinfo | oam_base/urls.py | 1 | 1496 | from django.conf.urls import include, url
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from MyInfo import views as my_info_views
from django_cas import views as cas_views
from oam_base import views as base_views
from Duo import views as duo_views
# Enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^$', my_info_views.index, name='index'),
url(r'^MyInfo/', include('MyInfo.urls', namespace='MyInfo')),
url(r'^AccountPickup/', include('AccountPickup.urls', namespace='AccountPickup')),
url(r'^PasswordReset/', include('PasswordReset.urls', namespace='PasswordReset')),
url(r'^accounts/login/$', cas_views.login, {'next_page': reverse_lazy('AccountPickup:next_step')}, name='CASLogin'),
url(r'^duo/login/$', cas_views.login, name='duoLogin'),
url(r'^accounts/logout/$', cas_views.logout, name='CASLogout'),
url(r'^status/denied/$', base_views.rate_limited, name='rate_limited'),
url(r'^ajax/', include('ajax.urls')),
url(r'^admin/', include(admin.site.urls)),
# Simple redirects for static files that browsers expect to be at the root.
url(r'^robots\.txt$', RedirectView.as_view(url='/static/robots.txt', permanent=True)),
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/favicon.ico', permanent=True)),
url(r'^duo$', duo_views.login, name='duo_login')
]
handler500 = 'oam_base.views.custom_error' | mit | -2,922,288,893,709,480,000 | 39.459459 | 120 | 0.706551 | false |
Flamacue/pretix | src/pretix/control/utils/i18n.py | 2 | 2085 | # Inspired by https://github.com/asaglimbeni/django-datetime-widget/blob/master/datetimewidget/widgets.py
# Copyright (c) 2013, Alfredo Saglimbeni (BSD license)
import re
from django.utils import translation
from django.utils.formats import get_format
from pretix import settings
date_conversion_to_moment = {
'%a': 'ddd',
'%A': 'dddd',
'%w': 'd',
'%d': 'DD',
'%b': 'MMM',
'%B': 'MMMM',
'%m': 'MM',
'%y': 'YY',
'%Y': 'YYYY',
'%H': 'HH',
'%I': 'hh',
'%p': 'a',
'%M': 'mm',
'%S': 'ss',
'%f': 'SSSSSS',
'%z': 'ZZ',
'%Z': 'zz',
'%j': 'DDDD',
'%U': 'ww', # fuzzy translation
'%W': 'WW',
'%c': '',
'%x': '',
'%X': ''
}
moment_locales = {
'af', 'az', 'bs', 'de-at', 'en-gb', 'et', 'fr-ch', 'hi', 'it', 'ko', 'me', 'ms-my', 'pa-in', 'se', 'sr', 'th',
'tzm-latn', 'zh-hk', 'ar', 'be', 'ca', 'de', 'en-ie', 'eu', 'fr', 'hr', 'ja', 'ky', 'mi', 'my', 'pl', 'si', 'ss',
'tlh', 'uk', 'zh-tw', 'ar-ly', 'bg', 'cs', 'dv', 'en-nz', 'fa', 'fy', 'hu', 'jv', 'lb', 'mk', 'nb', 'pt-br', 'sk',
'sv', 'tl-ph', 'uz', 'ar-ma', 'bn', 'cv', 'el', 'eo', 'fi', 'gd', 'hy-am', 'ka', 'lo', 'ml', 'ne', 'pt', 'sl', 'sw',
'tr', 'vi', 'ar-sa', 'bo', 'cy', 'en-au', 'es-do', 'fo', 'gl', 'id', 'kk', 'lt', 'mr', 'nl', 'ro', 'sq', 'ta',
'tzl', 'x-pseudo', 'ar-tn', 'br', 'da', 'en-ca', 'es', 'fr-ca', 'he', 'is', 'km', 'lv', 'ms', 'nn', 'ru', 'sr-cyrl',
'te', 'tzm', 'zh-cn',
}
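# Match any strftime directive from the table above (when not preceded by a word character)
# so it can be swapped for its moment.js equivalent.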
toJavascript_re = re.compile(r'(?<!\w)(' + '|'.join(date_conversion_to_moment.keys()) + r')\b')
def get_javascript_format(format_name):
f = get_format(format_name)[0]
return toJavascript_re.sub(
lambda x: date_conversion_to_moment[x.group()],
f
)
def get_moment_locale(locale=None):
cur_lang = locale or translation.get_language()
if cur_lang in moment_locales:
return cur_lang
if '-' in cur_lang or '_' in cur_lang:
main = cur_lang.replace("_", "-").split("-")[0]
if main in moment_locales:
return main
return settings.LANGUAGE_CODE
| apache-2.0 | -4,935,072,335,436,854,000 | 31.076923 | 120 | 0.478177 | false |
ctogle/make_places | mp/make_places/roads.py | 1 | 33156 | import make_places.fundamental as fu
import mp_utils as mpu
import mp_bboxes as mpbb
import make_places.primitives as pr
#from make_places.fundamental import element
from make_places.scenegraph import node
from make_places.floors import floor
from make_places.primitives import arbitrary_primitive
from make_places.primitives import ucube
from make_places.primitives import uoctagon
#from make_places.primitives import unit_cube
import make_places.pkler as pk
import os, pdb
import numpy as np
import random as rm
from math import sqrt
from math import cos
from math import sin
from math import tan
from copy import deepcopy as dcopy
cardinal_directions = [
'north', 'northeast',
'east', 'southeast',
'south', 'southwest',
'west', 'northwest']
cardinal_norms = [
[0,1,0],mpu.normalize([1,1,0]),
[1,0,0],mpu.normalize([1,-1,0]),
[0,-1,0],mpu.normalize([-1,-1,0]),
[-1,0,0],mpu.normalize([-1,1,0])]
class vehicle_primitive(arbitrary_primitive):
vehiclexml = os.path.join(pr.primitive_data_path, 'truck.mesh.xml')
#vehicledata = pr.primitive_data_from_xml(vehiclexml)
offset = [0,0,0]
def __init__(self, *args, **kwargs):
pvehdata = pr.primitive_data_from_xml(self.vehiclexml)
#pvehdata = self.vehicledata
arbitrary_primitive.__init__(self, *args, **pvehdata)
self._default_('tag','_vehicle_',**kwargs)
self._scale_uvs_ = False
self.translate(self.offset)
class truck_primitive(vehicle_primitive):
vehiclexml = os.path.join(pr.primitive_data_path, 'truck.mesh.xml')
#vehicledata = pr.primitive_data_from_xml(vehiclexml)
class taxi_primitive(vehicle_primitive):
vehiclexml = os.path.join(pr.primitive_data_path, 'Body.mesh.xml')
#vehicledata = pr.primitive_data_from_xml(vehiclexml)
offset = [0,0,0.5]
class car_batch(node):
possible_vehicles = [truck_primitive,taxi_primitive]
def __init__(self, *args, **kwargs):
self._default_('cargs',[],**kwargs)
self._default_('consumes_children',True,**kwargs)
self._default_('grit_renderingdistance',100,**kwargs)
self._default_('grit_lod_renderingdistance',2000,**kwargs)
self.primitives = self.make_batch(self.cargs)
node.__init__(self, *args, **kwargs)
def make_batch(self, cargs):
cars = []
for cgs in cargs:
new = rm.choice(self.possible_vehicles)()
new.rotate_z(cgs['rotation'][2])
new.translate(cgs['position'])
cars.append(new)
return cars
clip_length = 25
class intersection(node):
def __init__(self, *args, **kwargs):
#self._default_('consumes_children',True,**kwargs)
self._default_('grit_renderingdistance',1000,**kwargs)
self._default_('grit_lod_renderingdistance',2000,**kwargs)
self._default_('tform',self.def_tform(*args,**kwargs),**kwargs)
self._default_('road_width',20,**kwargs)
self._default_('road_height',2,**kwargs)
self.primitives = self.make_segments(*args, **kwargs)
children = self.place_vehicles()
self.add_child(*children)
node.__init__(self, *args, **kwargs)
def find_corners(self):
v1 = [ clip_length, clip_length*tan(fu.to_rad(22.5)),0]
v2 = [ clip_length,-clip_length*tan(fu.to_rad(22.5)),0]
v3 = [-clip_length, clip_length*tan(fu.to_rad(22.5)),0]
v4 = [-clip_length,-clip_length*tan(fu.to_rad(22.5)),0]
v5 = [ clip_length*tan(fu.to_rad(22.5)), clip_length,0]
v6 = [-clip_length*tan(fu.to_rad(22.5)), clip_length,0]
v7 = [ clip_length*tan(fu.to_rad(22.5)),-clip_length,0]
v8 = [-clip_length*tan(fu.to_rad(22.5)),-clip_length,0]
corners = [v1, v2, v3, v4, v5, v6, v7, v8]
return corners
def terrain_points(self):
# i need the location of the octagon verts!
#rh2 = self.road_height/2.0
rh2 = 0.4
corners = self.find_corners()
center = mpu.center_of_mass(corners)
mpu.translate_vector(center,[0,0,-0.5])
corners = mpu.dice_edges(corners, dices = 1)
corners.append(center)
position = self.tform.true().position
x,y,z = position
mpu.translate_coords(corners,[x,y,z-rh2])
return corners
def place_vehicles(self, cnt = 2):
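        # scatter three vehicles around the intersection center with random headings in 30 degree steps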
rotz1 = rm.randrange(12) * fu.to_rad(30.0)
rotz2 = rm.randrange(12) * fu.to_rad(30.0)
rotz3 = rm.randrange(12) * fu.to_rad(30.0)
trargs1 = {
'position':[0,0,0],
'rotation':[0,0,rotz1],
}
trargs2 = {
'position':[10,10,0],
'rotation':[0,0,rotz2],
}
trargs3 = {
'position':[-10,-10,0],
'rotation':[0,0,rotz3],
}
trk_batch = car_batch(parent = self,
cargs = [trargs1,trargs2,trargs3])
return [trk_batch]
def make_segments(self, *args, **kwargs):
segs = []
#rw = self.road_width
rh = self.road_height
octang = 22.5
clipln = clip_length
octscl = clipln / cos(fu.to_rad(octang))
uo = uoctagon()
uo.scale([octscl,octscl,rh])
rh = 0.25
uo.translate([0,0,-rh-2.0])
#uo.translate_face([0,0,-rh],'top')
segs.append(uo)
return segs
def get_bbox(self):
corners = self.find_corners()
#corners = [[0,0,0],[50,0,0],[50,50,0],[0,50,0]]
#fu.rotate_z_coords(corners,theta)
position = self.tform.true().position
x,y,z = position
mpu.translate_coords(corners,[x,y,z])
bboxes = [mpbb.bbox(corners = corners)]
#bboxes = [fu.bbox(corners = corners)]
return bboxes
class road_segment_primitive(arbitrary_primitive):
roadxml = os.path.join(pr.primitive_data_path, 'road.mesh.xml')
def __init__(self, *args, **kwargs):
proaddata = pr.primitive_data_from_xml(self.roadxml)
arbitrary_primitive.__init__(self, *args, **proaddata)
self.coords_by_face = self.find_faces()
self.tag = '_road_'
self._scale_uvs_ = False
def find_faces(self):
fronts = [v for v in self.coords if v[1] < 0.0]
backs = [v for v in self.coords if v[1] > 0.0]
lefts = [v for v in self.coords if v[0] < 0.0]
rights = [v for v in self.coords if v[0] > 0.0]
bottoms = [v for v in self.coords if v[2] <= 0.0]
tops = [v for v in self.coords if v[2] > 0.0]
facedict = {
'front':fronts,
'back':backs,
'left':lefts,
'right':rights,
'top':tops,
'bottom':bottoms,
}
return facedict
def translate_face(self, vect, face = 'top'):
cfaces = self.coords_by_face
face_coords = cfaces[face]
mpu.translate_coords(face_coords, vect)
self.calculate_normals()
self.modified = True
def rotate_z_face(self, ang_z, face = 'top'):
cfaces = self.coords_by_face
face_coords = cfaces[face]
foff = mpu.center_of_mass(face_coords)
mpu.translate_coords(face_coords, mpu.flip(foff))
mpu.rotate_z_coords(face_coords, ang_z)
mpu.translate_coords(face_coords, foff)
self.calculate_normals()
self.modified = True
class highway_segment_primitive(road_segment_primitive):
roadxml = os.path.join(pr.primitive_data_path, 'highroad.mesh.xml')
class road(node):
road_prim_type = road_segment_primitive
def __init__(self, *args, **kwargs):
kwargs['uv_scales'] = [1,1,1]
self._default_('uv_tform',
self.def_uv_tform(*args,**kwargs),**kwargs)
self._default_('grit_renderingdistance',1000,**kwargs)
self._default_('grit_lod_renderingdistance',2000,**kwargs)
self._default_('consumes_children',True,**kwargs)
self._default_('road_width', 10, **kwargs)
self._default_('road_height', 1, **kwargs)
self.clip_length = clip_length
self.set_segmented_vertices(*args, **kwargs)
self.set_corners(self.segmented_vertices)
segs = self.make_segments(*args, **kwargs)
litter = self.litter(segs)
self.primitives = segs + litter
node.__init__(self, *args, **kwargs)
def pick_seg_count(self, vs):
ds = mpu.distance(vs[0],vs[-1])
seglen = 15
return int(ds/seglen)
def litter(self, segs):
lit = []
return lit
def terrain_points(self):
tpts = []
for corns in self.corners:
tcorns = mpu.translate_coords(corns[:],[0,0,-0.25])
tcorns = mpu.dice_edges(tcorns, dices = 1)
mcorns = [tcorns[3],tcorns[7]]
mpu.translate_coords(mcorns,[0,0,-0.25])
tpts.extend([tc for tc in tcorns if not tc in tpts])
return tpts
def set_corners(self, verts):
corners = []
vcnt = len(verts)
for sgdx in range(1,vcnt):
p1,p2 = verts[sgdx-1],verts[sgdx]
corns = self.make_corners(p1,p2)
corners.append(corns)
self.corners = corners
def make_corners(self, p1, p2):
widt = self.road_width
p1_p2 = mpu.v1_v2(p1,p2)
leng = mpu.magnitude(p1_p2)
p1_p2 = mpu.normalize(p1_p2)
ang_z = fu.angle_from_xaxis(p1_p2)
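        # build an axis-aligned rectangle for the segment, then rotate it to the segment heading and move it to p1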
corns = [[0,-widt/2.0,0],[leng,-widt/2.0,0],
[leng,widt/2.0,0],[0,widt/2.0,0]]
mpu.rotate_z_coords(corns,ang_z)
mpu.translate_coords(corns,p1)
mpu.translate_coords(corns[1:3],[0,0,p2[2]-p1[2]])
return corns
def get_bbox(self):
bboxes = []
for corns in self.corners:
bboxes.append(mpbb.bbox(corners = corns))
#bboxes.append(fu.bbox(corners = corns))
return bboxes
def get_cardinal_normals(self, dirs):
def getcardnorm(dx):
cardx = cardinal_directions.index(dirs[dx])
cardn = cardinal_norms[cardx]
return cardn
norms = [getcardnorm(x) for x in range(2)]
return norms
def info_from_topology(self, *args, **kwargs):
topol = kwargs['topology']
nodes = kwargs['nodes']
st,en = topol[nodes[0]],topol[nodes[1]]
stp = st['inter']['position']
enp = en['inter']['position']
return stp, enp
def set_segmented_vertices(self, *args, **kwargs):
kweys = kwargs.keys()
if 'topology' in kweys:
stp, enp = self.info_from_topology(*args, **kwargs)
else:
stp = kwargs['start']
enp = kwargs['end']
dirs = kwargs['directions']
norms = self.get_cardinal_normals(dirs)
self.stnorm = norms[0]
self.ednorm = mpu.flip(norms[1])
segdice = True
verts = [stp,enp]
verts = self.clip_tips(verts,norms[0],norms[1])
verts = self.add_tips(verts,norms[0],norms[1])
scnt = self.pick_seg_count(verts)
self.segment_count = scnt
def bend(vs):
tips = vs[:2] + vs[-2:]
cox,coy,coz = [list(i) for i in zip(*tips)]
tim = [0.0,1.0,2.0,3.0]
alpha = 1.0/2.0
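            # alpha = 0.5 corresponds to the centripetal Catmull-Rom parameterization,
            # which avoids cusps and self-intersections in the interpolated road path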
mpu.parameterize_time(tips,tim,alpha)
cox = mpu.catmull_rom(cox,tim,scnt)
coy = mpu.catmull_rom(coy,tim,scnt)
coz = mpu.catmull_rom(coz,tim,scnt)
new = [list(i) for i in zip(cox,coy,coz)]
return new
if segdice: verts = bend(verts)
self.segmented_vertices = verts
return verts
def add_tips(self,verts,n1,n2):
clip = 25
v1 = verts[0][:]
v2 = verts[1][:]
cl1,cl2 = clip,clip
mpu.translate_vector(v1,mpu.scale_vector(n1[:],[cl1,cl1,cl1]))
mpu.translate_vector(v2,mpu.scale_vector(n2[:],[cl2,cl2,cl2]))
verts.extend([v1, v2])
verts.append(verts.pop(-3))
return verts
def clip_tips(self,verts,n1,n2):
cl = self.clip_length
v1 = mpu.translate_vector(verts[0][:],
mpu.scale_vector(n1[:],[cl,cl,cl]))
v2 = mpu.translate_vector(verts[-1][:],
mpu.scale_vector(n2[:],[cl,cl,cl]))
verts[0] = v1
verts[1] = v2
return verts
def make_segments(self, *args, **kwargs):
verts = self.segmented_vertices
rw = self.road_width
rh = self.road_height
segments = []
vcnt = len(verts)
tangs = [self.stnorm]
angs = []
for sgdx in range(1,vcnt):
p1,p2 = verts[sgdx-1],verts[sgdx]
tangs.append(mpu.normalize(mpu.v1_v2(p1,p2)))
tangs.append(self.ednorm)
for tgdx in range(1,vcnt+1):
t1,t2 = tangs[tgdx-1],tangs[tgdx]
a12 = fu.angle_between_xy(t1,t2)
sign = 0.0 if a12 == 0.0 else a12/abs(a12)
if abs(a12) > np.pi/2:
a12 = 0.0
angs.append(sign * abs(a12))
legs = [True]*vcnt
legs[1::2] = [False]*(int(vcnt/2))
for sgdx in range(1,vcnt):
a1,a2 = angs[sgdx-1],angs[sgdx]
p1,p2 = verts[sgdx-1],verts[sgdx]
strips = self.make_segment(p1,p2,rw,rh,a1,a2,legs[sgdx])
#segments.append(strip)
segments.extend(strips)
return segments
def make_segment(self, p1, p2, widt, depth, a1, a2, leg = False):
leng = mpu.distance_xy(p1,p2)
p1_p2 = mpu.normalize(mpu.v1_v2(p1,p2))
zdiff = p2[2] - p1[2]
ang_z = fu.angle_from_xaxis_xy(p1_p2)
#strip = ucube()
strip = self.road_prim_type()#road_segment_primitive()
#strip = road_segment_primitive()
strip.scale([leng,widt,depth])
strip.scale_uvs([leng/widt,1,1])
strip.translate([leng/2.0,0,-depth])
strip.rotate_z(ang_z)
theta1 = -1.0*a1/2.0
theta2 = a2/2.0
strip.rotate_z_face(theta1, 'left')
strip.translate_face([0,0,zdiff], 'right')
strip.rotate_z_face(theta2, 'right')
strip.translate(p1)
return [strip]
class road_system(node):
def __init__(self, *args, **kwargs):
self._default_('name','road_system',**kwargs)
self._default_('reuse',False,**kwargs)
self._default_('linkmin', 200, **kwargs)
self._default_('linkmax', 400, **kwargs)
self._default_('linkangles',
[90*x for x in range(4)], **kwargs)
self._default_('growth_tips', 5, **kwargs)
self._default_('region_bounds',[(0,1000),(0,1000)],**kwargs)
self._default_('seeds',[[0,0,0],[1000,1000,0]],**kwargs)
self._default_('intersection_count',20,**kwargs)
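        # match the road width to the flat edge of the octagonal intersection: 2 * clip_length * tan(22.5 deg)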
rwidth = 2*clip_length*tan(fu.to_rad(22.5))
self._default_('road_width', rwidth, **kwargs)
#kwargs['road_width'] = rwidth
children = self.reusing(*args, **kwargs)
if not children:children = self.children_from_kwargs(*args,**kwargs)
self._default_('tform',self.def_tform(*args,**kwargs),**kwargs)
self.add_child(*children)
node.__init__(self, *args, **kwargs)
def children_from_kwargs(self, *args, **kwargs):
rwidth = self.road_width
if 'interargs' in kwargs.keys():
interargs = kwargs['interargs']
children = self.make_system_from_intersections(interargs,rwidth)
else: children = self.make_primitives_web(*args, **kwargs)
return children
# will be class specific
def children_from_reuse_file(self, info_file_name):
info_file_name = os.path.join(os.getcwd(),info_file_name)
self.reuse_data = pk.load_pkl(info_file_name)
#self.reuse_data = {'rargs':[],'iargs':[],'topology':None}
elements = []
self.roads = []
for ig in self.reuse_data['iargs']:
elements.append(intersection(**ig))
for rarg in self.reuse_data['rargs']:
newrd = road(**rarg)
self.roads.append(newrd)
elements.append(newrd)
self.topology = self.reuse_data['topology']
return elements
def output_reuse_file(self, info_file_name):
info_file_name = os.path.join(os.getcwd(),info_file_name)
pk.save_pkl(self.reuse_data, info_file_name)
def reusing(self, *args, **kwargs):
if not self.reuse or not self.name: return
info_file_name = '.'.join([self.name,'reusable','data','pkl'])
if not pk.file_exists(info_file_name):
chds = self.children_from_kwargs(*args, **kwargs)
self.output_reuse_file(info_file_name)
return chds
else:
chds = self.children_from_reuse_file(info_file_name)
return chds
def terrain_points(self):
#pts = [ch.tform.true().position for ch in self.children]
pts = []
[pts.extend(ch.owner.terrain_points())
for ch in self.tform.children]
return pts
def make_primitives_web(self, *args, **kwargs):
def good_dir(tip, ang):
link = rm.choice(range(linkmin,linkmax,50))
#link = rm.randrange(linkmin,linkmax)
tippos = tip['position'][:]
angrad = (np.pi/180.0)*ang
z_off_min = -25
z_off_max = 25
z_offset = rm.randrange(z_off_min, z_off_max)
offset = [link*cos(angrad),link*sin(angrad),z_offset]
newtip = mpu.translate_vector(tippos, offset)
if not mpu.in_region(region_bounds, newtip):
return False,None
for ipos in [i['position'] for i in interargs]:
d = mpu.distance(newtip, ipos)
if d < linkmin: return False,None
return True,newtip
def get_angle(tip):
nodes = [i['position'] for i in interargs]
cmass = [np.mean([s[0] for s in nodes]), np.mean([s[1]
for s in nodes]), np.mean([s[2] for s in nodes])]
#cmass = [0,0,0]
cmass_ang = fu.to_deg(fu.angle_from_xaxis(
mpu.v1_v2(tip['position'],cmass)))
tangs = angs[:]
angdists = [abs(x-cmass_ang) for x in tangs]
closestang = tangs[angdists.index(min(angdists))]
tangs.extend([closestang]*20)
while len(tangs) > 0:
angdx = rm.randrange(len(tangs))
ang = tangs.pop(angdx)
passes,newpos = good_dir(tip, ang)
if passes:
return ang,newpos
return None,None
def place_inter(tip):
ang,newpos = get_angle(tip)
if ang is None: return
return newpos
growth_tips = self.growth_tips
region_bounds = self.region_bounds
linkmin, linkmax = self.linkmin,self.linkmax
seeds = self.seeds
angs = self.linkangles
intercnt = self.intersection_count
seedcnt = len(seeds)
branches = []
for idx in range(seedcnt):
branches.append([{
'position' : seeds[idx],
}])
interargs = [br[0] for br in branches]
sealevelvals = []
for idx in range(intercnt):
tips = [br[-min([len(interargs),growth_tips]):]
for br in branches]
bdx = rm.randrange(seedcnt)
tip = rm.choice(tips[bdx])
newpos = place_inter(tip)
if not newpos is None:
sealevelvals.append(newpos[2])
interargs.append({
'position' : newpos,
})
branches[bdx].append(interargs[-1])
            else: print("can't place intersection!!")
#rwidth = kwargs['road_width']
rwidth = self.road_width
self._suggested_sea_level_ = self.pick_sea_level(sealevelvals)
return self.make_system_from_intersections(interargs, rwidth)
def pick_sea_level(self, vals):
maxval = max(vals)
minval = min(vals)
rng = maxval - minval
return minval + rng/10.0
def make_system_from_intersections(self, interargs, rwidth):
elements = []
topology = [{} for inter in interargs]
for inter, topo in zip(interargs, topology):
for card in cardinal_directions:
topo[card] = None
topo['inter'] = inter
topo['roads'] = []
topo['linkcnt'] = 0
self.reuse_data = {'rargs':[],'iargs':[],'topology':None}
self.roads = []
self.highways = []
for tdx, topo in enumerate(topology):
topology[tdx] = find_neighbors(topology,topo,rwidth)
rdbbs = []
hwbbs = []
for tdx, topo in enumerate(topology):
inter = topo['inter']
inter['topology'] = topology
inter['topodex'] = tdx
self.reuse_data['iargs'].append(inter)
elements.append(intersection(**inter))
for rarg in topo['roads']:
self.reuse_data['rargs'].append(rarg)
newrd = road(**rarg)
newbb = newrd.get_bbox()
if not mpbb.intersects(rdbbs,newbb):
rdbbs.extend(newbb)
self.roads.append(newrd)
elements.append(newrd)
else:
newrd = highway(**rarg)
newbb = newrd.get_bbox()
if not mpbb.intersects(hwbbs,newbb):
hwbbs.extend(newbb)
self.highways.append(newrd)
elements.append(newrd)
print('topology mistake from road intersection!')
self.topology = topology
self.reuse_data['topology'] = topology
return elements
def make_primitives_from_blocks(self, *args, **kwargs):
prims = []
# given a list of blocks, determine a set of roads which bounds them
        # assume they do not overlap, and that a road should bound each
# determine locations of intersections as all corners of every block
# determine the width, length, and position of each road connecting
# intersections
# also assume that intersections will never intersect by construction
# that is the blocks are sized/positioned to prevent strange
# intersections
# create the kwargs which includes them all
def get_inter_length():
return 40
def get_inter_width():
return 40
blocks = args[0]
used_bcorners = []
corner_signs = [(-1,-1), (0, -1), (0, 0), (-1, 0)]
interargs = []
for bl in blocks:
corn = bl.corners
c1, c2, c3, c4 = corn
for c_, signs in zip(corn, corner_signs):
ilength = get_inter_length()
iwidth = get_inter_width()
ipos = mpu.translate_vector(c_[:],
[signs[0]*ilength,signs[1]*iwidth,0]),
if not ipos in used_bcorners:
used_bcorners.append(ipos)
interargs.append({
'name' : 'intersection_' + str(len(used_bcorners)),
'position' : mpu.translate_vector(
c_[:],[signs[0]*ilength,signs[1]*iwidth,0]),
'length' : ilength,
'width' : iwidth,
'floor_height' : 1.0})
return self.make_system_from_intersections(interargs)
def get_bbox(self):
bboxes = []
roads = self.tform.children
for rdtf in roads:
rdboxes = rdtf.owner.get_bbox()
bboxes.extend(rdboxes)
return bboxes
class highway(road):
road_prim_type = highway_segment_primitive
def terrain_points(self):
tpts = [mpu.translate_vector(l,[0,0,5])
for l in self.leg_positions]
return tpts
def make_segments(self, *args, **kwargs):
self.leg_positions = []
scnt = self.segment_count
sverts = self.segmented_vertices
self.sverts_ground = self.segmented_vertices[:]
#sverts[1][2] += 1
#sverts[-2][2] += 1
tim = [0.0,1.0,2.0,3.0]
alpha = 1.0/2.0
tips = sverts[:2] + sverts[-2:]
#tips = sverts[1:3] + sverts[-3:-1]
coz = [t[2] for t in tips]
mpu.parameterize_time(tips,tim,alpha)
coz = mpu.catmull_rom(coz,tim,scnt)
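        # raise interior vertices toward the end-to-end spline elevation, capped at 20 units above the ground profile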
for sv,co in zip(sverts[1:-1],coz): sv[2] = min(co,sv[2]+20)
rdsegs = road.make_segments(self, *args, **kwargs)
return rdsegs
def make_leg(self, v):
leg = pr.ucube()
leg_leng = 20
leg.scale([5,5,leg_leng])
leg_pos = [v[0],v[1],v[2]-leg_leng-2.0]
leg.translate(leg_pos)
self.leg_positions.append(leg_pos)
return leg
def make_segment(self, p1, p2, widt, depth, a1, a2, leg = False):
depth = 8 # unacceptable...
rs = road.make_segment(self,p1,p2,widt,depth,a1,a2)
[r.translate([0,0,1.75]) for r in rs]# unacceptable...
# use a bbox check to decide to place a leg or not
if not leg: return rs
leg = self.make_leg(p1)
rs.append(leg)
return rs
def pick_closest(pots,ndists):
if pots:
ndx = ndists.index(min(ndists))
return pots[ndx]
return None,None
def select_outlet(outlets,ordered):
for ord_ in ordered:
if ord_ in outlets:
return ord_
def north_check(topology,topo,seek_fov,linkmax):
antidirs = ['west','southwest','south','southeast','east']
tpos = topo['inter']['position']
potentials = []
ndists = []
tthresh = 50
max_slope = 0.5
for pntopo in topology:
outlets = [ake for ake in antidirs if pntopo[ake] is None]
if outlets:
pnpos = pntopo['inter']['position']
if tpos[1] < pnpos[1]:
ndist = float(pnpos[1] - tpos[1])
zdiff = abs(tpos[2] - pnpos[2])
slope = zdiff/abs(ndist)
if slope > max_slope: continue
if ndist < linkmax:
tdist = float(pnpos[0] - tpos[0])
pn_fov_theta = fu.to_deg(np.arctan(abs(tdist)/ndist))
if pn_fov_theta < seek_fov/2.0:
if abs(tdist) <= tthresh:
order = ['south','southeast','southwest']
elif tdist < -tthresh:
order = ['southeast','south','east']
elif tdist > tthresh:
order = ['southwest','south','west']
dir_ = select_outlet(outlets,order)
if not dir_ is None:
potentials.append((pntopo,dir_))
ndists.append(ndist)
return pick_closest(potentials,ndists)
def south_check(topology,topo,seek_fov,linkmax):
antidirs = ['west','northwest','north','northeast','east']
tpos = topo['inter']['position']
potentials = []
ndists = []
tthresh = 50
max_slope = 0.5
for pntopo in topology:
outlets = [ake for ake in antidirs if pntopo[ake] is None]
if outlets:
pnpos = pntopo['inter']['position']
if tpos[1] > pnpos[1]:
ndist = -1*float(pnpos[1] - tpos[1])
zdiff = abs(tpos[2] - pnpos[2])
slope = zdiff/abs(ndist)
if slope > max_slope: continue
if ndist < linkmax:
tdist = float(pnpos[0] - tpos[0])
pn_fov_theta = fu.to_deg(np.arctan(abs(tdist)/ndist))
if pn_fov_theta < seek_fov/2.0:
if abs(tdist) <= tthresh:
order = ['north','northeast','northwest']
elif tdist < -tthresh:
order = ['northeast','north','east']
elif tdist > tthresh:
order = ['northwest','north','west']
dir_ = select_outlet(outlets,order)
if not dir_ is None:
potentials.append((pntopo,dir_))
ndists.append(ndist)
return pick_closest(potentials,ndists)
def east_check(topology,topo,seek_fov,linkmax):
antidirs = ['north','northwest','west','southwest','south']
tpos = topo['inter']['position']
potentials = []
ndists = []
normdx = 0
trandx = 1
tthresh = 50
max_slope = 0.5
for pntopo in topology:
outlets = [ake for ake in antidirs if pntopo[ake] is None]
if outlets:
pnpos = pntopo['inter']['position']
if tpos[normdx] < pnpos[normdx]:
ndist = float(pnpos[normdx] - tpos[normdx])
zdiff = abs(tpos[2] - pnpos[2])
slope = zdiff/abs(ndist)
if slope > max_slope: continue
if ndist < linkmax:
tdist = float(pnpos[trandx] - tpos[trandx])
pn_fov_theta = fu.to_deg(np.arctan(abs(tdist)/ndist))
if pn_fov_theta < seek_fov/2.0:
if abs(tdist) <= tthresh:
order = ['west','southwest','northwest']
elif tdist < -tthresh:
order = ['northwest','west','north']
elif tdist > tthresh:
order = ['southwest','west','south']
dir_ = select_outlet(outlets,order)
if not dir_ is None:
potentials.append((pntopo,dir_))
ndists.append(ndist)
return pick_closest(potentials,ndists)
def west_check(topology,topo,seek_fov,linkmax):
antidirs = ['north','northeast','east','southeast','south']
tpos = topo['inter']['position']
potentials = []
ndists = []
normdx = 0
trandx = 1
tthresh = 50
max_slope = 0.5
for pntopo in topology:
outlets = [ake for ake in antidirs if pntopo[ake] is None]
if outlets:
pnpos = pntopo['inter']['position']
if tpos[normdx] > pnpos[normdx]:
ndist = -1*float(pnpos[normdx] - tpos[normdx])
zdiff = abs(tpos[2] - pnpos[2])
slope = zdiff/abs(ndist)
if slope > max_slope: continue
if ndist < linkmax:
tdist = float(pnpos[trandx] - tpos[trandx])
pn_fov_theta = fu.to_deg(np.arctan(abs(tdist)/ndist))
if pn_fov_theta < seek_fov/2.0:
if abs(tdist) <= tthresh:
order = ['east','southeast','northeast']
elif tdist < -tthresh:
order = ['northeast','east','north']
elif tdist > tthresh:
order = ['southeast','east','south']
dir_ = select_outlet(outlets,order)
if not dir_ is None:
potentials.append((pntopo,dir_))
ndists.append(ndist)
return pick_closest(potentials,ndists)
neighbor_checks = {
'north' : north_check,
'east' : east_check,
'south' : south_check,
'west' : west_check,
}
def find_neighbors(topology,topo,rwidth):
topoid = [t is topo for t in topology].index(True)
seek_fov = 60
maxlinks = 4
linkmax = 1000
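    # for each cardinal direction, link this intersection to the closest eligible neighbor inside a
    # 60 degree field of view, keeping at most 4 links per node and rejecting links longer than linkmax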
for card in cardinal_directions:
if topo['linkcnt'] >= maxlinks: return topo
if not card in neighbor_checks.keys(): continue
neighb,neighbdir = neighbor_checks[card](
topology,topo,seek_fov,linkmax)
if not neighb is None:
topodx = [n is neighb for n in topology].index(True)
if neighb['linkcnt'] >= maxlinks: continue
if topodx in topo.values(): continue
topo[card] = topodx
topo['linkcnt'] += 1
neighb[neighbdir] = topoid
neighb['linkcnt'] += 1
topo['roads'].append({
'road_width' : rwidth,
'topology' : topology,
'directions' : (card,neighbdir),
'nodes' : (topoid,topodx)})
return topo
def no_road_intersects(topology,idx1,idx2):
topo1 = topology[idx1]
topo2 = topology[idx2]
s1 = (topo1['inter']['position'],
topo2['inter']['position'],)
cardinals = ['north', 'south', 'east', 'west']
for x in topology:
links = [x[key] for key in cardinals if not x[key] is None]
for ldx in links:
y = topology[ldx]
s2 = (x['inter']['position'],
y['inter']['position'],)
if mpu.segments_intersect(s1,s2): return False
return True
| gpl-2.0 | -6,110,025,529,739,502,000 | 36.849315 | 77 | 0.533991 | false |
okolisny/integration_tests | cfme/utils/appliance/__init__.py | 1 | 109008 | import json
import logging
import os
import re
import socket
import traceback
import warnings
from copy import copy
from datetime import datetime
from tempfile import NamedTemporaryFile
from time import sleep, time
from urlparse import ParseResult, urlparse
import attr
import dateutil.parser
from debtcollector import removals
import fauxfactory
import requests
import yaml
from cached_property import cached_property
from manageiq_client.api import ManageIQClient as VanillaMiqApi
from sentaku import ImplementationContext
from werkzeug.local import LocalStack, LocalProxy
from fixtures import ui_coverage
from fixtures.pytest_store import store
from cfme.utils import clear_property_cache
from cfme.utils import conf, ssh, ports
from cfme.utils.datafile import load_data_file
from cfme.utils.events import EventListener
from cfme.utils.log import logger, create_sublogger, logger_wrap
from cfme.utils.net import net_check
from cfme.utils.path import data_path, patches_path, scripts_path, conf_path
from cfme.utils.ssh import SSHTail
from cfme.utils.version import Version, get_stream, pick
from cfme.utils.wait import wait_for, TimedOutError
from .db import ApplianceDB
from .implementations.ui import ViaUI
from .implementations.ssui import ViaSSUI
from .services import SystemdService
RUNNING_UNDER_SPROUT = os.environ.get("RUNNING_UNDER_SPROUT", "false") != "false"
CREATE_IS_PEDANTIC = True # sidechannel to ease shell use
# EMS types recognized by IP or credentials
RECOGNIZED_BY_IP = [
"InfraManager", "ContainerManager", "MiddlewareManager", "Openstack::CloudManager"
]
RECOGNIZED_BY_CREDS = ["CloudManager"]
# A helper for the IDs
SEQ_FACT = 1e12
def _current_miqqe_version():
"""Parses MiqQE JS patch version from the patch file
Returns: Version as int
"""
with patches_path.join('miq_application.js.diff').open("r") as f:
        match = re.search(r"MiqQE_version = (\d+);", f.read(), flags=0)
version = int(match.group(1))
return version
current_miqqe_version = _current_miqqe_version()
class MiqApi(VanillaMiqApi):
def get_entity_by_href(self, href):
"""Parses the collections"""
parsed = urlparse(href)
# TODO: Check the netloc, scheme
path = [step for step in parsed.path.split('/') if step]
# Drop the /api
path = path[1:]
collection = getattr(self.collections, path.pop(0))
entity = collection(int(path.pop(0)))
if path:
raise ValueError('Subcollections not supported! ({})'.format(parsed.path))
return entity
class ApplianceException(Exception):
pass
class ApplianceConsole(object):
"""ApplianceConsole is used for navigating and running appliance_console commands against an
appliance."""
def __init__(self, appliance):
self.appliance = appliance
def timezone_check(self, timezone):
channel = self.appliance.ssh_client.invoke_shell()
channel.settimeout(20)
channel.send("ap")
result = ''
try:
while True:
result += channel.recv(1)
if ("{}".format(timezone[0])) in result:
break
except socket.timeout:
pass
logger.debug(result)
def run_commands(self, commands, autoreturn=True, timeout=10, channel=None):
if not channel:
channel = self.appliance.ssh_client.invoke_shell()
self.commands = commands
for command in commands:
if isinstance(command, basestring):
command_string, timeout = command, timeout
else:
command_string, timeout = command
channel.settimeout(timeout)
if autoreturn:
command_string = (command_string + '\n')
channel.send("{}".format(command_string))
result = ''
try:
while True:
result += channel.recv(1)
if 'Press any key to continue' in result:
break
except socket.timeout:
pass
logger.debug(result)
class ApplianceConsoleCli(object):
def __init__(self, appliance):
self.appliance = appliance
def _run(self, appliance_console_cli_command):
return self.appliance.ssh_client.run_command(
"appliance_console_cli {}".format(appliance_console_cli_command))
def set_hostname(self, hostname):
self._run("--host {host}".format(host=hostname))
def configure_appliance_external_join(self, dbhostname,
username, password, dbname, fetch_key, sshlogin, sshpass):
self._run("--hostname {dbhostname} --username {username} --password {password}"
" --dbname {dbname} --verbose --fetch-key {fetch_key} --sshlogin {sshlogin}"
" --sshpassword {sshpass}".format(dbhostname=dbhostname, username=username,
password=password, dbname=dbname, fetch_key=fetch_key, sshlogin=sshlogin,
sshpass=sshpass))
def configure_appliance_external_create(self, region, dbhostname,
username, password, dbname, fetch_key, sshlogin, sshpass):
self._run("--region {region} --hostname {dbhostname} --username {username}"
" --password {password} --dbname {dbname} --verbose --fetch-key {fetch_key}"
" --sshlogin {sshlogin} --sshpassword {sshpass}".format(
region=region, dbhostname=dbhostname, username=username, password=password,
dbname=dbname, fetch_key=fetch_key, sshlogin=sshlogin, sshpass=sshpass))
def configure_appliance_internal_fetch_key(self, region, dbhostname,
username, password, dbname, fetch_key, sshlogin, sshpass):
self._run("--region {region} --internal --hostname {dbhostname} --username {username}"
" --password {password} --dbname {dbname} --verbose --fetch-key {fetch_key}"
" --sshlogin {sshlogin} --sshpassword {sshpass}".format(
region=region, dbhostname=dbhostname, username=username, password=password,
dbname=dbname, fetch_key=fetch_key, sshlogin=sshlogin, sshpass=sshpass))
def configure_ipa(self, ipaserver, username, password, domain, realm):
self._run("--ipaserver {ipaserver} --ipaprincipal {username} --ipapassword {password}"
" --ipadomain {domain} --iparealm {realm}".format(
ipaserver=ipaserver, username=username, password=password, domain=domain,
realm=realm))
assert self.appliance.ssh_client.run_command("systemctl status sssd | grep running")
return_code, output = self.appliance.ssh_client.run_command(
"cat /etc/ipa/default.conf | grep 'enable_ra = True'")
assert return_code == 0
def uninstall_ipa_client(self):
self._run("--uninstall-ipa")
return_code, output = self.appliance.ssh_client.run_command(
"cat /etc/ipa/default.conf")
assert return_code != 0
class IPAppliance(object):
"""IPAppliance represents an already provisioned cfme appliance whos provider is unknown
but who has an IP address. This has a lot of core functionality that Appliance uses, since
it knows both the provider, vm_name and can there for derive the IP address.
Args:
ipaddress: The IP address of the provider
browser_steal: If True then then current browser is killed and the new appliance
is used to generate a new session.
container: If the appliance is running as a container or as a pod, specifies its name.
openshift_creds: If the appliance runs as a project on openshift, provides credentials for
the openshift host so the framework can interact with the project.
db_host: If the database is located somewhere else than on the appliance itself, specify
the host here.
db_port: Database port.
ssh_port: SSH port.
"""
_nav_steps = {}
evmserverd = SystemdService.declare(unit_name='evmserverd')
db = ApplianceDB.declare()
CONFIG_MAPPING = {
'base_url': 'address',
'browser_steal': 'browser_steal',
'container': 'container',
'pod': 'container',
'openshift_creds': 'openshift_creds',
'db_host': 'db_host',
'db_port': 'db_port',
'ssh_port': 'ssh_port',
}
CONFIG_NONGLOBAL = {'base_url'}
@property
def as_json(self):
"""Dumps the arguments that can create this appliance as a JSON. None values are ignored."""
return json.dumps({
k: getattr(self, k)
for k in set(self.CONFIG_MAPPING.values())})
@classmethod
def from_json(cls, json_string):
return cls(**json.loads(json_string))
def __init__(
self, address=None, browser_steal=False, container=None, openshift_creds=None,
db_host=None, db_port=None, ssh_port=None):
self._server = None
self.browser = ViaUI(owner=self)
self.ssui = ViaSSUI(owner=self)
self.context = ImplementationContext.from_instances(
[self.browser, self.ssui])
from cfme.modeling.base import ApplianceCollections
self.collections = ApplianceCollections(self)
self.ssh_port = ssh_port or ports.SSH
self.db_port = db_port or ports.DB
if address is not None:
if not isinstance(address, ParseResult):
address = urlparse(str(address))
if not (address.scheme and address.netloc):
# Use .path (w.x.y.z ip format)
self.address = address.path
self.scheme = "https"
self._url = "https://{}/".format(address.path)
else:
# schema://w.x.y.z/ format
self.address = address.netloc
self.scheme = address.scheme
self._url = address.geturl()
self.browser_steal = browser_steal
self.container = container
self.openshift_creds = openshift_creds or {}
self.db_host = db_host
self._user = None
self.appliance_console = ApplianceConsole(self)
self.appliance_console_cli = ApplianceConsoleCli(self)
self.is_pod = False
def unregister(self):
""" unregisters appliance from RHSM/SAT6 """
self.ssh_client.run_command('subscription-manager remove --all')
self.ssh_client.run_command('subscription-manager unregister')
self.ssh_client.run_command('subscription-manager clean')
self.ssh_client.run_command('mv -f /etc/rhsm/rhsm.conf.kat-backup /etc/rhsm/rhsm.conf')
self.ssh_client.run_command('rpm -qa | grep katello-ca-consumer | xargs rpm -e')
def is_registration_complete(self, used_repo_or_channel):
""" Checks if an appliance has the correct repos enabled with RHSM or SAT6 """
ret, out = self.ssh_client.run_command('yum repolist enabled')
# Check that the specified (or default) repo (can be multiple, separated by a space)
# is enabled and that there are packages available
for repo in used_repo_or_channel.split(' '):
if (repo not in out) or (not re.search(r'repolist: [^0]', out)):
return False
return True
@property
def default_zone(self):
from cfme.base import Region, Zone
return Zone(self, region=Region(self, self.server_region()))
@property
def server(self):
if self._server is None:
from cfme.base import Server
self._server = Server(appliance=self, zone=self.default_zone, sid=self.server_id())
return self._server
@property
def user(self):
from cfme.configure.access_control import User
from cfme.base.credential import Credential
if self._user is None:
# Admin by default
username = conf.credentials['default']['username']
password = conf.credentials['default']['password']
logger.info(
'%r.user was set to None before, therefore generating an admin user: %s/%s',
self, username, password)
cred = Credential(principal=username, secret=password)
self._user = User(credential=cred, appliance=self, name='Administrator')
return self._user
@user.setter
def user(self, user_object):
if user_object is None:
logger.info('%r.user set to None, will be set to admin on next access', self)
self._user = user_object
@property
def appliance(self):
return self
def __repr__(self):
return '{}(address={!r}, container={!r}, db_host={!r}, db_port={!r}, ssh_port={!r})'.format(
type(self).__name__, self.address, self.container, self.db_host, self.db_port,
self.ssh_port)
def __call__(self, **kwargs):
"""Syntactic sugar for overriding certain instance variables for context managers.
Currently possible variables are:
* `browser_steal`
"""
self.browser_steal = kwargs.get("browser_steal", self.browser_steal)
return self
def __enter__(self):
""" This method will replace the current appliance in the store """
stack.push(self)
return self
def _screenshot_capture_at_context_leave(self, exc_type, exc_val, exc_tb):
try:
from fixtures.artifactor_plugin import fire_art_hook
from pytest import config
            from fixtures.pytest_store import store
except ImportError:
logger.info('Not inside pytest run, ignoring')
return
if (
exc_type is not None and not RUNNING_UNDER_SPROUT):
from cfme.fixtures.pytest_selenium import take_screenshot
logger.info("Before we pop this appliance, a screenshot and a traceback will be taken.")
ss, ss_error = take_screenshot()
full_tb = "".join(traceback.format_tb(exc_tb))
short_tb = "{}: {}".format(exc_type.__name__, str(exc_val))
full_tb = "{}\n{}".format(full_tb, short_tb)
g_id = "appliance-cm-screenshot-{}".format(fauxfactory.gen_alpha(length=6))
fire_art_hook(
config, 'filedump',
slaveid=store.slaveid,
description="Appliance CM error traceback", contents=full_tb, file_type="traceback",
display_type="danger", display_glyph="align-justify", group_id=g_id)
if ss:
fire_art_hook(
config, 'filedump',
slaveid=store.slaveid, description="Appliance CM error screenshot",
file_type="screenshot", mode="wb", contents_base64=True, contents=ss,
display_glyph="camera", group_id=g_id)
if ss_error:
fire_art_hook(
config, 'filedump',
slaveid=store.slaveid,
description="Appliance CM error screenshot failure", mode="w",
contents_base64=False, contents=ss_error, display_type="danger", group_id=g_id)
elif exc_type is not None:
logger.info("Error happened but we are not inside a test run so no screenshot now.")
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self._screenshot_capture_at_context_leave(exc_type, exc_val, exc_tb)
except Exception:
# repr is used in order to avoid having the appliance object in the log record
logger.exception("taking a screenshot for %s failed", repr(self))
finally:
assert stack.pop() is self, 'appliance stack inconsistent'
def __eq__(self, other):
return isinstance(other, IPAppliance) and self.address == other.address
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.address)
@cached_property
def rest_logger(self):
return create_sublogger('rest-api')
# Configuration methods
@logger_wrap("Configure IPAppliance: {}")
def configure(self, log_callback=None, **kwargs):
"""Configures appliance - database setup, rename, ntp sync
Utility method to make things easier.
Note:
db_address, name_to_set are not used currently.
Args:
db_address: Address of external database if set, internal database if ``None``
(default ``None``)
name_to_set: Name to set the appliance name to if not ``None`` (default ``None``)
region: Number to assign to region (default ``0``)
fix_ntp_clock: Fixes appliance time if ``True`` (default ``True``)
loosen_pgssl: Loosens postgres connections if ``True`` (default ``True``)
key_address: Fetch encryption key from this address if set, generate a new key if
``None`` (default ``None``)
"""
log_callback("Configuring appliance {}".format(self.address))
loosen_pgssl = kwargs.pop('loosen_pgssl', True)
fix_ntp_clock = kwargs.pop('fix_ntp_clock', True)
region = kwargs.pop('region', 0)
key_address = kwargs.pop('key_address', None)
with self as ipapp:
ipapp.wait_for_ssh()
# Debugging - ifcfg-eth0 overwritten by unknown process
            # Rules are permanent and will be reloaded after machine reboot
self.ssh_client.run_command(
"cp -pr /etc/sysconfig/network-scripts/ifcfg-eth0 /var/tmp", ensure_host=True)
self.ssh_client.run_command(
"echo '-w /etc/sysconfig/network-scripts/ifcfg-eth0 -p wa' >> "
"/etc/audit/rules.d/audit.rules", ensure_host=True)
self.ssh_client.run_command("systemctl daemon-reload", ensure_host=True)
self.ssh_client.run_command("service auditd restart", ensure_host=True)
self.deploy_merkyl(start=True, log_callback=log_callback)
if fix_ntp_clock:
self.fix_ntp_clock(log_callback=log_callback)
# TODO: Handle external DB setup
self.db.setup(region=region, key_address=key_address)
self.wait_for_evm_service(timeout=1200, log_callback=log_callback)
            # Some conditionally run items require the evm service to be
            # restarted:
restart_evm = False
if loosen_pgssl:
self.db.loosen_pgssl()
restart_evm = True
if self.version >= '5.8':
self.configure_vm_console_cert(log_callback=log_callback)
restart_evm = True
if restart_evm:
self.restart_evm_service(log_callback=log_callback)
self.wait_for_web_ui(timeout=1800, log_callback=log_callback)
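    # Illustrative usage of configure() -- a hedged sketch kept as a comment so nothing
    # runs at import time; it assumes a freshly deployed appliance reachable at the URL:
    #
    #   app = IPAppliance.from_url('https://10.0.0.1/')
    #   app.configure(region=0, fix_ntp_clock=True, loosen_pgssl=True)
    #   # once this returns, the web UI should answer at app.url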
# TODO: this method eventually needs to be moved to provider class..
@logger_wrap("Configure GCE IPAppliance: {}")
def configure_gce(self, log_callback=None):
self.wait_for_ssh(timeout=1200)
self.deploy_merkyl(start=True, log_callback=log_callback)
# TODO: Fix NTP on GCE instances.
# self.fix_ntp_clock(log_callback=log_callback)
self.db.enable_internal()
# evm serverd does not auto start on GCE instance..
self.start_evm_service(log_callback=log_callback)
self.wait_for_evm_service(timeout=1200, log_callback=log_callback)
self.wait_for_web_ui(timeout=1800, log_callback=log_callback)
self.db.loosen_pgssl()
self.wait_for_web_ui(timeout=1800, log_callback=log_callback)
def seal_for_templatizing(self):
"""Prepares the VM to be "generalized" for saving as a template."""
with self.ssh_client as ssh_client:
# Seals the VM in order to work when spawned again.
ssh_client.run_command("rm -rf /etc/ssh/ssh_host_*", ensure_host=True)
if ssh_client.run_command(
"grep '^HOSTNAME' /etc/sysconfig/network", ensure_host=True).rc == 0:
# Replace it
ssh_client.run_command(
"sed -i -r -e 's/^HOSTNAME=.*$/HOSTNAME=localhost.localdomain/' "
"/etc/sysconfig/network", ensure_host=True)
else:
# Set it
ssh_client.run_command(
"echo HOSTNAME=localhost.localdomain >> /etc/sysconfig/network",
ensure_host=True)
ssh_client.run_command(
"sed -i -r -e '/^HWADDR/d' /etc/sysconfig/network-scripts/ifcfg-eth0",
ensure_host=True)
ssh_client.run_command(
"sed -i -r -e '/^UUID/d' /etc/sysconfig/network-scripts/ifcfg-eth0",
ensure_host=True)
ssh_client.run_command("rm -f /etc/udev/rules.d/70-*", ensure_host=True)
# Fix SELinux things
ssh_client.run_command("restorecon -R /etc/sysconfig/network-scripts", ensure_host=True)
ssh_client.run_command("restorecon /etc/sysconfig/network", ensure_host=True)
# Stop the evmserverd and move the logs somewhere
ssh_client.run_command("systemctl stop evmserverd", ensure_host=True)
ssh_client.run_command("mkdir -p /var/www/miq/vmdb/log/preconfigure-logs",
ensure_host=True)
ssh_client.run_command(
"mv /var/www/miq/vmdb/log/*.log /var/www/miq/vmdb/log/preconfigure-logs/",
ensure_host=True)
ssh_client.run_command(
"mv /var/www/miq/vmdb/log/*.gz /var/www/miq/vmdb/log/preconfigure-logs/",
ensure_host=True)
# Reduce swapping, because it can do nasty things to our providers
ssh_client.run_command('echo "vm.swappiness = 1" >> /etc/sysctl.conf',
ensure_host=True)
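    # Illustrative usage of seal_for_templatizing() -- sketch only; it is typically the
    # last step before the VM is powered off and saved as a template:
    #
    #   app.seal_for_templatizing()
    #   # the VM is then stopped and templatized through the provider's own tooling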
def _encrypt_string(self, string):
try:
# Let's not log passwords
logging.disable(logging.CRITICAL)
rc, out = self.ssh_client.run_rails_command(
"\"puts MiqPassword.encrypt('{}')\"".format(string))
return out.strip()
finally:
logging.disable(logging.NOTSET)
@property
def managed_provider_names(self):
"""Returns a list of names for all providers configured on the appliance
Note:
Unlike ``managed_known_providers``, this will also return names of providers that were
not recognized, but are present.
"""
known_ems_list = []
for ems in self.rest_api.collections.providers:
if not any(
p_type in ems['type'] for p_type in RECOGNIZED_BY_IP + RECOGNIZED_BY_CREDS):
continue
known_ems_list.append(ems['name'])
return known_ems_list
@property
def managed_known_providers(self):
"""Returns a set of provider crud objects of known providers managed by this appliance
Note:
Recognized by name only.
"""
from cfme.utils.providers import list_providers
prov_cruds = list_providers(use_global_filters=False, appliance=self)
found_cruds = set()
unrecognized_ems_names = set()
for ems_name in self.managed_provider_names:
for prov in prov_cruds:
# Name check is authoritative and the only proper way to recognize a known provider
if ems_name == prov.name:
found_cruds.add(prov)
break
else:
unrecognized_ems_names.add(ems_name)
if unrecognized_ems_names:
self.log.warning(
"Unrecognized managed providers: {}".format(', '.join(unrecognized_ems_names)))
return list(found_cruds)
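    # Illustrative usage (sketch) -- both properties rely on the REST API being reachable
    # with the default credentials:
    #
    #   for name in app.managed_provider_names:
    #       print(name)                     # every provider present on the appliance
    #   for crud in app.managed_known_providers:
    #       print(crud.name)                # only providers known from the local yamls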
@classmethod
def from_url(cls, url):
return cls(urlparse(url))
def new_rest_api_instance(
self, entry_point=None, auth=None, logger="default", verify_ssl=False):
"""Returns new REST API instance."""
return MiqApi(
entry_point=entry_point or "{}://{}:{}/api".format(
self.scheme, self.address, self.ui_port),
auth=auth or (conf.credentials["default"]["username"],
conf.credentials["default"]["password"]),
logger=self.rest_logger if logger == "default" else logger,
verify_ssl=verify_ssl)
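    # Illustrative usage of new_rest_api_instance() -- sketch; the credentials below are
    # placeholders, the real defaults come from conf.credentials as shown above:
    #
    #   api = app.new_rest_api_instance(auth=('admin', 'password'), verify_ssl=False)
    #   print(api.server_info['version'])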
@cached_property
def rest_api(self):
return self.new_rest_api_instance()
@cached_property
def miqqe_version(self):
"""Returns version of applied JS patch or None if not present"""
rc, out = self.ssh_client.run_command('grep "[0-9]\+" /var/www/miq/vmdb/.miqqe_version')
if rc == 0:
return int(out)
return None
@cached_property
def address(self):
# If address wasn't set in __init__, use the hostname from base_url
if getattr(self, "_url", None) is not None:
parsed_url = urlparse(self._url)
return parsed_url.netloc
else:
parsed_url = urlparse(store.base_url)
return parsed_url.netloc
@cached_property
def hostname(self):
parsed_url = urlparse(self.url)
return parsed_url.hostname
@cached_property
def product_name(self):
try:
return self.rest_api.product_info['name']
except (AttributeError, KeyError, IOError):
self.log.exception(
'appliance.product_name could not be retrieved from REST, falling back')
try:
# We need to print to a file here because the deprecation warnings make it hard
# to get robust output and they do not seem to go to stderr
result = self.ssh_client.run_rails_command(
'"File.open(\'/tmp/product_name.txt\', \'w\') '
'{|f| f.write(I18n.t(\'product.name\')) }"')
result = self.ssh_client.run_command('cat /tmp/product_name.txt')
return result.output
except Exception:
logger.exception(
"Couldn't fetch the product name from appliance, using ManageIQ as default")
return 'ManageIQ'
@property
def ui_port(self):
parsed_url = urlparse(self.url)
if parsed_url.port is not None:
return parsed_url.port
elif parsed_url.scheme == "https":
return 443
elif parsed_url.scheme == "http":
return 80
else:
raise Exception("Unknown scheme {} for {}".format(parsed_url.scheme, store.base_url))
@cached_property
def scheme(self):
return "https" # By default
@cached_property
def url(self):
return "{}://{}/".format(self.scheme, self.address)
@cached_property
def is_downstream(self):
return self.product_name == 'CFME'
@cached_property
def version(self):
try:
version_string = self.rest_api.server_info['version']
except (AttributeError, KeyError, IOError):
self.log.exception('appliance.version could not be retrieved from REST, falling back')
res = self.ssh_client.run_command('cat /var/www/miq/vmdb/VERSION')
if res.rc != 0:
raise RuntimeError('Unable to retrieve appliance VMDB version')
version_string = res.output
return Version(version_string)
@cached_property
def build(self):
if not self.is_downstream:
return 'master'
try:
return self.rest_api.server_info['build']
except (AttributeError, KeyError, IOError):
self.log.exception('appliance.build could not be retrieved from REST, falling back')
res = self.ssh_client.run_command('cat /var/www/miq/vmdb/BUILD')
if res.rc != 0:
raise RuntimeError('Unable to retrieve appliance VMDB version')
return res.output.strip("\n")
@cached_property
def os_version(self):
# Currently parses the os version out of redhat release file to allow for
# rhel and centos appliances
res = self.ssh_client.run_command(
r"cat /etc/redhat-release | sed 's/.* release \(.*\) (.*/\1/' #)")
if res.rc != 0:
raise RuntimeError('Unable to retrieve appliance OS version')
return Version(res.output)
@cached_property
def log(self):
return create_sublogger(self.address)
@cached_property
def coverage(self):
return ui_coverage.CoverageManager(self)
def ssh_client_with_privatekey(self):
with open(conf_path.join('appliance_private_key').strpath, 'w') as key:
key.write(conf.credentials['ssh']['private_key'])
connect_kwargs = {
'hostname': self.hostname,
'username': conf.credentials['ssh']['ssh-user'],
'key_filename': conf_path.join('appliance_private_key').strpath,
}
ssh_client = ssh.SSHClient(**connect_kwargs)
        # FIXME: properly store ssh clients we made
store.ssh_clients_to_close.append(ssh_client)
return ssh_client
@cached_property
def ssh_client(self):
"""Creates an ssh client connected to this appliance
Returns: A configured :py:class:``utils.ssh.SSHClient`` instance.
Usage:
with appliance.ssh_client as ssh:
status, output = ssh.run_command('...')
Note:
The credentials default to those found under ``ssh`` key in ``credentials.yaml``.
"""
if not self.is_ssh_running:
raise Exception('SSH is unavailable')
# IPAppliance.ssh_client only connects to its address
if self.openshift_creds:
connect_kwargs = {
'hostname': self.openshift_creds['hostname'],
'username': self.openshift_creds['username'],
'password': self.openshift_creds['password'],
'container': self.container,
'is_pod': True,
'port': self.ssh_port,
}
self.is_pod = True
else:
connect_kwargs = {
'hostname': self.hostname,
'username': conf.credentials['ssh']['username'],
'password': conf.credentials['ssh']['password'],
'container': self.container,
'is_pod': False,
'port': self.ssh_port,
}
ssh_client = ssh.SSHClient(**connect_kwargs)
try:
ssh_client.get_transport().is_active()
logger.info('default appliance ssh credentials are valid')
except Exception as e:
logger.error(e)
logger.error('default appliance ssh credentials failed, trying establish ssh connection'
' using ssh private key')
ssh_client = self.ssh_client_with_privatekey()
        # FIXME: properly store ssh clients we made
store.ssh_clients_to_close.append(ssh_client)
return ssh_client
@property
def swap(self):
"""Retrieves the value of swap for the appliance. Might raise an exception if SSH fails.
Return:
An integer value of swap in the VM in megabytes. If ``None`` is returned, it means it
was not possible to parse the command output.
Raises:
:py:class:`paramiko.ssh_exception.SSHException` or :py:class:`socket.error`
"""
try:
server = self.rest_api.get_entity_by_href(self.rest_api.server_info['server_href'])
return server.system_swap_used / 1024 / 1024
except (AttributeError, KeyError, IOError):
self.log.exception('appliance.swap could not be retrieved from REST, falling back')
value = self.ssh_client.run_command(
'free -m | tr -s " " " " | cut -f 3 -d " " | tail -n 1', reraise=True, timeout=15)
try:
value = int(value.output.strip())
except (TypeError, ValueError):
value = None
return value
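    # Illustrative usage of swap (sketch) -- None means the free(1) output could not be
    # parsed, and SSH/socket errors propagate to the caller:
    #
    #   used_mb = app.swap
    #   if used_mb is not None and used_mb > 0:
    #       logger.warning('appliance is swapping: %s MiB used', used_mb)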
def event_listener(self):
"""Returns an instance of the event listening class pointed to this appliance."""
return EventListener(self)
def diagnose_evm_failure(self):
"""Go through various EVM processes, trying to figure out what fails
Returns: A string describing the error, or None if no errors occurred.
This is intended to be run after an appliance is configured but failed for some reason,
such as in the template tester.
"""
logger.info('Diagnosing EVM failures, this can take a while...')
if not self.address:
return 'appliance has no IP Address; provisioning failed or networking is broken'
logger.info('Checking appliance SSH Connection')
if not self.is_ssh_running:
return 'SSH is not running on the appliance'
# Now for the DB
logger.info('Checking appliance database')
if not self.db.online:
# postgres isn't running, try to start it
cmd = 'systemctl restart {}-postgresql'.format(self.db.postgres_version)
result = self.db.ssh_client.run_command(cmd)
if result.rc != 0:
return 'postgres failed to start:\n{}'.format(result.output)
else:
return 'postgres was not running for unknown reasons'
if not self.db.has_database:
return 'vmdb_production database does not exist'
if not self.db.has_tables:
return 'vmdb_production has no tables'
# try to start EVM
logger.info('Checking appliance evmserverd service')
try:
self.restart_evm_service()
except ApplianceException as ex:
return 'evmserverd failed to start:\n{}'.format(ex.args[0])
# This should be pretty comprehensive, but we might add some net_checks for
# 3000, 4000, and 80 at this point, and waiting a reasonable amount of time
# before exploding if any of them don't appear in time after evm restarts.
@logger_wrap("Fix NTP Clock: {}")
def fix_ntp_clock(self, log_callback=None):
"""Fixes appliance time using ntpdate on appliance"""
log_callback('Fixing appliance clock')
client = self.ssh_client
# checking whether chrony is installed
check_cmd = 'yum list installed chrony'
if client.run_command(check_cmd).rc != 0:
raise ApplianceException("Chrony isn't installed")
        # checking whether it is enabled and enable it
is_enabled_cmd = 'systemctl is-enabled chronyd'
if client.run_command(is_enabled_cmd).rc != 0:
logger.debug("chrony will start on system startup")
client.run_command('systemctl enable chronyd')
client.run_command('systemctl daemon-reload')
# Retrieve time servers from yamls
server_template = 'server {srv} iburst'
time_servers = set()
try:
logger.debug('obtaining clock servers from config file')
clock_servers = conf.cfme_data.get('clock_servers')
for clock_server in clock_servers:
time_servers.add(server_template.format(srv=clock_server))
except TypeError:
msg = 'No clock servers configured in cfme_data.yaml'
log_callback(msg)
raise ApplianceException(msg)
filename = '/etc/chrony.conf'
chrony_conf = set(client.run_command("cat {f}".format(f=filename)).output.strip()
.split('\n'))
modified_chrony_conf = chrony_conf.union(time_servers)
if modified_chrony_conf != chrony_conf:
modified_chrony_conf = "\n".join(list(modified_chrony_conf))
client.run_command('echo "{txt}" > {f}'.format(txt=modified_chrony_conf, f=filename))
logger.info("chrony's config file updated")
conf_file_updated = True
else:
logger.info("chrony's config file hasn't been changed")
conf_file_updated = False
if conf_file_updated or client.run_command('systemctl status chronyd').rc != 0:
logger.debug('restarting chronyd')
client.run_command('systemctl restart chronyd')
# check that chrony is running correctly now
result = client.run_command('chronyc tracking')
if result.rc == 0:
logger.info('chronyc is running correctly')
else:
raise ApplianceException("chrony doesn't work. "
"Error message: {e}".format(e=result.output))
@property
def is_miqqe_patch_candidate(self):
return self.version < "5.6.3"
@property
def miqqe_patch_applied(self):
return self.miqqe_version == current_miqqe_version
@logger_wrap("Patch appliance with MiqQE js: {}")
def patch_with_miqqe(self, log_callback=None):
# (local_path, remote_path, md5/None) trio
autofocus_patch = pick({
'5.5': 'autofocus.js.diff',
'5.7': 'autofocus_57.js.diff'
})
patch_args = (
(str(patches_path.join('miq_application.js.diff')),
'/var/www/miq/vmdb/app/assets/javascripts/miq_application.js',
None),
(str(patches_path.join(autofocus_patch)),
'/var/www/miq/vmdb/app/assets/javascripts/directives/autofocus.js',
None),
)
for local_path, remote_path, md5 in patch_args:
self.ssh_client.patch_file(local_path, remote_path, md5)
self.precompile_assets()
self.restart_evm_service()
logger.info("Waiting for Web UI to start")
wait_for(
func=self.is_web_ui_running,
message='appliance.is_web_ui_running',
delay=20,
timeout=300)
logger.info("Web UI is up and running")
self.ssh_client.run_command(
"echo '{}' > /var/www/miq/vmdb/.miqqe_version".format(current_miqqe_version))
# Invalidate cached version
del self.miqqe_version
@logger_wrap("Work around missing Gem file: {}")
def workaround_missing_gemfile(self, log_callback=None):
"""Fix Gemfile issue.
Early 5.4 builds have issues with Gemfile not present (BUG 1191496). This circumvents the
issue with pointing the env variable that Bundler uses to get the Gemfile to the Gemfile in
vmdb which *should* be correct.
When this issue is resolved, this method will do nothing.
"""
client = self.ssh_client
status, out = client.run_command("ls /opt/rh/cfme-gemset")
if status != 0:
return # Not needed
log_callback('Fixing Gemfile issue')
# Check if the error is there
status, out = client.run_rails_command("puts 1")
if status == 0:
return # All OK!
client.run_command('echo "export BUNDLE_GEMFILE=/var/www/miq/vmdb/Gemfile" >> /etc/bashrc')
# To be 100% sure
self.reboot(wait_for_web_ui=False, log_callback=log_callback)
@logger_wrap("Precompile assets: {}")
def precompile_assets(self, log_callback=None):
"""Precompile the static assets (images, css, etc) on an appliance
"""
log_callback('Precompiling assets')
client = self.ssh_client
store.terminalreporter.write_line('Precompiling assets')
store.terminalreporter.write_line(
'THIS IS NOT STUCK. Just wait until it\'s done, it will be only done once', red=True)
store.terminalreporter.write_line('Phase 1 of 2: rake assets:clobber')
status, out = client.run_rake_command("assets:clobber")
if status != 0:
msg = 'Appliance {} failed to nuke old assets'.format(self.address)
log_callback(msg)
raise ApplianceException(msg)
store.terminalreporter.write_line('Phase 2 of 2: rake assets:precompile')
status, out = client.run_rake_command("assets:precompile")
if status != 0:
msg = 'Appliance {} failed to precompile assets'.format(self.address)
log_callback(msg)
raise ApplianceException(msg)
store.terminalreporter.write_line('Asset precompilation done')
return status
@logger_wrap("Clone automate domain: {}")
def clone_domain(self, source="ManageIQ", dest="Default", log_callback=None):
"""Clones Automate domain
Args:
            source: Source domain name.
            dest: Destination domain name.
"""
client = self.ssh_client
# Make sure the database is ready
log_callback('Waiting for database')
self.db.wait_for()
# Make sure the working dir exists
client.run_command('mkdir -p /tmp/{}'.format(source))
export_opts = 'DOMAIN={} EXPORT_DIR=/tmp/{} PREVIEW=false OVERWRITE=true'.format(source,
source)
export_cmd = 'evm:automate:export {}'.format(export_opts)
log_callback('Exporting domain ({}) ...'.format(export_cmd))
status, output = client.run_rake_command(export_cmd)
if status != 0:
msg = 'Failed to export {} domain'.format(source)
log_callback(msg)
raise ApplianceException(msg)
ro_fix_cmd = ("sed -i 's/system: true/system: false/g' "
"/tmp/{}/{}/__domain__.yaml".format(source, source))
status, output = client.run_command(ro_fix_cmd)
if status != 0:
msg = 'Setting {} domain to read/write failed'.format(dest)
log_callback(msg)
raise ApplianceException(msg)
import_opts = 'DOMAIN={} IMPORT_DIR=/tmp/{} PREVIEW=false'.format(source, source)
import_opts += ' OVERWRITE=true IMPORT_AS={} ENABLED=true'.format(dest)
import_cmd = 'evm:automate:import {}'.format(import_opts)
log_callback('Importing domain ({}) ...'.format(import_cmd))
status, output = client.run_rake_command(import_cmd)
if status != 0:
msg = 'Failed to import {} domain'.format(dest)
log_callback(msg)
raise ApplianceException(msg)
return status, output
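    # Illustrative usage of clone_domain() (sketch) -- the defaults clone the locked
    # ManageIQ automate domain into an editable "Default" domain:
    #
    #   status, output = app.clone_domain(source='ManageIQ', dest='Default')
    #   assert status == 0, output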
@logger_wrap("Deploying Merkyl: {}")
def deploy_merkyl(self, start=False, log_callback=None):
"""Deploys the Merkyl log relay service to the appliance"""
client = self.ssh_client
client.run_command('mkdir -p /root/merkyl')
for filename in ['__init__.py', 'merkyl.tpl', ('bottle.py.dontflake', 'bottle.py'),
'allowed.files']:
try:
src, dest = filename
except (TypeError, ValueError):
# object is not iterable or too many values to unpack
src = dest = filename
log_callback('Sending {} to appliance'.format(src))
client.put_file(data_path.join(
'bundles', 'merkyl', src).strpath, os.path.join('/root/merkyl', dest))
client.put_file(data_path.join(
'bundles', 'merkyl', 'merkyl').strpath, os.path.join('/etc/init.d/merkyl'))
client.run_command('chmod 775 /etc/init.d/merkyl')
client.run_command(
'/bin/bash -c \'if ! [[ $(iptables -L -n | grep "state NEW tcp dpt:8192") ]]; then '
'iptables -I INPUT 6 -m state --state NEW -m tcp -p tcp --dport 8192 -j ACCEPT; fi\'')
if start:
log_callback("Starting ...")
client.run_command('systemctl restart merkyl')
log_callback("Setting it to start after reboot")
client.run_command("chkconfig merkyl on")
def get_repofile_list(self):
"""Returns list of repofiles present at the appliance.
Ignores certain files, like redhat.repo.
"""
repofiles = self.ssh_client.run_command('ls /etc/yum.repos.d').output.strip().split('\n')
return [f for f in repofiles if f not in {"redhat.repo"} and f.endswith(".repo")]
def read_repos(self):
"""Reads repofiles so it gives you mapping of id and url."""
result = {}
name_regexp = re.compile(r"^\[update-([^\]]+)\]")
baseurl_regexp = re.compile(r"baseurl\s*=\s*([^\s]+)")
for repofile in self.get_repofile_list():
rc, out = self.ssh_client.run_command("cat /etc/yum.repos.d/{}".format(repofile))
if rc != 0:
# Something happened meanwhile?
continue
out = out.strip()
name_match = name_regexp.search(out)
if name_match is None:
continue
baseurl_match = baseurl_regexp.search(out)
if baseurl_match is None:
continue
result[name_match.groups()[0]] = baseurl_match.groups()[0]
return result
# Regexp that looks for product type and version in the update URL
product_url_regexp = re.compile(
r"/((?:[A-Z]+|CloudForms|rhel|RHEL_Guest))(?:-|/|/server/)(\d+[^/]*)/")
def find_product_repos(self):
"""Returns a dictionary of products, where the keys are names of product (repos) and values
are dictionaries where keys are the versions and values the names of the repositories.
"""
products = {}
for repo_name, repo_url in self.read_repos().iteritems():
match = self.product_url_regexp.search(repo_url)
if match is None:
continue
product, ver = match.groups()
if product not in products:
products[product] = {}
products[product][ver] = repo_name
return products
def write_repofile(self, repo_id, repo_url, **kwargs):
"""Wrapper around writing a repofile. You can specify conf options in kwargs."""
if "gpgcheck" not in kwargs:
kwargs["gpgcheck"] = 0
if "enabled" not in kwargs:
kwargs["enabled"] = 1
filename = "/etc/yum.repos.d/{}.repo".format(repo_id)
logger.info("Writing a new repofile %s %s", repo_id, repo_url)
self.ssh_client.run_command('echo "[update-{}]" > {}'.format(repo_id, filename))
self.ssh_client.run_command('echo "name=update-url-{}" >> {}'.format(repo_id, filename))
self.ssh_client.run_command('echo "baseurl={}" >> {}'.format(repo_url, filename))
for k, v in kwargs.iteritems():
self.ssh_client.run_command('echo "{}={}" >> {}'.format(k, v, filename))
return repo_id
def add_product_repo(self, repo_url, **kwargs):
"""This method ensures that when we add a new repo URL, there will be no other version
of such product present in the yum.repos.d. You can specify conf options in kwargs. They
will be applied only to newly created repo file.
Returns:
The repo id.
"""
match = self.product_url_regexp.search(repo_url)
if match is None:
raise ValueError(
"The URL {} does not contain information about product and version.".format(
repo_url))
for repo_id, url in self.read_repos().iteritems():
if url == repo_url:
# It is already there, so just enable it
self.enable_disable_repo(repo_id, True)
return repo_id
product, ver = match.groups()
repos = self.find_product_repos()
if product in repos:
for v, i in repos[product].iteritems():
logger.info("Deleting %s repo with version %s (%s)", product, v, i)
self.ssh_client.run_command("rm -f /etc/yum.repos.d/{}.repo".format(i))
return self.write_repofile(fauxfactory.gen_alpha(), repo_url, **kwargs)
def enable_disable_repo(self, repo_id, enable):
logger.info("%s repository %s", "Enabling" if enable else "Disabling", repo_id)
return self.ssh_client.run_command(
"sed -i 's/^enabled=./enabled={}/' /etc/yum.repos.d/{}.repo".format(
1 if enable else 0, repo_id)).rc == 0
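    # Illustrative repo management flow (sketch; the URL is a placeholder that merely
    # matches product_url_regexp, not a real repository):
    #
    #   repo_id = app.add_product_repo('http://example.com/CloudForms/5.8/x86_64/')
    #   app.enable_disable_repo(repo_id, True)
    #   print(app.read_repos())             # {repo_id: baseurl, ...}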
@logger_wrap("Update RHEL: {}")
def update_rhel(self, *urls, **kwargs):
"""Update RHEL on appliance
        If no URLs are passed in, they are pulled from the 'updates_urls' environment
        variable (whitespace-separated URLs) when it is set; otherwise they fall back to
        cfme_data.
Generic rhel update URLs cfme_data.get('basic_info', {})['rhel_updates_urls'] (yaml list)
On downstream builds, an additional RH SCL updates url can be inserted at
cfme_data.get('basic_info', {})['rhscl_updates_urls'].
If the ``skip_broken`` kwarg is passed, and evaluated as True, broken packages will be
ignored in the yum update.
"""
urls = list(urls)
log_callback = kwargs.pop("log_callback")
skip_broken = kwargs.pop("skip_broken", False)
reboot = kwargs.pop("reboot", True)
streaming = kwargs.pop("streaming", False)
cleanup = kwargs.pop('cleanup', False)
log_callback('updating appliance')
if not urls:
basic_info = conf.cfme_data.get('basic_info', {})
if os.environ.get('updates_urls'):
# try to pull URLs from env if var is non-empty
                urls.extend(os.environ['updates_urls'].split())
else:
# fall back to cfme_data
if self.version >= "5.5":
updates_url = basic_info.get('rhel7_updates_url')
else:
updates_url = basic_info.get('rhel_updates_url')
if updates_url:
urls.append(updates_url)
if streaming:
client = self.ssh_client(stream_output=True)
else:
client = self.ssh_client
if cleanup:
client.run_command(
"cd /etc/yum.repos.d && find . -not -name 'redhat.repo' "
"-not -name 'rhel-source.repo' -not -name . -exec rm {} \;")
for url in urls:
self.add_product_repo(url)
# update
log_callback('Running rhel updates on appliance')
# clean yum beforehand to clear metadata from earlier update repos, if any
try:
skip = '--skip-broken' if skip_broken else ''
result = client.run_command('yum update -y --nogpgcheck {}'.format(skip),
timeout=3600)
except socket.timeout:
msg = 'SSH timed out while updating appliance, exiting'
log_callback(msg)
# failure to update is fatal, kill this process
raise KeyboardInterrupt(msg)
self.log.error(result.output)
if result.rc != 0:
self.log.error('appliance update failed')
msg = 'Appliance {} failed to update RHEL, error in logs'.format(self.address)
log_callback(msg)
raise ApplianceException(msg)
if reboot:
self.reboot(wait_for_web_ui=False, log_callback=log_callback)
return result
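    # Illustrative usage of update_rhel() (sketch; the URL is a placeholder and the
    # appliance reboots afterwards unless reboot=False is passed):
    #
    #   app.update_rhel('http://example.com/rhel/7Server/x86_64/os/', skip_broken=True)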
def utc_time(self):
client = self.ssh_client
status, output = client.run_command('date --iso-8601=seconds -u')
if not status:
return dateutil.parser.parse(output)
else:
raise Exception("Couldn't get datetime: {}".format(output))
def _check_appliance_ui_wait_fn(self):
# Get the URL, don't verify ssl cert
try:
response = requests.get(self.url, timeout=15, verify=False)
if response.status_code == 200:
self.log.info("Appliance online")
return True
else:
self.log.debug('Appliance online, status code %s', response.status_code)
except requests.exceptions.Timeout:
self.log.debug('Appliance offline, connection timed out')
except ValueError:
# requests exposes invalid URLs as ValueErrors, which is excellent
raise
except Exception as ex:
self.log.debug('Appliance online, but connection failed: %s', str(ex))
return False
def is_web_ui_running(self, unsure=False):
"""Triple checks if web UI is up and running
Args:
unsure: Variable to return when not sure if web UI is running or not
(default ``False``)
"""
num_of_tries = 3
was_running_count = 0
for try_num in range(num_of_tries):
if self._check_appliance_ui_wait_fn():
was_running_count += 1
sleep(3)
if was_running_count == 0:
return False
elif was_running_count == num_of_tries:
return True
else:
return unsure
def _evm_service_command(self, command, log_callback, expected_exit_code=None):
"""Runs given systemctl command against the ``evmserverd`` service
Args:
command: Command to run, e.g. "start"
expected_exit_code: If the exit codes don't match, ApplianceException is raised
"""
log_callback("Running command '{}' against the evmserverd service".format(command))
with self.ssh_client as ssh:
status, output = ssh.run_command('systemctl {} evmserverd'.format(command))
if expected_exit_code is not None and status != expected_exit_code:
msg = 'Failed to {} evmserverd on {}\nError: {}'.format(command, self.address, output)
log_callback(msg)
raise ApplianceException(msg)
return status
@logger_wrap("Status of EVM service: {}")
def is_evm_service_running(self, log_callback=None):
"""Checks the ``evmserverd`` service status on this appliance
"""
return self._evm_service_command("status", log_callback=log_callback) == 0
@logger_wrap("Start EVM Service: {}")
def start_evm_service(self, log_callback=None):
"""Starts the ``evmserverd`` service on this appliance
"""
self._evm_service_command('start', expected_exit_code=0, log_callback=log_callback)
@logger_wrap("Stop EVM Service: {}")
def stop_evm_service(self, log_callback=None):
"""Stops the ``evmserverd`` service on this appliance
"""
self._evm_service_command('stop', expected_exit_code=0, log_callback=log_callback)
@logger_wrap("Restart EVM Service: {}")
def restart_evm_service(self, rude=False, log_callback=None):
"""Restarts the ``evmserverd`` service on this appliance
"""
store.terminalreporter.write_line('evmserverd is being restarted, be patient please')
with self.ssh_client as ssh:
if rude:
log_callback('restarting evm service by killing processes')
status, msg = ssh.run_command(
'killall -9 ruby; systemctl restart {}-postgresql'.format(
self.db.postgres_version))
self._evm_service_command("start", expected_exit_code=0, log_callback=log_callback)
else:
self._evm_service_command(
"restart", expected_exit_code=0, log_callback=log_callback)
self.server_details_changed()
@logger_wrap("Waiting for EVM service: {}")
def wait_for_evm_service(self, timeout=900, log_callback=None):
"""Waits for the evemserverd service to be running
Args:
timeout: Number of seconds to wait until timeout (default ``900``)
"""
log_callback('Waiting for evmserverd to be running')
result, wait = wait_for(self.is_evm_service_running, num_sec=timeout,
fail_condition=False, delay=10)
return result
@logger_wrap("Rebooting Appliance: {}")
def reboot(self, wait_for_web_ui=True, log_callback=None):
log_callback('Rebooting appliance')
client = self.ssh_client
old_uptime = client.uptime()
status, out = client.run_command('reboot')
wait_for(lambda: client.uptime() < old_uptime, handle_exception=True,
num_sec=600, message='appliance to reboot', delay=10)
if wait_for_web_ui:
self.wait_for_web_ui()
@logger_wrap("Waiting for web_ui: {}")
def wait_for_web_ui(self, timeout=900, running=True, log_callback=None):
"""Waits for the web UI to be running / to not be running
Args:
            timeout: Number of seconds to wait until timeout (default ``900``)
running: Specifies if we wait for web UI to start or stop (default ``True``)
``True`` == start, ``False`` == stop
"""
prefix = "" if running else "dis"
(log_callback or self.log.info)('Waiting for web UI to ' + prefix + 'appear')
result, wait = wait_for(self._check_appliance_ui_wait_fn, num_sec=timeout,
fail_condition=not running, delay=10)
return result
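    # Illustrative restart-and-wait pattern (sketch):
    #
    #   app.restart_evm_service()
    #   app.wait_for_web_ui(timeout=1800)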
@logger_wrap("Install VDDK: {}")
def install_vddk(self, force=False, vddk_url=None, log_callback=None):
"""Install the vddk on a appliance"""
def log_raise(exception_class, message):
log_callback(message)
raise exception_class(message)
if vddk_url is None: # fallback to VDDK 5.5
vddk_url = conf.cfme_data.get("basic_info", {}).get("vddk_url", {}).get("v5_5")
if vddk_url is None:
raise Exception("vddk_url not specified!")
with self.ssh_client as client:
is_already_installed = False
if client.run_command('test -d /usr/lib/vmware-vix-disklib/lib64')[0] == 0:
is_already_installed = True
if not is_already_installed or force:
# start
filename = vddk_url.split('/')[-1]
# download
log_callback('Downloading VDDK')
result = client.run_command('curl {} -o {}'.format(vddk_url, filename))
if result.rc != 0:
log_raise(Exception, "Could not download VDDK")
# install
log_callback('Installing vddk')
status, out = client.run_command(
'yum -y install {}'.format(filename))
if status != 0:
log_raise(
                        Exception, 'VDDK installation failure (rc: {})\n{}'.format(status, out))
# verify
log_callback('Verifying vddk')
status, out = client.run_command('ldconfig -p | grep vix')
if len(out) < 2:
log_raise(
Exception,
"Potential installation issue, libraries not detected\n{}".format(out))
@logger_wrap("Uninstall VDDK: {}")
def uninstall_vddk(self, log_callback=None):
"""Uninstall the vddk from an appliance"""
with self.ssh_client as client:
is_installed = client.run_command('test -d /usr/lib/vmware-vix-disklib/lib64').success
if is_installed:
status, out = client.run_command('yum -y remove vmware-vix-disklib')
if status != 0:
                    log_callback('VDDK removing failure (rc: {})\n{}'.format(status, out))
                    raise Exception('VDDK removing failure (rc: {})\n{}'.format(status, out))
else:
log_callback('VDDK has been successfully removed.')
else:
log_callback('VDDK is not installed.')
@logger_wrap("Install Netapp SDK: {}")
def install_netapp_sdk(self, sdk_url=None, reboot=False, log_callback=None):
"""Installs the Netapp SDK.
Args:
            sdk_url: URL of the SDK zip file (optional).
            reboot: Whether to reboot the appliance afterwards (default ``False``, but a
                reboot is needed for the SDK to take effect).
"""
def log_raise(exception_class, message):
log_callback(message)
raise exception_class(message)
if sdk_url is None:
try:
sdk_url = conf.cfme_data['basic_info']['netapp_sdk_url']
except KeyError:
raise Exception("cfme_data.yaml/basic_info/netapp_sdk_url is not present!")
filename = sdk_url.split('/')[-1]
foldername = os.path.splitext(filename)[0]
with self.ssh_client as ssh:
log_callback('Downloading SDK from {}'.format(sdk_url))
status, out = ssh.run_command(
'wget {url} -O {file} > /root/unzip.out 2>&1'.format(
url=sdk_url, file=filename))
if status != 0:
log_raise(Exception, 'Could not download Netapp SDK: {}'.format(out))
log_callback('Extracting SDK ({})'.format(filename))
status, out = ssh.run_command(
'unzip -o -d /var/www/miq/vmdb/lib/ {}'.format(filename))
if status != 0:
log_raise(Exception, 'Could not extract Netapp SDK: {}'.format(out))
path = '/var/www/miq/vmdb/lib/{}/lib/linux-64'.format(foldername)
# Check if we haven't already added this line
if ssh.run_command("grep -F '{}' /etc/default/evm".format(path)).rc != 0:
log_callback('Installing SDK ({})'.format(foldername))
status, out = ssh.run_command(
'echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:{}" >> /etc/default/evm'.format(
path))
if status != 0:
log_raise(Exception, 'SDK installation failure ($?={}): {}'.format(status, out))
else:
log_callback("Not needed to install, already done")
log_callback('ldconfig')
ssh.run_command('ldconfig')
log_callback('Modifying YAML configuration')
c_yaml = self.get_yaml_config()
c_yaml['product']['storage'] = True
self.set_yaml_config(c_yaml)
# To mark that we installed netapp
ssh.run_command("touch /var/www/miq/vmdb/HAS_NETAPP")
if reboot:
self.reboot(log_callback=log_callback)
else:
log_callback(
'Appliance must be restarted before the netapp functionality can be used.')
clear_property_cache(self, 'is_storage_enabled')
@logger_wrap('Updating appliance UUID: {}')
def update_guid(self, log_callback=None):
guid_gen = 'uuidgen |tee /var/www/miq/vmdb/GUID'
log_callback('Running {} to generate UUID'.format(guid_gen))
with self.ssh_client as ssh:
result = ssh.run_command(guid_gen)
assert result.success, 'Failed to generate UUID'
log_callback('Updated UUID: {}'.format(str(result)))
try:
del self.__dict__['guid'] # invalidate cached_property
except KeyError:
logger.exception('Exception clearing cached_property "guid"')
return str(result).rstrip('\n') # should return UUID from stdout
def wait_for_ssh(self, timeout=600):
"""Waits for appliance SSH connection to be ready
Args:
timeout: Number of seconds to wait until timeout (default ``600``)
"""
wait_for(func=lambda: self.is_ssh_running,
message='appliance.is_ssh_running',
delay=5,
num_sec=timeout)
@property
def is_supervisord_running(self):
output = self.ssh_client.run_command("systemctl status supervisord")
return output.success
@property
def is_nginx_running(self):
output = self.ssh_client.run_command("systemctl status nginx")
return output.success
@property
def is_rabbitmq_running(self):
output = self.ssh_client.run_command("systemctl status rabbitmq-server")
return output.success
@property
def is_embedded_ensible_role_enabled(self):
return self.server_roles.get("embedded_ansible", False)
@property
def is_embedded_ansible_running(self):
return self.is_embedded_ensible_role_enabled and self.is_supervisord_running
def wait_for_embedded_ansible(self, timeout=900):
"""Waits for embedded ansible to be ready
Args:
timeout: Number of seconds to wait until timeout (default ``900``)
"""
wait_for(
func=lambda: self.is_embedded_ansible_running,
message='appliance.is_embedded_ansible_running',
delay=60,
num_sec=timeout
)
@cached_property
def get_host_address(self):
try:
server = self.get_yaml_config().get('server')
if server:
return server.get('host')
except Exception as e:
logger.exception(e)
            self.log.error('Exception occurred while fetching host address')
def wait_for_host_address(self):
try:
wait_for(func=lambda: getattr(self, 'get_host_address'),
fail_condition=None,
delay=5,
num_sec=120)
return self.get_host_address
except Exception as e:
logger.exception(e)
            self.log.error('waiting for host address from yaml_config timed out')
@property
def is_ssh_running(self):
return net_check(ports.SSH, self.hostname, force=True)
@property
def has_cli(self):
if self.ssh_client.run_command('ls -l /bin/appliance_console_cli')[0] == 0:
return True
else:
return False
@property
def is_idle(self):
"""Return appliance idle state measured by last production.log activity.
        It runs a one-liner script which first gathers the current date on the appliance
        and then the date of the last entry in production.log (which has to be parsed),
        with the periodic /api calls filtered out (these calls occur every minute).
        It then subtracts that last log time from the current date; if the difference is
        lower than idle_time the appliance is considered busy, otherwise idle.
        Returns:
            True if the appliance has been idling for at least idle_time seconds.
            False otherwise.
"""
idle_time = 3600
ssh_output = self.ssh_client.run_command('if [ $((`date "+%s"` - `date -d "$(egrep -v '
'"(Processing by Api::ApiController\#index as JSON|Started GET "/api" for '
'127.0.0.1|Completed 200 OK in)" /var/www/miq/vmdb/log/production.log | tail -1 |cut '
'-d"[" -f3 | cut -d"]" -f1 | cut -d" " -f1)\" \"+%s\"`)) -lt {} ];'
'then echo "False";'
'else echo "True";'
'fi;'.format(idle_time))
return True if 'True' in ssh_output else False
@cached_property
def build_datetime(self):
build_datetime_string = self.build.split('_', 1)[0]
return datetime.strptime(build_datetime_string, '%Y%m%d%H%M%S')
@cached_property
def build_date(self):
return self.build_datetime.date()
def has_netapp(self):
return self.ssh_client.appliance_has_netapp()
@cached_property
def guid(self):
try:
server = self.rest_api.get_entity_by_href(self.rest_api.server_info['server_href'])
return server.guid
except (AttributeError, KeyError, IOError):
self.log.exception('appliance.guid could not be retrieved from REST, falling back')
result = self.ssh_client.run_command('cat /var/www/miq/vmdb/GUID')
return result.output
@cached_property
def evm_id(self):
try:
server = self.rest_api.get_entity_by_href(self.rest_api.server_info['server_href'])
return server.id
except (AttributeError, KeyError, IOError):
self.log.exception('appliance.evm_id could not be retrieved from REST, falling back')
miq_servers = self.db.client['miq_servers']
return self.db.client.session.query(
miq_servers.id).filter(miq_servers.guid == self.guid)[0][0]
@property
def server_roles(self):
"""Return a dictionary of server roles from database"""
asr = self.db.client['assigned_server_roles']
sr = self.db.client['server_roles']
all_role_names = {row[0] for row in self.db.client.session.query(sr.name)}
# Query all active server roles assigned to this server
query = self.db.client.session\
.query(sr.name)\
.join(asr, asr.server_role_id == sr.id)\
.filter(asr.miq_server_id == self.evm_id)\
.filter(asr.active == True) # noqa
active_roles = {row[0] for row in query}
roles = {role_name: role_name in active_roles for role_name in all_role_names}
dead_keys = ['database_owner', 'vdi_inventory']
for key in roles:
if not self.is_storage_enabled:
if key.startswith('storage'):
dead_keys.append(key)
if key == 'vmdb_storage_bridge':
dead_keys.append(key)
for key in dead_keys:
try:
del roles[key]
except KeyError:
pass
return roles
@server_roles.setter
def server_roles(self, roles):
"""Sets the server roles. Requires a dictionary full of the role keys with bool values."""
if self.server_roles == roles:
self.log.debug(' Roles already match, returning...')
return
yaml = self.get_yaml_config()
yaml['server']['role'] = ','.join([role for role, boolean in roles.iteritems() if boolean])
self.set_yaml_config(yaml)
wait_for(lambda: self.server_roles == roles, num_sec=300, delay=15)
def enable_embedded_ansible_role(self):
"""Enables embbeded ansible role
This is necessary because server_roles does not wait long enough"""
roles = self.server_roles
roles['embedded_ansible'] = True
try:
self.server_roles = roles
except TimedOutError:
wait_for(lambda: self.server_roles == roles, num_sec=600, delay=15)
self.wait_for_embedded_ansible()
def update_server_roles(self, changed_roles):
server_roles = self.server_roles.copy()
server_roles.update(changed_roles)
self.server_roles = server_roles
return server_roles == self.server_roles
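    # Illustrative role toggling (sketch; the role names are examples of keys returned
    # by server_roles, availability depends on the appliance):
    #
    #   app.update_server_roles({'automate': True, 'smartproxy': False})
    #   assert app.server_roles['automate']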
@cached_property
def configuration_details(self):
"""Return details that are necessary to navigate through Configuration accordions.
        Returns:
            ``None`` if the data could not be retrieved.
If the data were found, it returns tuple ``(region, server name,
server id, server zone id)``
"""
try:
servers = self.rest_api.collections.servers.all
chosen_server = None
if len(servers) == 1:
chosen_server = servers[0]
else:
for server in servers:
if self.guid == server.guid:
chosen_server = server
if chosen_server:
chosen_server.reload(attributes=['region_number'])
return (chosen_server.region_number, chosen_server.name,
chosen_server.id, chosen_server.zone_id)
else:
return None, None, None, None
except:
return None
@cached_property
def configuration_details_old(self):
try:
miq_servers = self.db.client['miq_servers']
for region in self.db.client.session.query(self.db.client['miq_regions']):
reg_min = region.region * SEQ_FACT
reg_max = reg_min + SEQ_FACT
all_servers = self.db.client.session.query(miq_servers).all()
server = None
if len(all_servers) == 1:
# If there's only one server, it's the one we want
server = all_servers[0]
else:
# Otherwise, filter based on id and ip/guid
def server_filter(server):
return all([
server.id >= reg_min,
server.id < reg_max,
# second check because of openstack ip addresses
server.ipaddress == self.db.address or server.guid == self.guid
])
servers = filter(server_filter, all_servers)
if servers:
server = servers[0]
if server:
return region.region, server.name, server.id, server.zone_id
else:
return None, None, None, None
else:
return None
except KeyError:
return None
def server_id(self):
try:
return self.configuration_details[2]
except TypeError:
return None
def server_region(self):
try:
return self.configuration_details[0]
except TypeError:
return None
def server_name(self):
try:
return self.configuration_details[1]
except TypeError:
return None
def server_zone_id(self):
try:
return self.configuration_details[3]
except TypeError:
return None
def server_region_string(self):
r = self.server_region()
return "{} Region: Region {} [{}]".format(
self.product_name, r, r)
def slave_server_zone_id(self):
table = self.db.client["miq_servers"]
try:
return self.db.client.session.query(table.id).filter(
table.is_master == 'false').first()[0]
except TypeError:
return None
def slave_server_name(self):
table = self.db.client["miq_servers"]
try:
return self.db.client.session.query(table.name).filter(
table.id == self.slave_server_zone_id()).first()[0]
except TypeError:
return None
@cached_property
def company_name(self):
return self.get_yaml_config()["server"]["company"]
@cached_property
def zone_description(self):
zone_id = self.server_zone_id()
zones = list(
self.db.client.session.query(self.db.client["zones"]).filter(
self.db.client["zones"].id == zone_id
)
)
if zones:
return zones[0].description
else:
return None
def host_id(self, hostname):
hosts = list(
self.db.client.session.query(self.db.client["hosts"]).filter(
self.db.client["hosts"].name == hostname
)
)
if hosts:
return str(hosts[0].id)
else:
return None
@cached_property
def is_storage_enabled(self):
return 'storage' in self.get_yaml_config().get('product', {})
def get_yaml_config(self):
writeout = self.ssh_client.run_rails_command(
'"File.open(\'/tmp/yam_dump.yaml\', \'w\') '
'{|f| f.write(Settings.to_hash.deep_stringify_keys.to_yaml) }"'
)
if writeout.rc:
logger.error("Config couldn't be found")
logger.error(writeout.output)
raise Exception('Error obtaining config')
base_data = self.ssh_client.run_command('cat /tmp/yam_dump.yaml')
if base_data.rc:
logger.error("Config couldn't be found")
logger.error(base_data.output)
raise Exception('Error obtaining config')
try:
return yaml.load(base_data.output)
except:
logger.debug(base_data.output)
raise
def set_yaml_config(self, data_dict):
temp_yaml = NamedTemporaryFile()
dest_yaml = '/tmp/conf.yaml'
yaml.dump(data_dict, temp_yaml, default_flow_style=False)
self.ssh_client.put_file(temp_yaml.name, dest_yaml)
# Build and send ruby script
dest_ruby = '/tmp/set_conf.rb'
ruby_template = data_path.join('utils', 'cfmedb_set_config.rbt')
ruby_replacements = {
'config_file': dest_yaml
}
temp_ruby = load_data_file(ruby_template.strpath, ruby_replacements)
self.ssh_client.put_file(temp_ruby.name, dest_ruby)
# Run it
result = self.ssh_client.run_rails_command(dest_ruby)
if result:
self.server_details_changed()
else:
raise Exception('Unable to set config: {!r}:{!r}'.format(result.rc, result.output))
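    # Illustrative advanced-settings round trip (sketch; the key path mirrors the one
    # used by set_session_timeout below):
    #
    #   cfg = app.get_yaml_config()
    #   cfg['session']['timeout'] = 86400
    #   app.set_yaml_config(cfg)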
def set_session_timeout(self, timeout=86400, quiet=True):
"""Sets the timeout of UI timeout.
Args:
timeout: Timeout in seconds
quiet: Whether to ignore any errors
"""
try:
vmdb_config = self.get_yaml_config()
if vmdb_config["session"]["timeout"] != timeout:
vmdb_config["session"]["timeout"] = timeout
self.set_yaml_config(vmdb_config)
except Exception as ex:
logger.error('Setting session timeout failed:')
logger.exception(ex)
if not quiet:
raise
def delete_all_providers(self):
logger.info('Destroying all appliance providers')
for prov in self.rest_api.collections.providers:
prov.action.delete()
def reset_automate_model(self):
with self.ssh_client as ssh_client:
ssh_client.run_rake_command("evm:automate:reset")
def clean_appliance(self):
starttime = time()
self.ssh_client.run_command('service evmserverd stop')
self.ssh_client.run_command('sync; sync; echo 3 > /proc/sys/vm/drop_caches')
self.ssh_client.run_command('service collectd stop')
self.ssh_client.run_command('service {}-postgresql restart'.format(
self.db.postgres_version))
self.ssh_client.run_command(
'cd /var/www/miq/vmdb; bin/rake evm:db:reset')
self.ssh_client.run_rake_command('db:seed')
self.ssh_client.run_command('service collectd start')
self.ssh_client.run_command('rm -rf /var/www/miq/vmdb/log/*.log*')
self.ssh_client.run_command('rm -rf /var/www/miq/vmdb/log/apache/*.log*')
self.ssh_client.run_command('service evmserverd start')
self.wait_for_evm_service()
logger.debug('Cleaned appliance in: {}'.format(round(time() - starttime, 2)))
def set_full_refresh_threshold(self, threshold=100):
yaml = self.get_yaml_config()
yaml['ems_refresh']['full_refresh_threshold'] = threshold
self.set_yaml_config(yaml)
def set_cap_and_util_all_via_rails(self):
"""Turns on Collect for All Clusters and Collect for all Datastores without using Web UI."""
command = (
'Metric::Targets.perf_capture_always = {:storage=>true, :host_and_cluster=>true};')
self.ssh_client.run_rails_console(command, timeout=None)
def set_cfme_server_relationship(self, vm_name, server_id=1):
"""Set MiqServer record to the id of a VM by name, effectively setting the CFME Server
Relationship without using the Web UI."""
command = ('miq_server = MiqServer.find_by(id: {});'
'miq_server.vm_id = Vm.find_by(name: \'{}\').id;'
'miq_server.save'.format(server_id, vm_name))
self.ssh_client.run_rails_console(command, timeout=None)
def set_pglogical_replication(self, replication_type=':none'):
"""Set pglogical replication type (:none, :remote, :global) without using the Web UI."""
command = ('MiqRegion.replication_type = {}'.format(replication_type))
self.ssh_client.run_rails_console(command, timeout=None)
def add_pglogical_replication_subscription(self, host):
"""Add a pglogical replication subscription without using the Web UI."""
user = conf.credentials['ssh']['username']
password = conf.credentials['ssh']['password']
dbname = 'vmdb_production'
port = 5432
command = ('sub = PglogicalSubscription.new;'
'sub.dbname = \'{}\';'
'sub.host = \'{}\';'
'sub.user = \'{}\';'
'sub.password = \'{}\';'
'sub.port = {};'
'sub.save'.format(dbname, host, user, password, port))
self.ssh_client.run_rails_console(command, timeout=None)
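    # Illustrative pglogical setup between two regions (sketch; 'remote.example.com' is
    # a placeholder for the remote region's database host):
    #
    #   remote_app.set_pglogical_replication(replication_type=':remote')
    #   global_app.set_pglogical_replication(replication_type=':global')
    #   global_app.add_pglogical_replication_subscription('remote.example.com')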
def set_rubyrep_replication(self, host, port=5432, database='vmdb_production',
username='root', password=None):
"""Sets up rubyrep replication via advanced configuration settings yaml."""
password = password or self._encrypt_string(conf.credentials['ssh']['password'])
yaml = self.get_yaml_config()
if 'replication_worker' in yaml['workers']['worker_base']:
dest = yaml['workers']['worker_base']['replication_worker']['replication'][
'destination']
dest['database'] = database
dest['username'] = username
dest['password'] = password
dest['port'] = port
dest['host'] = host
else: # 5.5 configuration:
dest = yaml['workers']['worker_base'][':replication_worker'][':replication'][
':destination']
dest[':database'] = database
dest[':username'] = username
dest[':password'] = password
dest[':port'] = port
dest[':host'] = host
logger.debug('Dest: {}'.format(dest))
self.set_yaml_config(yaml)
def wait_for_miq_server_workers_started(self, evm_tail=None, poll_interval=5):
"""Waits for the CFME's workers to be started by tailing evm.log for:
'INFO -- : MIQ(MiqServer#wait_for_started_workers) All workers have been started'
"""
if evm_tail is None:
logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
evm_tail.set_initial_file_end()
attempts = 0
detected = False
max_attempts = 60
while (not detected and attempts < max_attempts):
logger.debug('Attempting to detect MIQ Server workers started: {}'.format(attempts))
for line in evm_tail:
if 'MiqServer#wait_for_started_workers' in line:
if ('All workers have been started' in line):
logger.info('Detected MIQ Server is ready.')
detected = True
break
sleep(poll_interval) # Allow more log lines to accumulate
attempts += 1
if not (attempts < max_attempts):
logger.error('Could not detect MIQ Server workers started in {}s.'.format(
poll_interval * max_attempts))
evm_tail.close()
def server_details_changed(self):
clear_property_cache(self, 'configuration_details', 'zone_description')
@logger_wrap("Setting dev branch: {}")
def use_dev_branch(self, repo, branch, log_callback=None):
"""Sets up an exitsing appliance to change the branch to specified one and reset it.
Args:
repo: URL to the repo
branch: Branch of that repo
"""
with self.ssh_client as ssh_client:
dev_branch_cmd = 'cd /var/www/miq/vmdb; git remote add dev_branch {}'.format(repo)
if not ssh_client.run_command(dev_branch_cmd):
ssh_client.run_command('cd /var/www/miq/vmdb; git remote remove dev_branch')
if not ssh_client.run_command(dev_branch_cmd):
raise Exception('Could not add the dev_branch remote')
# We now have the repo and now let's update it
ssh_client.run_command('cd /var/www/miq/vmdb; git remote update')
self.evmserverd.stop()
ssh_client.run_command(
'cd /var/www/miq/vmdb; git checkout dev_branch/{}'.format(branch))
ssh_client.run_command('cd /var/www/miq/vmdb; bin/update')
self.start_evm_service()
self.wait_for_evm_service()
self.wait_for_web_ui()
def check_domain_enabled(self, domain):
namespaces = self.db.client["miq_ae_namespaces"]
q = self.db.client.session.query(namespaces).filter(
namespaces.parent_id == None, namespaces.name == domain) # NOQA (for is/==)
try:
return list(q)[0].enabled
except IndexError:
raise KeyError("No such Domain: {}".format(domain))
def configure_appliance_for_openldap_ext_auth(self, appliance_fqdn):
"""This method changes the /etc/sssd/sssd.conf and /etc/openldap/ldap.conf files to set
up the appliance for an external authentication with OpenLdap.
        Apache configuration files are updated so the web UI picks up the change.
        Args:
            appliance_fqdn: FQDN for the appliance.
"""
openldap_domain1 = conf.cfme_data['auth_modes']['ext_openldap']
assert self.ssh_client.run_command('appliance_console_cli --host {}'.format(appliance_fqdn))
self.ssh_client.run_command('echo "{}\t{}" > /etc/hosts'.format(
openldap_domain1['ipaddress'], openldap_domain1['hostname']))
self.ssh_client.put_file(
local_file=conf_path.join(openldap_domain1['cert_filename']).strpath,
remote_file=openldap_domain1['cert_filepath'])
ldap_conf_data = conf.cfme_data['auth_modes']['ext_openldap']['ldap_conf']
sssd_conf_data = conf.cfme_data['auth_modes']['ext_openldap']['sssd_conf']
command1 = 'echo "{}" > /etc/openldap/ldap.conf'.format(ldap_conf_data)
command2 = 'echo "{}" > /etc/sssd/sssd.conf && chown -R root:root /etc/sssd/sssd.conf && ' \
'chmod 600 /etc/sssd/sssd.conf'.format(sssd_conf_data)
assert self.ssh_client.run_command(command1)
assert self.ssh_client.run_command(command2)
template_dir = '/opt/rh/cfme-appliance/TEMPLATE'
if self.version == 'master':
template_dir = '/var/www/miq/system/TEMPLATE'
httpd_auth = '/etc/pam.d/httpd-auth'
manageiq_ext_auth = '/etc/httpd/conf.d/manageiq-external-auth.conf'
apache_config = """
cp {template_dir}/etc/pam.d/httpd-auth {httpd_auth} &&
cp {template_dir}/etc/httpd/conf.d/manageiq-remote-user.conf /etc/httpd/conf.d/ &&
cp {template_dir}/etc/httpd/conf.d/manageiq-external-auth.conf.erb {manageiq_ext_auth}
""".format(template_dir=template_dir, httpd_auth=httpd_auth,
manageiq_ext_auth=manageiq_ext_auth)
assert self.ssh_client.run_command(apache_config)
self.ssh_client.run_command(
'setenforce 0 && systemctl restart sssd && systemctl restart httpd')
self.wait_for_web_ui()
@logger_wrap("Configuring VM Console: {}")
def configure_vm_console_cert(self, log_callback=None):
"""This method generates a self signed SSL cert and installs it
in the miq/vmdb/certs dir. This cert will be used by the
HTML 5 VM Console feature. Note evmserverd needs to be restarted
after running this.
"""
log_callback('Installing SSL certificate')
cert = conf.cfme_data['vm_console'].get('cert')
if cert is None:
raise Exception('vm_console:cert does not exist in cfme_data.yaml')
cert_file = os.path.join(cert.install_dir, 'server.cer')
key_file = os.path.join(cert.install_dir, 'server.cer.key')
cert_generator = scripts_path.join('gen_ssl_cert.py').strpath
remote_cert_generator = os.path.join('/usr/bin', 'gen_ssl_cert.py')
# Copy self signed SSL certificate generator to the appliance
# because it needs to get the FQDN for the cert it generates.
self.ssh_client.put_file(cert_generator, remote_cert_generator)
# Generate cert
command = '''
{cert_generator} \\
--C="{country}" \\
--ST="{state}" \\
--L="{city}" \\
--O="{organization}" \\
--OU="{organizational_unit}" \\
--keyFile="{key}" \\
--certFile="{cert}"
'''.format(
cert_generator=remote_cert_generator,
country=cert.country,
state=cert.state,
city=cert.city,
organization=cert.organization,
organizational_unit=cert.organizational_unit,
key=key_file,
cert=cert_file,
)
result = self.ssh_client.run_command(command)
        if result.rc != 0:
raise Exception(
'Failed to generate self-signed SSL cert on appliance: {}'.format(
                    result.output
)
)
class Appliance(IPAppliance):
"""Appliance represents an already provisioned cfme appliance vm
Args:
provider_name: Name of the provider this appliance is running under
vm_name: Name of the VM this appliance is running as
browser_steal: Setting of the browser_steal attribute.
"""
_default_name = 'EVM'
# For JSON Serialization
CONFIG_MAPPING = {
'provider_name': 'provider_name',
'vm_name': 'vm_name',
'container': 'container',
}
CONFIG_NONGLOBAL = {'vm_name'}
def __init__(self, provider_name, vm_name, browser_steal=False, container=None):
"""Initializes a deployed appliance VM
"""
super(Appliance, self).__init__(browser_steal=browser_steal, container=None)
self.name = Appliance._default_name
self._provider_key = provider_name
self.vmname = vm_name
def __eq__(self, other):
return isinstance(other, type(self)) and (
self.vmname == other.vmname and self._provider_key == other._provider_key)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.vmname, self._provider_key))
@property
def ipapp(self):
# For backwards compat
return self
@cached_property
def provider(self):
"""
Note:
Cannot be cached because provider object is unpickable.
"""
from cfme.utils.providers import get_mgmt
return get_mgmt(self._provider_key)
@property
def vm_name(self):
""" VM's name of the appliance on the provider """
return self.vmname
@cached_property
def address(self):
def is_ip_available():
try:
ip = self.provider.get_ip_address(self.vm_name)
if ip is None:
return False
else:
return ip
except AttributeError:
return False
ec, tc = wait_for(is_ip_available,
delay=5,
num_sec=600)
return str(ec)
def _custom_configure(self, **kwargs):
log_callback = kwargs.pop(
"log_callback",
lambda msg: logger.info("Custom configure %s: %s", self.vmname, msg))
region = kwargs.get('region', 0)
db_address = kwargs.get('db_address')
key_address = kwargs.get('key_address')
db_username = kwargs.get('db_username')
        db_password = kwargs.get('db_password')
ssh_password = kwargs.get('ssh_password')
db_name = kwargs.get('db_name')
if kwargs.get('fix_ntp_clock', True) is True:
self.fix_ntp_clock(log_callback=log_callback)
if kwargs.get('db_address') is None:
self.db.enable_internal(
region, key_address, db_password, ssh_password)
else:
self.db.enable_external(
db_address, region, db_name, db_username, db_password)
self.wait_for_web_ui(timeout=1800, log_callback=log_callback)
if kwargs.get('loosen_pgssl', True) is True:
self.db.loosen_pgssl()
name_to_set = kwargs.get('name_to_set')
if name_to_set is not None and name_to_set != self.name:
self.rename(name_to_set)
self.restart_evm_service(log_callback=log_callback)
self.wait_for_web_ui(log_callback=log_callback)
@logger_wrap("Configure Appliance: {}")
def configure(self, setup_fleece=False, log_callback=None, **kwargs):
"""Configures appliance - database setup, rename, ntp sync
Utility method to make things easier.
Args:
db_address: Address of external database if set, internal database if ``None``
(default ``None``)
name_to_set: Name to set the appliance name to if not ``None`` (default ``None``)
region: Number to assign to region (default ``0``)
fix_ntp_clock: Fixes appliance time if ``True`` (default ``True``)
loosen_pgssl: Loosens postgres connections if ``True`` (default ``True``)
key_address: Fetch encryption key from this address if set, generate a new key if
``None`` (default ``None``)
"""
log_callback("Configuring appliance {} on {}".format(self.vmname, self._provider_key))
if kwargs:
with self:
self._custom_configure(**kwargs)
else:
# Defer to the IPAppliance.
super(Appliance, self).configure(log_callback=log_callback)
# And do configure the fleecing if requested
if setup_fleece:
self.configure_fleecing(log_callback=log_callback)
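    # Illustrative call (provider/VM names and keyword values below are made up;
    # any keyword accepted by _custom_configure can be passed through here):
    #
    #   app = Appliance('vsphere55', 'my-appliance-vm')
    #   app.configure(region=0, name_to_set='EVM-test', fix_ntp_clock=True)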
@logger_wrap("Configure fleecing: {}")
def configure_fleecing(self, log_callback=None):
with self(browser_steal=True):
if self.is_on_vsphere:
self.install_vddk(reboot=True, log_callback=log_callback)
self.wait_for_web_ui(log_callback=log_callback)
if self.is_on_rhev:
self.add_rhev_direct_lun_disk()
log_callback('Enabling smart proxy role...')
roles = self.server.settings.server_roles_db
if not roles["smartproxy"]:
self.server.settings.enable_server_roles("smartproxy")
# web ui crashes
if str(self.version).startswith("5.2.5") or str(self.version).startswith("5.5"):
try:
self.wait_for_web_ui(timeout=300, running=False)
except:
pass
self.wait_for_web_ui(running=True)
# add provider
log_callback('Setting up provider...')
self.provider.setup()
# credential hosts
log_callback('Credentialing hosts...')
if not RUNNING_UNDER_SPROUT:
from cfme.utils.hosts import setup_providers_hosts_credentials
setup_providers_hosts_credentials(self._provider_key, ignore_errors=True)
# if rhev, set relationship
if self.is_on_rhev:
from cfme.infrastructure.virtual_machines import Vm # For Vm.CfmeRelationship
log_callback('Setting up CFME VM relationship...')
from cfme.common.vm import VM
from cfme.utils.providers import get_crud
vm = VM.factory(self.vm_name, get_crud(self._provider_key))
cfme_rel = Vm.CfmeRelationship(vm)
cfme_rel.set_relationship(str(self.server_name()), self.server_id())
def does_vm_exist(self):
return self.provider.does_vm_exist(self.vm_name)
def rename(self, new_name):
"""Changes appliance name
Args:
new_name: Name to set
Note:
Database must be up and running and evm service must be (re)started afterwards
for the name change to take effect.
"""
vmdb_config = self.get_yaml_config()
vmdb_config['server']['name'] = new_name
self.set_yaml_config(vmdb_config)
self.name = new_name
def destroy(self):
"""Destroys the VM this appliance is running as
"""
if self.is_on_rhev:
            # if rhev, try to remove direct_lun just in case it is still attached
self.remove_rhev_direct_lun_disk()
self.provider.delete_vm(self.vm_name)
def stop(self):
"""Stops the VM this appliance is running as
"""
self.provider.stop_vm(self.vm_name)
self.provider.wait_vm_stopped(self.vm_name)
def start(self):
"""Starts the VM this appliance is running as
"""
self.provider.start_vm(self.vm_name)
self.provider.wait_vm_running(self.vm_name)
def templatize(self, seal=True):
"""Marks the appliance as a template. Destroys the original VM in the process.
By default it runs the sealing process. If you have done it differently, you can opt out.
Args:
seal: Whether to run the sealing process (making the VM 'universal').
"""
if seal:
if not self.is_running:
self.start()
self.seal_for_templatizing()
self.stop()
else:
if self.is_running:
self.stop()
self.provider.mark_as_template(self.vm_name)
@property
def is_running(self):
return self.provider.is_vm_running(self.vm_name)
@property
def is_on_rhev(self):
from cfme.infrastructure.provider.rhevm import RHEVMProvider
return isinstance(self.provider, RHEVMProvider.mgmt_class)
@property
def is_on_vsphere(self):
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
return isinstance(self.provider, VMwareProvider.mgmt_class)
def add_rhev_direct_lun_disk(self, log_callback=None):
if log_callback is None:
log_callback = logger.info
if not self.is_on_rhev:
log_callback("appliance NOT on rhev, unable to connect direct_lun")
raise ApplianceException("appliance NOT on rhev, unable to connect direct_lun")
log_callback('Adding RHEV direct_lun hook...')
self.wait_for_ssh()
try:
self.provider.connect_direct_lun_to_appliance(self.vm_name, False)
except Exception as e:
log_callback("Appliance {} failed to connect RHEV direct LUN.".format(self.vm_name))
log_callback(str(e))
raise
@logger_wrap("Remove RHEV LUN: {}")
def remove_rhev_direct_lun_disk(self, log_callback=None):
if not self.is_on_rhev:
msg = "appliance {} NOT on rhev, unable to disconnect direct_lun".format(self.vmname)
log_callback(msg)
raise ApplianceException(msg)
log_callback('Removing RHEV direct_lun hook...')
self.wait_for_ssh()
try:
self.provider.connect_direct_lun_to_appliance(self.vm_name, True)
except Exception as e:
log_callback("Appliance {} failed to connect RHEV direct LUN.".format(self.vm_name))
log_callback(str(e))
raise
def provision_appliance(version=None, vm_name_prefix='cfme', template=None, provider_name=None,
vm_name=None):
"""Provisions fresh, unconfigured appliance of a specific version
Note:
Version must be mapped to template name under ``appliance_provisioning > versions``
in ``cfme_data.yaml``.
If no matching template for given version is found, and trackerbot is set up,
the latest available template of the same stream will be used.
E.g.: if there is no template for 5.5.5.1 but there is 5.5.5.3, it will be used instead.
If both template name and version are specified, template name takes priority.
Args:
version: version of appliance to provision
vm_name_prefix: name prefix to use when deploying the appliance vm
Returns: Unconfigured appliance; instance of :py:class:`Appliance`
Usage:
my_appliance = provision_appliance('5.5.1.8', 'my_tests')
my_appliance.fix_ntp_clock()
...other configuration...
my_appliance.db.enable_internal()
my_appliance.wait_for_web_ui()
or
my_appliance = provision_appliance('5.5.1.8', 'my_tests')
my_appliance.configure()
"""
def _generate_vm_name():
if version is not None:
version_digits = ''.join([letter for letter in version if letter.isdigit()])
return '{}_{}_{}'.format(
vm_name_prefix, version_digits, fauxfactory.gen_alphanumeric(8))
else:
return '{}_{}'.format(vm_name_prefix, fauxfactory.gen_alphanumeric(8))
def _get_latest_template():
from cfme.utils import trackerbot
api = trackerbot.api()
stream = get_stream(version)
template_data = trackerbot.latest_template(api, stream, provider_name)
return template_data.get('latest_template')
if provider_name is None:
provider_name = conf.cfme_data.get('appliance_provisioning', {})['default_provider']
if template is not None:
template_name = template
elif version is not None:
templates_by_version = conf.cfme_data.get('appliance_provisioning', {}).get('versions', {})
try:
template_name = templates_by_version[version]
except KeyError:
# We try to get the latest template from the same stream - if trackerbot is set up
if conf.env.get('trackerbot', {}):
template_name = _get_latest_template()
if not template_name:
raise ApplianceException('No template found for stream {} on provider {}'
.format(get_stream(version), provider_name))
logger.warning('No template found matching version %s, using %s instead.',
version, template_name)
else:
raise ApplianceException('No template found matching version {}'.format(version))
else:
raise ApplianceException('Either version or template name must be specified')
prov_data = conf.cfme_data.get('management_systems', {})[provider_name]
from cfme.utils.providers import get_mgmt
provider = get_mgmt(provider_name)
if not vm_name:
vm_name = _generate_vm_name()
deploy_args = {}
deploy_args['vm_name'] = vm_name
if prov_data['type'] == 'rhevm':
deploy_args['cluster'] = prov_data['default_cluster']
if prov_data["type"] == "virtualcenter":
if "allowed_datastores" in prov_data:
deploy_args["allowed_datastores"] = prov_data["allowed_datastores"]
provider.deploy_template(template_name, **deploy_args)
return Appliance(provider_name, vm_name)
class ApplianceStack(LocalStack):
def push(self, obj):
was_before = self.top
super(ApplianceStack, self).push(obj)
logger.info("Pushed appliance {} on stack (was {} before) ".format(
obj.address, getattr(was_before, 'address', 'empty')))
if obj.browser_steal:
from cfme.utils import browser
browser.start()
def pop(self):
was_before = super(ApplianceStack, self).pop()
current = self.top
logger.info(
"Popped appliance {} from the stack (now there is {})".format(
getattr(was_before, 'address', 'empty'),
getattr(current, 'address', 'empty')))
if getattr(was_before, 'browser_steal', False):
from cfme.utils import browser
browser.start()
return was_before
stack = ApplianceStack()
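# Illustrative usage sketch (the appliance construction is assumed to happen
# elsewhere, e.g. via load_appliances_from_config below):
#
#   appliance = load_appliances_from_config(conf.env)[0]
#   stack.push(appliance)   # appliance becomes the implicit "current" one
#   ...                     # code relying on current_appliance runs here
#   stack.pop()             # the previous appliance (if any) becomes current again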
def load_appliances(appliance_list, global_kwargs):
"""Instantiate a list of appliances from configuration data.
Args:
appliance_list: List of dictionaries that contain parameters for :py:class:`IPAppliance`
global_kwargs: Arguments that will be defined for each appliances. Appliance can override.
Result:
List of :py:class:`IPAppliance`
"""
result = []
for appliance_kwargs in appliance_list:
kwargs = {}
kwargs.update(global_kwargs)
kwargs.update(appliance_kwargs)
if kwargs.pop('dummy', False):
result.append(DummyAppliance(**kwargs))
continue
if not kwargs.get('base_url'):
raise ValueError('Appliance definition {!r} is missing base_url'.format(kwargs))
result.append(IPAppliance(**{IPAppliance.CONFIG_MAPPING[k]: v for k, v in kwargs.items()}))
return result
@attr.s
class DummyAppliance(object):
"""a dummy with minimal attribute set"""
address = '0.0.0.0'
browser_steal = False
version = Version('5.8.0')
is_downstream = True
is_pod = False
build = 'missing :)'
managed_known_providers = []
def set_session_timeout(self, *k):
pass
def load_appliances_from_config(config):
"""Backwards-compatible config loader.
The ``config`` contains some global values and ``appliances`` key which contains a list of dicts
that have the same keys as ``IPAppliance.CONFIG_MAPPING``'s keys. If ``appliances`` key is not
present, it is assumed it is old-format definition and the whole dict is used as a reference
for one single appliance.
The global values in the root of the dict (in case of ``appliances`` present) have lesser
priority than the values in appliance definitions themselves
Args:
config: A dictionary with the configuration
"""
if 'appliances' not in config:
# old-style setup
warnings.warn(
'Your conf.env has old-style base_url', category=DeprecationWarning, stacklevel=2)
appliances = [{
k: config[k]
for k in IPAppliance.CONFIG_MAPPING.keys()
if k in config}]
global_kwargs = {}
else:
# new-style setup
appliances = config['appliances']
global_kwargs = {
k: config[k]
for k in IPAppliance.CONFIG_MAPPING.keys()
if k not in IPAppliance.CONFIG_NONGLOBAL and k in config}
return load_appliances(appliances, global_kwargs)
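# Illustrative config shapes accepted above (addresses are made up; any other
# keys must match IPAppliance.CONFIG_MAPPING):
#
#   old style (single appliance, keys at the root of the config dict):
#       {'base_url': 'https://10.0.0.1/'}
#
#   new style ('appliances' list plus optional global defaults):
#       {'appliances': [{'base_url': 'https://10.0.0.1/'},
#                       {'base_url': 'https://10.0.0.2/'}]}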
def get_or_create_current_appliance():
if CREATE_IS_PEDANTIC:
assert stack.top is not None, "we no longer create"
if stack.top is None:
stack.push(load_appliances_from_config(conf.env)[0])
return stack.top
current_appliance = LocalProxy(get_or_create_current_appliance)
@removals.removed_class(
"CurrentAppliance", message=("The CurrentAppliance descriptor is being phased out"
"in favour of collections.")
)
class CurrentAppliance(object):
def __get__(self, instance, owner):
return get_or_create_current_appliance()
class NavigatableMixin(object):
"""NavigatableMixin ensures that an object can navigate properly
The NavigatableMixin object ensures that a Collection/Entity object inside the
framework has access to be able to **create** a Widgetastic View, and that it
has access to the browser.
Note: The browser access will have to change once proliferation of the Sentaku
system becomes common place
"""
@property
def browser(self):
return self.appliance.browser.widgetastic
def create_view(self, view_class, o=None, override=None):
o = o or self
if override is not None:
new_obj = copy(o)
new_obj.__dict__.update(override)
else:
new_obj = o
return self.appliance.browser.create_view(
view_class, additional_context={'object': new_obj})
@removals.removed_class(
"Navigatable", message=("Navigatable is being deprecated in favour of using Collections "
"objects with the NavigatableMixin")
)
class Navigatable(NavigatableMixin):
appliance = CurrentAppliance()
def __init__(self, appliance=None):
self.appliance = appliance or get_or_create_current_appliance()
| gpl-2.0 | -3,455,165,208,481,523,700 | 39.614009 | 100 | 0.590672 | false |
loopCM/chromium | tools/perf/perf_tools/image_decoding_measurement.py | 1 | 1268 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page_measurement
class ImageDecoding(page_measurement.PageMeasurement):
def WillNavigateToPage(self, page, tab):
tab.StartTimelineRecording()
def MeasurePage(self, page, tab, results):
tab.StopTimelineRecording()
def _IsDone():
return tab.EvaluateJavaScript('isDone')
decode_image_events = \
tab.timeline_model.GetAllOfName('DecodeImage')
# If it is a real image page, then store only the last-minIterations
# decode tasks.
if (hasattr(page,
'image_decoding_measurement_limit_results_to_min_iterations') and
page.image_decoding_measurement_limit_results_to_min_iterations):
assert _IsDone()
min_iterations = tab.EvaluateJavaScript('minIterations')
decode_image_events = decode_image_events[-min_iterations:]
durations = [d.duration for d in decode_image_events]
if not durations:
results.Add('ImageDecoding_avg', 'ms', 'unsupported')
return
image_decoding_avg = sum(durations) / len(durations)
results.Add('ImageDecoding_avg', 'ms', image_decoding_avg)
| bsd-3-clause | 44,070,709,298,462,310 | 36.294118 | 80 | 0.708991 | false |
kimgerdes/arborator | lib/parser.py | 1 | 3443 | # -*- coding: utf-8 -*-
#!/usr/bin/python
import argparse, datetime, glob, os
from mate import parsing, createNonExistingFolders
from retokenisation import retokeniser
from conll import makeEmpty
memory="40G"
def parseFile(infile, lemodel, tagmodel, parsemodel, folderpref="mate/parses/"):
timestamp=datetime.datetime.now().strftime('%Y-%m-%d_%H:%M')
if folderpref: prelimfolder=folderpref+"_prelim/"
else: prelimfolder=folderpref+timestamp+"_prelim/"
parsefile = parsing(infile=infile, lemodel=lemodel, tagmodel=tagmodel,parsemodel=parsemodel , outfolder=prelimfolder, memory=memory) # , depparse=False
#parsefile="mate/parses/2016-09-22_01:18/Louise_Liotard_F_85_et_Jeanne_Mallet_F_75_SO-2-one-word-per-line.conll14_parse"
print "retokenizing..."
newname=retokeniser(parsefile, addtoout="_retok")
print "retokenization done"
if folderpref: outfolder=folderpref+"/"
else: outfolder=folderpref+timestamp+"/"
createNonExistingFolders(outfolder)
emptyname=makeEmpty(newname, outfolder=outfolder)
	parsefile = parsing(infile=emptyname, lemodel=lemodel, tagmodel=tagmodel, parsemodel=parsemodel, outfolder=outfolder, memory=memory)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='wrapper for mate parser with orfeo dictionaries')
parser.add_argument('-ci','--conllinfile', help='file to be parsed', type=lambda s: unicode(s, 'utf8'), required=False)
parser.add_argument('-cf','--conllfilter', help='files to be parsed', type=lambda s: unicode(s, 'utf8'), required=False)
parser.add_argument('-md','--modeldir', help='folder containing the models', type=lambda s: unicode(s, 'utf8'), required=True)
parser.add_argument('-lm','--lemmodel', help='lemmatizing model', type=lambda s: unicode(s, 'utf8'), required=False, nargs='?', default="LemModel")
parser.add_argument('-tm','--tagmodel', help='tagging model', type=lambda s: unicode(s, 'utf8'), required=False, nargs='?', default="TagModel")
parser.add_argument('-pm','--parsemodel', help='parsing model', type=lambda s: unicode(s, 'utf8'), required=False, nargs='?', default="ParseModel")
args = vars(parser.parse_args())
modeldir=args.get("modeldir",".")
infile=args.get("conllinfile",None)
conllfilter=args.get("conllfilter",None)
lemodel=modeldir+args.get("lemmodel",None)
tagmodel=modeldir+args.get("tagmodel",None)
parsemodel=modeldir+args.get("parsemodel",None)
if infile:
parseFile(infile, lemodel, tagmodel, parsemodel)
elif conllfilter:
timestamp=datetime.datetime.now().strftime('%Y-%m-%d_%H:%M')
for infile in glob.glob(conllfilter):
head, tail = os.path.split(infile) # put the parse output next to the infile
parseFile(infile, lemodel, tagmodel, parsemodel, folderpref=head+"/"+timestamp)
# python parser.py -ci mate/AParser/echantillon/Louise_Liotard_F_85_et_Jeanne_Mallet_F_75_SO-2-one-word-per-line.conll14 -lm mate/AParser/LemModel -tm mate/AParser/TagModel -pm mate/AParser/ParseModel
# python parser.py -ci mate/AParser/echantillon/Louise_Liotard_F_85_et_Jeanne_Mallet_F_75_SO-2-one-word-per-line.conll14 -md mate/AParser/
# python parser.py -ci mate/AParser/echantillon/Louise_Liotard_F_85_et_Jeanne_Mallet_F_75_SO-2-one-word-per-line.conll14 -md mate/AParser/
# python parser.py -cf "mate/AParser/echantillon/*.conll14" -md mate/AParser/
# python parser.py -cf "mate/AParser/tcof/*.conll14" -md mate/AParser/
| agpl-3.0 | -919,325,971,577,784,000 | 56.4 | 211 | 0.743247 | false |
BeenzSyed/tempest | tempest/api/identity/admin/test_tokens.py | 1 | 2209 | # Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest.test import attr
class TokensTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
@attr(type='gate')
def test_create_delete_token(self):
# get a token by username and password
user_name = data_utils.rand_name(name='user-')
user_password = data_utils.rand_name(name='pass-')
# first:create a tenant
tenant_name = data_utils.rand_name(name='tenant-')
resp, tenant = self.client.create_tenant(tenant_name)
self.assertEqual(200, resp.status)
self.data.tenants.append(tenant)
# second:create a user
resp, user = self.client.create_user(user_name, user_password,
tenant['id'], '')
self.assertEqual(200, resp.status)
self.data.users.append(user)
# then get a token for the user
rsp, body = self.token_client.auth(user_name,
user_password,
tenant['name'])
access_data = json.loads(body)['access']
self.assertEqual(rsp['status'], '200')
self.assertEqual(access_data['token']['tenant']['name'],
tenant['name'])
# then delete the token
token_id = access_data['token']['id']
resp, body = self.client.delete_token(token_id)
self.assertEqual(resp['status'], '204')
class TokensTestXML(TokensTestJSON):
_interface = 'xml'
| apache-2.0 | -1,222,319,164,164,308,500 | 38.446429 | 78 | 0.62517 | false |
mikeand/tubby | functional_tests/tests.py | 1 | 4437 | from contextlib import contextmanager
from django.contrib.auth.models import User
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.expected_conditions import staleness_of
from selenium.webdriver.support.wait import WebDriverWait
from django.conf import settings
class AddSubscriptionsTest(StaticLiveServerTestCase):
def __init__(self, *args, **kwargs):
super(AddSubscriptionsTest, self).__init__(*args, **kwargs)
if not settings.DEBUG:
settings.DEBUG = True
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
user = User.objects.create_user('michael',
'[email protected]', 'just_testing')
user.save()
def tearDown(self):
self.browser.quit()
@contextmanager
def wait_for_page_load(self, timeout=30):
old_page = self.browser.find_element_by_tag_name('html')
yield
WebDriverWait(self.browser, timeout).until(
staleness_of(old_page)
)
def test_can_add_subscriptions(self):
# User Story: bob wants to register his subscriptions
# he goes to the website and *tries* to put them in
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
# Must be the right website, says so in the title
self.assertIn('Tubby', self.browser.title)
# Bob sees a button to add a subscription
links = self.browser.find_elements_by_tag_name('a')
self.assertTrue(len(links) >= 1)
# links[0].click()
self.browser.get(links[0].get_attribute('href'))
# need to log in
self.assertIn('Login', self.browser.title)
username = self.browser.find_element_by_id('id_username')
password = self.browser.find_element_by_id('id_password')
username.send_keys('michael')
password.send_keys('just_testing')
password.send_keys(Keys.ENTER)
with self.wait_for_page_load(timeout=10):
self.browser.find_element_by_id('id_new_sub')
# seems to be on the right page
self.assertIn('Subscriptions', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('Add a Subscription', header_text)
# He types his URL into the box to see what happens.
input_box = self.browser.find_element_by_id('id_new_sub')
# The input box is centered
self.assertAlmostEqual(
input_box.location['x'] + input_box.size['width'] / 2,
512,
delta=5
)
self.assertEqual(
input_box.get_attribute('placeholder'),
'Enter a subscription link'
)
# After hitting enter the page updates and now shows the item
# in a list of subscriptions.
input_box.send_keys('https://www.youtube.com/user/PewDiePie')
input_box.send_keys(Keys.ENTER)
with self.wait_for_page_load(timeout=30):
self.browser.find_element_by_id('id_new_sub')
# wow it shows up somehow, is it magic
rows = self.browser.find_elements_by_class_name('channel_display_row')
self.assertTrue(
any(row.text.find('https://www.youtube.com/user/PewDiePie') >= 0
for row in rows),
"The subscription didn't appear in the list"
)
# There is now another blank box where you can type.
input_box = self.browser.find_element_by_id('id_new_sub')
input_box.send_keys('https://www.youtube.com/user/AngryBob')
input_box.send_keys(Keys.ENTER)
with self.wait_for_page_load(timeout=30):
self.browser.find_element_by_id('id_new_sub')
rows = self.browser.find_elements_by_class_name('channel_display_row')
self.assertTrue(
any(row.text.find('https://www.youtube.com/user/AngryBob') >= 0
for row in rows),
"The second subscription didn't appear in the list"
)
self.assertTrue(
any(row.text.find('https://www.youtube.com/user/PewDiePie') >= 0
for row in rows),
"The first subscription didn't appear in the list"
)
| mit | -5,084,760,237,444,575,000 | 35.368852 | 78 | 0.625197 | false |
chrisxue815/leetcode_python | problems/test_0307_binary_indexed_tree.py | 1 | 1616 | import unittest
from typing import List
import utils
def lowest_bit(val):
return val & (-val)
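# For example, lowest_bit(0b1100) == 0b100, i.e. lowest_bit(12) == 4: the
# two's-complement trick val & (-val) isolates the lowest set bit, which is
# how the binary indexed tree below walks between index ranges.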
# O(n) space. Binary indexed tree, prefix.
class NumArray:
# O(nlog(n)) time. O(1) space.
def __init__(self, nums: List[int]):
self.nums = nums
self.tree = [0] * (len(nums) + 1)
for i, val in enumerate(nums):
self._add(i, val)
# O(log(n)) time. O(1) space.
def update(self, i: int, val: int) -> None:
self._add(i, val - self.nums[i])
self.nums[i] = val
# O(log(n)) time. O(1) space.
def sumRange(self, i: int, j: int) -> int:
return self._sum(j) - self._sum(i - 1)
# O(log(n)) time. O(1) space.
def _add(self, i, val):
x = i + 1
while x < len(self.tree):
self.tree[x] += val
x += lowest_bit(x)
# O(log(n)) time. O(1) space.
def _sum(self, i):
x = i + 1
s = 0
while x > 0:
s += self.tree[x]
x -= lowest_bit(x)
return s
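# Illustrative walk-through (indices are 0-based, as in the class above):
#
#   arr = NumArray([1, 3, 5])
#   arr.sumRange(0, 2)   # -> 9
#   arr.update(1, 2)     # nums becomes [1, 2, 5]
#   arr.sumRange(0, 2)   # -> 8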
class Test(unittest.TestCase):
def test(self):
cls = NumArray
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
obj = None
for func, parameters, expected in zip(case.functions, case.args, case.expected):
if func == cls.__name__:
obj = cls(*parameters)
else:
actual = getattr(obj, func)(*parameters)
self.assertEqual(expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| unlicense | 6,165,450,201,104,918,000 | 23.861538 | 92 | 0.493812 | false |
python-provy/provy | tests/unit/more/centos/database/test_mysql.py | 1 | 11786 | from mock import call, patch
from nose.tools import istest
from .fixtures import (
FOO_DB_WITH_JOHN_GRANTS,
FOO_DB_WITHOUT_JOHN_GRANTS,
FOO_DB_WITH_JOHN_GRANTS_AND_GRANT_OPTION,
HOSTS_FOR_USER,
DATABASES,
)
from provy.more.centos import YumRole, MySQLRole
from tests.unit.tools.helpers import ProvyTestCase
class MySQLRoleTest(ProvyTestCase):
def setUp(self):
super(MySQLRoleTest, self).setUp()
self.role = MySQLRole(prov=None, context={})
@istest
def has_no_grant_if_not_granted(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITHOUT_JOHN_GRANTS
self.assertFalse(self.role.has_grant('ALL', 'foo', 'john', '%', False))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def has_grant_if_granted(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITH_JOHN_GRANTS
self.assertTrue(self.role.has_grant('ALL', 'foo', 'john', '%', False))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def has_grant_if_granted_with_grant_option(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITH_JOHN_GRANTS_AND_GRANT_OPTION
self.assertTrue(self.role.has_grant('ALL', 'foo', 'john', '%', True))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def has_grant_if_granted_even_if_provided_full(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITH_JOHN_GRANTS
self.assertTrue(self.role.has_grant('ALL PRIVILEGES', 'foo', 'john', '%', False))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def has_grant_if_granted_even_if_provided_as_lowercase_string(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITH_JOHN_GRANTS
self.assertTrue(self.role.has_grant('all', 'foo', 'john', '%', False))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def can_get_user_grants(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITHOUT_JOHN_GRANTS
expected = ["GRANT USAGE ON *.* TO 'john'@'%' IDENTIFIED BY PASSWORD '*B9EE00DF55E7C816911C6DA56F1E3A37BDB31093'"]
self.assertEqual(expected, self.role.get_user_grants('john', '%'))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def installs_necessary_packages_to_provision(self):
with self.using_stub(YumRole) as mock_yum, self.execute_mock() as execute:
mock_yum.ensure_package_installed.return_value = 'some result'
self.role.provision()
self.assertEqual(execute.mock_calls, [
call("mysqladmin -u %s -p'temppass' password '%s'" % (self.role.mysql_root_user, self.role.mysql_root_pass),
stdout=False, sudo=True),
])
self.assertEqual(mock_yum.ensure_package_installed.mock_calls, [
call('mysql-server'),
call('mysql-devel'),
call('mysql-libs'),
])
@istest
def installs_necessary_packages_to_provision_again(self):
with self.using_stub(YumRole) as mock_yum, self.execute_mock() as execute:
mock_yum.ensure_package_installed.return_value = False
self.role.provision()
self.assertFalse(execute.called)
self.assertEqual(mock_yum.ensure_package_installed.mock_calls, [
call('mysql-server'),
call('mysql-devel'),
call('mysql-libs'),
])
@istest
def gets_user_hosts(self):
with self.execute_mock() as execute:
execute.return_value = HOSTS_FOR_USER
hosts = self.role.get_user_hosts('root')
self.assertEqual(hosts, [
'127.0.0.1',
'::1',
'my-desktop',
'localhost',
])
execute.assert_called_with('''mysql -u root -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def gets_user_hosts_using_password(self):
with self.execute_mock() as execute:
execute.return_value = HOSTS_FOR_USER
self.role.mysql_root_pass = 'mypass'
hosts = self.role.get_user_hosts('root')
self.assertEqual(hosts, [
'127.0.0.1',
'::1',
'my-desktop',
'localhost',
])
execute.assert_called_with('''mysql -u root --password="mypass" -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def gets_empty_user_hosts(self):
with self.execute_mock() as execute:
execute.return_value = ''
hosts = self.role.get_user_hosts('root')
self.assertEqual(hosts, [])
execute.assert_called_with('''mysql -u root -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def checks_that_a_user_exists(self):
with patch.object(self.role, 'get_user_hosts') as get_user_hosts:
get_user_hosts.return_value = ['localhost']
self.assertTrue(self.role.user_exists('johndoe', 'localhost'))
get_user_hosts.assert_called_with('johndoe')
@istest
def checks_that_a_user_doesnt_exist(self):
with patch.object(self.role, 'get_user_hosts') as get_user_hosts:
get_user_hosts.return_value = ['localhost']
self.assertFalse(self.role.user_exists('johndoe', 'somewhere-else'))
get_user_hosts.assert_called_with('johndoe')
@istest
def creates_a_user_if_it_doesnt_exist_yet(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = False
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertTrue(result)
execute.assert_called_with("""mysql -u root -e "CREATE USER 'johndoe'@'localhost' IDENTIFIED BY 'mypass';" mysql""", sudo=True, stdout=False)
@istest
def doesnt_create_user_if_it_already_exists(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = True
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertFalse(result)
self.assertFalse(execute.called)
@istest
def creates_a_user_with_mysql_password(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = False
self.role.mysql_root_pass = 'otherpass'
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertTrue(result)
execute.assert_called_with("""mysql -u root --password="otherpass" -e "CREATE USER 'johndoe'@'localhost' IDENTIFIED BY 'mypass';" mysql""",
sudo=True, stdout=False)
@istest
def checks_that_a_database_is_present(self):
with self.execute_mock() as execute:
execute.return_value = DATABASES
result = self.role.is_database_present('performance_schema')
self.assertTrue(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def checks_that_a_database_is_not_present(self):
with self.execute_mock() as execute:
execute.return_value = DATABASES
result = self.role.is_database_present('bad_bad_database')
self.assertFalse(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def checks_that_a_database_is_not_present_when_there_is_none(self):
with self.execute_mock() as execute:
execute.return_value = ''
result = self.role.is_database_present('performance_schema')
self.assertFalse(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def creates_a_database_if_it_doesnt_exist_yet(self):
with patch.object(self.role, 'is_database_present') as is_database_present, self.execute_mock() as execute:
is_database_present.return_value = False
result = self.role.ensure_database('my_data')
self.assertTrue(result)
execute.assert_called_with('mysql -u root -e "CREATE DATABASE my_data" mysql', sudo=True, stdout=False)
@istest
def doesnt_create_a_database_if_it_already_exists(self):
with patch.object(self.role, 'is_database_present') as is_database_present, self.execute_mock() as execute:
is_database_present.return_value = True
result = self.role.ensure_database('my_data')
self.assertFalse(result)
self.assertFalse(execute.called)
@istest
def grants_privilege_if_not_granted_yet(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=False)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.* TO 'john'@'%'" mysql''', stdout=False, sudo=True)
@istest
def grants_privilege_if_not_granted_yet_for_table(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo.bar', username='john', login_from='%', with_grant_option=False)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.bar TO 'john'@'%'" mysql''', stdout=False, sudo=True)
@istest
def grants_privilege_with_grant_option_if_not_granted_yet(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=True)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.* TO 'john'@'%' WITH GRANT OPTION" mysql''', stdout=False, sudo=True)
@istest
def doesnt_grant_privilege_if_already_granted(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = True
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=True)
self.assertFalse(result)
self.assertFalse(execute.called)
| mit | -8,831,662,025,980,692,000 | 41.702899 | 157 | 0.6075 | false |
danpetrikin/djangoappengine_rdbms | management/commands/runserver.py | 1 | 7269 | from optparse import make_option
import logging
import sys
from django.db import connections
from ...boot import PROJECT_DIR
from ...db.backend.base import DatabaseWrapper
from django.core.management.base import BaseCommand
from django.core.management.commands.runserver import BaseRunserverCommand
from django.core.exceptions import ImproperlyConfigured
from google.appengine.tools import dev_appserver_main
from django.core.management import call_command
class Command(BaseRunserverCommand):
"""Overrides the default Django runserver command.
Instead of starting the default Django development server this command
fires up a copy of the full fledged App Engine dev_appserver that emulates
the live environment your application will be deployed to.
"""
option_list = BaseCommand.option_list + (
make_option('--debug', action='store_true', default=False,
help='Prints verbose debugging messages to the console while running.'),
make_option('--debug_imports', action='store_true', default=False,
help='Prints debugging messages related to importing modules, including \
search paths and errors.'),
make_option('-c', '--clear_datastore', action='store_true', default=False,
help='Clears the datastore data and history files before starting the web server.'),
make_option('--high_replication', action='store_true', default=False,
help='Use the high replication datastore consistency model.'),
make_option('--require_indexes', action='store_true', default=False,
help="""Disables automatic generation of entries in the index.yaml file. Instead, when
the application makes a query that requires that its index be defined in the
file and the index definition is not found, an exception will be raised,
similar to what would happen when running on App Engine."""),
make_option('--enable_sendmail', action='store_true', default=False,
help='Uses the local computer\'s Sendmail installation for sending email messages.'),
make_option('--datastore_path',
help="""The path to use for the local datastore data file. The server creates this file
if it does not exist."""),
make_option('--history_path',
help="""The path to use for the local datastore history file. The server uses the query
history file to generate entries for index.yaml."""),
make_option('--login_url',
help='The relative URL to use for the Users sign-in page. Default is /_ah/login.'),
make_option('--smtp_host',
help='The hostname of the SMTP server to use for sending email messages.'),
make_option('--smtp_port',
help='The port number of the SMTP server to use for sending email messages.'),
make_option('--smtp_user',
help='The username to use with the SMTP server for sending email messages.'),
make_option('--smtp_password',
help='The password to use with the SMTP server for sending email messages.'),
make_option('--use_sqlite', action='store_true', default=False,
help='Use the new, SQLite datastore stub.'),
)
help = 'Runs a copy of the App Engine development server.'
args = '[optional port number, or ipaddr:port]'
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
# hack __main__ so --help in dev_appserver_main works OK.
sys.modules['__main__'] = dev_appserver_main
return super(Command, self).create_parser(prog_name, subcommand)
def run_from_argv(self, argv):
"""
Captures the program name, usually "manage.py"
"""
self.progname = argv[0]
super(Command, self).run_from_argv(argv)
def run(self, *args, **options):
"""
Starts the App Engine dev_appserver program for the Django project.
The appserver is run with default parameters. If you need to pass any special
parameters to the dev_appserver you will have to invoke it manually.
Unlike the normal devserver, does not use the autoreloader as
App Engine dev_appserver needs to be run from the main thread
"""
args = []
# Set bind ip/port if specified.
if self.addr:
args.extend(["--address", self.addr])
if self.port:
args.extend(["--port", self.port])
# If runserver is called using handle(), progname will not be set
if not hasattr(self, 'progname'):
self.progname = "manage.py"
# Add email settings
from django.conf import settings
if not options.get('smtp_host', None) and not options.get('enable_sendmail', None):
args.extend(['--smtp_host', settings.EMAIL_HOST,
'--smtp_port', str(settings.EMAIL_PORT),
'--smtp_user', settings.EMAIL_HOST_USER,
'--smtp_password', settings.EMAIL_HOST_PASSWORD])
# Pass the application specific datastore location to the server.
preset_options = {}
for name in connections:
connection = connections[name]
if isinstance(connection, DatabaseWrapper):
args.extend(["--mysql_user", connection.settings_dict.get("USER")])
args.extend(["--mysql_password", connection.settings_dict.get("PASSWORD")])
#args.extend(["--mysql_port", "root")
#args.extend(["--mysql_host", "root")
preset_options = connection.settings_dict.get('DEV_APPSERVER_OPTIONS', {})
break
# Process the rest of the options here
bool_options = ['debug', 'debug_imports', 'clear_datastore', 'require_indexes',
'high_replication', 'enable_sendmail', 'use_sqlite',]
for opt in bool_options:
if options[opt] != False:
args.append("--%s" % opt)
str_options = ['datastore_path', 'history_path', 'login_url', 'smtp_host', 'smtp_port',
'smtp_user', 'smtp_password',]
for opt in str_options:
if options.get(opt, None) != None:
args.extend(["--%s" % opt, options[opt]])
# Fill any non-overridden options with presets from settings
for opt, value in preset_options.items():
arg = "--%s" % opt
if arg not in args:
if value and opt in bool_options:
args.append(arg)
elif opt in str_options:
args.extend([arg, value])
# TODO: issue warning about bogus option key(s)?
# Reset logging level to INFO as dev_appserver will spew tons of debug logs
logging.getLogger().setLevel(logging.INFO)
logging.info(args)
logging.info(PROJECT_DIR)
# Append the current working directory to the arguments.
dev_appserver_main.main([self.progname] + args + [PROJECT_DIR])
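# Illustrative invocations (flags correspond to the options defined above;
# host/port and paths are made up):
#
#   python manage.py runserver
#   python manage.py runserver 0.0.0.0:8080 --clear_datastore --use_sqlite
#   python manage.py runserver --datastore_path=/tmp/myapp.datastore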
| bsd-3-clause | 2,640,274,047,164,670,000 | 46.207792 | 99 | 0.616729 | false |
rodrigob/downhill | docs/conf.py | 1 | 1515 | import os
import sys
import better
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
#'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
#'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'numpydoc',
]
autosummary_generate = True
autodoc_default_flags = ['members']
numpydoc_show_class_members = False
numpydoc_show_inherited_class_members = True
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
master_doc = 'index'
project = u'Downhill'
copyright = u'2015, Leif Johnson'
version = '0.3'
release = '0.3.0pre'
exclude_patterns = ['_build']
templates_path = ['_templates']
pygments_style = 'tango'
html_theme = 'better'
html_theme_path = [better.better_theme_path]
html_theme_options = dict(
rightsidebar=False,
inlinecss='',
cssfiles=['_static/style-tweaks.css'],
showheader=True,
showrelbartop=True,
showrelbarbottom=True,
linktotheme=True,
sidebarwidth='15rem',
textcolor='#111',
headtextcolor='#333',
footertextcolor='#333',
ga_ua='',
ga_domain='',
)
html_short_title = 'Home'
html_static_path = ['_static']
def h(xs):
return ['{}.html'.format(x) for x in xs.split()]
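# e.g. h('globaltoc searchbox') -> ['globaltoc.html', 'searchbox.html']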
html_sidebars = {
'index': h('gitwidgets globaltoc sourcelink searchbox'),
'**': h('gitwidgets localtoc sourcelink searchbox'),
}
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| mit | -1,181,340,417,915,678,700 | 23.435484 | 66 | 0.667327 | false |
freerangerouting/frr | doc/manpages/conf.py | 3 | 12720 | # -*- coding: utf-8 -*-
#
# FRR documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 31 16:00:52 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# prolog for various variable substitutions
rst_prolog = ''
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FRR'
copyright = u'2017, FRR'
author = u'FRR authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = u'?.?'
# The full version, including alpha/beta/rc tags.
release = u'?.?-?'
# -----------------------------------------------------------------------------
# Extract values from codebase for substitution into docs.
# -----------------------------------------------------------------------------
# Various installation prefixes. Values are extracted from config.status.
# Reasonable defaults are set in case that file does not exist.
replace_vars = {
'AUTHORS': author,
'COPYRIGHT_YEAR': '1999-2005',
'COPYRIGHT_STR': 'Copyright (c) 1999-2005',
'PACKAGE_NAME': project.lower(),
'PACKAGE_TARNAME': project.lower(),
'PACKAGE_STRING': project.lower() + ' latest',
'PACKAGE_URL': 'https://frrouting.org/',
'PACKAGE_VERSION': 'latest',
'INSTALL_PREFIX_ETC': '/etc/frr',
'INSTALL_PREFIX_SBIN': '/usr/lib/frr',
'INSTALL_PREFIX_STATE': '/var/run/frr',
'INSTALL_PREFIX_MODULES': '/usr/lib/frr/modules',
'INSTALL_USER': 'frr',
'INSTALL_GROUP': 'frr',
'INSTALL_VTY_GROUP': 'frrvty',
'GROUP': 'frr',
'USER': 'frr',
}
# extract version information, installation location, other stuff we need to
# use when building final documents
val = re.compile(r'^S\["([^"]+)"\]="(.*)"$')
try:
with open('../../config.status', 'r') as cfgstatus:
for ln in cfgstatus.readlines():
m = val.match(ln)
if not m or m.group(1) not in replace_vars.keys(): continue
replace_vars[m.group(1)] = m.group(2)
except IOError:
# if config.status doesn't exist, just ignore it
pass
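# A config.status line matched by the regex above looks like (value shown is
# only an example):
#   S["INSTALL_PREFIX_ETC"]="/etc/frr"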
# manually fill out some of these we can't get from config.status
replace_vars['COPYRIGHT_STR'] = "Copyright (c)"
replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['COPYRIGHT_YEAR'])
replace_vars['COPYRIGHT_STR'] += ' {0}'.format(replace_vars['AUTHORS'])
release = replace_vars['PACKAGE_VERSION']
version = release.split('-')[0]
# add substitutions to prolog
for key, value in replace_vars.items():
rst_prolog += '.. |{0}| replace:: {1}\n'.format(key, value)
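# Each iteration appends a substitution definition such as (example value):
#   .. |PACKAGE_VERSION| replace:: latest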
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'common-options.rst', 'epilogue.rst', 'defines.rst', 'bfd-options.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'FRRdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FRR.tex', u'FRR User Manual',
u'FRR', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# If true, show URL addresses after external links.
#man_show_urls = False
fwfrr = "{0} routing engine for use with FRRouting."
man_pages = [
('frr-bfdd', 'frr-bfdd', fwfrr.format("a bfd"), [], 8),
('frr-bgpd', 'frr-bgpd', fwfrr.format("a BGPv4, BGPv4+, BGPv4-"), [], 8),
('frr-eigrpd', 'frr-eigrpd', fwfrr.format("an EIGRP"), [], 8),
('frr-fabricd', 'frr-fabricd', fwfrr.format("an OpenFabric"), [], 8),
('frr-isisd', 'frr-isisd', fwfrr.format("an IS-IS"), [], 8),
('frr-ldpd', 'frr-ldpd', fwfrr.format("an LDP"), [], 8),
('frr-nhrpd', 'frr-nhrpd', fwfrr.format("a Next Hop Routing Protocol"), [], 8),
('frr-ospf6d', 'frr-ospf6d', fwfrr.format("an OSPFv3"), [], 8),
('frr-ospfclient', 'frr-ospfclient', 'an example ospf-api client', [], 8),
('frr-ospfd', 'frr-ospfd', fwfrr.format("an OSPFv2"), [], 8),
('frr-pbrd', 'frr-pbrd', fwfrr.format("a PBR"), [], 8),
('frr-pimd', 'frr-pimd', fwfrr.format("a PIM"), [], 8),
('frr-ripd', 'frr-ripd', fwfrr.format("a RIP"), [], 8),
('frr-ripngd', 'frr-ripngd', fwfrr.format("a RIPNG"), [], 8),
('frr-sharpd', 'frr-sharpd', fwfrr.format("a SHARP"), [], 8),
('frr-staticd', 'frr-staticd', fwfrr.format("a static route manager"), [], 8),
('frr-vrrpd', 'frr-vrrpd', fwfrr.format("a VRRP"), [], 8),
('frr-watchfrr', 'frr-watchfrr', 'a program to monitor the status of FRRouting daemons', [], 8),
('frr-zebra', 'frr-zebra', 'a routing manager for use with associated FRRouting components.', [], 8),
('frr', 'frr', 'a systemd interaction script', [], 1),
('mtracebis', 'mtracebis', "a multicast trace client", [], 8),
('vtysh', 'vtysh', 'an integrated shell for FRRouting.', [], 1),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# custom extensions here
| gpl-2.0 | 21,080,896,467,612,010 | 34.530726 | 110 | 0.667217 | false |
pmutale/www.mutale.nl | settings/core.py | 1 | 1265 | import os
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qs7s_mqq@1d6uz%rj@q((#p@a^%hzemhhjoh4nolyr^n5t3-k!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
ADMINS = [('Peter', '[email protected]'), ('Peter', '[email protected]')]
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'nl'
TIME_ZONE = 'Europe/Amsterdam'
USE_I18N = True
USE_L10N = True
USE_TZ = True
| unlicense | 6,090,058,569,688,771,000 | 24.3 | 73 | 0.731225 | false |
vitalyvolkov/fontbakery | bakery/tasks.py | 1 | 29958 | # coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
from __future__ import print_function
import os
import sys
import glob
import subprocess
import codecs
from flask.ext.rq import job
import plistlib
from .utils import RedisFd
import re
import yaml
from fontTools import ttLib
from fontaine.ext.subsets import Extension as SubsetExtension
def run(command, cwd, log):
""" Wrapper for subprocess.Popen with custom logging support.
:param command: shell command to run, required
:param cwd: - current working dir, required
:param log: - logging object with .write() method, required
"""
# print the command on the worker console
print("[%s]:%s" % (cwd, command))
# log the command
log.write('\n$ %s\n' % command)
# Start the command
env = os.environ.copy()
env.update({'PYTHONPATH': os.pathsep.join(sys.path)})
p = subprocess.Popen(command, shell=True, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True, env=env)
while True:
# Read output and errors
stdout = p.stdout.readline()
stderr = p.stderr.readline()
# Log output
log.write(stdout)
# Log error
if stderr:
# print the error on the worker console
print(stderr, end='')
# log error
log.write(stderr, prefix='Error: ')
# If no output and process no longer running, stop
if not stdout and not stderr and p.poll() is not None:
break
# if the command did not exit cleanly (with returncode 0)
if p.returncode:
msg = 'Fatal: Exited with return code %s \n' % p.returncode
# Log the exit status
log.write(msg)
# Raise an error on the worker
raise StandardError(msg)
def prun(command, cwd, log=None):
"""
    Wrapper for subprocess.Popen that captures output and returns it as the result
    :param command: shell command to run
    :param cwd: current working dir
    :param log: logging object with .write() method
"""
# print the command on the worker console
print("[%s]:%s" % (cwd, command))
env = os.environ.copy()
env.update({'PYTHONPATH': os.pathsep.join(sys.path)})
p = subprocess.Popen(command, shell=True, cwd=cwd,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True, env=env)
stdout = p.communicate()[0]
if log:
log.write('$ %s' % command)
log.write(stdout)
return stdout
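# Illustrative usage of the two wrappers above; the paths and commands here are
# made-up examples, not part of the real bake pipeline:
#
#   log = RedisFd('/tmp/fontbakery-build.log', 'w')
#   run('make ttf', cwd='/tmp/project', log=log)    # raises StandardError on a non-zero exit
#   head = prun('git rev-parse HEAD', cwd='/tmp/project', log=log)    # returns captured stdout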
@job
def project_git_sync(project):
"""
Sync _in git repo, or download it if it doesn't yet exist.
:param project: :class:`~bakery.models.Project` instance
:param log: :class:`~bakery.utils.RedisFd` as log
"""
from .app import db, app
project.is_ready = False
db.session.add(project)
db.session.commit()
db.session.refresh(project)
_in = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.in/' % project)
# Create the incoming repo directory (_in) if it doesn't exist
if not os.path.exists(_in):
# log.write('Creating Incoming Directory\n', prefix='### ')
prun('mkdir -p %s' % _in, cwd=app.config['DATA_ROOT'])
# Update _in if it already exists with a .git directory
if os.path.exists(os.path.join(_in, '.git')):
# log.write('Sync Git Repository\n', prefix='### ')
# remove anything in the _in directory that isn't checked in
prun('git reset --hard', cwd=_in)
prun('git clean --force', cwd=_in)
# pull from origin master branch
prun('git pull origin master', cwd=_in)
# Since it doesn't exist as a git repo, get the _in repo
else:
# clone the repository
# log.write('Copying Git Repository\n', prefix='### ')
try:
# TODO in the future, use http://schacon.github.io/git/git-ls-remote.html to validate the URL string
# http://stackoverflow.com/questions/9610131/how-to-check-the-validity-of-a-remote-git-repository-url
prun('git clone --depth=100 --quiet --branch=master %(clone)s .' % project, cwd=_in)
# if the clone action didn't work, just copy it
except:
# if this is a file URL, copy the files, and set up the _in directory as a git repo
if project.clone[:7] == "file://":
# cp recursively, keeping all attributes, not following symlinks, not deleting existing files, verbosely
prun('cp -a %(clone)s .' % project, cwd=_in)
#
prun('git init .', cwd=_in)
prun('git add *', cwd=_in)
msg = "Initial commit made automatically by Font Bakery"
prun('git commit -a -m "%s"' % msg, cwd=_in)
# Now we have it, create an initial project state
finally:
config = project.config
# set project state as ready after sync is done
project.is_ready = True
db.session.add(project)
db.session.commit()
def copy_ufo_files(project, build, log):
from .app import app
config = project.config
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_in = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.in/' % param)
_out = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
_out_src = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/sources/' % param)
log.write('Copy [and Rename] UFOs\n', prefix='### ')
# Set the familyName
if config['state'].get('familyname', None):
familyName = config['state']['familyname']
else:
familyName = False
# Copy UFO files from git repo to _out_src [renaming their filename and metadata]
ufo_dirs = []
for x in config['state'].get('process_files', []):
if x.endswith('.ufo'):
ufo_dirs.append(x)
for _in_ufo in ufo_dirs:
# Decide the incoming filepath
_in_ufo_path = os.path.join(_in, _in_ufo)
# Read the _in_ufo fontinfo.plist
_in_ufoPlist = os.path.join(_in_ufo_path, 'fontinfo.plist')
_in_ufoFontInfo = plistlib.readPlist(_in_ufoPlist)
# Get the styleName
styleName = _in_ufoFontInfo['styleName']
# Always have a regular style
if styleName == 'Normal':
styleName = 'Regular'
# Get the familyName, if its not set
if not familyName:
familyName = _in_ufoFontInfo.get('openTypeNamePreferredFamilyName', '') \
or _in_ufoFontInfo.get('familyName', '')
if not familyName:
log.write('Please set openTypeNamePreferredFamilyName or familyName in %s fontinfo.plist and run another bake process.' % _in_ufo, prefix='### ')
raise Exception('Please set openTypeNamePreferredFamilyName or familyName in %s fontinfo.plist and run another bake process.' % _in_ufo)
# Remove whitespace from names
styleNameNoWhitespace = re.sub(r'\s', '', styleName)
familyNameNoWhitespace = re.sub(r'\s', '', familyName)
# Decide the outgoing filepath
_out_ufo = "%s-%s.ufo" % (familyNameNoWhitespace, styleNameNoWhitespace)
_out_ufo_path = os.path.join(_out_src, _out_ufo)
# Copy the UFOs
run("cp -a '%s' '%s'" % (_in_ufo_path, _out_ufo_path), cwd=_out, log=log)
# If we rename, change the font family name metadata inside the _out_ufo
if familyName:
# Read the _out_ufo fontinfo.plist
_out_ufoPlist = os.path.join(_out_ufo_path, 'fontinfo.plist')
_out_ufoFontInfo = plistlib.readPlist(_out_ufoPlist)
# Set the familyName
_out_ufoFontInfo['familyName'] = familyName
# Set PS Name
            # Ref: www.adobe.com/devnet/font/pdfs/5088.FontNames.pdf : < Family Name > < Vendor ID > - < Weight > < Width > < Slant > < Character Set >
_out_ufoFontInfo['postscriptFontName'] = "%s-%s" % (familyNameNoWhitespace, styleNameNoWhitespace)
# Set Full Name
_out_ufoFontInfo['postscriptFullName'] = "%s %s" % (familyName, styleName)
# Write _out fontinfo.plist
plistlib.writePlist(_out_ufoFontInfo, _out_ufoPlist)
scripts_folder = os.path.join(app.config['ROOT'], 'scripts')
log.write('Convert UFOs to TTFs (ufo2ttf.py)\n', prefix='### ')
os.chdir(_out_src)
for name in glob.glob("*.ufo"):
name = name[:-4] # cut .ufo
cmd = "python ufo2ttf.py '{out_src}{name}.ufo' '{out}{name}.ttf' '{out_src}{name}.otf'".format(
out_src=_out_src, name=name, out=_out)
run(cmd, cwd=scripts_folder, log=log)
def copy_ttx_files(project, build, log):
from .app import app
config = project.config
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_in = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.in/' % param)
_out = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
_out_src = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/sources/' % param)
ttx_files = []
for x in config['state'].get('process_files', []):
if x.endswith('.ttx'):
ttx_files.append(x)
for ttx_file in ttx_files:
_ttx_path = os.path.join(_in, ttx_file)
if not os.path.exists(_ttx_path):
run("echo file '{}' not found".format(_ttx_path), cwd=_out, log=log)
continue
font = ttLib.TTFont(None, lazy=False, recalcBBoxes=True, verbose=False, allowVID=False)
font.importXML(_ttx_path, quiet=True)
_ttx_name = os.path.splitext(os.path.basename(_ttx_path))[0]
def nameTableRead(font, NameID, fallbackNameID=False):
for record in font['name'].names:
if record.nameID == NameID:
if b'\000' in record.string:
return record.string.decode('utf-16-be').encode('utf-8')
else:
return record.string
if fallbackNameID:
return nameTableRead(font, fallbackNameID)
styleName = nameTableRead(font, 17, 2)
# Always have a regular style
if styleName == 'Normal':
styleName = 'Regular'
# NameID=1 is required
familyName = nameTableRead(font, 16, 1)
# Remove whitespace from names
styleNameNoWhitespace = re.sub(r'\s', '', styleName)
familyNameNoWhitespace = re.sub(r'\s', '', familyName)
_out_ttx_name = "{familyname}-{stylename}".format(familyname=familyNameNoWhitespace, stylename=styleNameNoWhitespace)
if font.sfntVersion == '\x00\x01\x00\x00': # TTF
_out_name = '{}.ttf.ttx'.format(_out_ttx_name)
elif font.sfntVersion == 'OTTO': # OTF
_out_name = '{}.otf.ttx'.format(_out_ttx_name)
run("cp '{}' '{}'".format(_ttx_path, _out_src), cwd=_out, log=log)
run("mv '{ttx_name}.ttx' '{out_name}'".format(ttx_name=_ttx_name, out_name=_out_name), cwd=_out_src, log=log)
run("ttx -i -q {}".format(_out_name), cwd=_out_src, log=log)
run("mv {0}.ttf.ttf {0}.ttf".format(_out_ttx_name), cwd=_out_src, log=log)
if font.sfntVersion == 'OTTO': # OTF
scripts_folder = os.path.join(app.config['ROOT'], 'scripts')
cmd = "python autoconvert.py '{out_src}{ttx_name}.otf' '{out}{ttx_name}.ttf'".format(
out_src=_out_src, ttx_name=_out_ttx_name, out=_out)
run(cmd, cwd=scripts_folder, log=log)
else:
run("mv '{0}.ttf' '../{0}.ttf'".format(_out_ttx_name), _out_src, log=log)
def copy_and_rename_process(project, build, log):
"""
Setup UFOs for building
"""
from .app import app
config = project.config
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_user = os.path.join(app.config['DATA_ROOT'], '%(login)s/' % param)
_in = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.in/' % param)
_out = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
if project.source_files_type == 'ufo':
copy_ufo_files(project, build, log)
else:
copy_ttx_files(project, build, log)
# Copy licence file
# TODO: Infer license type from filename
# TODO: Copy file based on license type
if config['state'].get('license_file', None):
# Set _in license file name
licenseFileInFullPath = config['state']['license_file']
licenseFileIn = licenseFileInFullPath.split('/')[-1]
        # List possible OFL and Apache filenames
listOfOflFilenames = ['Open Font License.markdown', 'OFL.txt', 'OFL.md']
listOfApacheFilenames = ['APACHE.txt', 'LICENSE']
# Canonicalize _out license file name
if licenseFileIn in listOfOflFilenames:
licenseFileOut = 'OFL.txt'
elif licenseFileIn in listOfApacheFilenames:
licenseFileOut = 'LICENSE.txt'
else:
licenseFileOut = licenseFileIn
# Copy license file
_in_license = os.path.join(_in, licenseFileInFullPath)
_out_license = os.path.join(_out, licenseFileOut)
run('cp -a "%s" "%s"' % (_in_license, _out_license), cwd=_user, log=log)
else:
log.write('License file not copied\n', prefix='Error: ')
# Copy FONTLOG file
_in_fontlog = os.path.join(_in, 'FONTLOG.txt')
_out_fontlog = os.path.join(_out, 'FONTLOG.txt')
if os.path.exists(_in_fontlog) and os.path.isfile(_in_fontlog):
run('cp -a "%s" "%s"' % (_in_fontlog, _out_fontlog), cwd=_user, log=log)
else:
log.write('FONTLOG.txt does not exist\n', prefix='Error: ')
# Copy DESCRIPTION.en_us.html file
_in_desc = os.path.join(_in, 'DESCRIPTION.en_us.html')
_out_desc = os.path.join(_out, 'DESCRIPTION.en_us.html')
if os.path.exists(_in_desc) and os.path.isfile(_in_desc):
run('cp -a "%s" "%s"' % (_in_desc, _out_desc), cwd=_user, log=log)
else:
log.write('DESCRIPTION.en_us.html does not exist upstream, will generate one later\n', prefix='Error: ')
# Copy METADATA.json file
_in_meta = os.path.join(_in, 'METADATA.json')
_out_meta = os.path.join(_out, 'METADATA.json')
if os.path.exists(_in_meta) and os.path.isfile(_in_meta):
run('cp -a "%s" "%s"' % (_in_meta, _out_meta), cwd=_user, log=log)
else:
log.write('METADATA.json does not exist upstream, will generate one later\n', prefix='Error: ')
# Copy any txt files selected by user
if config['state'].get('txt_files_copied', None):
for filename in config['state']['txt_files_copied']:
_in_file = os.path.join(_in, filename)
_out_file = os.path.join(_out, filename)
run('cp -a "%s" "%s"' % (_in_file, _out_file), cwd=_user, log=log)
def ttfautohint_process(project, build, log):
"""
Run ttfautohint with project command line settings for each
ttf file in result src folder, outputting them in the _out root,
or just copy the ttfs there.
"""
from .app import app
# $ ttfautohint -l 7 -r 28 -G 0 -x 13 -w "" -W -c original_font.ttf final_font.ttf
config = project.config
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_out = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
if config['state'].get('ttfautohint', None):
log.write('Autohint TTFs (ttfautohint)\n', prefix='### ')
params = config['state']['ttfautohint']
os.chdir(_out)
for name in glob.glob("*.ttf"):
name = name[:-4] # cut .ttf
run("mv '{name}.ttf' '{name}.autohint.ttf'".format(name=name), cwd=_out, log=log)
run("ttfautohint {params} '{name}.autohint.ttf' '{name}.ttf'".format(params=params, name=name), cwd=_out, log=log)
run("rm '{name}.autohint.ttf'".format(name=name), cwd=_out, log=log)
def ttx_process(project, build, log):
"""
Roundtrip TTF files through TTX to compact their filesize
"""
from .app import app
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_out = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
_out_src = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/sources/' % param)
log.write('Compact TTFs with ttx\n', prefix='### ')
os.chdir(_out_src)
for name in glob.glob("*.ufo"):
name = name[:-4] # cut .ufo
filename = os.path.join(_out, name)
# convert the ttf to a ttx file - this may fail
cmd = "ttx -i '%s.ttf'" % filename # -q
run(cmd, cwd=_out, log=log)
# move the original ttf to the side
cmd = "mv '%s.ttf' '%s.ttf.orig'" % (filename, filename)
run(cmd, cwd=_out, log=log)
# convert the ttx back to a ttf file - this may fail
cmd = "ttx -i '%s.ttx'" % filename # -q
run(cmd, cwd=_out, log=log)
# compare filesizes TODO print analysis of this :)
cmd = "ls -l '%s.ttf'*" % filename
run(cmd, cwd=_out, log=log)
# remove the original (duplicate) ttf
cmd = "rm '%s.ttf.orig'" % filename
run(cmd, cwd=_out, log=log)
# move ttx files to src
cmd = "mv '%s.ttx' %s" % (filename, _out_src)
run(cmd, cwd=_out, log=log)
def subset_process(project, build, log):
from .app import app
config = project.config
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_out = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
_out_src = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/sources/' % param)
log.write('Subset TTFs (pyftsubset)\n', prefix='### ')
for subset in config['state']['subset']:
os.chdir(_out_src)
for name in glob.glob("*.ufo"):
name = name[:-4] # cut .ufo
glyphs = open(SubsetExtension.get_subset_path(subset)).read()
cmd = ("pyftsubset %(out)s.ttf %(glyphs)s"
" --layout-features='*' --glyph-names --symbol-cmap"
" --notdef-glyph --notdef-outline --recommended-glyphs"
" --name-IDs='*' --name-legacy --name-languages='*'"
" --hinting")
cmd = cmd % {'glyphs': glyphs.replace('\n', ' '),
'out': os.path.join(_out, name)}
run(cmd, cwd=_out, log=log)
run('mv %(out)s.ttf.subset %(out)s.%(subset)s' % {'subset': subset,
'out': os.path.join(_out, name)}, cwd=_out, log=log)
# remove +latin from the subset name
os.chdir(_out)
files = glob.glob('*+latin*')
for filename in files:
newfilename = filename.replace('+latin', '')
run("mv \"%s\" \"%s\"" % (filename, newfilename), cwd=_out, log=log)
def generate_metadata_process(project, build, log):
""" Generate METADATA.json using genmetadata.py """
from .app import app
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_out = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
cmd = "python %(wd)s/scripts/genmetadata.py '%(out)s'"
log.write('Generate METADATA.json (genmetadata.py)\n', prefix='### ')
run(cmd % {'wd': app.config['ROOT'], 'out': _out}, cwd=_out, log=log)
def fontaine_process(project, build, log):
"""
Run pyFontaine on ttf files
"""
from .app import app
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_out = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
log.write('pyFontaine (fontaine/main.py)\n', prefix='### ')
os.chdir(_out)
files = glob.glob('*.ttf')
for file in files:
cmd = "pyfontaine --text '%s' >> 'sources/fontaine.txt'" % file
try:
run(cmd, cwd=_out, log=log)
except StandardError:
log.write('PyFontaine raised exception. Check latest version.\n')
# Ignore pyfontaine if it raises error
pass
# TODO also save the totals for the dashboard....
# log.write('Running Fontaine on Results\n', prefix='### ')
# fonts = utils.project_fontaine(project)
# project.config['state']['fontaine'] = fonts
# project.save_state()
from checker import run_set
# register yaml serializer for tests result objects.
from checker.base import BakeryTestCase
def repr_testcase(dumper, data):
def method_doc(doc):
if doc is None:
return 'None'
else:
return " ".join(doc.encode('utf-8', 'xmlcharrefreplace').split())
return dumper.represent_mapping(u'tag:yaml.org,2002:map', {
'methodDoc': method_doc(data._testMethodDoc),
'tool': data.tool,
'name': data.name,
'methodName': data._testMethodName,
'targets': data.targets,
'tags': getattr(data, data._testMethodName).tags,
'err_msg': getattr(data, '_err_msg', '')
})
yaml.SafeDumper.add_multi_representer(BakeryTestCase, repr_testcase)
def upstream_revision_tests(project, revision):
""" This function run upstream tests set on
project.config['local']['ufo_dirs'] set in selected git revision.
This mean that success (aka getting any result) should be occasional
particular case. Because data and
set of folders are changing during font development process.
:param project: Project instance
:param revision: Git revision
:param force: force to make tests again
:return: dictionary with serialized tests results formatted by `repr_testcase`
"""
from .app import app
param = {'login': project.login, 'id': project.id, 'revision': revision}
_in = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.in/' % project)
_out_folder = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/utests/' % param)
_out_yaml = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/utests/%(revision)s.yaml' % param)
if os.path.exists(_out_yaml):
return yaml.safe_load(open(_out_yaml, 'r'))
if not os.path.exists(_out_folder):
os.makedirs(_out_folder)
result = {}
os.chdir(_in)
prun("git checkout %s" % revision, cwd=_in)
ufo_dirs = []
ttx_files = []
metadata_files = []
l = len(_in)
for root, dirs, files in os.walk(_in):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1].lower() in ['.ttx', ]:
ttx_files.append(fullpath[l:])
if f.lower() == 'metadata.json':
metadata_files.append(fullpath[l:])
for d in dirs:
fullpath = os.path.join(root, d)
if os.path.splitext(fullpath)[1].lower() == '.ufo':
ufo_dirs.append(fullpath[l:])
for font in ufo_dirs:
if os.path.exists(os.path.join(_in, font)):
result[font] = run_set(os.path.join(_in, font), 'upstream')
for metadata_path in metadata_files:
result[metadata_path] = run_set(metadata_path, 'metadata')
for font in ttx_files:
print(font)
print(os.path.join(_in, font))
if os.path.exists(os.path.join(_in, font)):
result[font] = run_set(os.path.join(_in, font), 'upstream-ttx')
result['Consistency fonts'] = run_set(_in, 'consistency')
l = codecs.open(_out_yaml, mode='w', encoding="utf-8")
l.write(yaml.safe_dump(result))
l.close()
return yaml.safe_load(open(_out_yaml, 'r'))
def result_tests(project, build):
from .app import app
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_out_src = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
_out_yaml = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s.rtests.yaml' % param)
if os.path.exists(_out_yaml):
return yaml.safe_load(open(_out_yaml, 'r'))
result = {}
os.chdir(_out_src)
for font in glob.glob("*.ttf"):
result[font] = run_set(os.path.join(_out_src, font), 'result')
# Comment during debug
l = open(_out_yaml, 'w')
l.write(yaml.safe_dump(result))
l.close()
d = yaml.safe_load(open(_out_yaml, 'r'))
# os.remove(_out_yaml)
return d
from fixer import fix_font
def result_fixes(project, build):
from .app import app
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_out_src = os.path.join(app.config['DATA_ROOT'],
'%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
_out_yaml = os.path.join(app.config['DATA_ROOT'],
'%(login)s/%(id)s.out/%(build)s.%(revision)s.rtests.yaml' % param)
fix_font(_out_yaml, _out_src)
def discover_dashboard(project, build, log):
from .app import app
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_yaml = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.bakery.yaml' % param)
_out_src = os.path.join(app.config['DATA_ROOT'],
'%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
cmd = "python {wd}/scripts/discovery.py '{out}' '{yaml}'".format(
wd=app.config['ROOT'], out=_out_src, yaml=_yaml)
log.write('Discovery Dashboard data\n', prefix='### ')
run(cmd, cwd=_out_src, log=log)
@job
def process_project(project, build, revision, force_sync=False):
"""
Bake the project, building all fonts according to the project setup.
:param project: :class:`~bakery.models.Project` instance
:param log: :class:`~bakery.utils.RedisFd` as log
"""
from .app import app, db
if force_sync:
project_git_sync(project)
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_in = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.in/' % param)
_out_src = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s/sources/' % param)
_out_log = os.path.join(app.config['DATA_ROOT'], '%(login)s/%(id)s.out/%(build)s.%(revision)s.process.log' % param)
    # Make the longest output path (creates intermediate directories too)
os.makedirs(_out_src)
log = RedisFd(_out_log, 'w')
# setup is set after 'bake' button is first pressed
if project.config['local'].get('setup', None):
# this code change upstream repository
try:
run("git checkout %s" % revision, cwd=_in, log=log)
log.write('Bake Begins!\n', prefix='### ')
copy_and_rename_process(project, build, log)
ttfautohint_process(project, build, log)
ttx_process(project, build, log)
subset_process(project, build, log)
generate_metadata_process(project, build, log)
fontaine_process(project, build, log)
            # result_tests isn't needed here, but since this already runs as a
            # background task, build the cache file now for future use
result_tests(project, build)
# apply fixes
result_fixes(project, build)
# discover_dashboard(project, build, log)
log.write('Bake Succeeded!\n', prefix='### ')
# zip out folder with revision
param = {'login': project.login, 'id': project.id,
'revision': build.revision, 'build': build.id}
_out_src = os.path.join(app.config['DATA_ROOT'],
'%(login)s/%(id)s.out/%(build)s.%(revision)s' % param)
_out_url = app.config['DATA_URL'] + '%(login)s/%(id)s.out' % param
zipdir(_out_src, _out_url, log)
finally:
# save that project is done
build.is_done = True
db.session.add(build)
db.session.commit()
log.close()
def zipdir(path, url, log):
import zipfile
basename = os.path.basename(path)
zipfile_path = os.path.join(path, '..', '%s.zip' % basename)
zipf = zipfile.ZipFile(zipfile_path, 'w')
for root, dirs, files in os.walk(path):
for file in files:
arcpath = os.path.join(basename, root.replace(path, '').lstrip('/'), file)
zipf.write(os.path.join(root, file), arcpath)
log.write('add %s\n' % arcpath)
zipf.close()
log.write('### Link to archive [%s.zip](%s/%s.zip)\n' % (basename, url, basename))
def set_done(build):
""" Set done flag for build """
from .app import db
build.is_done = True
db.session.add(build)
db.session.commit()
| apache-2.0 | 3,740,855,287,434,305,000 | 39.371968 | 161 | 0.59337 | false |
mikeboers/strongtime | tests/test_times.py | 1 | 1687 | from . import *
class TestTimes(TestCase):
example_points = [float(x) for x in [-1, 0, 1, 2**32 - 1, 2**32 + 1]]
def test_cast_to_from_int(self):
x = Time(2.0)
y = float(x)
self.assertEqual(2.0, y)
def test_time_comparisons(self):
for x, y in itertools.permutations(self.example_points, 2):
for op in (getattr(operator, name) for name in ('lt', 'le', 'eq', 'ne', 'ge', 'gt')):
self.assertEqual(op(x, y), op(Time(x), Time(y)), '%s(%r, %r) != %s(Time(%r), Time(%r))' % (
op.__name__, x, y,
op.__name__, x, y,
))
def test_duration_comparisons(self):
for x, y in itertools.permutations(self.example_points, 2):
for op in (getattr(operator, name) for name in ('lt', 'le', 'eq', 'ne', 'ge', 'gt')):
self.assertEqual(op(x, y), op(Duration(x), Duration(y)), '%s(%r, %r) != %s(Duration(%r), Duration(%r))' % (
op.__name__, x, y,
op.__name__, x, y,
))
def test_approx_comparisons(self):
for cls in Time, Duration:
a = cls(0.1 + 0.2)
b = cls(0.3)
self.assertFalse(a == b)
self.assertTrue(a.almost_eq(b))
self.assertTrue(a.almost_lt(b))
self.assertTrue(a.almost_gt(b))
self.assertTrue(cls(1).almost_lt(2.0))
self.assertFalse(cls(1).almost_eq(2.0))
self.assertFalse(cls(1).almost_gt(2.0))
self.assertTrue(cls(2).almost_gt(1.0))
self.assertFalse(cls(2).almost_eq(1.0))
self.assertFalse(cls(2).almost_lt(1.0))
| bsd-3-clause | 5,049,231,663,557,329,000 | 34.893617 | 123 | 0.490219 | false |
vlegoff/tsunami | src/primaires/commerce/commandes/questeur/deposer.py | 1 | 4847 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'déposer' de la commande 'questeur'."""
from primaires.commerce.transaction import Transaction
from primaires.interpreteur.masque.parametre import Parametre
class PrmDeposer(Parametre):
"""Commande 'questeur déposer'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "déposer", "deposit")
self.schema = "<nombre> <nom_objet>"
self.aide_courte = "dépose de l'argent"
self.aide_longue = \
"Cette commande permet de déposer de l'argent dans les " \
"coffres d'un questeur. Vous devez vous trouvez dans la " \
"salle permettant l'opération et avoir l'argent désiré sur " \
"vous. Vous devez préciser d'abord le nombre de pièces " \
"à déposer et ensuite le nom de la pièce (|cmd|bronze|ff| " \
"par exemple). Notez que les questeurs se réservent un " \
"pourcentage plus ou moins important sur ce que vous leur " \
"confiez."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
nom_objet = self.noeud.get_masque("nom_objet")
nom_objet.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple.iter_objets_qtt()" \
", )"
nom_objet.proprietes["quantite"] = "True"
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
salle = personnage.salle
if not importeur.commerce.questeur_existe(salle):
personnage << "|err|Aucun questeur n'est présent là où " \
"vous vous trouvez.|ff|"
return
questeur = importeur.commerce.questeurs[salle]
somme = dic_masques["nombre"].nombre
objet = dic_masques["nom_objet"].objet
if not objet.est_de_type("argent"):
personnage << "|err|Ceci n'est pas de l'argent.|ff|"
return
prototype = objet
argent = Transaction.get_argent(personnage)
if prototype not in argent:
personnage << "|err|Vous ne possédez pas cela.|ff|" # improbable
return
if somme > argent[prototype]:
somme = argent[prototype]
if questeur.servant is None:
personnage << "|err|Personne n'est présent pour s'en charger.|ff|"
return
if prototype not in questeur.monnaies:
personnage << "|err|Vous ne pouvez déposer cette monnaie " \
"dans ce questeur.|ff|"
return
total = somme * prototype.m_valeur
if total < questeur.montant_min:
personnage << "|err|Vous ne pouvez déposer si peu.|ff|"
return
montant = questeur.deposer(personnage, prototype, somme)
if montant == 0:
personnage << "|err|Vous ne pouvez pas déposer cette somme.|ff|"
else:
personnage.envoyer("{{}} entrepose votre argent dans ses " \
"coffres et ajoute {} pièces de bronze sur votre " \
"compte.".format(montant), questeur.servant)
| bsd-3-clause | -7,925,732,464,028,695,000 | 43.183486 | 79 | 0.643065 | false |
haypo/fatoptimizer | fatoptimizer/tools.py | 1 | 20707 | import ast
import collections
import marshal
import sys
FLOAT_TYPES = (int, float)
COMPLEX_TYPES = FLOAT_TYPES + (complex,)
STR_TYPES = (bytes, str)
# Primitive Python types (not containers)
PRIMITIVE_TYPES = (type(None), bool, int, float, complex, bytes, str)
# Iterable types
ITERABLE_TYPES = (str, bytes, tuple, frozenset, list, set, dict)
# Maximum length of a "short" AST dump, limit used by error_what() and default
# limit of compact_dump()
COMPACT_DUMP_MAXLEN = 100
# Marker used for "not set" value, different than None
UNSET = object()
class OptimizerError(Exception):
pass
class OptimizerStep:
pass
def compact_ascii(value, maxlen=30):
text = ascii(value)
if len(text) > maxlen:
text = text[:maxlen] + '(...)'
return text
def compact_dump(node, maxlen=COMPACT_DUMP_MAXLEN):
if isinstance(node, list):
return repr([compact_dump(node_item, maxlen) for node_item in node])
node_repr = ast.dump(node)
if len(node_repr) > maxlen:
node_repr = node_repr[:maxlen] + '(...)'
return node_repr
# FIXME: replace it with FindNodes, see unroll.py
def _iter_all_ast(node):
yield node
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
for child in _iter_all_ast(item):
yield child
elif isinstance(value, ast.AST):
for child in _iter_all_ast(value):
yield child
def ast_contains(tree, obj_type):
if isinstance(tree, list):
return any(ast_contains(node, obj_type) for node in tree)
else:
return any(isinstance(node, obj_type) for node in _iter_all_ast(tree))
def copy_node(node):
new_node = type(node)()
for field, value in ast.iter_fields(node):
setattr(new_node, field, value)
for attr in node._attributes:
try:
value = getattr(node, attr)
except AttributeError:
pass
else:
setattr(new_node, attr, value)
return new_node
def get_constant_size(value):
return len(marshal.dumps(value))
def _is_constant(value):
if isinstance(value, (tuple, frozenset)):
return all(_is_constant(item) for item in value)
else:
return isinstance(value, PRIMITIVE_TYPES)
def _new_constant(node, value):
if isinstance(value, ast.AST):
# convenient shortcut: return the AST object unchanged
return value
# FIXME: test the config directly here?
if value is None:
new_node = ast.Constant(value=None)
elif isinstance(value, (bool, int, float, complex, str, bytes)):
new_node = ast.Constant(value=value)
elif isinstance(value, (tuple, frozenset)):
if not _is_constant(value):
raise TypeError("container items are not constant: %r" % (value,))
new_node = ast.Constant(value=value)
elif isinstance(value, list):
elts = [_new_constant(node, elt) for elt in value]
new_node = ast.List(elts=elts, ctx=ast.Load())
elif isinstance(value, dict):
keys = []
values = []
for key, value in value.items():
keys.append(_new_constant(node, key))
values.append(_new_constant(node, value))
new_node = ast.Dict(keys=keys, values=values, ctx=ast.Load())
elif isinstance(value, set):
elts = [_new_constant(node, elt) for elt in value]
new_node = ast.Set(elts=elts, ctx=ast.Load())
else:
raise TypeError("unknown type: %s" % type(value).__name__)
copy_lineno(node, new_node)
return new_node
# FIXME: use functools.singledispatch?
def _get_constant(node, *, types=None):
if isinstance(node, ast.Constant):
return node.value
if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.USub):
# FIXME: rely on constant folding for that!
value = get_constant(node.operand, types=types)
if value is UNSET:
return UNSET
return (-value)
return UNSET
def get_constant(node, *, types=None):
if types is not None:
value = _get_constant(node, types=types)
if not isinstance(value, types):
return UNSET
return value
else:
return _get_constant(node)
def _get_node_list(seq, literal=False):
values = []
for value in seq:
# only get constant items, otherwise optimizations will not produce
# a constant
if literal:
value = _get_literal(value)
else:
value = get_constant(value)
if value is UNSET:
return UNSET
values.append(value)
return values
def _get_literal(node, constant_items=False):
use_literal = (not constant_items)
value = get_constant(node)
if value is not UNSET:
return value
if isinstance(node, ast.Tuple) and use_literal:
elts = _get_node_list(node.elts, literal=True)
if elts is UNSET:
return UNSET
return list(elts)
if isinstance(node, ast.List):
elts = _get_node_list(node.elts, literal=use_literal)
if elts is UNSET:
return UNSET
return list(elts)
if isinstance(node, ast.Set):
# elements must be hashable
elts = _get_node_list(node.elts)
if elts is UNSET:
return UNSET
return set(elts)
if isinstance(node, ast.Dict):
# FIXME: this code is slow, only do it when get_literal() is
# called with types==dict (or dict in types)
        # keys must be hashable
keys = _get_node_list(node.keys)
if keys is UNSET:
return UNSET
values = _get_node_list(node.values, literal=use_literal)
if values is UNSET:
return UNSET
return dict(zip(keys, values))
return UNSET
def get_literal(node, *, constant_items=False, types=None):
if types is not None:
value = _get_literal(node, constant_items)
if not isinstance(value, types):
return UNSET
return value
else:
return _get_literal(node, constant_items)
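# Illustrative behaviour of the helpers above, assuming a Python version whose
# ast.parse() emits ast.Constant nodes for literals:
#
#   get_literal(ast.parse("[1, 2, 3]", mode="eval").body)  -> [1, 2, 3]
#   get_literal(ast.parse("x + 1", mode="eval").body)       -> UNSET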
def _set_lineno(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
for child in ast.iter_child_nodes(node):
_set_lineno(child, lineno, col_offset)
def copy_lineno(orig_node, new_node):
_set_lineno(new_node, orig_node.lineno, orig_node.col_offset)
def pretty_dump(node, annotate_fields=True, include_attributes=False,
lineno=False, indent=' '):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
Recipe written by Alex Leone, January 2010:
http://alexleone.blogspot.fr/2010/01/python-ast-pretty-printer.html
"""
def _format(node, level=0):
if isinstance(node, ast.AST):
fields = [(a, _format(b, level)) for a, b in ast.iter_fields(node)]
if include_attributes and node._attributes:
fields.extend([(a, _format(getattr(node, a), level))
for a in node._attributes])
if lineno and getattr(node, 'lineno', None):
fields.append(('lineno', str(node.lineno)))
return ''.join([
node.__class__.__name__,
'(',
', '.join(('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)),
')'])
elif isinstance(node, list):
lines = ['[']
lines.extend((indent * (level + 2) + _format(x, level + 2) + ','
for x in node))
if len(lines) > 1:
lines.append(indent * (level + 1) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(node)
if isinstance(node, list):
nodes = [_format(item, 1) for item in node]
nodes = (',\n' + indent).join(nodes)
spaces = ' ' * (len(indent) - 1)
return '[%s%s]' % (spaces, nodes)
if not isinstance(node, ast.AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
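# Small usage sketch; the exact output depends on the tree and is shown here
# only as an illustration:
#
#   tree = ast.parse("x = 1")
#   print(pretty_dump(tree, lineno=True))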
class NodeVisitorMeta(type):
def __new__(mcls, name, bases, namespace):
self_class = super().__new__(mcls, name, bases, namespace)
steps = [cls for cls in self_class.__mro__
if OptimizerStep in cls.__bases__]
# AST object name (ex: 'Name') => list of visitors
self_class._fullvisitors = collections.defaultdict(list)
self_class._visitors = collections.defaultdict(list)
for step in steps:
for name in dir(step):
if name.startswith('fullvisit_'):
key = name[10:]
func = getattr(step, name)
self_class._fullvisitors[key].append(func)
elif name.startswith('visit_'):
key = name[6:]
func = getattr(step, name)
self_class._visitors[key].append(func)
for name in dir(self_class):
if name.startswith('fullvisit_'):
key = name[10:]
func = getattr(self_class, name)
visitors = self_class._fullvisitors[key]
if func not in visitors:
visitors.append(func)
elif name.startswith('visit_'):
key = name[6:]
func = getattr(self_class, name)
visitors = self_class._visitors[key]
if func not in visitors:
visitors.append(func)
return self_class
class BaseNodeVisitor(metaclass=NodeVisitorMeta):
def __init__(self, filename):
self.filename = filename
def error_what(self, node):
return compact_dump(node, COMPACT_DUMP_MAXLEN)
def error_where(self, node):
where = self.filename
if hasattr(node, 'lineno'):
where = '%s:%s' % (where, node.lineno)
return where
def _call_visitor_method(self, visitor, node):
"""Call visitor(node).
Wrap exceptions to add more context on error.
        OptimizerError exceptions are not caught.
"""
try:
return visitor(self, node)
except (OptimizerError, RecursionError):
raise
except Exception as exc:
what = self.error_what(node)
where = self.error_where(node)
raise OptimizerError("error at %s on visiting %s: %s"
% (where, what, exc))
class NodeVisitor(BaseNodeVisitor, ast.NodeVisitor):
"""Node visitor.
Differences with ast.NodeVisitor:
    - Compute the mapping AST node name => list of methods when the class
      is instantiated
    - Support 'full' visitors (method name prefixed with 'fullvisit_') which
      skip the call to generic_visit() and so give full control
    - If an exception is raised, it is wrapped into a new OptimizerError
      which adds the location in the file (filename and line number)
      of the AST node currently being processed.
"""
def visit(self, node):
key = node.__class__.__name__
# "full" visitor calling generic_visit() internally?
if key in self._fullvisitors:
visitors = self._fullvisitors[key]
for visitor in visitors:
self._call_visitor_method(visitor, node)
else:
# visit attributes
new_node = self.generic_visit(node)
assert new_node is not UNSET
if new_node is not None:
node = new_node
if key in self._visitors:
# visit the node
visitors = self._visitors[key]
for visitor in visitors:
self._call_visitor_method(visitor, node)
class NodeTransformer(BaseNodeVisitor):
"""Node visitor.
Differences with ast.NodeTransformer:
- Create a new tree if at least one attribute is modified, so the input
tree is left unchanged
- Inherit advantages of NodeVisitor compared to ast.NodeVisitor
Creating a new tree is needed to be able to specialize a function:
basically, return [original_tree, specialized_tree].
"""
def optimize_node_list(self, node_list):
return node_list
def _visit_attr(self, parent_node, attr_name, node):
return self.visit(node)
def generic_visit(self, node, ignore_fields=None):
fields = {}
modified = False
if ignore_fields:
if isinstance(ignore_fields, str):
ignore_fields = {ignore_fields}
else:
ignore_fields = set(ignore_fields)
for field, value in ast.iter_fields(node):
if ignore_fields is not None and field in ignore_fields:
fields[field] = value
continue
if isinstance(value, list):
values = value
new_values = []
all_ast = True
for value in values:
if isinstance(value, ast.AST):
new_value = self._visit_attr(node, field, value)
modified |= (new_value != value)
if isinstance(new_value, list):
new_values.extend(new_value)
else:
new_values.append(new_value)
else:
# arguments.kw_defaults contains AST nodes
# (ex: Constant) and non-AST nodes (ex: None)
all_ast = False
new_values.append(value)
if all_ast:
value = new_values
new_values = self.optimize_node_list(new_values)
modified |= (new_values is not value)
value = new_values
elif isinstance(value, ast.AST):
old_value = value
value = self._visit_attr(node, field, value)
modified |= (value != old_value)
# Create a dictionary of fields used if any field is modified
# to create a new AST node
fields[field] = value
if modified:
# create a new AST node with the new fields
new_node = type(node)()
if 'lineno' in node._attributes:
copy_lineno(node, new_node)
for field, value in fields.items():
setattr(new_node, field, value)
return new_node
return node
def visit(self, node):
key = node.__class__.__name__
# "full" visitor calling generic_visit() internally?
if key in self._fullvisitors:
visitors = self._fullvisitors[key]
for visitor in visitors:
new_node = self._call_visitor_method(visitor, node)
if new_node is not None:
assert new_node is not UNSET
if type(new_node) != type(node):
# AST node type changed
return new_node
else:
node = new_node
else:
new_node = self.generic_visit(node)
assert new_node is not UNSET
if new_node is not None:
node = new_node
if key in self._visitors:
visitors = self._visitors[key]
for visitor in visitors:
new_node = self._call_visitor_method(visitor, node)
if new_node is not None:
assert new_node is not UNSET
if type(new_node) != type(node):
# AST node type changed
return new_node
else:
node = new_node
return node
def visit_node_list(self, node_list):
assert isinstance(node_list, list)
new_node_list = []
for node in node_list:
new_node = self.visit(node)
assert new_node is not None and new_node is not UNSET
if isinstance(new_node, list):
new_node_list.extend(new_node)
else:
new_node_list.append(new_node)
return new_node_list
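# A minimal illustrative subclass; this class is hypothetical and not part of
# the optimizer itself. It assumes string literals are represented as
# ast.Constant nodes:
#
#   class UpperCaseStrings(NodeTransformer):
#       def visit_Constant(self, node):
#           if isinstance(node.value, str):
#               return _new_constant(node, node.value.upper())
#           return node
#
# UpperCaseStrings("<filename>").visit(tree) returns a new tree and leaves the
# original tree unchanged.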
class RestrictToFunctionDefMixin:
# don't visit children of nodes having their own namespace
def fullvisit_DictComp(self, node):
return node
def fullvisit_ListComp(self, node):
return node
def fullvisit_SetComp(self, node):
return node
def fullvisit_GeneratorExp(self, node):
return node
def fullvisit_FunctionDef(self, node):
return node
def fullvisit_AsyncFunctionDef(self, node):
return node
def fullvisit_Lambda(self, node):
return node
def fullvisit_ClassDef(self, node):
return node
class FindStrVisitor(NodeVisitor, RestrictToFunctionDefMixin):
"""Find Str nodes.
Find all Str nodes to compute constants.
"""
def __init__(self, filename):
super().__init__(filename)
self.str_constants = set()
@classmethod
def from_node(cls, filename, node):
visitor = cls(filename)
visitor.visit(node)
return visitor
def visit_Str(self, node):
self.str_constants.add(node.s)
# FIXME: add optional RestrictToFunctionDefMixin, see UnrollStep, unroll.py
class FindNodes:
"""Find AST nodes."""
def __init__(self, ast_types, callback):
self.ast_types = ast_types
self.callback = callback
def visit(self, node):
if isinstance(node, self.ast_types):
res = self.callback(node)
if not res:
return False
return self.generic_visit(node)
def generic_visit(self, node):
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
res = self.visit(item)
if not res:
return False
elif isinstance(value, ast.AST):
res = self.visit(value)
if not res:
return False
return True
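# Illustrative use of FindNodes; the callback below is hypothetical. Returning
# a falsy value from the callback stops the walk below that node:
#
#   names = []
#   finder = FindNodes(ast.Name, lambda node: names.append(node.id) or True)
#   finder.visit(tree)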
class ReplaceVariable(NodeTransformer, RestrictToFunctionDefMixin):
def __init__(self, filename, name_mapping):
super().__init__(filename)
# Mapping (dict or whatever): old name => new name
self.name_mapping = name_mapping
def replace_func_def(self, node):
return self.generic_visit(node)
def visit_Name(self, node):
if node.id not in self.name_mapping:
return node
new_value = self.name_mapping[node.id]
return _new_constant(node, new_value)
def Call(**kw):
if sys.version_info >= (3, 5):
return ast.Call(**kw)
else:
return ast.Call(starargs=None, kwargs=None, **kw)
def get_starargs(callsite):
if not isinstance(callsite, ast.Call):
raise ValueError("ast.Call expected, got %s" % type(callsite))
if sys.version_info >= (3, 5):
for arg in callsite.args:
if isinstance(arg, ast.Starred):
return arg.value
return None
else:
return callsite.starargs
def get_keywords(callsite):
if not isinstance(callsite, ast.Call):
raise ValueError("ast.Call expected, got %s" % type(callsite))
keywords = callsite.keywords
if sys.version_info < (3, 5) and callsite.kwargs is not None:
keywords = keywords.copy()
keywords.append(ast.keyword(arg=None, value=callsite.kwargs))
return keywords
def get_varkeywords(callsite):
if not isinstance(callsite, ast.Call):
raise ValueError("ast.Call expected, got %s" % type(callsite))
if sys.version_info >= (3, 5):
for arg in callsite.keywords:
if arg.arg is None:
return arg.value
return None
else:
return callsite.kwargs
| mit | -3,791,908,399,633,060,400 | 31.456113 | 80 | 0.568455 | false |
e-koch/VLA_Lband | 17B-162/HI/analysis/convolve_and_match_aca.py | 1 | 3926 |
'''
Reproject onto the ACA CO(2-1) mosaic.
Since we have different versions of the full mosaic, we're
only going to make two version of the reprojected HI maps:
1) One to the ACA map without the highly asymmetric beam mosaics.
The HI map will not be convolved to a matching beam since they are already
quite similar. (But this can be checked later)
2) One to the full ACA map and convolved to its round beam of ~11 arcsec.
Also, these are just spatial reprojections, not spectral. So the CO
channel width won't matter.
'''
import os
from os.path import join as osjoin
from cube_analysis.reprojection import reproject_cube
from cube_analysis.run_pipe import run_pipeline
from paths import (seventeenB_HI_data_1kms_wGBT_path,
seventeenB_1kms_wGBT_HI_file_dict, aca_co21_data_path)
out_folder = seventeenB_HI_data_1kms_wGBT_path("aca_co21_match",
no_check=True)
if not os.path.exists(out_folder):
os.mkdir(out_folder)
run_noasymm = True
run_fullmos_round = True
if run_noasymm:
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".aca_excasymm_spatialmatch.fits"
targ_cube = aca_co21_data_path("full_mosaic/12CO21/M33_ACA_12CO21_2p6kms_excludeasymmbeams_commonbeam.image_K.fits")
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
targ_cube,
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=False,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
if run_fullmos_round:
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".aca_fullmosaic_spatialmatch.fits"
targ_cube = aca_co21_data_path("full_mosaic/12CO21/M33_ACA_12CO21_2p6kms_fullmosaic_roundbeam.image_K.fits")
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
targ_cube,
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=True,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
| mit | 4,816,399,364,986,148,000 | 35.018349 | 120 | 0.47784 | false |
Ledoux/ShareYourSystem | Pythonlogy/draft/Rater/03_ExampleCell.py | 1 | 1076 | #/###################/#
# Import modules
#
#ImportModules
import ShareYourSystem as SYS
#/###################/#
# Build the model
#
#Define
MyBrianer=SYS.BrianerClass(
).mapSet(
{
'-Neurongroups':{
'|Input':{
},
'|Population':SYS.BrianerClass(
).mapSet(
{
'RatingUnitsInt':3
}
)
}
}
).brian(
)
#/###################/#
# Print
#
#Define the AttestedStr
print('MyBrianer is ')
SYS._print(MyBrianer)
#/###################/#
# Do one simulation
#
MyBrianer.simulate(
500.
)
#/###################/#
# View
#
#MyBrianer['/-Neurongroups/|Population/-Traces/|*r/-Samples/|Default'].pyplot()
#MyBrianer.pyplot()
#SYS.matplotlib.pyplot.show()
print(MyBrianer['/-Neurongroups/|Population/-Traces/|*r/-Samples/|Default'].BrianedStateMonitorVariable)
"""
from matplotlib import pyplot
pyplot.figure()
M=MyBrianer['/-Traces/|*v/-Samples/|Default'].BrianedStateMonitorVariable
pyplot.plot(M.t, M.v.T)
pyplot.figure()
M=MyBrianer['/-Events/|Default'].BrianedSpikeMonitorVariable
pyplot.plot(M.t, M.i,'.')
pyplot.show()
""" | mit | -2,291,236,197,726,133,200 | 15.318182 | 104 | 0.601301 | false |
oudalab/phyllo | phyllo/extractors/ciceroDB.py | 1 | 25553 | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
def main():
# Case 1: Sections split by numbers (Roman or not) followed by a period, or bracketed. Subsections split by <p> tags
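    # For example (illustrative only, not actual scraped text), a Case 1
    # paragraph may read "II. Quamquam te, Marce fili..." or "[3] Sed inter...";
    # the leading numeral becomes the chapter and the remainder the passage.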
def parsecase1(ptags, c, colltitle, title, author, date, URL):
# ptags contains all <p> tags. c is the cursor object.
chapter = '-1'
verse = 1
# entry deletion is done in main()
for p in ptags:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                             'internal_navigation']:  # these are not part of the main text
continue
except:
pass
passage = ''
text = p.get_text().strip()
            # Skip empty paragraphs and the page header that starts with "Cicero".
if len(text) <= 0 or text.startswith('Cicero\n'):
continue
if url.endswith('nd3.shtml'): # isolate nd3 so we don't accidentally bug out the others
text = re.split('^([IVX]+)\.\s|^([0-9]+)\.\s|^\[([IVXL]+)\]|^\[([0-9]+)\]\s', text)
else:
text = re.split(
'^([IVX]+)\.\s|^([0-9]+)\.\s|^\[([IVXL]+)\]\s|^\[([0-9]+)\]\s|^\[\s([0-9]+)\]|^\[([0-9]+)',
text)
            # Drop the empty strings and Nones that re.split leaves behind;
            # removing items from a list while iterating over it skips elements.
            text = [element for element in text
                    if element is not None and element.strip() != '']
# The split should not alter sections with no prefixed roman numeral.
if len(text) > 1:
i = 0
while text[i] is None:
i+=1
if chapter == 'FRAGMENTA':
verse = text[i]
else:
chapter = text[i]
i+=1
while text[i] is None:
i+=1
passage = text[i].strip()
if chapter == 'FRAGMENTA':
pass
else:
verse = 1
else:
passage = text[0]
if chapter == 'FRAGMENTA':
pass
else:
verse+=1
if passage.startswith("Cicero"):
continue
if passage.startswith("FRAGMENTA"):
chapter = 'FRAGMENTA'
continue
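            # Column order assumed from the INSERT below (used the same way
            # throughout this module): id, collection title, work title,
            # language, author, date, chapter, verse, passage, source URL,
            # document type.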
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, passage.strip(), URL, 'prose'))
# Case 2: Sections are split by <p> tags and subsections by un/bracketed numbers.
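    # For example (illustrative only), a Case 2 paragraph may read
    # "[1] Quo usque tandem abutere... [2] O tempora, o mores...", where the
    # paragraph index becomes the chapter and the bracketed numbers the verses.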
def parsecase2(ptags, c, colltitle, title, author, date, URL):
# ptags contains all <p> tags. c is the cursor object.
chapter = 0
verse = '-1'
# entry deletion is done in main()
for p in ptags:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                             'internal_navigation']:  # these are not part of the main text
continue
except:
pass
passage = ''
text = p.get_text().strip()
if text.startswith("Cicero\n"):
continue
# Skip empty paragraphs.
if len(text) <= 0:
continue
text = re.split('([IVX]+)\.\s|([0-9]+)\.\s|\[([IVXL]+)\]\s|\[([0-9]+)\]\s', text)
            # Drop the empty strings and Nones that re.split leaves behind;
            # removing items from a list while iterating over it skips elements.
            text = [element for element in text
                    if element is not None and element.strip() != '']
chapter +=1
for count, item in enumerate(text):
if item is None:
continue
if item.isnumeric() or len(item) < 5:
verse = item
else:
passage = item
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, passage.strip(), URL, 'prose'))
# Case 3: Chapters separated by un/bracketed numbers, similarly to sentences.
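    # For example (illustrative only), a Case 3 page may interleave chapter and
    # sentence markers such as "XI. Quid enim? ... [12] Nihil agis...", so the
    # code below distinguishes chapters from verses by numeral style.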
def parsecase3(ptags, c, colltitle, title, author, date, URL):
chapter = '-1'
verse = -1
isnumeral = case3isNumeral(ptags)
for p in ptags:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                             'internal_navigation']:  # these are not part of the main text
continue
except:
pass
passage = ''
text = p.get_text().strip()
# Skip empty paragraphs.
if len(text) <= 0:
continue
text = re.split('([IVXL]+)\.\s|([0-9]+)\.\s|\[([IVXL]+)\]\s|\[([0-9]+)\]\s', text)
for item in text:
if item is None:
continue
item = item.strip()
if item.isspace() or item == '' or item.startswith("Cicero\n"):
continue
if item.isdigit() and not isnumeral:
chapter = item
elif item.isdigit() and isnumeral:
verse = item
elif len(item) < 5 and isnumeral:
chapter = item
else:
passage = item
# Remove brackets if they have been picked up.
if chapter.startswith('['):
chapter = chapter[:-1]
chapter = chapter[1:]
if passage.startswith('['):
passage = passage[:-1]
passage = passage[1:]
if passage == chapter:
continue
else: # chapter/passage correction
chaptertest = chapter + 'I'
chaptertest2 = chapter[:-2] + 'V'
chaptertest3 = chapter[:-3] + 'V'
chaptertest4 = chapter[:-4] + 'IX'
chaptertest5 = chapter[:-2] + 'X'
if (chapter == 'LXIX' or chapter == 'LXX') and passage == 'L':
continue
if passage == chaptertest or passage == chaptertest2 or passage == chaptertest3\
or passage == chaptertest4 or passage == chaptertest5:
chapter = passage
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, passage.strip(), URL, 'prose'))
# this function checks if the work uses Roman Numerals or numerical values for chapters.
def case3isNumeral(ptags):
for p in ptags:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                         'internal_navigation']:  # these are not part of the main text
continue
except:
pass
if p.get_text().strip() is None or p.get_text().strip() == '':
continue
firstp = p.get_text().strip()
break
firstp = re.split('^([IVX]+)\.\s|^([0-9]+)\.\s|^\[([IVXL]+)\]\s|^\[([0-9]+)\]\s', firstp)
if firstp[0] is not None:
if firstp[0].isdigit():
return False
else:
return True
elif firstp[1] is not None:
if firstp[1].isdigit():
return False
else:
return True
elif firstp[2] is not None:
if firstp[2].isdigit():
return False
else:
return True
def parsespecial(ptags, c, colltitle, title, author, date, URL):
chapter = -1
verse = -1
isnumeral = case3isNumeral(ptags)
for p in ptags:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                         'internal_navigation']:  # these are not part of the main text
continue
except:
pass
passage = ''
if title.endswith('FONTEIO') or title.endswith('FLACCO'):
potentialchap = p.find('I') # chapters italicized #always returns none for some reason.
text = p.get_text()
if text is None:
continue
text = text.strip()
if p.i is not None or p.center is not None: # this also is never true for some reason.
chapter = text
continue
if potentialchap is not None:
potentialchap = None
chapter = p.get_text().strip()
continue
else:
# read in verse number and passage
text = re.split('\[([0-9]+)\]', text)
for element in text:
if element is None:
continue
if element == '' or element.isspace():
continue
element = element.strip()
if element.isdigit():
verse = element
else:
passage = element
if passage.startswith("Cicero\n"):
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, passage, URL, 'prose'))
elif title.endswith('Paradoxica'):
potentialchap = p.find('B') # chapters bolded
if potentialchap is not None:
chapter = potentialchap.find(text=True)
continue
else:
# read in verse number and passage
text = p.get_text()
if text is None:
continue
text = re.split('\[([0-9]+)\]', text)
for element in text:
if element is None:
continue
if element == '' or element.isspace():
continue
element = element.strip()
if element.isdigit():
verse = element
else:
passage = element
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, passage, URL, 'prose'))
elif title.endswith('Partitione'):
# chapter split by paragraph, verse by speaker
text = p.get_text()
if text is None:
continue
text = text.strip()
text = re.split('\[([0-9]+)\]', text)
chaptext = ''
for item in text:
if item.isspace() or item == '' or item is None:
continue
elif item.isnumeric():
chapter = item
continue
else:
chaptext += item
if item.startswith('Cicero\n'):
continue
try:
chaptext = re.split('\[([0-9]+)\]|(CICERO FILIUS)\.\s|(CICERO PATER)\.\s|(C\.F\.)\s|(C\.P\.)\s', chaptext)
for piece in chaptext:
if piece is None:
continue
if piece == '' or piece.isspace():
continue
piece = piece.strip()
if piece.startswith('CI') or piece.startswith("C."):
verse = piece
continue
elif piece.isnumeric():
chapter = piece
else:
passage = piece
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, passage, URL, 'prose'))
except:
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, item.strip(), URL, 'prose'))
# This case parses poetry
def parsePoem(ptags, c, colltitle, title, author, date, url):
chapter = -1
verse = 0
for p in ptags:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                         'internal_navigation']:  # these are not part of the main text
continue
except:
pass
# find chapter
chapter_f = p.find('b')
if chapter_f is not None:
chapter = p.get_text().strip()
verse = 0
continue
else:
brtags = p.findAll('br')
verses = []
try:
try:
firstline = brtags[0].previous_sibling.strip()
except:
firstline = brtags[0].previous_sibling.previous_sibling.strip()
verses.append(firstline)
except:
pass
for br in brtags:
try:
text = br.next_sibling.next_sibling.strip()
except:
text = br.next_sibling.strip()
if text is None or text == '' or text.isspace():
continue
verses.append(text)
for v in verses:
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v, url, 'poetry'))
def getBooks(soup):
textsURL = []
# get links to books in the collection
for a in soup.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, a['href']))
# remove unnecessary URLs
while ("http://www.thelatinlibrary.com/index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/index.html")
textsURL.remove("http://www.thelatinlibrary.com/classics.html")
logger.info("\n".join(textsURL))
return textsURL
# for nested hyperlinked works
def altgetbook(soup):
textsURL = []
siteURL = "http://www.thelatinlibrary.com/cicero"
# get links to books in the collection
for a in soup.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, a['href']))
# remove unnecessary URLs
while ("http://www.thelatinlibrary.com/cicero//index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/cicero//index.html")
textsURL.remove("http://www.thelatinlibrary.com/cicero//cic.html")
textsURL.remove("http://www.thelatinlibrary.com/cicero//classics.html")
logger.info("\n".join(textsURL))
return textsURL
# main code
siteURL = 'http://www.thelatinlibrary.com'
ciceroURL = 'http://www.thelatinlibrary.com/cic.html'
ciceroOpen = urllib.request.urlopen(ciceroURL)
ciceroSOUP = BeautifulSoup(ciceroOpen, 'html5lib')
author = ciceroSOUP.title.string.strip()
colltitle = ciceroSOUP.h1.string.strip()
date = ciceroSOUP.h2.contents[0].strip().replace('(', '').replace(')', '').replace(u"\u2013", '-')
textsURL = getBooks(ciceroSOUP)
# pro ligario (case 1) needs to be moved to case 3
# pro Deitario is weird.
caseOneList = ['http://www.thelatinlibrary.com/cicero/quinc.shtml',
'http://www.thelatinlibrary.com/cicero/imp.shtml',
'http://www.thelatinlibrary.com/cicero/murena.shtml',
'http://www.thelatinlibrary.com/cicero/sulla.shtml',
'http://www.thelatinlibrary.com/cicero/arch.shtml',
'http://www.thelatinlibrary.com/cicero/postreditum.shtml',
'http://www.thelatinlibrary.com/cicero/postreditum2.shtml',
'http://www.thelatinlibrary.com/cicero/cael.shtml',
'http://www.thelatinlibrary.com/cicero/piso.shtml',
'http://www.thelatinlibrary.com/cicero/marc.shtml',
'http://www.thelatinlibrary.com/cicero/lig.shtml',
'http://www.thelatinlibrary.com/cicero/deio.shtml',
'http://www.thelatinlibrary.com/cicero/fato.shtml',
'http://www.thelatinlibrary.com/cicero/brut.shtml',
'http://www.thelatinlibrary.com/cicero/acad.shtml',
'http://www.thelatinlibrary.com/cicero/amic.shtml',
'http://www.thelatinlibrary.com/cicero/compet.shtml'
]
caseTwoList = ['http://www.thelatinlibrary.com/cicero/sex.rosc.shtml',
'http://www.thelatinlibrary.com/cicero/rosccom.shtml',
'http://www.thelatinlibrary.com/cicero/caecina.shtml',
'http://www.thelatinlibrary.com/cicero/rabirio.shtml',
'http://www.thelatinlibrary.com/cicero/domo.shtml',
'http://www.thelatinlibrary.com/cicero/haruspicum.shtml',
'http://www.thelatinlibrary.com/cicero/balbo.shtml',
'http://www.thelatinlibrary.com/cicero/milo.shtml',
'http://www.thelatinlibrary.com/cicero/topica.shtml',
'http://www.thelatinlibrary.com/cicero/scauro.shtml'
]
caseThreeList = ['http://www.thelatinlibrary.com/cicero/cluentio.shtml',
'http://www.thelatinlibrary.com/cicero/plancio.shtml',
'http://www.thelatinlibrary.com/cicero/sestio.shtml',
'http://www.thelatinlibrary.com/cicero/vatin.shtml',
'http://www.thelatinlibrary.com/cicero/prov.shtml',
                 'http://www.thelatinlibrary.com/cicero/rabiriopost.shtml',
'http://www.thelatinlibrary.com/cicero/optgen.shtml',
'http://www.thelatinlibrary.com/cicero/orator.shtml',
'http://www.thelatinlibrary.com/cicero/senectute.shtml'
]
specialcases = ['http://www.thelatinlibrary.com/cicero/flacco.shtml',
'http://www.thelatinlibrary.com/cicero/fonteio.shtml',
'http://www.thelatinlibrary.com/cicero/paradoxa.shtml',
'http://www.thelatinlibrary.com/cicero/partitione.shtml'
]
getURLList = ['http://www.thelatinlibrary.com/cicero/legagr.shtml',
'http://www.thelatinlibrary.com/cicero/ver.shtml',
'http://www.thelatinlibrary.com/cicero/cat.shtml',
'http://www.thelatinlibrary.com/cicero/phil.shtml',
'http://www.thelatinlibrary.com/cicero/inventione.shtml',
'http://www.thelatinlibrary.com/cicero/oratore.shtml',
'http://www.thelatinlibrary.com/cicero/leg.shtml',
'http://www.thelatinlibrary.com/cicero/fin.shtml',
'http://www.thelatinlibrary.com/cicero/tusc.shtml',
'http://www.thelatinlibrary.com/cicero/nd.shtml',
'http://www.thelatinlibrary.com/cicero/divinatione.shtml',
'http://www.thelatinlibrary.com/cicero/off.shtml'
]
#notes: repub uses parenthesis instead of brackets; apparently there are two repub.shtml
# leg is some kind of play
# tusc has numbers by itself.
# divinatione is truly a nightmare
# caecilium, philippic, oratore, milone and/or cluentio-oratio, plancio, sestio may or may not need a new case
poemList = ['http://www.thelatinlibrary.com/cicero/repub.shtml']
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author='Cicero'")
for url in getURLList:
openu = urllib.request.urlopen(url)
soup = BeautifulSoup(openu, 'html5lib')
urltexts = getBooks(soup)
for urlt in urltexts:
            openurl = urllib.request.urlopen(urlt)
textsoup = BeautifulSoup(openurl, 'html5lib')
try:
title = textsoup.title.string.split(':')[1].strip()
except:
title = textsoup.title.string.strip()
getp = textsoup.find_all('p')
# url refers to the url of the collection's collection. urlt is the url that the text/passages are found
if url.endswith("legagr.shtml") or urlt.endswith("caecilium.shtml") or url.endswith("cat.shtml") \
or url.endswith('phil.shtml') or url.endswith('oratore.shtml') or urlt.endswith("fin1.shtml") \
or urlt.endswith('fin2.shtml') or urlt.endswith('fin3.shtml') \
or (url.endswith('nd.shtml') and not urlt.endswith('nd3.shtml')):
parsecase2(getp, c, colltitle, title, author, date, urlt)
elif (url.endswith("ver.shtml") and not urlt.endswith('caecilium.shtml')) \
or url.endswith('inventione.shtml') or urlt.endswith('nd3.shtml') or url.endswith('off.shtml') \
or url.endswith('inventione.shtml') or url.endswith('leg.shtml') or url.endswith('tusc.shtml'):
parsecase1(getp, c, colltitle, title, author, date, urlt)
elif urlt.endswith('fin4.shtml') or urlt.endswith('fin5.shtml') or url.endswith('divinatione.shtml'):
parsecase3(getp, c, colltitle, title, author, date, urlt)
for url in textsURL:
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
try:
title = textsoup.title.string.split(':')[1].strip()
except:
title = textsoup.title.string.strip()
getp = textsoup.find_all('p')
if url in caseOneList:
parsecase1(getp, c, colltitle, title, author, date, url)
elif url in caseTwoList:
parsecase2(getp, c, colltitle, title, author, date, url)
elif url in caseThreeList:
parsecase3(getp, c, colltitle, title, author, date, url)
elif url in specialcases:
parsespecial(getp, c, colltitle, title, author, date, url)
elif url in poemList:
parsePoem(getp, c, colltitle, title, author, date, url)
logger.info("Program runs successfully.")
if __name__ == '__main__':
main()
| apache-2.0 | -8,415,987,638,483,457,000 | 46.858509 | 134 | 0.465855 | false |
mdepasca/miniature-adventure | miniature_adventure.py | 1 | 51544 | import argparse
import os
from os import path
import subprocess
import sys
import socket
import time
import warnings
from math import floor
import gc # garbage collector
import smtplib
import numpy as np
from scipy import signal, linalg
from matplotlib import pyplot as plt
import GPy
import classes as cls
import utilities as util
from utilities import bcolors
# import rpy2.robjects as ro
# from rpy2.robjects.packages import importr
# from rpy2.robjects.numpy2ri import numpy2ri
# # Activate automatic conversion of ndarray to R objects
# ro.conversion.py2ri = numpy2ri
from progressbar import ProgressBar, SimpleProgress, ETA, Percentage, Bar, \
AnimatedMarker, Timer, Counter
if __name__ == "__main__":
# gc.set_debug(gc.DEBUG_LEAK)
# Parsing input from command line
parser = argparse.ArgumentParser(
description = "SN lightcurve fitter and classifier.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
actionGroup = parser.add_argument_group('ACTION')
inputGroup = parser.add_argument_group('INPUT')
"""
ACTION OPTIONS
----------------------------------------------------------------------------
"""
actionGroup.add_argument(
"--fit", dest="fit",
action="store_true",
help="Fit lightcurves with Gaussian processes method."
)
actionGroup.add_argument(
'--prior', dest='prior',
action='store_true', help='Use priors in GP regression.'
)
actionGroup.add_argument(
'--length', dest='testLength',
action='store_true',
help='Set length scale hyper parameter to random value to ease \
optimization.'
)
actionGroup.add_argument(
"--cross-correlation", dest="crossCor",
action="store_true",
help="Performs cross correlation between non peaked lcs (with maximum in \
r-band at one of the MJD extremes) and all the peaked lcs. Produces \
an estimate for maximum in r-band. VERY TIME CONSUMING."
)
actionGroup.add_argument(
"--distance-matrix", dest="distMatrix",
action="store_true",
help="Calculate distance between fitted lightcurves in same band. \
            It is used to build a diffusion map (see Coifman & Lafon (2006) \
and Lafon & Lee (2006)).")
actionGroup.add_argument(
"--diffuse", dest="diffuse",
action="store_true",
help="Computes the diffusion map coefficients. Run together or after \
--distance-matrix option. Uses `diffusionMap` R package developed \
by Joseph Richards.")
actionGroup.add_argument(
"--train", dest="train",
action="store_true",
help="Train the classifier - Random Forest. Uses `randomForest` R \
package.")
actionGroup.add_argument(
"--classify", dest="classify",
action="store_true")
actionGroup.add_argument(
"--plot", dest="plot",
action="store_true",
help="Save on `pdf` file the plot of fitting curve over data.")
actionGroup.add_argument(
'--nice-plots', dest='nicePlots',
action='store_true',
help='Produces plot suitable for publication (pdf, 300dpi).'
)
"""-------------------------------------------------------------------------
INPUT OPTIONS
----------------------------------------------------------------------------
"""
inputGroup.add_argument(
"--data-directory", dest="dirData",
default="train_data" + os.sep + "SIMGEN_PUBLIC_DES",
help="Path to directory containing training data.")
inputGroup.add_argument(
"--fit-directory", dest="dirFit",
default="results" + os.sep + "FIT",
help="Path to directory containing fitted data.")
# the use of this keyword is developed in dev_magnitudes branch
inputGroup.add_argument(
"--mag", dest="mag",
action="store_true",
help="Reads in magnitudes from file."
)
inputGroup.add_argument(
"--fit-file", dest="fitFile",
help="Path to file in which to dump fitting results.")
inputGroup.add_argument(
"-f", "--file",
help="")
inputGroup.add_argument(
"-c", "--candidate", dest="cand",
default=-1, type=int,
help="ID of a candidate."
)
inputGroup.add_argument(
"--all-bands", dest="allBands",
action="store_true",
help="Plot all bands --nice-plots option."
)
inputGroup.add_argument(
"-b", "--band", dest="band", default='r',
help="Which band to plot with --nice-plots.")
inputGroup.add_argument(
"--nBands", dest="nBands",
default=-1, type=int,
help="Number of bands to plot with --nice-plots.")
inputGroup.add_argument(
'--limits', nargs=2, dest='limits',
default=[0, 5], type=int,
        help='Start and end indices for fitting and cross-correlation.'
)
inputGroup.add_argument(
'--offset', '-o', dest='offset',
default=0, type=int,
        help='Offset for columns with respect to limits (which refer to rows).'
)
inputGroup.add_argument(
'--plot-offset', dest='plotOffset',
default=-1, type=int,
help='Offset in index to begin light curves plotting from.'
)
"""-------------------------------------------------------------------------
"""
args = parser.parse_args()
bands = ['g', 'r', 'i', 'z']
else:
pass
if __name__ == "__main__":
# os.system("clear")
fromAddress = '[email protected]'
toAddress = '[email protected]'
sent = False
indent = " "
resDir = "results"+os.sep
peakIdx = np.empty(0)
nopeakIdx = np.empty(0)
print bcolors.bldpur
print indent + "* * * * * * * * * * * * * * *"
print indent + "* Miniature Adventure *"
print indent + "* ------------------- *"
print indent + "* lightcurves fitting *"
print indent + "* and *"
print indent + "* SN classification *"
print indent + "* * * * * * * * * * * * * * *"
print bcolors.txtrst
if args.dirFit == 'results/FIT':
        yesno = str(raw_input(indent + 'Set fit directory other than default (' + \
parser.get_default('dirFit') + ')? (y/n)'))
if yesno == 'y':
args.dirFit = str(raw_input(indent + 'Specify new directory '\
+'for fit: '))
if args.dirData[-1] != os.sep:
args.dirData += os.sep
if args.dirFit[-1] != os.sep:
args.dirFit += os.sep
print indent + 'Fit directory will be: ' + path.abspath(args.dirFit)
if not os.path.exists(path.abspath(args.dirFit)):
os.makedirs(path.abspath(args.dirFit))
start_time = time.time()
"""
Get list of files in data directory and fit directory
----------------------------------------------------------------------------
"""
p = subprocess.Popen("ls *SN*.DAT", shell=True, stdout=subprocess.PIPE,
cwd=args.dirData)
lsDirData = p.stdout.read()
lsDirData = lsDirData.split('\n')
lsDirData.sort()
lsDirData.remove('')
p = subprocess.Popen("ls *SN*.DAT", shell=True, stdout=subprocess.PIPE,
cwd=args.dirFit)
lsDirFit = p.stdout.read()
lsDirFit = lsDirFit.split('\n')
lsDirFit.sort()
lsDirFit.remove('')
"""-------------------------------------------------------------------------
"""
"""
PERFORMS LCs FITTING
"""
if args.fit:
if args.limits[1] > len(lsDirData):
print indent + \
"WARNING: upper limit > than the number of files. Corrected.\n"
args.limits[1] = len(lsDirData)
filePath = args.dirFit + 'PEAKED_{:<}_{:<5.3f}.LIST'.format(
socket.gethostname(), time.time()
)
fPeaked = open(filePath, 'w')
filePath = args.dirFit + 'NOPEAKED_{:<}_{:<5.3f}.LIST'.format(
socket.gethostname(), time.time()
)
fNopeaked = open(filePath, 'w')
# Relevant input data
print "\n" + indent + "[1] * Fit lightcurves ..."
print "\n" + indent + "Index interval [{:<},{:<})".format(
args.limits[0], args.limits[1]
)
print "\n" + indent + \
"Data directory: " + os.curdir + args.dirData
print "\n" + indent \
+ "Number of candidates = {:<d}".format(len(lsDirData))
"""
GP kernel specification
------------------------------------------------------------------------
"""
# kern = GPy.kern.RatQuad(1)
kern = GPy.kern.RBF(1)
# kern = GPy.kern.Matern32(1)
# kern = GPy.kern.Matern52(1)
"""---------------------------------------------------------------------
"""
print "\n" + indent \
+ "Data will be smoothed using GP kernel " + kern.name.upper()
print '\n' + indent + \
"INDEX | SN ID | BAND"
for i in range(args.limits[0], args.limits[1]):
filePath = path.splitext(lsDirData[i])[0] + "_FIT.DAT"
"""
Check if file with fit results already exits. If positive skip
to next loop iteration.
"""
if filePath in lsDirFit:
continue
candidate = util.get_sn_from_file(
args.dirData + lsDirData[i],
args.mag
)
# Creating SupernovaFit object
candidateFit = cls.SupernovaFit(candidate, kern.name)
for b in candidate.lcsDict.keys():
# Correcting for time dilution
epoch = util.time_correct(
candidate.lcsDict[b].mjd,
candidate.zSpec if candidate.zSpec else candidate.zPhotHost
)
# Correcting for absorption
flux = util.correct_for_absorption(
candidate.lcsDict[b].flux,
candidate.MWEBV, b
)
errFlux = candidate.lcsDict[b].fluxErr
if (candidate.lcsDict[b].badCurve) or (len(flux) <= 3):
candidateFit.lcsDict[b].badCurve = True
print indent + bcolors.FAIL + \
"{:<} {:<} {:<} Bad Curve".format(i, candidate.SNID, b) + \
bcolors.txtrst
"""
>>> if 'break' instead of 'continue' the candidate would not be
>>> processed and the further code would be easier (no double
>>> checks both on data and fit).
"""
continue
"""
Fitting Lightcurve
----------------------------------------------------------------
"""
try:
predMjd, predFlux, predErr, GPModel = util.gp_fit(
epoch, flux, errFlux,
kern, n_restarts=10,
parallel=False,
test_length=args.testLength,
test_prior=args.prior)
except linalg.LinAlgError as e:
if sent == False:
server = smtplib.SMTP('mailauth.oapd.inaf.it',587)
server.starttls()
server.login('marco.depascale', 'M@p3d_8$')
msg = 'Subject: LinAlgError\n\n' + \
'index = {:<d}, SNID = {:<d}'.format(i, candidate.SNID)
server.sendmail(fromAddress, toAddress, msg)
server.close()
sent = True
"""
if LinAlgError light curve won't be saved.
"""
print indent + \
"{:>5d} {:>5d} {:>4s} > FAIL".format(
i, candidate.SNID, b
) + bcolors.FAIL + ' LinAlgError' + bcolors.txtrst
candidateFit.r.badCurve = True
raise ValueError(
'LinAlgError from GPy. Mail sent to {:s}'.format(
toAddress
)
)
else:
candidateFit.set_lightcurve(b, predMjd, predFlux, predErr)
print indent + bcolors.OKGREEN + \
"{:>5d} {:>5d} {:>4s} > DONE".format(
i, candidate.SNID, b
) + bcolors.txtrst
"""-------------------------------------------------------------
"""
else:
"""
Saving fit results on file
----------------------------------------------------------------
"""
if (candidateFit.r.badCurve == False):
filePath = args.dirFit + \
path.splitext(lsDirData[i])[0] + "_FIT.DAT"
candidateFit.save_on_txt(filePath)
print indent + 'file saved!'
if candidateFit.peaked:
peakIdx = np.append(peakIdx, i)
fPeaked.write('{:<}\n'.format(filePath))
else:
nopeakIdx = np.append(nopeakIdx, i)
fNopeaked.write('{:<}\n'.format(filePath))
"""-------------------------------------------------------------
"""
gc.collect()
# free memory
gc.collect()
fPeaked.close()
fNopeaked.close()
filePath = 'peaked_{:<}_{:<5.3f}.dat'.format(
socket.gethostname(), time.time()
)
np.savetxt(args.dirFit + filePath, peakIdx,
header='Indexes of fitted LCs with r maximum.', fmt='%d')
filePath = args.dirFit + 'nopeaked_{:<}_{:<5.3f}.dat'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, nopeakIdx,
header='Indexes of fitted LCs without an r maximum.', fmt='%d')
gc.collect()
"""#########################################################################
############################################################################
PERFORMING CROSS-CORRELATION
############################################################################
############################################################################
"""
if args.crossCor:
"""
        Files are sorted by SNID.
        In the following, peakIdx and nopeakIdx contain indices referring to the
        full list of files. For this reason the list of files is queried on
        dirData. It is then filtered using the above variables.
"""
print "\n" + indent + bcolors.undwht + \
"(*) Calculate cross-correlation of not peaked- with " + \
"peaked-lcs ..." + bcolors.txtrst
print "\n" + indent + "Interval [{:<},{:<})".format(args.limits[0], args.limits[1])
filePath = args.dirFit + 'PEAKED.LIST'
if path.exists(filePath) == False:
# create the file concatenating existing partial files
print '{:<s} created!'.format(filePath)
peakedFileList = util.list_files(args.dirFit+'PEAKED*.LIST')
util.concat_files(peakedFileList, filePath)
peakList = np.loadtxt(filePath, dtype=np.str)
filePath = args.dirFit + 'NOPEAKED.LIST'
if path.exists(filePath) == False:
# create the file from existing partial files
print '{:<s} created!'.format(filePath)
noPeakedFileList = util.list_files(args.dirFit+'NOPEAKED*.LIST')
util.concat_files(noPeakedFileList, filePath)
tmp = np.loadtxt(filePath, dtype=np.str)
if tmp.size == 1:
nopeakList = np.asarray([tmp])
else:
nopeakList = np.asarray(tmp)
if args.limits[1] > len(nopeakList):
args.limits[1] = len(nopeakList)
#
# filePath = 'repeats.txt'
# repeats = np.loadtxt(args.dirFit + filePath, dtype=np.str)
filePath = 'cross_correlated_files_{:<5.3f}.dat'.format(time.time())
reWrite = open(args.dirFit + filePath, 'w')
prog = 0
for i in nopeakList[args.limits[0]:args.limits[1]]:
z = 0 # goes on peakIdx to index the progress bar
"""
READ DATA FROM NOT-PEAKED FILE
creates a Supernova object
"""
filePath = i
try:
tmpSN = util.get_sn_from_file(filePath)
print "Progress: {:<d} -- {:<}".format(prog, filePath)
prog += 1
ccIndent = "ID:{: ^7d}".format(tmpSN.SNID)
widgets = [ccIndent, Percentage(), ' ',
Bar(marker='#',left='[',right=']'),
' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(peakList)).start()
except IOError:
print "IOError: {:<}".format(filePath)
continue
if tmpSN.r.badCurve:
print "IOError (BAD r curve): {:<}".format(filePath)
continue
"""
create SupernovaFit object
"""
notPeaked = cls.SupernovaFit(tmpSN)
for l in tmpSN.lcsDict.keys():
notPeaked.set_lightcurve(l,
tmpSN.lcsDict[l].mjd,
tmpSN.lcsDict[l].flux,
tmpSN.lcsDict[l].fluxErr
)
"""
Shifting mjds in not-peaked
"""
notPeaked.shift_mjds()
ccMax = list()#np.zeros(peakIdx.size)
k = 0 # goes on ccMax
# for j in peakIdx:
for j in peakList:
"""
READ DATA FROM PEAKED FILE
"""
# if j in repeats:
# print indent + bcolors.WARNING + \
# 'File appears also in unpeaked list: ignoring it.' + \
# bcolors.txtrst
# continue
filePath = j#args.dirFit + lsDirData[j][0:12] + '_FIT.DAT'
try:
tmpSN = util.get_sn_from_file(filePath)
except IOError:
print indent + bcolors.WARNING + \
                    'File appears also in peaked list but it does not exist: ignoring it.' + \
bcolors.txtrst
continue
if tmpSN.r.badCurve:
print indent + bcolors.WARNING + \
'Peaked file has bad r curve: ignoring it.' + \
bcolors.txtrst
continue
peaked = cls.SupernovaFit(tmpSN)
for l in tmpSN.lcsDict.keys():
peaked.set_lightcurve(l,
tmpSN.lcsDict[l].mjd,
tmpSN.lcsDict[l].flux,
tmpSN.lcsDict[l].fluxErr
)
"""
Shifting mjds in peaked
"""
peaked.shift_mjds()
"""
Performing cross-correlation
"""
ycorr = signal.correlate(
notPeaked.normalized_flux('r'),
peaked.normalized_flux('r')
)
xcorr = np.arange(ycorr.size)
lags = xcorr - (
len(notPeaked.normalized_flux('r'))-1
)
distancePerLag = (
notPeaked.r.shiftedMjd[-1] - \
notPeaked.r.shiftedMjd[0])/float(
len(notPeaked.r.shiftedMjd)
)
offsets = -lags*distancePerLag
# ccMax[k] = offsets[np.argmax(ycorr)]
ccMax.append(offsets[np.argmax(ycorr)])
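                # Worked example of the lag-to-offset conversion above (numbers
                # are illustrative only): with 100 fitted epochs spanning 99
                # days, distancePerLag is about 1 day per lag; a correlation
                # peak at lag = -12 then gives offset = -(-12)*1 = +12 days,
                # which is the estimate appended to ccMax and later averaged
                # into ccMjdMaxFlux.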
# k += 1
pbar.update(z+1)
z += 1
# gc.collect()
notPeaked.ccMjdMaxFlux = np.mean(ccMax)#ccMax.mean()
"""
re-writing file of not peaked lc to include information on maximum
position from CC.
"""
filePath = i#args.dirFit + lsDirData[i][0:12] + '_FIT.DAT'
notPeaked.save_on_txt(filePath)
reWrite.write(filePath+'\n')
pbar.finish()
# gc.collect()
reWrite.close()
print 'CC ended!'
gc.collect()
"""
CALCULATING DISTANCE MATRIX
needs:
- args.distMatrix
- args.limits
- args.offset
- args.dirFit
"""
if args.distMatrix:
if not os.path.exists(path.abspath(args.dirFit + 'distance_matrix' + os.sep)):
os.makedirs(path.abspath(args.dirFit + 'distance_matrix' + os.sep))
"""
Calculate distance between fitted lightcurves.
Distance values are saved in a R matrix. This will be used by the R
package `diffusionMap` through rpy2 Python package.
"""
j_offset = args.offset
i_start = args.limits[0]
i_end = args.limits[1]
j_start = i_start + j_offset
j_end = (i_end + j_offset) if (i_end+j_offset<=len(lsDirFit)) else len(lsDirFit)
print "\n" + indent + bcolors.undwht + \
"(*) Calculate distances between lightcurves ..." + \
bcolors.txtrst
print indent + "Rows in [{:<d}, {:<d})".format(i_start, i_end)
print indent + "Cols in [{:<d}, {:<d})".format(j_start, j_end)
"""
setting value for big distance
"""
distFlag = 5
missColCount = 0
missRowlist = list()
bandDict = {
'g':0,
'r':1,
'i':2,
'z':3
}
widgets = [indent, 'Processing:', ' ', Counter(), ' ',
AnimatedMarker(), indent, Timer()]
# creating list of 4 lists
distList = list([[], [], [], []])
nCols = 0
# distList = np.zeros((4,
# len(lsDirFit[i_start:i_end]), len(lsDirFit[i_start:i_end])),
# dtype=float
# )
pbar = ProgressBar(widgets=widgets, maxval=(i_end-i_start)).start()
for i in range(i_start, i_end):
missColCount = 0
"""
Reading in i-candidate
"""
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[i]
)
if tmpSN.r.badCurve:
# nothing has to be added to the distance matrix. Print and
#
            # continue to next object
# print "{:<} Has bad curve in r band - ".format(lsDirFit[i]) + \
# "THE FILE HAS TO BE DELETED" +\
# " indices {:<d}".format(i)
missRowlist.append(i)
continue
iCandidate = cls.SupernovaFit(tmpSN)
for b in tmpSN.lcsDict.keys():
# set_lightcurve set also if the lc is peaked or not
iCandidate.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr
)
"""
Shifting mjds in i-candidate
"""
iCandidate.shift_mjds()
if iCandidate.peaked == False:
# print i, iCandidate.SNID
"""
keeping to perform check with other non peaked LC
"""
iElMax = iCandidate.r.shiftedMjd.index(0.)
"""
correcting using CC results
"""
for b in bands:
iCandidate.lcsDict[b].shiftedMjd = [
iCandidate.lcsDict[b].shiftedMjd[l] +
iCandidate.ccMjdMaxFlux for l in range(len(
iCandidate.lcsDict[b].shiftedMjd
))
]
iElSize = iCandidate.r.size
iPeaked = iCandidate.peaked
for j in range(j_start, j_end):
"""
if this SN has badCurve in this band it will be far from all
the others by default.
here will save time from not opening all the other files
to create new SupernovaFit objcets.
"""
if j == i:
# filling elements on the distance matrix diagonal
for b in bands:
# adding one element to each sub list in distList
distList[bandDict[b]].append(0.)
# distList[bandDict[b], i-i_start, j-j_start] = 0.
continue
if j < i:
# filling matrix elements below the diagonal
if j in missRowlist:
missColCount += 1
continue
for b in bands:
# appending the symmetric element in the list: i-i_start
distList[bandDict[b]].append(
distList[bandDict[b]][
(j-j_start-missColCount)*nCols+\
i-i_start-len(missRowlist)
])
# distList[bandDict[b], i-i_start, j-j_start] = \
# distList[bandDict[b], j-j_start, i-i_start]
continue # jump to the next iteration of the loop
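                # The flat lists in distList emulate a matrix stored row by row:
                # ignoring skipped (bad-curve) candidates, element (row, col)
                # lives at index row*nCols + col. Illustrative example: with
                # nCols = 10, no skipped candidates, i-i_start = 7 and
                # j-j_start = 2, the symmetric element is read from index
                # 2*10 + 7 = 27, i.e. distance(2, 7) computed in an earlier row.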
"""
Reading in j-candidate
"""
try:
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[j]
)
except IndexError:
print j, len(lsDirFit)
raise IndexError("list index out of range")
if tmpSN.r.badCurve:
# nothing has to be added to the distance matrix. Print and
#
                # continue to next object
# print "{:<} Has bad curve in r band -".format(lsDirFit[j])+\
# " THE FILE HAS TO BE DELETED:" +\
# " indices {:<d}, {:<d}".format(i, j)
continue
jCandidate = cls.SupernovaFit(tmpSN)
for b in tmpSN.lcsDict.keys():
jCandidate.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr
)
"""
Shifting mjds in j-candidate
"""
jCandidate.shift_mjds()
if jCandidate.peaked == False:
"""
keeping to perform check with other non peaked LC
"""
jElMax = jCandidate.r.shiftedMjd.index(0.)
"""
correcting using CC results
"""
for b in bands:
jCandidate.lcsDict[b].shiftedMjd = [
jCandidate.lcsDict[b].shiftedMjd[l] +
jCandidate.ccMjdMaxFlux for l in range(len(
jCandidate.lcsDict[b].shiftedMjd
))
]
jElSize = jCandidate.r.size
for b in bands:
if not jCandidate.lcsDict[b].badCurve \
and not iCandidate.lcsDict[b].badCurve:
distList[bandDict[b]].append(
iCandidate.get_distance(jCandidate, b)
)
# distList[bandDict[b], i-i_start, j-j_start] = \
# iCandidate.get_distance(jCandidate, b)
else:
# in case of bad curve
"""
This works like a flag. These elements will be set
equal to a neutral value (the mean of the other)
"""
distList[bandDict[b]].append(distFlag)
# distList[bandDict[b], i-i_start, j-j_start] = distFlag
"""
# >>> !! Checking for i being equal to its beginning value in the loop
does not take into account the
possibility of the first SN having a bad r curve, in which case
the loop will never arrive here, since it is reset by a continue.
            Checking on nCols being still equal to zero is much better, since it is
            the only way to verify that the first loop has been completed.
"""
# if (i == i_start):
if (nCols == 0):
nCols = len(distList[0])
print 'nCols updated! {:<d}'.format(nCols)
pbar.update(i-i_start+1)
pbar.finish()
# del iCandidate
# del jCandidate
# del tmpSN
gc.collect()
distMatrix = np.zeros((4,
len(distList[0])/nCols, nCols),
dtype=float
)
for b in bands:
distMatrix[bandDict[b]] = np.reshape(
distList[bandDict[b]], (len(distList[bandDict[b]])/nCols, nCols)
)
"""
distList is no more used from now on. I delete it to save memory
"""
del distList
gc.collect()
# fixing flagged elements
# raise SystemExit
if distMatrix[0, distMatrix[0] == distFlag].size > 0:
ind = np.where(distMatrix[0] == distFlag)
distMatrix[0, ind[0], ind[1]] = np.add(
np.add(
distMatrix[1, ind[0], ind[1]],
distMatrix[2, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[1, distMatrix[1] == distFlag].size > 0:
ind = np.where(distMatrix[1] == distFlag)
# distMatrix[1, ind[0], ind[1]] = distMatrix[1,:,:].max()
distMatrix[1, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[2, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[2, distMatrix[2] == distFlag].size > 0:
ind = np.where(distMatrix[2] == distFlag)
# distMatrix[2, ind[0], ind[1]] = distMatrix[2].max()
distMatrix[2, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[1, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[3, distMatrix[3] == distFlag].size > 0:
ind = np.where(distMatrix[3] == distFlag)
# distMatrix[3, ind[0], ind[1]] = distMatrix[3].max()
distMatrix[3, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[1, ind[0], ind[1]]
),
distMatrix[2, ind[0], ind[1]]
)/3.
distMatrixSum = np.sum(distMatrix, 0)
"""
Saving on text files
"""
fileHeader = "distMatrix[{:<d}:{:<d},{:<d}:{:<d}] --- ".format(
i_start, i_end, j_start, j_end
) + \
"Created by {:<}".format(socket.gethostname())
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_Sum_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrixSum, fmt='%6.4f', header=fileHeader)
del distMatrixSum
gc.collect()
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_g_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[0], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_r_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[1], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_i_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[2], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_z_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[3], fmt='%6.4f', header=fileHeader)
del distMatrix
gc.collect()
"""
CALCULATING DIFFUSION MAP
"""
if args.diffuse:
if 'diffusionMap' not in globals():
diffusionMap = importr('diffusionMap')
ndim = ro.r.attributes(Rmatrix)[0][0]
dmap = diffusionMap.diffuse(Rmatrix, neigen=5)
util.dump_pkl('diffusion_map.pkl', dmap)
"""
TRAINING RANDOM FOREST CLASSIFIER
"""
if args.train:
randomForest = importr('randomForest')
if 'dmap' not in globals():
print indent + 'Loading catalog from dump file ...'
dmap = util.open_pkl('tmp_diffusion_map.pkl')
dmap_rf = randomForest.randomForest(dmap)
"""
PLOT OBSERVATION AND FIT
--plot
"""
if args.plot:
timeMark = time.time()
"""
getting file list from directory
File will be sorted by SNID
"""
print indent + 'Plotting ...'
'''
Column index is always increasing, no check on its value.
'''
nrows = 5
ncols = 5
"""
        If plotOffset is not specified, get a proper random value
"""
if (args.plotOffset == -1):
np.random.RandomState
offset = int(np.random.uniform(low=0, high=len(lsDirFit)-nrows*ncols))
else:
offset = args.plotOffset
fig_g, ax_g = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_r, ax_r = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_i, ax_i = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_z, ax_z = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
# tight_layout=True
)
dictFig = {'g':fig_g,
'r':fig_r,
'i':fig_i,
'z':fig_z}
dictAx = {'g':ax_g,
'r':ax_r,
'i':ax_i,
'z':ax_z}
r = {'g':0,
'r':0,
'i':0,
'z':0}
c = {'g':0,
'r':0,
'i':0,
'z':0}
"""
Adjust subplot margins and title
"""
for b in dictFig.keys():
dictFig[b].subplots_adjust(
top=0.96, right=0.99, bottom=0.03, left=0.02,
wspace=0.08, hspace=0.13
)
dictFig[b].suptitle('band {:<1} - offset {:<d}'.format(b, offset))
GPkern = ''
for i in range(nrows*ncols):
"""
Getting the observational data from file
"""
candidate = util.get_sn_from_file(
args.dirData + lsDirData[i+offset]#candidateIdx]
)
"""
Reading fit data from file
"""
try:
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[i+offset],
magFlag=args.mag,
)
except IndexError:
warnStr = 'IndexError: list index out of range. '+\
'i={:<d}.'.format(i+offset)
                warnings.warn(warnStr)
print '\n'+indent+'Saving files as they are and stopping.'
else:
"""
Initializing SupernovaFit object
"""
fit = cls.SupernovaFit(tmpSN,
tmpSN.kern if hasattr(tmpSN, 'kern') else None)
if (i == 0) and hasattr(tmpSN, 'kern'):
GPkern = tmpSN.kern
for b in tmpSN.lcsDict.keys():
fit.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr,
magFlag=args.mag
)
if fit.r.badCurve:
print 'SN ID{:>06d} has bad r band light curve!'.format(
fit.SNID)
# continue
else:
"""
Shift fit mjd to have 0 at r band maximum
"""
fit.shift_mjds()
"""
Fixing shiftedMjd for not-peaked LCs
"""
if (fit.peaked == False) and (fit.r.badCurve == False) :
"""
correcting using CC results
"""
for b in bands:
fit.lcsDict[b].shiftedMjd = [
el + fit.ccMjdMaxFlux for el in fit.lcsDict[b].shiftedMjd
]
for b in dictAx.keys():
"""
variable `data` initialized as light curve in band b for
cleaner code.
"""
data = candidate.lcsDict[b]
fit_b = fit.lcsDict[b]
fit_r = fit.lcsDict['r']
if c[b] > nrows-1:
c[b] = 0
r[b] += 1
xlim = dictAx[b][r[b], c[b]].get_xlim()
ylim = dictAx[b][r[b], c[b]].get_ylim()
dictAx[b][r[b], c[b]].set_xticks([0])
dictAx[b][r[b], c[b]].set_yticks([0])
dictAx[b][r[b], c[b]].set_xticklabels(['0'])
dictAx[b][r[b], c[b]].set_yticklabels(['0'])
if (data.badCurve == False) and (fit_b.badCurve == False) and (fit.r.badCurve == False):
epoch = util.time_correct(data.mjd,
candidate.zSpec if candidate.zSpec else candidate.zPhotHost)
epoch = [val-fit_r.mjd[fit_r.max_flux_index] for val in epoch]
if fit.peaked == False:
epoch = [val+fit.ccMjdMaxFlux for val in epoch]
flux = util.correct_for_absorption(data.flux,
candidate.MWEBV, b)
"""
Setting limits for plot axes
"""
if min(fit_b.flux) < min(flux):
y_min = min(fit_b.flux) - 3*max(fit_b.fluxErr)
else:
y_min = min(flux) - np.median(data.fluxErr)
if max(fit_b.flux) > max(flux):
y_max = max(fit_b.flux) + 3*max(fit_b.fluxErr)
else:
y_max = max(flux) + np.median(data.fluxErr)
dictAx[b][r[b], c[b]].set_ylim(y_min, y_max)
"""
Setting limits for fill_between
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.4, linewidth=0.5)
"""
Setting limits for fill_between
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + 2*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - 2*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.2, linewidth=0.5)
"""
Setting limits for fill_between
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + 3*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - 3*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.1, linewidth=0.5)
dictAx[b][r[b], c[b]].plot(fit_b.shiftedMjd, fit_b.flux,
color='#7f0000',
linewidth=2)
scatterLab = 'SN ID {:<d}'.format(candidate.SNID)
dictAx[b][r[b], c[b]].scatter(epoch, flux,
s=10, label=scatterLab, c='black', marker='x')
dictAx[b][r[b], c[b]].errorbar(epoch, flux,
data.fluxErr, fmt=None, color='black', ecolor='black')
if not fit.peaked:
pass
dictAx[b][r[b], c[b]].legend(
loc='best', framealpha=0.3, fontsize='10')
else:
label = str(candidate.SNID)+" BAD CURVE"
dictAx[b][r[b], c[b]].plot([0, 1], [0, 1], color='red',
label=label)
dictAx[b][r[b], c[b]].plot([0, 1], [1, 0], color='red')
dictAx[b][r[b], c[b]].legend(
loc='best', framealpha=0.3, fontsize='10')
c[b] += 1
print indent + "Plots saved in files:"
if not os.path.exists(path.abspath(args.dirFit + "plots" + os.sep)):
os.makedirs(args.dirFit + "plots")
for b in dictFig.keys():
dictFig[b].savefig(
args.dirFit + "plots"+ os.sep + GPkern + \
"_band_{:<1}_{:<f}.png".format(b,timeMark),
dpi=300
)
print indent + " - " + args.dirFit + "plots" + os.sep + \
GPkern + "_band_{:<1}_{:<f}.png".format(b,timeMark)
plt.close('all')
"""
PLOT OBSERVATION AND FIT (publication style)
--nice-plots
"""
if args.nicePlots:
"""
1 candidate
choose how many bands
make the plot with confidence regions
"""
# if args.nBands != 1 or args.nBands != 4:
# args.nBands = 1
if args.cand == -1:
args.cand = np.random.random_integers(
low=0, high=len(lsDirData))
fname = 'DES_SN{:0>6d}.DAT'.format(args.cand)
candidate = util.get_sn_from_file(
args.dirData+fname
)
fname = 'DES_SN{:0>6d}_FIT.DAT'.format(args.cand)
tmpSN = util.get_sn_from_file(
args.dirFit+fname,
magFlag=args.mag,
)
"""
Initializing SupernovaFit object
"""
fit = cls.SupernovaFit(tmpSN, tmpSN.kern if hasattr(tmpSN, 'kern') else None)
for b in tmpSN.lcsDict.keys():
fit.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr,
magFlag=args.mag
)
if fit.r.badCurve:
raise SystemExit('Bad r curve!')
fit.shift_mjds()
"""
Fixing shiftedMjd for not-peaked LCs
"""
if fit.peaked == False:
"""
correcting using CC results
"""
for b in candidate.lcsDict.keys():
fit.lcsDict[b].shiftedMjd = [el + fit.ccMjdMaxFlux
for el in fit.lcsDict[b].shiftedMjd]
bands = candidate.lcsDict.keys() if args.allBands else args.band
"""
    Pre-process data so it can be compared with the fit (which was made from
    pre-processed data)
"""
for b in bands:
if (not candidate.lcsDict[b].badCurve) and (not fit.lcsDict[b].badCurve):
candidate = util.pre_process(candidate, b)
candidate.lcsDict[b].mjd = [el - fit.r.mjd[fit.r.max_flux_index]
for el in candidate.lcsDict[b].mjd]
if fit.peaked == False:
candidate.lcsDict[b].mjd = [el + fit.ccMjdMaxFlux
for el in candidate.lcsDict[b].mjd]
else:
raise SystemExit('Bad {:1s} curve!'.format(b))
if args.allBands:
fig, ax = plt.subplots(nrows=2, ncols=2,
# figsize=(16.5, 11.7),
tight_layout=False
)
axDict = {
'g':ax[0,0],
'r':ax[0,1],
'i':ax[1,0],
'z':ax[1,1]
}
# fig.subplots_adjust(left=0.05, right=0.97, top=0.94, wspace=0.29)
else:
fig = plt.figure()
xlim = [-35,12]
ylim = [-10,10]
# fig, ax = plt.subplots(nrows=2, ncols=1,
# # figsize=(16.5, 11.7),
# tight_layout=False
# )
# axDict = {
# 'g':ax[0,0],
# 'r':ax[0,1],
# 'i':ax[1,0],
# 'z':ax[1,1]
# }
if not args.allBands:
fit_b = fit.lcsDict[args.band]
data = candidate.lcsDict[args.band]
if not data.badCurve and not fit_b.badCurve:
epoch = data.mjd
flux = data.flux
"""
Setting limits for fill_between
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.4, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.4, linewidth=0.5)
"""
Setting limits for fill_between
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + 2*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - 2*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.2, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.2, linewidth=0.5)
"""
Setting limits for fill_between
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + 3*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - 3*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.1, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.1, linewidth=0.5)
plt.plot(fit_b.shiftedMjd, fit_b.flux,
color='#7f0000',
linewidth=2,
label='GP fit')
# axDict[b].plot(fit_b.shiftedMjd, fit_b.flux,
# color='#7f0000',
# linewidth=2)
plt.scatter(epoch, flux,
s=30, label='data', c='black', marker='x')
# axDict[b].scatter(epoch, flux,
# s=10, label=str(candidate.SNID), c='black', marker='x')
plt.errorbar(epoch, flux,
data.fluxErr, fmt=None, color='black', ecolor='black')
# plt.xlim(xlim)
plt.ylim(ylim)
title = 'SN ID {:d} - Band {:s}'.format(candidate.SNID, args.band)
plt.title(title)
plt.xlabel('Epoch [mjd]')
plt.ylabel('Flux [adu]')
plt.legend(loc='upper right', scatterpoints=1)
# axDict[b].errorbar(epoch, flux,
# data.fluxErr, fmt=None, color='black', ecolor='black')
print "\n" + indent \
+ "The process took {:5.3f} secs.".format(time.time()-start_time)
| unlicense | -168,076,514,215,684,130 | 34.063946 | 108 | 0.443582 | false |
mpirnat/aoc2016 | day22/test.py | 1 | 1854 | #!/usr/bin/env python
import unittest
from day22 import Node, make_nodes, viable_nodes
class TestMakingNodes(unittest.TestCase):
def test_makes_nodes_from_input(self):
df = """
/dev/grid/node-x0-y0 87T 71T 16T 81%
/dev/grid/node-x0-y1 93T 72T 21T 77%
/dev/grid/node-x1-y0 86T 66T 20T 76%
/dev/grid/node-x1-y1 93T 64T 29T 68%
"""
nodes = make_nodes(df)
self.assertEqual(nodes, [
[Node(name='node-x0-y0', size=87, used=71, avail=16),
Node(name='node-x1-y0', size=86, used=66, avail=20)],
[Node(name='node-x0-y1', size=93, used=72, avail=21),
Node(name='node-x1-y1', size=93, used=64, avail=29)]])
class TestFindingViableNodes(unittest.TestCase):
grid = [
[Node(name='A', size=100, used=1, avail=99),
Node(name='B', size=100, used=50, avail=50)],
[Node(name='C', size=100, used=0, avail=100),
Node(name='D', size=100, used=100, avail=0)],
[Node(name='E', size=50, used=10, avail=40),
Node(name='F', size=100, used=60, avail=40)]]
def test_finds_viable_nodes(self):
grid = self.grid
nodes = viable_nodes(grid)
self.assertEqual(nodes, {
(grid[0][0], grid[0][1]),
(grid[0][0], grid[1][0]),
(grid[0][0], grid[2][0]),
(grid[0][0], grid[2][1]),
(grid[0][1], grid[0][0]),
(grid[0][1], grid[1][0]),
(grid[1][1], grid[1][0]),
(grid[2][0], grid[0][0]),
(grid[2][0], grid[0][1]),
(grid[2][0], grid[1][0]),
(grid[2][0], grid[2][1]),
(grid[2][1], grid[0][0]),
(grid[2][1], grid[1][0])})
if __name__ == '__main__':
unittest.main()
| mit | 3,496,811,144,772,983,300 | 32.709091 | 67 | 0.481122 | false |
cordis/pycloudia-chat | pyligaforex/services/gateways/interfaces.py | 1 | 3985 | from abc import ABCMeta, abstractmethod
class IServiceFactory(object):
__metaclass__ = ABCMeta
@abstractmethod
def create_service(self):
"""
:rtype: L{pyligaforex.services.gateways.interfaces.IService}
"""
class IService(object):
__metaclass__ = ABCMeta
@abstractmethod
def create_gateway(self, channel):
"""
:type channel: L{pycloudia.services.beans.Channel}
:rtype: L{Deferred} of C{None}
"""
@abstractmethod
def delete_gateway(self, runtime, reason=None):
"""
:type runtime: C{str}
:type reason: C{str} or C{None}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.GatewayNotFoundError}
"""
@abstractmethod
def authenticate_gateway(self, runtime, user_id):
"""
:type runtime: C{str}
:type user_id: C{str}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.GatewayNotFoundError}
"""
@abstractmethod
def process_incoming_package(self, runtime, package):
"""
:type runtime: C{str}
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.GatewayNotFoundError}
"""
@abstractmethod
def process_outgoing_package(self, runtime, package):
"""
:type runtime: C{str}
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.GatewayNotFoundError}
"""
class IGateway(object):
__metaclass__ = ABCMeta
@abstractmethod
def set_client_user_id(self, user_id):
"""
:type user_id: C{str}
:rtype: L{Deferred} of C{None}
"""
@abstractmethod
def process_incoming_package(self, package):
"""
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.HeaderNotFoundError}
"""
@abstractmethod
def process_outgoing_package(self, package):
"""
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
"""
class IGatewayFactory(object):
__metaclass__ = ABCMeta
@abstractmethod
def create_gateway(self, channel):
"""
:type channel: L{pycloudia.services.beans.Channel}
:rtype: L{pyligaforex.services.gateways.interfaces.IGateway}
"""
class IRouter(object):
__metaclass__ = ABCMeta
@abstractmethod
def get_target_channel(self, package):
"""
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{pycloudia.services.beans.Channel}
:raise: L{pyligaforex.services.gateways.exceptions.HeaderNotFoundError}
:raise: L{pyligaforex.services.gateways.exceptions.ServiceNotFoundError}
"""
class IDao(object):
__metaclass__ = ABCMeta
@abstractmethod
def set_gateway_client_address(self, client_id, facade_address):
"""
:type client_id: C{str}
:type facade_address: C{str}
:return: deferred with facade_address
:rtype: L{Deferred} of C{str}
"""
@abstractmethod
def set_gateway_client_user_id(self, client_id, user_id):
"""
:type client_id: C{str}
:type user_id: C{str}
:return: deferred with user_id
:rtype: L{Deferred} of C{str}
"""
class IClients(object):
__metaclass__ = ABCMeta
@abstractmethod
def process_outgoing_package(self, client_address, client_id, package):
"""
:type client_address: C{str}
:type client_id: C{str}
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
"""
| mit | 484,113,746,543,363,000 | 26.867133 | 80 | 0.611794 | false |
SmallFatCYW/tcn-analysis-python | tcnanalysis.py | 1 | 2745 | #!/usr/bin/env python
"""
____ _____ _ _ _
| _ \ _ |_ _|__ _ __ / \ _ __ __ _| |_ _ ___(_)___
| |_) | | | || |/ __| '_ \ / _ \ | '_ \ / _` | | | | / __| / __|
| __/| |_| || | (__| | | |/ ___ \| | | | (_| | | |_| \__ \ \__ \
|_| \__, ||_|\___|_| |_/_/ \_\_| |_|\__,_|_|\__, |___/_|___/
|___/ |___/
t.cn JSON and XML Analysis - v0.2Fix
By iPixelOldC & http://hoc117.top
License: MIT
"""
import json
import xml.etree.cElementTree as et
import urllib.request
import re
import os
def JSONReturn(site):
"""
json analysis: JSONReturn(site='Website URL(format:http[s]://xxx)')
return: {'url_short': 'http://t.cn/xxx', 'url_long': site, 'type': 0}
    type: type of the link. 0: ordinary web page, 1: video, 2: music, 3: activity, 5: vote
"""
response = urllib.request.urlopen('http://api.t.sina.com.cn/short_url/shorten.json?source=3271760578&url_long={0!s}'.format((site)))
html = response.read().decode('utf8')
loads = json.loads(str(html))
return loads[0]
def XMLReturn(site):
"""
xml analysis: XMLReturn(site='Website URL(format:http[s]://xxx)')
    return: {'url_short': 'http://t.cn/xxx', 'url_long': site, 'type': '0'}
"""
response = urllib.request.urlopen('http://api.t.sina.com.cn/short_url/shorten.xml?source=3271760578&url_long={0!s}'.format((site)))
html = response.read().decode('utf8')
loads = et.fromstring(str(html))[0]
return {"url_short": loads[0].text, "url_long": loads[1].text, "type": loads[2].text}
if __name__ == "__main__":
print(__doc__)
inputurl = input('>>Please enter url: ')
if 'http://' in inputurl:
pass
else:
inputurl = 'http://'+inputurl
while True:
inputJorX = input('>>(x)ml or (j)son: ').lower()
if inputJorX not in ('x', 'j'):
print("> Please enter 'x' or 'j'!")
else:
break
if 'x' == inputJorX:
r_xml = XMLReturn(inputurl)
print(">>{0!s}: \n> Short URL: {1!s}".format(r_xml["url_long"], r_xml["url_short"]))
if 'j' == inputJorX:
r_json = JSONReturn(inputurl)
print(">>{0!s}: \n> Short URL: {1!s}".format(r_json["url_long"], r_json["url_short"]))
while True:
save_yn = input('>>Do you want to save it?[Y/n]').lower()
if save_yn == 'n':
break
elif save_yn == 'y':
print("> Saving...")
open('{0!s}.json'.format((re.search(r'(http://+)(.*)', inputurl).group(2))), 'w+').write(str(JSONReturn(inputurl)))
print("> OK")
break
else:
print('Please enter (y) or (n)') | mit | 2,421,101,698,474,163,000 | 36.388889 | 136 | 0.48272 | false |
iamroger/vpn | win/utils.py | 1 | 3331 | import os, sys, re, shutil, tarfile, subprocess
j = os.path.join
class Cd(object):
"""
Cd is a context manager that allows
    you to temporarily change the working directory.
with Cd(dir) as cd:
...
"""
def __init__(self, directory):
self._dir = directory
def orig(self):
return self._orig
def __enter__(self):
self._orig = os.getcwd()
os.chdir(self._dir)
return self
def __exit__(self, *args):
os.chdir(self._orig)
class ModEnv(object):
"""
Context manager for temporarily
modifying an env var. Normally used to make
changes to PATH.
"""
def __init__(self, key, value):
        self.key = key
        self.value = value
def __enter__(self):
self.orig_value = os.environ.get(self.key)
os.environ[self.key] = self.value
return self
def __exit__(self, *args):
if self.orig_value is not None:
os.environ[self.key] = self.orig_value
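# Illustrative use of the two context managers above (paths are hypothetical):
#   with Cd('build' + os.sep + 'openssl'):
#       call('nmake', shell=True)
#   with ModEnv('PATH', r'C:\tools\bin;' + os.environ['PATH']):
#       call('some_tool --version', shell=True)
# In both cases the change is intended to last only for the with-block.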
def rmtree(dir):
print "RMTREE", dir
shutil.rmtree(dir, ignore_errors=True)
def makedirs(dir):
print "MAKEDIRS", dir
os.makedirs(dir)
def wipetree(dir):
print "WIPETREE", dir
shutil.rmtree(dir, ignore_errors=True)
if not os.path.isdir(dir):
os.mkdir(dir)
def extract_dict(d, k, default=None):
if k in d:
v = d[k]
del d[k]
else:
v = default
return v
def scan_prefixes(prefix, dir):
fns = []
for dirpath, dirnames, filenames in os.walk(dir):
for f in filenames:
if f.startswith(prefix):
fns.append(f)
break
return fns
def one_prefix(prefix, dir):
f = scan_prefixes(prefix, dir)
if len(f) == 0:
raise ValueError("prefix %r not found in dir %r" % (prefix, dir))
elif len(f) >= 2:
raise ValueError("prefix %r is ambiguous in dir %r: %r" % (prefix, dir, f))
return f[0]
def tarsplit(fn):
if fn.endswith(".tar.gz") or fn.endswith(".tgz"):
t = 'gz'
b = fn[:-7]
elif fn.endswith(".tar.bz2") or fn.endswith(".tbz"):
t = 'bz2'
b = fn[:-8]
else:
raise ValueError("unrecognized tar file type: %r" % (fn,))
return b, t
def tarextract(fn, t):
print "TAR EXTRACT %s [%s]" % (fn, t)
tar = tarfile.open(fn, mode='r:'+t)
try:
tar.extractall()
finally:
tar.close()
def expand(pkg_prefix, srcdir):
f = one_prefix(pkg_prefix, srcdir)
b, t = tarsplit(f)
# remove previous directory
rmtree(b)
# expand it
tarextract(os.path.join(srcdir, f), t)
return b
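# Illustrative example (file name is hypothetical): if 'sources' contains
# openssl-1.0.2k.tar.gz, then expand('openssl-', 'sources') removes any previous
# 'openssl-1.0.2k' directory, extracts the tarball into the current working
# directory and returns the base name 'openssl-1.0.2k'.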
def call(cmd, **kw):
print "***", cmd
ignore_errors = extract_dict(kw, 'ignore_errors', False)
extra_env = extract_dict(kw, 'extra_env', None)
if extra_env:
env = kw.get('env', os.environ).copy()
env.update(extra_env)
kw['env'] = env
succeed = extract_dict(kw, 'succeed', 0)
ret = subprocess.call(cmd, **kw)
if not ignore_errors and ret != succeed:
raise ValueError("command failed with status %r (expected %r)" % (ret, succeed))
def vc_cmd(parms, cmd, succeed=0):
with ModEnv('PATH', "%s;%s\\VC" % (os.environ['PATH'], parms['MSVC_DIR'])):
status = call('vcvarsall.bat x86 && %s' % (cmd,), shell=True, succeed=succeed)
| agpl-3.0 | 2,720,960,673,189,920,000 | 23.674074 | 88 | 0.565296 | false |
RomanPlusPlus/smartGroceryList | .ycm_extra_conf.py | 1 | 3559 | import os
import sys
import ycm_core
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
preferred_build_type = 'debug'
flags = [
'-std=c++11'
,'-Wall'
,'-Wextra'
,'-Wconversion'
,'-Wno-deprecated'
,'-I%s' % os.path.join(DirectoryOfThisScript(), 'build', preferred_build_type, 'src')
,'-I%s' % os.path.join(DirectoryOfThisScript(), 'src')
]
compilation_database_folder = os.path.join(DirectoryOfThisScript(), 'build')
configurations = ['debug', 'release']
databases = []
for conf in configurations:
path = os.path.join(compilation_database_folder, conf)
if os.path.exists(path):
databases.append(ycm_core.CompilationDatabase(path))
SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return list(flags)
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile(filename):
extension = os.path.splitext(filename)[1]
return extension in ['.h', '.hxx', '.hpp', '.hh']
def GetCompilationInfoForFileInDb(database, filename):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile(filename):
basename = os.path.splitext(filename)[0]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists(replacement_file):
compilation_info = database.GetCompilationInfoForFile(replacement_file)
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile(filename)
def GetCompilationInfoForFile(filename):
for db in databases:
info = GetCompilationInfoForFileInDb(db, filename)
if info is None:
continue
else:
return info
return None
def FlagsForFile(filename, **kwargs):
if len(databases) != 0:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile(filename)
if not compilation_info:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
else:
final_flags = MakeRelativePathsInFlagsAbsolute(compilation_info.compiler_flags_, compilation_info.compiler_working_dir_)
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
return {
'flags': final_flags,
'do_cache': True
}
| gpl-3.0 | -8,626,690,842,388,371,000 | 31.651376 | 132 | 0.638944 | false |
pzbadams/udacity | fresh_tomatoes.py | 1 | 5784 | import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = '''
<head>
<meta charset="utf-8">
<title>Fresh Tomatoes!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<style type="text/css" media="screen">
body {
padding-top: 80px;
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
}
.movie-tile:hover {
background-color: #EEE;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
</style>
<script type="text/javascript" charset="utf-8">
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 0
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show("fast", showNext);
});
});
</script>
</head>
'''
# The main page layout and title bar
main_page_content = '''
<!DOCTYPE html>
<html lang="en">
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#"><font size="6">Some of My Favorite Movies</font></a>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-10 col-lg-4 movie-tile text-center" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer">
<img src="{poster_image_url}" width="165" height="257">
<h2><font size="5">{movie_title}</font></h2>
<h3><font size="4">{movie_year}</font></h3>
<h4><font size="4">{movie_rating}</font></h4>
<h5><font size="2">{movie_storyline}</font></h5>
</div>
'''
def create_movie_tiles_content(movies):
# The HTML content for this section of the page
content = ''
for movie in movies:
# Extract the youtube ID from the url
youtube_id_match = re.search(r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
youtube_id_match = youtube_id_match or re.search(r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
trailer_youtube_id = youtube_id_match.group(0) if youtube_id_match else None
# Append the tile for the movie with its content filled in
content += movie_tile_content.format(
movie_title=movie.title,
movie_year=movie.year,
movie_rating=movie.rating,
movie_storyline=movie.storyline,
poster_image_url=movie.poster_image_url,
trailer_youtube_id=trailer_youtube_id
)
return content
def open_movies_page(movies):
# Create or overwrite the output file
output_file = open('fresh_tomatoes.html', 'w')
# Replace the placeholder for the movie tiles with the actual dynamically generated content
rendered_content = main_page_content.format(movie_tiles=create_movie_tiles_content(movies))
# Output the file
output_file.write(main_page_head + rendered_content)
output_file.close()
# open the output file in the browser
url = os.path.abspath(output_file.name)
webbrowser.open('file://' + url, new=2) # open in a new tab, if possible
| gpl-2.0 | -5,111,499,279,754,586,000 | 34.054545 | 145 | 0.573824 | false |
gillett-hernandez/project-euler | Python/problem_58.py | 1 | 2420 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Gillett Hernandez
# @Date: 2016-03-21 22:33:46
# @Last Modified by: Gillett Hernandez
# @Last Modified time: 2017-08-10 21:14:06
# from mfunctions import is_prime
from itertools import count, chain
from euler_funcs import is_prime_w_primes_given, get_primes, timed
# def lag(iterable):
# iterable = iter(iterable)
# lv = next(iterable)
# v = next(iterable)
# yield lv
# yield v
# while True:
# lv, v = v, next(iterable)
# yield v
# def double(iterable):
# for i in iter(iterable):
# yield ik
# yield i
# def dup_last(iterable):
# '''meant to be used with iterators of integers
# adds a duplicate of the last element'''
# for el in iterable:
# yield el
# yield el
# def square_spiral(s=None):
# # 1 1 2 2 3 3 4 4 5 5 6 6
# if s is not None:
# iterable = range(1, s-1)
# else:
# iterable = count(1)
# C = 0
# for i in dup_last(double(iterable)):
# C += i
def eval_proportion(L, lL):
assert isinstance(L, list), L
assert len(L) == 3, L
# each sublist of L will contain necessarily distinct numbers
C = lL * 4 + 1
C1 = len(L[0])*4 + 1
assert C == C1, (lL, len(L[0]))
c = 0
for n in chain(*L):
if is_prime(n):
c += 1
return c*100 / C
def square_spiral(N=None, limit=None, primef = lambda n: is_prime(n)):
# br = ((2*i+1)**2 for i in count(1)) # bottom right, the squares
bl = ((2*i+1)**2-2*i for i in count(1)) # bottom left
tl = ((2*i+1)**2-4*i for i in count(1)) # topleft
tr = ((2*i+1)**2-6*i for i in count(1)) # topright
c = 0
for i in count():
C = (i+1) * 4 + 1
for g in [bl, tl, tr]:
ng = next(g)
if primef(ng):
# print(ng)
c += 1
p = c*100 / C
if limit is not None and p < limit:
break
if N is not None and i > N-2:
print("bksadiwoqndnslknlasdbllaarrghghghghghgh", i)
break
return i, p
@timed
def main():
# print(square_spiral(3))
# print(square_spiral(20000, 10))
primes = get_primes(limit=27000)
primef = lambda n: is_prime_w_primes_given(n, primes)
radius, proportion = square_spiral(limit=10, primef=primef)
print((radius+1)*2+1)
if __name__ == '__main__':
main()
| mit | 6,795,844,792,540,024,000 | 24.744681 | 70 | 0.544215 | false |
lborgav/cookiecutter-django | {{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/settings/base.py | 1 | 7003 | # Django base settings for {{cookiecutter.project_name}} project.
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIG
# Absolute filesystem path to the Django project folder:
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
# Absolute filesystem path to the repository folder:
REPO_ROOT = dirname(PROJECT_ROOT)
# Site name:
SITE_NAME = basename(PROJECT_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(PROJECT_ROOT)
########## END PATH CONFIG
########## MANAGER CONFIG
ADMINS = (
('{{cookiecutter.author_name}}', '{{cookiecutter.email}}'),
)
MANAGERS = ADMINS
########## END MANAGER CONFIG
########## AUTH MODEL CONFIG
# AUTH_USER_MODEL = 'accounts.CustomUser'
########## END AUTH MODEL CONFIG
########## GENERAL CONFIG
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Language
LANGUAGES = (
('en', ('English')),
)
# Locale Paths
LOCALE_PATHS = (
# normpath(join(REPO_ROOT, 'locale')),
)
########## END GENERAL CONFIG
########## MEDIA CONFIG
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = normpath(join(PROJECT_ROOT, 'media'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
########## END MEDIA CONFIG
########## STATIC FILE CONFIG
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
# STATIC_ROOT = normpath(join(PROJECT_ROOT, 'assets'))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
normpath(join(PROJECT_ROOT, 'static')),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
########## END STATIC FILE CONFIG
########## SECRET CONFIG
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'SECRET_KEY'
########## END SECRET CONFIG
########## TEMPLATE CONFIG
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
normpath(join(PROJECT_ROOT, 'templates')),
)
########### END TEMPLATE CONFIG
########### MIDDLEWARE CONFIG
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########### END MIDDLEWARE CONFIG
########### URL CONFIG
ROOT_URLCONF = '%s.urls' % SITE_NAME
########### END URL CONFIG
########## WSGI CONFIG
WSGI_APPLICATION = '{{cookiecutter.project_name}}.wsgi.application'
########## END WSGI CONFIG
########## AUTHENTICATION_BACKENDS
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
########## END AUTHENTICATION_BACKENDS
########## APPS CONFIG
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Useful template tags:
# Admin Panel and Admin docs
'django.contrib.admin',
'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
'south', # Migrations
)
LOCAL_APPS = (
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APPS CONFIG
########## LOGGING CONFIG
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site a dmins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIG
| mit | -3,837,163,642,000,832,500 | 28.548523 | 88 | 0.684135 | false |
ULHPC/modules | easybuild/easybuild-framework/test/framework/options.py | 1 | 78224 | # #
# Copyright 2013-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for eb command line options.
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import re
import shutil
import sys
import tempfile
from test.framework.utilities import EnhancedTestCase, init_config
from unittest import TestLoader
from unittest import main as unittestmain
from urllib2 import URLError
import easybuild.tools.build_log
import easybuild.tools.options
from easybuild.framework.easyconfig import BUILD, CUSTOM, DEPENDENCIES, EXTENSIONS, FILEMANAGEMENT, LICENSE
from easybuild.framework.easyconfig import MANDATORY, MODULES, OTHER, TOOLCHAIN
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import DEFAULT_MODULECLASSES, get_module_syntax
from easybuild.tools.environment import modify_env
from easybuild.tools.filetools import mkdir, read_file, write_file
from easybuild.tools.github import fetch_github_token
from easybuild.tools.modules import modules_tool
from easybuild.tools.options import EasyBuildOptions
from easybuild.tools.version import VERSION
from vsc.utils import fancylogger
# test account, for which a token is available
GITHUB_TEST_ACCOUNT = 'easybuild_test'
class CommandLineOptionsTest(EnhancedTestCase):
"""Testcases for command line options."""
logfile = None
def setUp(self):
"""Set up test."""
super(CommandLineOptionsTest, self).setUp()
self.github_token = fetch_github_token(GITHUB_TEST_ACCOUNT)
def test_help_short(self, txt=None):
"""Test short help message."""
if txt is None:
topt = EasyBuildOptions(
go_args=['-h'],
go_nosystemexit=True, # when printing help, optparse ends with sys.exit
go_columns=100, # fix col size for reproducible unittest output
                help_to_string=True, # don't print to stdout, but to StringIO fh,
prog='easybuildoptions_test', # generate as if called from generaloption.py
)
outtxt = topt.parser.help_to_file.getvalue()
else:
outtxt = txt
self.assertTrue(re.search(' -h ', outtxt), "Only short options included in short help")
self.assertTrue(re.search("show short help message and exit", outtxt), "Documentation included in short help")
self.assertEqual(re.search("--short-help ", outtxt), None, "Long options not included in short help")
self.assertEqual(re.search("Software search and build options", outtxt), None, "Not all option groups included in short help (1)")
self.assertEqual(re.search("Regression test options", outtxt), None, "Not all option groups included in short help (2)")
def test_help_long(self):
"""Test long help message."""
topt = EasyBuildOptions(
go_args=['-H'],
go_nosystemexit=True, # when printing help, optparse ends with sys.exit
go_columns=100, # fix col size for reproducible unittest output
            help_to_string=True, # don't print to stdout, but to StringIO fh,
prog='easybuildoptions_test', # generate as if called from generaloption.py
)
outtxt = topt.parser.help_to_file.getvalue()
self.assertTrue(re.search("-H, --help", outtxt), "Long documentation expanded in long help")
self.assertTrue(re.search("show short help message and exit", outtxt), "Documentation included in long help")
self.assertTrue(re.search("Software search and build options", outtxt), "Not all option groups included in short help (1)")
self.assertTrue(re.search("Regression test options", outtxt), "Not all option groups included in short help (2)")
def test_no_args(self):
"""Test using no arguments."""
outtxt = self.eb_main([])
error_msg = "ERROR Please provide one or multiple easyconfig files,"
error_msg += " or use software build options to make EasyBuild search for easyconfigs"
self.assertTrue(re.search(error_msg, outtxt), "Error message when eb is run without arguments")
def test_debug(self):
"""Test enabling debug logging."""
for debug_arg in ['-d', '--debug']:
args = [
'nosuchfile.eb',
debug_arg,
]
outtxt = self.eb_main(args)
for log_msg_type in ['DEBUG', 'INFO', 'ERROR']:
res = re.search(' %s ' % log_msg_type, outtxt)
self.assertTrue(res, "%s log messages are included when using %s: %s" % (log_msg_type, debug_arg, outtxt))
def test_info(self):
"""Test enabling info logging."""
for info_arg in ['--info']:
args = [
'nosuchfile.eb',
info_arg,
]
outtxt = self.eb_main(args)
for log_msg_type in ['INFO', 'ERROR']:
res = re.search(' %s ' % log_msg_type, outtxt)
self.assertTrue(res, "%s log messages are included when using %s ( out: %s)" % (log_msg_type, info_arg, outtxt))
for log_msg_type in ['DEBUG']:
res = re.search(' %s ' % log_msg_type, outtxt)
self.assertTrue(not res, "%s log messages are *not* included when using %s" % (log_msg_type, info_arg))
def test_quiet(self):
"""Test enabling quiet logging (errors only)."""
for quiet_arg in ['--quiet']:
args = [
'nosuchfile.eb',
quiet_arg,
]
outtxt = self.eb_main(args)
for log_msg_type in ['ERROR']:
res = re.search(' %s ' % log_msg_type, outtxt)
msg = "%s log messages are included when using %s (outtxt: %s)" % (log_msg_type, quiet_arg, outtxt)
self.assertTrue(res, msg)
for log_msg_type in ['DEBUG', 'INFO']:
res = re.search(' %s ' % log_msg_type, outtxt)
msg = "%s log messages are *not* included when using %s (outtxt: %s)" % (log_msg_type, quiet_arg, outtxt)
self.assertTrue(not res, msg)
def test_force(self):
"""Test forcing installation even if the module is already available."""
# use GCC-4.6.3.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'GCC-4.6.3.eb')
# check log message without --force
args = [
eb_file,
'--debug',
]
outtxt, error_thrown = self.eb_main(args, return_error=True)
self.assertTrue(not error_thrown, "No error is thrown if software is already installed (error_thrown: %s)" % error_thrown)
already_msg = "GCC/4.6.3 is already installed"
self.assertTrue(re.search(already_msg, outtxt), "Already installed message without --force, outtxt: %s" % outtxt)
# clear log file
write_file(self.logfile, '')
# check that --force works
args = [
eb_file,
'--force',
'--debug',
]
outtxt = self.eb_main(args)
self.assertTrue(not re.search(already_msg, outtxt), "Already installed message not there with --force")
def test_skip(self):
"""Test skipping installation of module (--skip, -k)."""
# use toy-0.0.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
# check log message with --skip for existing module
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--force',
'--debug',
]
self.eb_main(args, do_build=True)
modules_tool().purge()
args.append('--skip')
outtxt = self.eb_main(args, do_build=True, verbose=True)
found_msg = "Module toy/0.0 found.\n[^\n]+Going to skip actual main build"
found = re.search(found_msg, outtxt, re.M)
self.assertTrue(found, "Module found message present with --skip, outtxt: %s" % outtxt)
# cleanup for next test
write_file(self.logfile, '')
os.chdir(self.cwd)
modules_tool().purge()
# reinitialize modules tool with original $MODULEPATH, to avoid problems with future tests
os.environ['MODULEPATH'] = ''
modules_tool()
# check log message with --skip for non-existing module
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--try-software-version=1.2.3.4.5.6.7.8.9',
'--try-amend=sources=toy-0.0.tar.gz,toy-0.0.tar.gz', # hackish, but fine
'--force',
'--debug',
'--skip',
]
outtxt = self.eb_main(args, do_build=True, verbose=True)
found_msg = "Module toy/1.2.3.4.5.6.7.8.9 found."
found = re.search(found_msg, outtxt)
self.assertTrue(not found, "Module found message not there with --skip for non-existing modules: %s" % outtxt)
not_found_msg = "No module toy/1.2.3.4.5.6.7.8.9 found. Not skipping anything."
not_found = re.search(not_found_msg, outtxt)
self.assertTrue(not_found, "Module not found message there with --skip for non-existing modules: %s" % outtxt)
modules_tool().purge()
# reinitialize modules tool with original $MODULEPATH, to avoid problems with future tests
modify_env(os.environ, self.orig_environ)
modules_tool()
def test_job(self):
"""Test submitting build as a job."""
# use gzip-1.4.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'gzip-1.4.eb')
def check_args(job_args, passed_args=None):
"""Check whether specified args yield expected result."""
if passed_args is None:
passed_args = job_args[:]
# clear log file
write_file(self.logfile, '')
args = [
eb_file,
'--job',
] + job_args
outtxt = self.eb_main(args)
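            # the (possibly reordered/tweaked) options should show up in the command template used for job scripts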
job_msg = "INFO.* Command template for jobs: .* && eb %%\(spec\)s.* %s.*\n" % ' .*'.join(passed_args)
assertmsg = "Info log msg with job command template for --job (job_msg: %s, outtxt: %s)" % (job_msg, outtxt)
self.assertTrue(re.search(job_msg, outtxt), assertmsg)
# options passed are reordered, so order here matters to make tests pass
check_args(['--debug'])
check_args(['--debug', '--stop=configure', '--try-software-name=foo'])
check_args(['--debug', '--robot-paths=/tmp/foo:/tmp/bar'])
# --robot has preference over --robot-paths, --robot is not passed down
check_args(['--debug', '--robot-paths=/tmp/foo', '--robot=/tmp/bar'],
passed_args=['--debug', '--robot-paths=/tmp/bar:/tmp/foo'])
# 'zzz' prefix in the test name is intentional to make this test run last,
# since it fiddles with the logging infrastructure which may break things
def test_zzz_logtostdout(self):
"""Testing redirecting log to stdout."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
for stdout_arg in ['--logtostdout', '-l']:
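            # temporarily redirect stdout to a file, so the log messages printed to the screen can be inspected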
_stdout = sys.stdout
fd, fn = tempfile.mkstemp()
fh = os.fdopen(fd, 'w')
sys.stdout = fh
args = [
'--software-name=somethingrandom',
'--robot', '.',
'--debug',
stdout_arg,
]
self.eb_main(args, logfile=dummylogfn)
# make sure we restore
sys.stdout.flush()
sys.stdout = _stdout
fancylogger.logToScreen(enable=False, stdout=True)
outtxt = read_file(fn)
self.assertTrue(len(outtxt) > 100, "Log messages are printed to stdout when %s is used (outtxt: %s)" % (stdout_arg, outtxt))
# cleanup
os.remove(fn)
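        # also check whether logging to stdout works during an actual (forced) build of the toy easyconfig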
stdoutorig = sys.stdout
sys.stdout = open("/dev/null", 'w')
toy_ecfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'toy-0.0.eb')
self.logfile = None
out = self.eb_main([toy_ecfile, '--debug', '-l', '--force'], raise_error=True)
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
sys.stdout.close()
sys.stdout = stdoutorig
def test_avail_easyconfig_params(self):
"""Test listing available easyconfig parameters."""
def run_test(custom=None, extra_params=[], fmt=None):
"""Inner function to run actual test in current setting."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
avail_args = [
'-a',
'--avail-easyconfig-params',
]
for avail_arg in avail_args:
# clear log
write_file(self.logfile, '')
args = [
'--unittest-file=%s' % self.logfile,
avail_arg,
]
if fmt is not None:
args.append(fmt)
if custom is not None:
args.extend(['-e', custom])
outtxt = self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
# check whether all parameter types are listed
par_types = [BUILD, DEPENDENCIES, EXTENSIONS, FILEMANAGEMENT,
LICENSE, MANDATORY, MODULES, OTHER, TOOLCHAIN]
if custom is not None:
par_types.append(CUSTOM)
for param_type in [x[1] for x in par_types]:
# regex for parameter group title, matches both txt and rst formats
regex = re.compile("%s.*\n%s" % (param_type, '-' * len(param_type)), re.I)
tup = (param_type, avail_arg, args, logtxt)
msg = "Parameter type %s is featured in output of eb %s (args: %s): %s" % tup
self.assertTrue(regex.search(logtxt), msg)
# check a couple of easyconfig parameters
for param in ["name", "version", "toolchain", "versionsuffix", "buildopts", "sources", "start_dir",
"dependencies", "group", "exts_list", "moduleclass", "buildstats"] + extra_params:
# regex for parameter name (with optional '*') & description, matches both txt and rst formats
regex = re.compile("^[`]*%s(?:\*)?[`]*\s+\w+" % param, re.M)
tup = (param, avail_arg, args, regex.pattern, logtxt)
msg = "Parameter %s is listed with help in output of eb %s (args: %s, regex: %s): %s" % tup
self.assertTrue(regex.search(logtxt), msg)
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
for fmt in [None, 'txt', 'rst']:
run_test(fmt=fmt)
run_test(custom='EB_foo', extra_params=['foo_extra1', 'foo_extra2'], fmt=fmt)
run_test(custom='bar', extra_params=['bar_extra1', 'bar_extra2'], fmt=fmt)
run_test(custom='EB_foofoo', extra_params=['foofoo_extra1', 'foofoo_extra2'], fmt=fmt)
# double underscore to make sure it runs first, which is required to detect certain types of bugs,
    # e.g. running with non-initialized EasyBuild config (truly mimicking 'eb --list-toolchains')
def test__list_toolchains(self):
"""Test listing known compiler toolchains."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
'--list-toolchains',
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn)
info_msg = r"INFO List of known toolchains \(toolchainname: module\[,module\.\.\.\]\):"
logtxt = read_file(self.logfile)
self.assertTrue(re.search(info_msg, logtxt), "Info message with list of known compiler toolchains")
# toolchain elements should be in alphabetical order
tcs = {
'dummy': [],
'goalf': ['ATLAS', 'BLACS', 'FFTW', 'GCC', 'OpenMPI', 'ScaLAPACK'],
'ictce': ['icc', 'ifort', 'imkl', 'impi'],
}
for tc, tcelems in tcs.items():
res = re.findall("^\s*%s: .*" % tc, logtxt, re.M)
self.assertTrue(res, "Toolchain %s is included in list of known compiler toolchains" % tc)
# every toolchain should only be mentioned once
n = len(res)
self.assertEqual(n, 1, "Toolchain %s is only mentioned once (count: %d)" % (tc, n))
# make sure definition is correct (each element only named once, in alphabetical order)
self.assertEqual("\t%s: %s" % (tc, ', '.join(tcelems)), res[0])
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_avail_lists(self):
"""Test listing available values of certain types."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
name_items = {
'modules-tools': ['EnvironmentModulesC', 'Lmod'],
'module-naming-schemes': ['EasyBuildMNS', 'HierarchicalMNS', 'CategorizedHMNS'],
}
for (name, items) in name_items.items():
args = [
'--avail-%s' % name,
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
words = name.replace('-', ' ')
info_msg = r"INFO List of supported %s:" % words
self.assertTrue(re.search(info_msg, logtxt), "Info message with list of available %s" % words)
for item in items:
res = re.findall("^\s*%s" % item, logtxt, re.M)
self.assertTrue(res, "%s is included in list of available %s" % (item, words))
# every item should only be mentioned once
n = len(res)
self.assertEqual(n, 1, "%s is only mentioned once (count: %d)" % (item, n))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_avail_cfgfile_constants(self):
"""Test --avail-cfgfile-constants."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
        # to check whether the DEFAULT_ROBOT_PATHS constant picks up the easyconfigs install path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
shutil.copytree(test_ecs_dir, os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
orig_sys_path = sys.path[:]
sys.path.insert(0, tmpdir) # prepend to give it preference over possible other installed easyconfigs pkgs
args = [
'--avail-cfgfile-constants',
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
cfgfile_constants = {
'DEFAULT_ROBOT_PATHS': os.path.join(tmpdir, 'easybuild', 'easyconfigs'),
}
for cst_name, cst_value in cfgfile_constants.items():
cst_regex = re.compile(r"^\*\s%s:\s.*\s\[value: .*%s.*\]" % (cst_name, cst_value), re.M)
tup = (cst_regex.pattern, logtxt)
self.assertTrue(cst_regex.search(logtxt), "Pattern '%s' in --avail-cfgfile_constants output: %s" % tup)
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
sys.path[:] = orig_sys_path
def test_list_easyblocks(self):
"""Test listing easyblock hierarchy."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# adjust PYTHONPATH such that test easyblocks are found
import easybuild
eb_blocks_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'sandbox'))
if not eb_blocks_path in sys.path:
sys.path.append(eb_blocks_path)
easybuild = reload(easybuild)
import easybuild.easyblocks
reload(easybuild.easyblocks)
reload(easybuild.tools.module_naming_scheme) # required to run options unit tests stand-alone
# simple view
for list_arg in ['--list-easyblocks', '--list-easyblocks=simple']:
# clear log
write_file(self.logfile, '')
args = [
list_arg,
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
for pat in [
r"EasyBlock\n",
r"|--\s+EB_foo\n|\s+|--\s+EB_foofoo\n",
r"|--\s+bar\n",
]:
msg = "Pattern '%s' is found in output of --list-easyblocks: %s" % (pat, logtxt)
self.assertTrue(re.search(pat, logtxt), msg)
# clear log
write_file(self.logfile, '')
# detailed view
args = [
'--list-easyblocks=detailed',
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
for pat in [
r"EasyBlock\s+\(easybuild.framework.easyblock\)\n",
r"|--\s+EB_foo\s+\(easybuild.easyblocks.foo\)\n|\s+|--\s+EB_foofoo\s+\(easybuild.easyblocks.foofoo\)\n",
r"|--\s+bar\s+\(easybuild.easyblocks.generic.bar\)\n",
]:
msg = "Pattern '%s' is found in output of --list-easyblocks: %s" % (pat, logtxt)
self.assertTrue(re.search(pat, logtxt), msg)
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_search(self):
"""Test searching for easyconfigs."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
'--search=gzip',
'--robot=%s' % os.path.join(os.path.dirname(__file__), 'easyconfigs'),
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
info_msg = r"Searching \(case-insensitive\) for 'gzip' in"
self.assertTrue(re.search(info_msg, logtxt), "Info message when searching for easyconfigs in '%s'" % logtxt)
for ec in ["gzip-1.4.eb", "gzip-1.4-GCC-4.6.3.eb"]:
self.assertTrue(re.search(r" \* \S*%s$" % ec, logtxt, re.M), "Found easyconfig %s in '%s'" % (ec, logtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
write_file(self.logfile, '')
args = [
'--search=^gcc.*2.eb',
'--robot=%s' % os.path.join(os.path.dirname(__file__), 'easyconfigs'),
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
info_msg = r"Searching \(case-insensitive\) for '\^gcc.\*2.eb' in"
self.assertTrue(re.search(info_msg, logtxt), "Info message when searching for easyconfigs in '%s'" % logtxt)
for ec in ['GCC-4.7.2.eb', 'GCC-4.8.2.eb', 'GCC-4.9.2.eb']:
self.assertTrue(re.search(r" \* \S*%s$" % ec, logtxt, re.M), "Found easyconfig %s in '%s'" % (ec, logtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
write_file(self.logfile, '')
for search_arg in ['-S', '--search-short']:
            write_file(self.logfile, '')
args = [
search_arg,
'toy-0.0',
'-r',
os.path.join(os.path.dirname(__file__), 'easyconfigs'),
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn, raise_error=True, verbose=True)
logtxt = read_file(self.logfile)
info_msg = r"Searching \(case-insensitive\) for 'toy-0.0' in"
self.assertTrue(re.search(info_msg, logtxt), "Info message when searching for easyconfigs in '%s'" % logtxt)
self.assertTrue(re.search('INFO CFGS\d+=', logtxt), "CFGS line message found in '%s'" % logtxt)
for ec in ["toy-0.0.eb", "toy-0.0-multiple.eb"]:
self.assertTrue(re.search(" \* \$CFGS\d+/*%s" % ec, logtxt), "Found easyconfig %s in '%s'" % (ec, logtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_dry_run(self):
"""Test dry run (long format)."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
os.path.join(os.path.dirname(__file__), 'easyconfigs', 'gzip-1.4-GCC-4.6.3.eb'),
'--dry-run', # implies enabling dependency resolution
'--unittest-file=%s' % self.logfile,
'--robot-paths=%s' % os.path.join(os.path.dirname(__file__), 'easyconfigs'),
]
self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
info_msg = r"Dry run: printing build status of easyconfigs and dependencies"
self.assertTrue(re.search(info_msg, logtxt, re.M), "Info message dry running in '%s'" % logtxt)
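        # each line of dry run output looks like " * [ ] /path/to/spec.eb (module: name/version)";
        # an 'x' marker indicates that the corresponding module is already installed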
ecs_mods = [
("gzip-1.4-GCC-4.6.3.eb", "gzip/1.4-GCC-4.6.3", ' '),
("GCC-4.6.3.eb", "GCC/4.6.3", 'x'),
]
for ec, mod, mark in ecs_mods:
regex = re.compile(r" \* \[%s\] \S+%s \(module: %s\)" % (mark, ec, mod), re.M)
self.assertTrue(regex.search(logtxt), "Found match for pattern %s in '%s'" % (regex.pattern, logtxt))
def test_dry_run_short(self):
"""Test dry run (short format)."""
# unset $EASYBUILD_ROBOT_PATHS that was defined in setUp
del os.environ['EASYBUILD_ROBOT_PATHS']
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
shutil.copytree(test_ecs_dir, os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
orig_sys_path = sys.path[:]
sys.path.insert(0, tmpdir) # prepend to give it preference over possible other installed easyconfigs pkgs
for dry_run_arg in ['-D', '--dry-run-short']:
            write_file(self.logfile, '')
args = [
os.path.join(tmpdir, 'easybuild', 'easyconfigs', 'gzip-1.4-GCC-4.6.3.eb'),
dry_run_arg,
# purposely specifying senseless dir, to test auto-inclusion of easyconfigs pkg path in robot path
'--robot=%s' % os.path.join(tmpdir, 'robot_decoy'),
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn)
info_msg = r"Dry run: printing build status of easyconfigs and dependencies"
self.assertTrue(re.search(info_msg, outtxt, re.M), "Info message dry running in '%s'" % outtxt)
self.assertTrue(re.search('CFGS=', outtxt), "CFGS line message found in '%s'" % outtxt)
ecs_mods = [
("gzip-1.4-GCC-4.6.3.eb", "gzip/1.4-GCC-4.6.3", ' '),
("GCC-4.6.3.eb", "GCC/4.6.3", 'x'),
]
for ec, mod, mark in ecs_mods:
regex = re.compile(r" \* \[%s\] \$CFGS\S+%s \(module: %s\)" % (mark, ec, mod), re.M)
self.assertTrue(regex.search(outtxt), "Found match for pattern %s in '%s'" % (regex.pattern, outtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
# cleanup
shutil.rmtree(tmpdir)
sys.path[:] = orig_sys_path
def test_try_robot_force(self):
"""
Test correct behavior for combination of --try-toolchain --robot --force.
Only the listed easyconfigs should be forced, resolved dependencies should not (even if tweaked).
"""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# use toy-0.0.eb easyconfig file that comes with the tests
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
eb_file1 = os.path.join(test_ecs_dir, 'FFTW-3.3.3-gompi-1.4.10.eb')
eb_file2 = os.path.join(test_ecs_dir, 'ScaLAPACK-2.0.2-gompi-1.4.10-OpenBLAS-0.2.6-LAPACK-3.4.2.eb')
        # perform a forced dry run with --try-toolchain and dependency resolution enabled
args = [
eb_file1,
eb_file2,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--force',
'--robot=%s' % test_ecs_dir,
'--try-toolchain=gompi,1.3.12',
'--dry-run',
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn)
scalapack_ver = '2.0.2-gompi-1.3.12-OpenBLAS-0.2.6-LAPACK-3.4.2'
ecs_mods = [
# GCC/OpenMPI dependencies are there, but part of toolchain => 'x'
("GCC-4.6.4.eb", "GCC/4.6.4", 'x'),
("OpenMPI-1.6.4-GCC-4.6.4.eb", "OpenMPI/1.6.4-GCC-4.6.4", 'x'),
# OpenBLAS dependency is there, but not listed => 'x'
("OpenBLAS-0.2.6-gompi-1.3.12-LAPACK-3.4.2.eb", "OpenBLAS/0.2.6-gompi-1.3.12-LAPACK-3.4.2", 'x'),
# both FFTW and ScaLAPACK are listed => 'F'
("ScaLAPACK-%s.eb" % scalapack_ver, "ScaLAPACK/%s" % scalapack_ver, 'F'),
("FFTW-3.3.3-gompi-1.3.12.eb", "FFTW/3.3.3-gompi-1.3.12", 'F'),
]
for ec, mod, mark in ecs_mods:
regex = re.compile("^ \* \[%s\] \S+%s \(module: %s\)$" % (mark, ec, mod), re.M)
self.assertTrue(regex.search(outtxt), "Found match for pattern %s in '%s'" % (regex.pattern, outtxt))
def test_dry_run_hierarchical(self):
"""Test dry run using a hierarchical module naming scheme."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
test_ecs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
args = [
os.path.join(test_ecs, 'gzip-1.5-goolf-1.4.10.eb'),
os.path.join(test_ecs, 'OpenMPI-1.6.4-GCC-4.7.2.eb'),
'--dry-run',
'--unittest-file=%s' % self.logfile,
'--module-naming-scheme=HierarchicalMNS',
'--ignore-osdeps',
'--force',
'--debug',
'--robot-paths=%s' % os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs'),
]
outtxt = self.eb_main(args, logfile=dummylogfn, verbose=True, raise_error=True)
ecs_mods = [
# easyconfig, module subdir, (short) module name
("GCC-4.7.2.eb", "Core", "GCC/4.7.2", 'x'), # already present but not listed, so 'x'
("hwloc-1.6.2-GCC-4.7.2.eb", "Compiler/GCC/4.7.2", "hwloc/1.6.2", 'x'),
("OpenMPI-1.6.4-GCC-4.7.2.eb", "Compiler/GCC/4.7.2", "OpenMPI/1.6.4", 'F'), # already present and listed, so 'F'
("gompi-1.4.10.eb", "Core", "gompi/1.4.10", 'x'),
("OpenBLAS-0.2.6-gompi-1.4.10-LAPACK-3.4.2.eb", "MPI/GCC/4.7.2/OpenMPI/1.6.4",
"OpenBLAS/0.2.6-LAPACK-3.4.2", 'x'),
("FFTW-3.3.3-gompi-1.4.10.eb", "MPI/GCC/4.7.2/OpenMPI/1.6.4", "FFTW/3.3.3", 'x'),
("ScaLAPACK-2.0.2-gompi-1.4.10-OpenBLAS-0.2.6-LAPACK-3.4.2.eb", "MPI/GCC/4.7.2/OpenMPI/1.6.4",
"ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2", 'x'),
("goolf-1.4.10.eb", "Core", "goolf/1.4.10", 'x'),
("gzip-1.5-goolf-1.4.10.eb", "MPI/GCC/4.7.2/OpenMPI/1.6.4", "gzip/1.5", ' '), # listed but not there: ' '
]
for ec, mod_subdir, mod_name, mark in ecs_mods:
regex = re.compile("^ \* \[%s\] \S+%s \(module: %s \| %s\)$" % (mark, ec, mod_subdir, mod_name), re.M)
self.assertTrue(regex.search(outtxt), "Found match for pattern %s in '%s'" % (regex.pattern, outtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_dry_run_categorized(self):
"""Test dry run using a categorized hierarchical module naming scheme."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
self.setup_categorized_hmns_modules()
test_ecs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
args = [
os.path.join(test_ecs, 'gzip-1.5-goolf-1.4.10.eb'),
os.path.join(test_ecs, 'OpenMPI-1.6.4-GCC-4.7.2.eb'),
'--dry-run',
'--unittest-file=%s' % self.logfile,
'--module-naming-scheme=CategorizedHMNS',
'--ignore-osdeps',
'--force',
'--debug',
'--robot-paths=%s' % os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs'),
]
outtxt = self.eb_main(args, logfile=dummylogfn, verbose=True, raise_error=True)
ecs_mods = [
# easyconfig, module subdir, (short) module name, mark
("GCC-4.7.2.eb", "Core/compiler", "GCC/4.7.2", 'x'), # already present but not listed, so 'x'
("hwloc-1.6.2-GCC-4.7.2.eb", "Compiler/GCC/4.7.2/system", "hwloc/1.6.2", 'x'),
("OpenMPI-1.6.4-GCC-4.7.2.eb", "Compiler/GCC/4.7.2/mpi", "OpenMPI/1.6.4", 'F'), # already present and listed, so 'F'
("gompi-1.4.10.eb", "Core/toolchain", "gompi/1.4.10", 'x'),
("OpenBLAS-0.2.6-gompi-1.4.10-LAPACK-3.4.2.eb", "MPI/GCC/4.7.2/OpenMPI/1.6.4/numlib",
"OpenBLAS/0.2.6-LAPACK-3.4.2", 'x'),
("FFTW-3.3.3-gompi-1.4.10.eb", "MPI/GCC/4.7.2/OpenMPI/1.6.4/numlib", "FFTW/3.3.3", 'x'),
("ScaLAPACK-2.0.2-gompi-1.4.10-OpenBLAS-0.2.6-LAPACK-3.4.2.eb", "MPI/GCC/4.7.2/OpenMPI/1.6.4/numlib",
"ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2", 'x'),
("goolf-1.4.10.eb", "Core/toolchain", "goolf/1.4.10", 'x'),
("gzip-1.5-goolf-1.4.10.eb", "MPI/GCC/4.7.2/OpenMPI/1.6.4/base", "gzip/1.5", ' '), # listed but not there: ' '
]
for ec, mod_subdir, mod_name, mark in ecs_mods:
regex = re.compile("^ \* \[%s\] \S+%s \(module: %s \| %s\)$" % (mark, ec, mod_subdir, mod_name), re.M)
self.assertTrue(regex.search(outtxt), "Found match for pattern %s in '%s'" % (regex.pattern, outtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_from_pr(self):
"""Test fetching easyconfigs from a PR."""
if self.github_token is None:
print "Skipping test_from_pr, no GitHub token available?"
return
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
tmpdir = tempfile.mkdtemp()
args = [
# PR for foss/2015a, see https://github.com/hpcugent/easybuild-easyconfigs/pull/1239/files
'--from-pr=1239',
'--dry-run',
# an argument must be specified to --robot, since easybuild-easyconfigs may not be installed
'--robot=%s' % os.path.join(os.path.dirname(__file__), 'easyconfigs'),
'--unittest-file=%s' % self.logfile,
'--github-user=%s' % GITHUB_TEST_ACCOUNT, # a GitHub token should be available for this user
'--tmpdir=%s' % tmpdir,
]
try:
outtxt = self.eb_main(args, logfile=dummylogfn, raise_error=True)
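            # easyconfigs touched by the PR should be fetched into (a subdirectory of) the specified tmpdir,
            # while the others are resolved via the robot search path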
modules = [
(tmpdir, 'FFTW/3.3.4-gompi-2015a'),
(tmpdir, 'foss/2015a'),
('.*', 'GCC/4.9.2'), # not included in PR
(tmpdir, 'gompi/2015a'),
(tmpdir, 'HPL/2.1-foss-2015a'),
(tmpdir, 'hwloc/1.10.0-GCC-4.9.2'),
(tmpdir, 'numactl/2.0.10-GCC-4.9.2'),
(tmpdir, 'OpenBLAS/0.2.13-GCC-4.9.2-LAPACK-3.5.0'),
(tmpdir, 'OpenMPI/1.8.3-GCC-4.9.2'),
(tmpdir, 'OpenMPI/1.8.4-GCC-4.9.2'),
(tmpdir, 'ScaLAPACK/2.0.2-gompi-2015a-OpenBLAS-0.2.13-LAPACK-3.5.0'),
]
for path_prefix, module in modules:
ec_fn = "%s.eb" % '-'.join(module.split('/'))
regex = re.compile(r"^ \* \[.\] %s.*%s \(module: %s\)$" % (path_prefix, ec_fn, module), re.M)
self.assertTrue(regex.search(outtxt), "Found pattern %s in %s" % (regex.pattern, outtxt))
# make sure that *only* these modules are listed, no others
regex = re.compile(r"^ \* \[.\] .*/(?P<filepath>.*) \(module: (?P<module>.*)\)$", re.M)
self.assertTrue(sorted(regex.findall(outtxt)), sorted(modules))
pr_tmpdir = os.path.join(tmpdir, 'eb-\S{6}', 'files_pr1239')
regex = re.compile("Prepended list of robot search paths with %s:" % pr_tmpdir, re.M)
self.assertTrue(regex.search(outtxt), "Found pattern %s in %s" % (regex.pattern, outtxt))
except URLError, err:
print "Ignoring URLError '%s' in test_from_pr" % err
shutil.rmtree(tmpdir)
def test_from_pr_listed_ecs(self):
"""Test --from-pr in combination with specifying easyconfigs on the command line."""
if self.github_token is None:
print "Skipping test_from_pr, no GitHub token available?"
return
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
test_ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
ecstmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(ecstmpdir, 'easybuild'), parents=True)
shutil.copytree(test_ecs_path, os.path.join(ecstmpdir, 'easybuild', 'easyconfigs'))
# inject path to test easyconfigs into head of Python search path
sys.path.insert(0, ecstmpdir)
tmpdir = tempfile.mkdtemp()
args = [
'toy-0.0.eb',
'gompi-2015a.eb', # also pulls in GCC, OpenMPI (which pulls in hwloc and numactl)
'GCC-4.6.3.eb',
# PR for foss/2015a, see https://github.com/hpcugent/easybuild-easyconfigs/pull/1239/files
'--from-pr=1239',
'--dry-run',
# an argument must be specified to --robot, since easybuild-easyconfigs may not be installed
'--robot=%s' % test_ecs_path,
'--unittest-file=%s' % self.logfile,
'--github-user=%s' % GITHUB_TEST_ACCOUNT, # a GitHub token should be available for this user
'--tmpdir=%s' % tmpdir,
]
try:
outtxt = self.eb_main(args, logfile=dummylogfn, raise_error=True)
modules = [
(test_ecs_path, 'toy/0.0'), # not included in PR
(test_ecs_path, 'GCC/4.9.2'), # not included in PR
(tmpdir, 'hwloc/1.10.0-GCC-4.9.2'),
(tmpdir, 'numactl/2.0.10-GCC-4.9.2'),
(tmpdir, 'OpenMPI/1.8.4-GCC-4.9.2'),
(tmpdir, 'gompi/2015a'),
(test_ecs_path, 'GCC/4.6.3'), # not included in PR
]
for path_prefix, module in modules:
ec_fn = "%s.eb" % '-'.join(module.split('/'))
regex = re.compile(r"^ \* \[.\] %s.*%s \(module: %s\)$" % (path_prefix, ec_fn, module), re.M)
self.assertTrue(regex.search(outtxt), "Found pattern %s in %s" % (regex.pattern, outtxt))
# make sure that *only* these modules are listed, no others
regex = re.compile(r"^ \* \[.\] .*/(?P<filepath>.*) \(module: (?P<module>.*)\)$", re.M)
self.assertTrue(sorted(regex.findall(outtxt)), sorted(modules))
except URLError, err:
print "Ignoring URLError '%s' in test_from_pr" % err
shutil.rmtree(tmpdir)
def test_no_such_software(self):
"""Test using no arguments."""
args = [
'--software-name=nosuchsoftware',
'--robot=.',
'--debug',
]
outtxt = self.eb_main(args)
# error message when template is not found
error_msg1 = "ERROR No easyconfig files found for software nosuchsoftware, and no templates available. "
error_msg1 += "I'm all out of ideas."
# error message when template is found
error_msg2 = "ERROR Unable to find an easyconfig for the given specifications"
msg = "Error message when eb can't find software with specified name (outtxt: %s)" % outtxt
self.assertTrue(re.search(error_msg1, outtxt) or re.search(error_msg2, outtxt), msg)
def test_footer(self):
"""Test specifying a module footer."""
# create file containing modules footer
if get_module_syntax() == 'Tcl':
module_footer_txt = '\n'.join([
"# test footer",
"setenv SITE_SPECIFIC_ENV_VAR foobar",
])
elif get_module_syntax() == 'Lua':
module_footer_txt = '\n'.join([
"-- test footer",
'setenv("SITE_SPECIFIC_ENV_VAR", "foobar")',
])
else:
self.assertTrue(False, "Unknown module syntax: %s" % get_module_syntax())
fd, modules_footer = tempfile.mkstemp(prefix='modules-footer-')
os.close(fd)
write_file(modules_footer, module_footer_txt)
# use toy-0.0.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
        # build toy, with the modules footer passed via --modules-footer
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--force',
'--modules-footer=%s' % modules_footer,
]
self.eb_main(args, do_build=True)
toy_module = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '0.0')
if get_module_syntax() == 'Lua':
toy_module += '.lua'
toy_module_txt = read_file(toy_module)
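        # parentheses in the footer text must be escaped, since the footer is used as a regular expression pattern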
footer_regex = re.compile(r'%s$' % module_footer_txt.replace('(', '\\(').replace(')', '\\)'), re.M)
msg = "modules footer '%s' is present in '%s'" % (module_footer_txt, toy_module_txt)
self.assertTrue(footer_regex.search(toy_module_txt), msg)
# cleanup
os.remove(modules_footer)
def test_recursive_module_unload(self):
"""Test generating recursively unloading modules."""
# use toy-0.0.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0-deps.eb')
        # build toy with recursive module unloading enabled
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--force',
'--recursive-module-unload',
]
self.eb_main(args, do_build=True, verbose=True)
toy_module = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '0.0-deps')
if get_module_syntax() == 'Lua':
toy_module += '.lua'
toy_module_txt = read_file(toy_module)
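        # with recursive module unloading enabled, dependencies are (un)loaded unconditionally,
        # so no 'is-loaded' guard should be included in the generated module file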
is_loaded_regex = re.compile(r"if { !\[is-loaded gompi/1.3.12\] }", re.M)
self.assertFalse(is_loaded_regex.search(toy_module_txt), "Recursive unloading is used: %s" % toy_module_txt)
def test_tmpdir(self):
"""Test setting temporary directory to use by EasyBuild."""
# use temporary paths for build/install paths, make sure sources can be found
tmpdir = tempfile.mkdtemp()
# use toy-0.0.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
        # build toy, using the specified temporary directory
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--tmpdir=%s' % tmpdir,
]
outtxt = self.eb_main(args, do_build=True, reset_env=False)
tmpdir_msg = r"Using %s\S+ as temporary directory" % os.path.join(tmpdir, 'eb-')
found = re.search(tmpdir_msg, outtxt, re.M)
self.assertTrue(found, "Log message for tmpdir found in outtxt: %s" % outtxt)
for var in ['TMPDIR', 'TEMP', 'TMP']:
self.assertTrue(os.environ[var].startswith(os.path.join(tmpdir, 'eb-')))
self.assertTrue(tempfile.gettempdir().startswith(os.path.join(tmpdir, 'eb-')))
tempfile_tmpdir = tempfile.mkdtemp()
self.assertTrue(tempfile_tmpdir.startswith(os.path.join(tmpdir, 'eb-')))
fd, tempfile_tmpfile = tempfile.mkstemp()
self.assertTrue(tempfile_tmpfile.startswith(os.path.join(tmpdir, 'eb-')))
# cleanup
os.close(fd)
shutil.rmtree(tmpdir)
def test_ignore_osdeps(self):
"""Test ignoring of listed OS dependencies."""
txt = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "pi"',
'version = "3.14"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = {"name":"dummy", "version": "dummy"}',
'osdependencies = ["nosuchosdependency", ("nosuchdep_option1", "nosuchdep_option2")]',
])
fd, eb_file = tempfile.mkstemp(prefix='easyconfig_test_file_', suffix='.eb')
os.close(fd)
write_file(eb_file, txt)
# check whether non-existing OS dependencies result in failure, by default
args = [
eb_file,
]
outtxt = self.eb_main(args, do_build=True)
regex = re.compile("Checking OS dependencies")
self.assertTrue(regex.search(outtxt), "OS dependencies are checked, outtxt: %s" % outtxt)
msg = "One or more OS dependencies were not found: "
msg += "\[\('nosuchosdependency',\), \('nosuchdep_option1', 'nosuchdep_option2'\)\]"
regex = re.compile(r'%s' % msg, re.M)
self.assertTrue(regex.search(outtxt), "OS dependencies are honored, outtxt: %s" % outtxt)
# check whether OS dependencies are effectively ignored
args = [
eb_file,
'--ignore-osdeps',
'--dry-run',
]
outtxt = self.eb_main(args, do_build=True)
regex = re.compile("Not checking OS dependencies", re.M)
self.assertTrue(regex.search(outtxt), "OS dependencies are ignored with --ignore-osdeps, outtxt: %s" % outtxt)
txt += "\nstop = 'notavalidstop'"
write_file(eb_file, txt)
args = [
eb_file,
'--dry-run', # no explicit --ignore-osdeps, but implied by --dry-run
]
outtxt = self.eb_main(args, do_build=True)
regex = re.compile("stop provided 'notavalidstop' is not valid", re.M)
self.assertTrue(regex.search(outtxt), "Validations are performed with --ignore-osdeps, outtxt: %s" % outtxt)
def test_experimental(self):
"""Test the experimental option"""
orig_value = easybuild.tools.build_log.EXPERIMENTAL
# make sure it's off by default
self.assertFalse(orig_value)
log = fancylogger.getLogger()
# force it to False
topt = EasyBuildOptions(
go_args=['--disable-experimental'],
)
try:
log.experimental('x')
# sanity check, should never be reached if it works.
self.assertTrue(False, "Experimental logging should be disabled by setting the --disable-experimental option")
except easybuild.tools.build_log.EasyBuildError, err:
# check error message
self.assertTrue('Experimental functionality.' in str(err))
# toggle experimental
topt = EasyBuildOptions(
go_args=['--experimental'],
)
try:
log.experimental('x')
except easybuild.tools.build_log.EasyBuildError, err:
self.assertTrue(False, 'Experimental logging should be allowed by the --experimental option.')
# set it back
easybuild.tools.build_log.EXPERIMENTAL = orig_value
def test_deprecated(self):
"""Test the deprecated option"""
if 'EASYBUILD_DEPRECATED' in os.environ:
os.environ['EASYBUILD_DEPRECATED'] = str(VERSION)
init_config()
orig_value = easybuild.tools.build_log.CURRENT_VERSION
# make sure it's off by default
self.assertEqual(orig_value, VERSION)
log = fancylogger.getLogger()
        # force it to a lower version using 0.x, which should not result in any raised error (only deprecation logging)
topt = EasyBuildOptions(
go_args=['--deprecated=0.%s' % orig_value],
)
try:
log.deprecated('x', str(orig_value))
except easybuild.tools.build_log.EasyBuildError, err:
self.assertTrue(False, 'Deprecated logging should work')
# force it to current version, which should result in deprecation
topt = EasyBuildOptions(
go_args=['--deprecated=%s' % orig_value],
)
try:
log.deprecated('x', str(orig_value))
# not supposed to get here
self.assertTrue(False, 'Deprecated logging should throw EasyBuildError')
except easybuild.tools.build_log.EasyBuildError, err2:
self.assertTrue('DEPRECATED' in str(err2))
# force higher version by prefixing it with 1, which should result in deprecation errors
topt = EasyBuildOptions(
go_args=['--deprecated=1%s' % orig_value],
)
try:
log.deprecated('x', str(orig_value))
# not supposed to get here
self.assertTrue(False, 'Deprecated logging should throw EasyBuildError')
except easybuild.tools.build_log.EasyBuildError, err3:
self.assertTrue('DEPRECATED' in str(err3))
# set it back
easybuild.tools.build_log.CURRENT_VERSION = orig_value
def test_allow_modules_tool_mismatch(self):
"""Test allowing mismatch of modules tool with 'module' function."""
# make sure MockModulesTool is available
from test.framework.modulestool import MockModulesTool
ec_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'easyconfigs', 'toy-0.0.eb')
# keep track of original module definition so we can restore it
orig_module = os.environ.get('module', None)
# check whether mismatch between 'module' function and selected modules tool is detected
os.environ['module'] = "() { eval `/Users/kehoste/Modules/$MODULE_VERSION/bin/modulecmd bash $*`\n}"
args = [
ec_file,
'--modules-tool=MockModulesTool',
'--module-syntax=Tcl', # Lua would require Lmod
]
self.eb_main(args, do_build=True)
outtxt = read_file(self.logfile)
error_regex = re.compile("ERROR .*pattern .* not found in defined 'module' function")
self.assertTrue(error_regex.search(outtxt), "Found error w.r.t. module function mismatch: %s" % outtxt[-600:])
        # check that --allow-modules-tool-mismatch transforms this error into a warning
os.environ['module'] = "() { eval `/Users/kehoste/Modules/$MODULE_VERSION/bin/modulecmd bash $*`\n}"
args = [
ec_file,
'--modules-tool=MockModulesTool',
'--module-syntax=Tcl', # Lua would require Lmod
'--allow-modules-tool-mismatch',
]
self.eb_main(args, do_build=True)
outtxt = read_file(self.logfile)
warn_regex = re.compile("WARNING .*pattern .* not found in defined 'module' function")
self.assertTrue(warn_regex.search(outtxt), "Found warning w.r.t. module function mismatch: %s" % outtxt[-600:])
# check whether match between 'module' function and selected modules tool is detected
os.environ['module'] = "() { eval ` /bin/echo $*`\n}"
args = [
ec_file,
'--modules-tool=MockModulesTool',
'--module-syntax=Tcl', # Lua would require Lmod
'--debug',
]
self.eb_main(args, do_build=True)
outtxt = read_file(self.logfile)
found_regex = re.compile("DEBUG Found pattern .* in defined 'module' function")
self.assertTrue(found_regex.search(outtxt), "Found debug message w.r.t. module function: %s" % outtxt[-600:])
# restore 'module' function
if orig_module is not None:
os.environ['module'] = orig_module
else:
del os.environ['module']
def test_try(self):
"""Test whether --try options are taken into account."""
ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
tweaked_toy_ec = os.path.join(self.test_buildpath, 'toy-0.0-tweaked.eb')
shutil.copy2(os.path.join(ecs_path, 'toy-0.0.eb'), tweaked_toy_ec)
f = open(tweaked_toy_ec, 'a')
f.write("easyblock = 'ConfigureMake'")
f.close()
args = [
tweaked_toy_ec,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--dry-run',
'--robot=%s' % ecs_path,
]
test_cases = [
([], 'toy/0.0'),
(['--try-software=foo,1.2.3', '--try-toolchain=gompi,1.4.10'], 'foo/1.2.3-gompi-1.4.10'),
(['--try-toolchain-name=gompi', '--try-toolchain-version=1.4.10'], 'toy/0.0-gompi-1.4.10'),
# --try-toolchain is overridden by --toolchain
(['--try-toolchain=gompi,1.3.12', '--toolchain=dummy,dummy'], 'toy/0.0'),
(['--try-software-name=foo', '--try-software-version=1.2.3'], 'foo/1.2.3'),
(['--try-toolchain-name=gompi', '--try-toolchain-version=1.4.10'], 'toy/0.0-gompi-1.4.10'),
(['--try-software-version=1.2.3', '--try-toolchain=gompi,1.4.10'], 'toy/1.2.3-gompi-1.4.10'),
(['--try-amend=versionsuffix=-test'], 'toy/0.0-test'),
# --try-amend is overridden by --amend
(['--amend=versionsuffix=', '--try-amend=versionsuffix=-test'], 'toy/0.0'),
(['--try-toolchain=gompi,1.3.12', '--toolchain=dummy,dummy'], 'toy/0.0'),
# tweak existing list-typed value (patches)
(['--try-amend=versionsuffix=-test2', '--try-amend=patches=1.patch,2.patch'], 'toy/0.0-test2'),
# append to existing list-typed value (patches)
(['--try-amend=versionsuffix=-test3', '--try-amend=patches=,extra.patch'], 'toy/0.0-test3'),
# prepend to existing list-typed value (patches)
(['--try-amend=versionsuffix=-test4', '--try-amend=patches=extra.patch,'], 'toy/0.0-test4'),
# define extra list-typed parameter
(['--try-amend=versionsuffix=-test5', '--try-amend=exts_list=1,2,3'], 'toy/0.0-test5'),
# only --try causes other build specs to be included too
(['--try-software=foo,1.2.3', '--toolchain=gompi,1.4.10'], 'foo/1.2.3-gompi-1.4.10'),
(['--software=foo,1.2.3', '--try-toolchain=gompi,1.4.10'], 'foo/1.2.3-gompi-1.4.10'),
(['--software=foo,1.2.3', '--try-amend=versionsuffix=-test'], 'foo/1.2.3-test'),
]
for extra_args, mod in test_cases:
outtxt = self.eb_main(args + extra_args, verbose=True, raise_error=True)
mod_regex = re.compile("\(module: %s\)$" % mod, re.M)
self.assertTrue(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
for extra_arg in ['--try-software=foo', '--try-toolchain=gompi', '--try-toolchain=gomp,1.4.10,-no-OFED']:
allargs = args + [extra_arg]
self.assertErrorRegex(EasyBuildError, "problems validating the options", self.eb_main, allargs, raise_error=True)
# no --try used, so no tweaked easyconfig files are generated
allargs = args + ['--software-version=1.2.3', '--toolchain=gompi,1.4.10']
self.assertErrorRegex(EasyBuildError, "version .* not available", self.eb_main, allargs, raise_error=True)
def test_recursive_try(self):
"""Test whether recursive --try-X works."""
ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
tweaked_toy_ec = os.path.join(self.test_buildpath, 'toy-0.0-tweaked.eb')
shutil.copy2(os.path.join(ecs_path, 'toy-0.0.eb'), tweaked_toy_ec)
f = open(tweaked_toy_ec, 'a')
f.write("dependencies = [('gzip', '1.4')]\n") # add fictious dependency
f.close()
sourcepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sandbox', 'sources')
args = [
tweaked_toy_ec,
'--sourcepath=%s' % sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--try-toolchain=gompi,1.4.10',
'--robot=%s' % ecs_path,
'--ignore-osdeps',
'--dry-run',
]
for extra_args in [[], ['--module-naming-scheme=HierarchicalMNS']]:
outtxt = self.eb_main(args + extra_args, verbose=True, raise_error=True)
# toolchain gompi/1.4.10 should be listed (but not present yet)
if extra_args:
mark = 'x'
else:
mark = ' '
tc_regex = re.compile("^ \* \[%s\] %s/gompi-1.4.10.eb \(module: .*gompi/1.4.10\)$" % (mark, ecs_path), re.M)
self.assertTrue(tc_regex.search(outtxt), "Pattern %s found in %s" % (tc_regex.pattern, outtxt))
# both toy and gzip dependency should be listed with gompi/1.4.10 toolchain
for ec_name in ['gzip-1.4', 'toy-0.0']:
ec = '%s-gompi-1.4.10.eb' % ec_name
if extra_args:
mod = ec_name.replace('-', '/')
else:
mod = '%s-gompi-1.4.10' % ec_name.replace('-', '/')
mod_regex = re.compile("^ \* \[ \] \S+/eb-\S+/%s \(module: .*%s\)$" % (ec, mod), re.M)
#mod_regex = re.compile("%s \(module: .*%s\)$" % (ec, mod), re.M)
self.assertTrue(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
        # clear fictitious dependency
f = open(tweaked_toy_ec, 'a')
f.write("dependencies = []\n")
f.close()
# no recursive try if --(try-)software(-X) is involved
for extra_args in [['--try-software-version=1.2.3'], ['--software-version=1.2.3']]:
outtxt = self.eb_main(args + extra_args, raise_error=True)
for mod in ['toy/1.2.3-gompi-1.4.10', 'gompi/1.4.10', 'GCC/4.7.2']:
mod_regex = re.compile("\(module: %s\)$" % mod, re.M)
self.assertTrue(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
for mod in ['gompi/1.2.3', 'GCC/1.2.3']:
mod_regex = re.compile("\(module: %s\)$" % mod, re.M)
self.assertFalse(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
def test_cleanup_builddir(self):
"""Test cleaning up of build dir and --disable-cleanup-builddir."""
toy_ec = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
toy_buildpath = os.path.join(self.test_buildpath, 'toy', '0.0', 'dummy-dummy')
args = [
toy_ec,
'--force',
]
self.eb_main(args, do_build=True, verbose=True)
# make sure build directory is properly cleaned up after a successful build (default behavior)
        self.assertFalse(os.path.exists(toy_buildpath), "Build dir %s removed after successful build" % toy_buildpath)
# make sure --disable-cleanup-builddir works
args.append('--disable-cleanup-builddir')
self.eb_main(args, do_build=True, verbose=True)
self.assertTrue(os.path.exists(toy_buildpath), "Build dir %s is retained when requested" % toy_buildpath)
shutil.rmtree(toy_buildpath)
# make sure build dir stays in case of failed build
args = [
toy_ec,
'--force',
'--try-amend=prebuildopts=nosuchcommand &&',
]
self.eb_main(args, do_build=True)
self.assertTrue(os.path.exists(toy_buildpath), "Build dir %s is retained after failed build" % toy_buildpath)
def test_filter_deps(self):
"""Test use of --filter-deps."""
test_dir = os.path.dirname(os.path.abspath(__file__))
ec_file = os.path.join(test_dir, 'easyconfigs', 'goolf-1.4.10.eb')
os.environ['MODULEPATH'] = os.path.join(test_dir, 'modules')
args = [
ec_file,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--robot=%s' % os.path.join(test_dir, 'easyconfigs'),
'--dry-run',
]
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: FFTW/3.3.3-gompi', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
# clear log file
open(self.logfile, 'w').write('')
# filter deps (including a non-existing dep, i.e. zlib)
args.append('--filter-deps=FFTW,ScaLAPACK,zlib')
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertFalse(re.search('module: FFTW/3.3.3-gompi', outtxt))
self.assertFalse(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
def test_hide_deps(self):
"""Test use of --hide-deps."""
test_dir = os.path.dirname(os.path.abspath(__file__))
ec_file = os.path.join(test_dir, 'easyconfigs', 'goolf-1.4.10.eb')
os.environ['MODULEPATH'] = os.path.join(test_dir, 'modules')
args = [
ec_file,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--robot=%s' % os.path.join(test_dir, 'easyconfigs'),
'--dry-run',
]
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: GCC/4.7.2', outtxt))
self.assertTrue(re.search('module: OpenMPI/1.6.4-GCC-4.7.2', outtxt))
self.assertTrue(re.search('module: OpenBLAS/0.2.6-gompi-1.4.10-LAPACK-3.4.2', outtxt))
self.assertTrue(re.search('module: FFTW/3.3.3-gompi', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
# zlib is not a dep at all
self.assertFalse(re.search('module: zlib', outtxt))
# clear log file
open(self.logfile, 'w').write('')
        # hide deps (including a non-existing dep, i.e. zlib)
args.append('--hide-deps=FFTW,ScaLAPACK,zlib')
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: GCC/4.7.2', outtxt))
self.assertTrue(re.search('module: OpenMPI/1.6.4-GCC-4.7.2', outtxt))
self.assertTrue(re.search('module: OpenBLAS/0.2.6-gompi-1.4.10-LAPACK-3.4.2', outtxt))
self.assertFalse(re.search(r'module: FFTW/3\.3\.3-gompi', outtxt))
self.assertTrue(re.search(r'module: FFTW/\.3\.3\.3-gompi', outtxt))
self.assertFalse(re.search(r'module: ScaLAPACK/2\.0\.2-gompi', outtxt))
self.assertTrue(re.search(r'module: ScaLAPACK/\.2\.0\.2-gompi', outtxt))
# zlib is not a dep at all
self.assertFalse(re.search(r'module: zlib', outtxt))
def test_test_report_env_filter(self):
"""Test use of --test-report-env-filter."""
def toy(extra_args=None):
"""Build & install toy, return contents of test report."""
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--force',
'--debug',
]
if extra_args is not None:
args.extend(extra_args)
self.eb_main(args, do_build=True, raise_error=True, verbose=True)
software_path = os.path.join(self.test_installpath, 'software', 'toy', '0.0')
test_report_path_pattern = os.path.join(software_path, 'easybuild', 'easybuild-toy-0.0*test_report.md')
f = open(glob.glob(test_report_path_pattern)[0], 'r')
test_report_txt = f.read()
f.close()
return test_report_txt
# define environment variables that should (not) show up in the test report
test_var_secret = 'THIS_IS_JUST_A_SECRET_ENV_VAR_FOR_EASYBUILD'
os.environ[test_var_secret] = 'thisshouldremainsecretonrequest'
test_var_secret_regex = re.compile(test_var_secret)
test_var_public = 'THIS_IS_JUST_A_PUBLIC_ENV_VAR_FOR_EASYBUILD'
os.environ[test_var_public] = 'thisshouldalwaysbeincluded'
test_var_public_regex = re.compile(test_var_public)
# default: no filtering
test_report_txt = toy()
self.assertTrue(test_var_secret_regex.search(test_report_txt))
self.assertTrue(test_var_public_regex.search(test_report_txt))
# filter out env vars that match specified regex pattern
filter_arg = "--test-report-env-filter=.*_SECRET_ENV_VAR_FOR_EASYBUILD"
test_report_txt = toy(extra_args=[filter_arg])
res = test_var_secret_regex.search(test_report_txt)
self.assertFalse(res, "No match for %s in %s" % (test_var_secret_regex.pattern, test_report_txt))
self.assertTrue(test_var_public_regex.search(test_report_txt))
# make sure that used filter is reported correctly in test report
filter_arg_regex = re.compile(filter_arg.replace('*', '\*'))
tup = (filter_arg_regex.pattern, test_report_txt)
self.assertTrue(filter_arg_regex.search(test_report_txt), "%s in %s" % tup)
def test_robot(self):
"""Test --robot and --robot-paths command line options."""
# unset $EASYBUILD_ROBOT_PATHS that was defined in setUp
os.environ['EASYBUILD_ROBOT_PATHS'] = self.test_prefix
test_ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
eb_file = os.path.join(test_ecs_path, 'gzip-1.4-GCC-4.6.3.eb') # includes 'toy/.0.0-deps' as a dependency
# hide test modules
self.reset_modulepath([])
# dependency resolution is disabled by default, even if required paths are available
args = [
eb_file,
'--robot-paths=%s' % test_ecs_path,
]
error_regex = 'no module .* found for dependency'
self.assertErrorRegex(EasyBuildError, error_regex, self.eb_main, args, raise_error=True, do_build=True)
# enable robot, but without passing path required to resolve toy dependency => FAIL
args = [
eb_file,
'--robot',
'--dry-run',
]
self.assertErrorRegex(EasyBuildError, 'Irresolvable dependencies', self.eb_main, args, raise_error=True)
# add path to test easyconfigs to robot paths, so dependencies can be resolved
self.eb_main(args + ['--robot-paths=%s' % test_ecs_path], raise_error=True)
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
shutil.copytree(test_ecs_path, os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
# prepend path to test easyconfigs into Python search path, so it gets picked up as --robot-paths default
del os.environ['EASYBUILD_ROBOT_PATHS']
orig_sys_path = sys.path[:]
sys.path.insert(0, tmpdir)
self.eb_main(args, raise_error=True)
shutil.rmtree(tmpdir)
sys.path[:] = orig_sys_path
# make sure that paths specified to --robot get preference over --robot-paths
args = [
eb_file,
'--robot=%s' % test_ecs_path,
'--robot-paths=%s' % os.path.join(tmpdir, 'easybuild', 'easyconfigs'),
'--dry-run',
]
outtxt = self.eb_main(args, raise_error=True)
for ecfile in ['GCC-4.6.3.eb', 'ictce-4.1.13.eb', 'toy-0.0-deps.eb', 'gzip-1.4-GCC-4.6.3.eb']:
ec_regex = re.compile(r'^\s\*\s\[[xF ]\]\s%s' % os.path.join(test_ecs_path, ecfile), re.M)
self.assertTrue(ec_regex.search(outtxt), "Pattern %s found in %s" % (ec_regex.pattern, outtxt))
def test_missing_cfgfile(self):
"""Test behaviour when non-existing config file is specified."""
args = ['--configfiles=/no/such/cfgfile.foo']
error_regex = "parseconfigfiles: configfile .* not found"
self.assertErrorRegex(EasyBuildError, error_regex, self.eb_main, args, raise_error=True)
def test_show_default_moduleclasses(self):
"""Test --show-default-moduleclasses."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
'--unittest-file=%s' % self.logfile,
'--show-default-moduleclasses',
]
write_file(self.logfile, '')
self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
lst = ["\t%s:[ ]*%s" % (c, d.replace('(', '\\(').replace(')', '\\)')) for (c, d) in DEFAULT_MODULECLASSES]
regex = re.compile("Default available module classes:\n\n" + '\n'.join(lst), re.M)
self.assertTrue(regex.search(logtxt), "Pattern '%s' found in %s" % (regex.pattern, logtxt))
def test_show_default_configfiles(self):
"""Test --show-default-configfiles."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
home = os.environ['HOME']
for envvar in ['XDG_CONFIG_DIRS', 'XDG_CONFIG_HOME']:
if envvar in os.environ:
del os.environ[envvar]
reload(easybuild.tools.options)
args = [
'--unittest-file=%s' % self.logfile,
'--show-default-configfiles',
]
cfgtxt = '\n'.join([
'[config]',
'prefix = %s' % self.test_prefix,
])
expected_tmpl = '\n'.join([
"Default list of configuration files:",
'',
"[with $XDG_CONFIG_HOME: %s, $XDG_CONFIG_DIRS: %s]",
'',
"* user-level: ${XDG_CONFIG_HOME:-$HOME/.config}/easybuild/config.cfg",
" -> %s",
"* system-level: ${XDG_CONFIG_DIRS:-/etc}/easybuild.d/*.cfg",
" -> %s/easybuild.d/*.cfg => ",
])
write_file(self.logfile, '')
self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
homecfgfile = os.path.join(os.environ['HOME'], '.config', 'easybuild', 'config.cfg')
homecfgfile_str = homecfgfile
if os.path.exists(homecfgfile):
homecfgfile_str += " => found"
else:
homecfgfile_str += " => not found"
expected = expected_tmpl % ('(not set)', '(not set)', homecfgfile_str, '{/etc}')
self.assertTrue(expected in logtxt)
# to predict the full output, we need to take control over $HOME and $XDG_CONFIG_DIRS
os.environ['HOME'] = self.test_prefix
xdg_config_dirs = os.path.join(self.test_prefix, 'etc')
os.environ['XDG_CONFIG_DIRS'] = xdg_config_dirs
expected_tmpl += '\n'.join([
"%s",
'',
"Default list of existing configuration files (%d): %s",
])
# put dummy cfgfile in place in $HOME (to predict last line of output which only lists *existing* files)
mkdir(os.path.join(self.test_prefix, '.config', 'easybuild'), parents=True)
homecfgfile = os.path.join(self.test_prefix, '.config', 'easybuild', 'config.cfg')
write_file(homecfgfile, cfgtxt)
reload(easybuild.tools.options)
write_file(self.logfile, '')
self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
expected = expected_tmpl % ('(not set)', xdg_config_dirs, "%s => found" % homecfgfile, '{%s}' % xdg_config_dirs,
'(no matches)', 1, homecfgfile)
self.assertTrue(expected in logtxt)
xdg_config_home = os.path.join(self.test_prefix, 'home')
os.environ['XDG_CONFIG_HOME'] = xdg_config_home
xdg_config_dirs = [os.path.join(self.test_prefix, 'etc'), os.path.join(self.test_prefix, 'moaretc')]
os.environ['XDG_CONFIG_DIRS'] = os.pathsep.join(xdg_config_dirs)
# put various dummy cfgfiles in place
cfgfiles = [
os.path.join(self.test_prefix, 'etc', 'easybuild.d', 'config.cfg'),
os.path.join(self.test_prefix, 'moaretc', 'easybuild.d', 'bar.cfg'),
os.path.join(self.test_prefix, 'moaretc', 'easybuild.d', 'foo.cfg'),
os.path.join(xdg_config_home, 'easybuild', 'config.cfg'),
]
for cfgfile in cfgfiles:
mkdir(os.path.dirname(cfgfile), parents=True)
write_file(cfgfile, cfgtxt)
reload(easybuild.tools.options)
write_file(self.logfile, '')
self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
expected = expected_tmpl % (xdg_config_home, os.pathsep.join(xdg_config_dirs),
"%s => found" % os.path.join(xdg_config_home, 'easybuild', 'config.cfg'),
'{' + ', '.join(xdg_config_dirs) + '}',
', '.join(cfgfiles[:-1]), 4, ', '.join(cfgfiles))
self.assertTrue(expected in logtxt)
del os.environ['XDG_CONFIG_DIRS']
del os.environ['XDG_CONFIG_HOME']
os.environ['HOME'] = home
reload(easybuild.tools.options)
def test_generate_cmd_line(self):
"""Test for generate_cmd_line."""
ebopts = EasyBuildOptions()
self.assertEqual(ebopts.generate_cmd_line(), [])
ebopts = EasyBuildOptions(go_args=['--force'])
self.assertEqual(ebopts.generate_cmd_line(), ['--force'])
ebopts = EasyBuildOptions(go_args=['--search=bar', '--search', 'foobar'])
self.assertEqual(ebopts.generate_cmd_line(), ['--search=foobar'])
def suite():
""" returns all the testcases in this module """
return TestLoader().loadTestsFromTestCase(CommandLineOptionsTest)
if __name__ == '__main__':
unittestmain()
| mit | 8,760,911,598,607,643,000 | 45.095463 | 138 | 0.569224 | false |
ChameleonCloud/blazar | blazar/tests/utils/test_trusts.py | 1 | 3871 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from oslo_config import fixture as conf_fixture
from blazar import context
from blazar import tests
from blazar.utils.openstack import base
from blazar.utils.openstack import keystone
from blazar.utils import trusts
CONF = cfg.CONF
class TestTrusts(tests.TestCase):
def setUp(self):
super(TestTrusts, self).setUp()
self.base = base
self.trusts = trusts
self.context = context
self.keystone = keystone
self.client = self.patch(self.keystone, 'BlazarKeystoneClient')
self.patch(self.context, 'current')
self.patch(self.base, 'url_for').return_value = 'http://www.foo.fake'
self.cfg = self.useFixture(conf_fixture.Config(CONF))
def test_create_trust(self):
correct_trust = self.client().trusts.create()
trust = self.trusts.create_trust()
self.assertEqual(trust, correct_trust)
def test_delete_trust(self):
lease = mock.MagicMock(trust_id='1')
self.trusts.delete_trust(lease)
self.client.assert_called_once_with(trust_id='1')
def test_create_ctx_from_trust(self):
self.cfg.config(os_admin_project_name='admin')
self.cfg.config(os_admin_username='admin')
ctx = self.trusts.create_ctx_from_trust('1')
fake_ctx_dict = {
'auth_token': self.client().session.get_token(),
'domain': None,
'global_request_id': self.context.current().global_request_id,
'is_admin': False,
'is_admin_project': True,
'project': self.client().session.get_project_id(),
'project_domain': None,
'read_only': False,
'request_id': ctx.request_id,
'resource_uuid': None,
'roles': [],
'service_catalog': ctx.service_catalog,
'show_deleted': False,
'system_scope': None,
'user': None,
'user_domain': None}
self.assertDictContainsSubset(fake_ctx_dict, ctx.to_dict())
def test_use_trust_auth_dict(self):
def to_wrap(self, arg_to_update):
return arg_to_update
correct_trust = self.client().trusts.create()
fill_with_trust_id = {}
updated_arg = self.trusts.use_trust_auth()(to_wrap)(self,
fill_with_trust_id)
self.assertIn('trust_id', updated_arg)
self.assertEqual(correct_trust.id, updated_arg['trust_id'])
def test_use_trust_auth_object(self):
class AsDict(object):
def __init__(self, value):
self.value = value
def as_dict(self):
to_return = {}
for key in dir(self):
to_return[key] = getattr(self, key)
return to_return
def to_wrap(self, arg_to_update):
return arg_to_update
correct_trust = self.client().trusts.create()
fill_with_trust_id = AsDict(1)
updated_arg = self.trusts.use_trust_auth()(to_wrap)(self,
fill_with_trust_id)
self.assertIn('trust_id', updated_arg.as_dict())
self.assertEqual(correct_trust.id, updated_arg.trust_id)
| apache-2.0 | -6,670,288,697,171,886,000 | 33.873874 | 79 | 0.601912 | false |
northDacoder/angular_ionic_project | ionicons-1.4.1/builder/generate.py | 1 | 7596 | from subprocess import call
import os
import json
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(BUILDER_PATH, '..')
FONTS_FOLDER_PATH = os.path.join(ROOT_PATH, 'fonts')
CSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'css')
SCSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'scss')
def main():
generate_font_files()
data = get_build_data()
rename_svg_glyph_names(data)
generate_scss(data)
generate_cheatsheet(data)
generate_component_json(data)
generate_composer_json(data)
generate_bower_json(data)
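# Note: the steps above shell out to external tools -- generate_font_files() needs
# fontforge on the PATH and generate_css_from_scss() needs the sass CLI; both are
# assumed to be installed separately, they are not bundled with this builder script.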
def generate_font_files():
print "Generate Fonts"
cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
call(cmd, shell=True)
def rename_svg_glyph_names(data):
# hacky and slow (but safe) way to rename glyph-name attributes
svg_path = os.path.join(FONTS_FOLDER_PATH, 'ionicons.svg')
svg_file = open(svg_path, 'r+')
svg_text = svg_file.read()
svg_file.seek(0)
for ionicon in data['icons']:
# uniF2CA
org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
ion_name = 'ion-%s' % (ionicon['name'])
svg_text = svg_text.replace(org_name, ion_name)
svg_file.write(svg_text)
svg_file.close()
def generate_scss(data):
print "Generate SCSS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-variables.scss')
icons_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-icons.scss')
d = []
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('$ionicons-font-path: "../fonts" !default;')
d.append('$ionicons-font-family: "%s" !default;' % (font_name) )
d.append('$ionicons-version: "%s" !default;' % (font_version) )
d.append('$ionicons-prefix: %s !default;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('$ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.#{$ionicons-prefix}%s' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' @extend .ion;')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.#{$ionicons-prefix}%s:before { content: $ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
generate_css_from_scss(data)
def generate_css_from_scss(data):
print "Generate CSS From SCSS"
scss_file_path = os.path.join(SCSS_FOLDER_PATH, 'ionicons.scss')
css_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.css')
css_min_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.min.css')
cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
call(cmd, shell=True)
print "Generate Minified CSS From SCSS"
cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
call(cmd, shell=True)
def generate_cheatsheet(data):
print "Generate Cheatsheet"
cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')
f = open(template_path, 'r')
template_html = f.read()
f.close()
f = open(icon_row_path, 'r')
icon_row_template = f.read()
f.close()
content = []
for ionicon in data['icons']:
css_code = ionicon['code'].replace('0x', '\\')
        escaped_html_code = ionicon['code'].replace('0x', '&amp;#x') + ';'
html_code = ionicon['code'].replace('0x', '&#x') + ';'
item_row = icon_row_template
item_row = item_row.replace('{{name}}', ionicon['name'])
item_row = item_row.replace('{{prefix}}', data['prefix'])
item_row = item_row.replace('{{css_code}}', css_code)
item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
item_row = item_row.replace('{{html_code}}', html_code)
content.append(item_row)
template_html = template_html.replace("{{font_name}}", data["name"])
template_html = template_html.replace("{{font_version}}", data["version"])
template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])) )
template_html = template_html.replace("{{content}}", '\n'.join(content) )
f = open(cheatsheet_file_path, 'w')
f.write(template_html)
f.close()
def generate_component_json(data):
print "Generate component.json"
d = {
"name": data['name'],
"repo": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"version": data['version'],
"keywords": [],
"dependencies": {},
"development": {},
"license": "MIT",
"styles": [
"css/%s.css" % (data['name'].lower())
],
"fonts": [
"fonts/%s.eot" % (data['name'].lower()),
"fonts/%s.svg" % (data['name'].lower()),
"fonts/%s.ttf" % (data['name'].lower()),
"fonts/%s.woff" % (data['name'].lower())
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
component_file_path = os.path.join(ROOT_PATH, 'component.json')
f = open(component_file_path, 'w')
f.write(txt)
f.close()
def generate_composer_json(data):
print "Generate composer.json"
d = {
"name": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"homepage": "http://ionicons.com/",
"authors": [
{
"name": "Ben Sperry",
"email": "[email protected]",
"role": "Designer",
"homepage": "https://twitter.com/helloimben"
},
{
"name": "Adam Bradley",
"email": "[email protected]",
"role": "Developer",
"homepage": "https://twitter.com/adamdbradley"
},
{
"name": "Max Lynch",
"email": "[email protected]",
"role": "Developer",
"homepage": "https://twitter.com/maxlynch"
}
],
"extra": {},
"license": [ "MIT" ]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
composer_file_path = os.path.join(ROOT_PATH, 'composer.json')
f = open(composer_file_path, 'w')
f.write(txt)
f.close()
def generate_bower_json(data):
print "Generate bower.json"
d = {
"name": data['name'],
"version": data['version'],
"homepage": "https://github.com/driftyco/ionicons",
"authors": [
"Ben Sperry <[email protected]>",
"Adam Bradley <[email protected]>",
"Max Lynch <[email protected]>"
],
"description": "Ionicons - free and beautiful icons from the creators of Ionic Framework",
"main": [
"css/%s.css" % (data['name'].lower()),
"fonts/*"
],
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"license": "MIT",
"ignore": [
"**/.*",
"builder",
"node_modules",
"bower_components",
"test",
"tests"
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
bower_file_path = os.path.join(ROOT_PATH, 'bower.json')
f = open(bower_file_path, 'w')
f.write(txt)
f.close()
def get_build_data():
build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
f = open(build_data_path, 'r')
data = json.loads(f.read())
f.close()
return data
if __name__ == "__main__":
main()
| mit | 3,167,289,036,028,404,000 | 27.772727 | 113 | 0.594787 | false |
linuxscout/arramooz | scripts/nouns/xmldict.py | 1 | 1996 | #!/usr/bin/python2
# -*- coding=utf-8 -*-
#************************************************************************
# $Id: generatenoundict.py,v 0.7 2011/03/26 01:10:00 Taha Zerrouki $
#
# ------------
# Description:
# ------------
# Copyright (c) 2011, Arabtechies, Arabeyes Taha Zerrouki
#
# This file is the main file to execute the application in the command line
#
# -----------------
# Revision Details: (Updated by Revision Control System)
# -----------------
# $Date: 2009/06/02 01:10:00 $
# $Author: Taha Zerrouki $
# $Revision: 0.7 $
# $Source: arabtechies.sourceforge.net
#
#***********************************************************************/
import csvdict
import noundict_functions as ndf
class XmlDict(csvdict.CsvDict):
""" a virtual converter of data from table to specific format
the data is big, then every function print string """
def __init__(self, wordtype, version="N/A"):
"""
initiate the dict
"""
csvdict.CsvDict.__init__(self, wordtype, version)
def add_header(self,):
"""
add the header for new dict
"""
line ="""<?xml version='1.0' encoding='utf8'?>\n"""
line += "<!--" + "-->\n<!--".join(self.headerlines) + "-->\n"
line += "<dictionary>"
return line
def add_record(self, noun_row):
"""
Add a new to the dict
"""
fields = self.treat_tuple(noun_row)
line="<noun id='%d'>\n"%self.id;
for k in range(len(self.display_order)):
key = self.display_order[k];
if self.display_order[k] != "id":
if fields[key]:
line+=u" <%s>%s</%s>\n"%(key,fields[key],key);
else:
line+=u" <%s/>\n"%(key);
line+=u"</noun>\n";
return line
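    # Illustrative output of add_record() -- the tag names come from display_order,
    # so the exact fields depend on the CSV header being converted:
    #   <noun id='1'>
    #     <vocalized>...</vocalized>
    #     <wordtype>...</wordtype>
    #   </noun>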
def add_footer(self):
"""close the data set, used for ending xml, or sql"""
return "</dictionary>"
| gpl-2.0 | 393,406,699,130,314,700 | 30.68254 | 76 | 0.477455 | false |
usc-isi-i2/WEDC | wedc/domain/core/ml/classifier/label_propagation/lp.py | 1 | 12248 | import numpy as np
from sklearn import datasets
from sklearn.semi_supervised import LabelPropagation
from wedc.domain.core.ml.helper import label
from wedc.domain.core.ml.graph import knn
from wedc.domain.core.data.seed import seed_vector
from wedc.infrastructure import database
from wedc.infrastructure.model.labelled_data import LabelledData
from wedc.infrastructure.model.seed_dict import SeedDict
def do_label_propagation(input_data,
input_label,
output=None,
kernel='knn',
gamma=None,
n_neighbors=10,
alpha=1,
max_iter=30,
tol=0.001):
n_neighbors += 1
# input label
input_label_fh = open(input_label, 'rb')
label_lines = input_label_fh.readlines()
label_lines = [int(_.strip()) for _ in label_lines]
y = np.array(label_lines)
input_label_fh.close()
size = len(y)
# input data
input_data_fh = open(input_data, 'rb')
data_lines = input_data_fh.readlines()[:size]
data_lines = [_.strip() for _ in data_lines]
X = np.array(np.mat(';'.join(data_lines)))
input_data_fh.close()
label_prop_model = LabelPropagation(kernel=kernel,
gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha,
max_iter=max_iter,
tol=tol)
label_prop_model.fit(X, y)
prediction = label_prop_model.predict(X)
if output:
output_fh = open(output, 'wb')
for p in prediction:
output_fh.write(str(p)+'\n')
output_fh.close()
return label_prop_model
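# Example call (illustrative -- these file names are placeholders, not files shipped
# with this project):
#   model = do_label_propagation('post_vectors.txt', 'post_labels.txt',
#                                output='predictions.txt', n_neighbors=10)
# where the data file holds one space-separated feature vector per line and the label
# file one integer label per line; unlabelled points are conventionally marked -1 for
# sklearn's LabelPropagation.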
def run_lp(input, output, lp_jar):
import subprocess
import os
if os.path.isfile(output):
os.remove(output)
output_file = open(output, 'a')
working_dir = os.sep.join(lp_jar.split(os.sep)[:-1])
jar_file = lp_jar.split(os.sep)[-1]
argsArray = ['java', '-classpath', jar_file, 'org.ooxo.LProp', '-a', 'GFHF', '-m', '100', '-e', '10e-6', input]
subprocess.call(argsArray, cwd=working_dir, stdout=output_file)
output_file.close()
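# The subprocess call above is roughly equivalent to running, from the directory that
# contains the jar (paths are illustrative):
#   java -classpath labelprop.jar org.ooxo.LProp -a GFHF -m 100 -e 10e-6 graph_knn.txt
# with stdout redirected into the `output` file.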
def evaluate_from_file(input_data,
output=None,
kernel='knn',
gamma=None,
n_neighbors=10,
alpha=1,
max_iter=100,
tol=0.00001):
label_dict = label.load_label_dict()
label_dict = sorted(label_dict.iteritems(), key=lambda x:x[0])
post_id_list = []
y = []
for (k, v) in label_dict:
post_id_list.append(k)
y.append(v)
input_data_fh = open(input_data, 'rb')
data_lines = input_data_fh.readlines()
data_lines = [data_lines[i] for i in post_id_list]
data_lines = [_.strip() for _ in data_lines]
X = np.array(np.mat(';'.join(data_lines)))
input_data_fh.close()
# sklearn_lp(X, y, output=output, kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol)
    java_lp(X, y, output=output, kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol)
def sklearn_lp(X, y,
output=None,
kernel='knn',
gamma=None,
n_neighbors=10,
alpha=1,
max_iter=1000,
tol=0.00001):
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=3)
label_prop_model = LabelPropagation(kernel=kernel,
gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha,
max_iter=max_iter,
tol=tol)
label_prop_model.fit(X_train, y_train)
y_predict = label_prop_model.predict(X_test)
print 'y_train: ', y_train
print 'y_predict: ', y_predict
print '+--------------------------------------------------------+'
print '| Report +'
print '+--------------------------------------------------------+'
print classification_report(y_test, y_predict)
print 'accuracy: ' + str(accuracy_score(y_test, y_predict))
print '\n\n'
def evaluate_from_database(output=None,
kernel='knn',
gamma=None,
n_neighbors=10,
alpha=1,
max_iter=100,
tol=0.00001):
labelled_dataset = LabelledData.load_data()
size = len(labelled_dataset)
ld_data = []
ld_label = []
for labelled_data in labelled_dataset:
ld_data.append(labelled_data.extraction)
ld_label.append(labelled_data.label)
seeds = SeedDict.load_data()
post_vectors = seed_vector.generate_post_vector(ld_data, seeds)
# post_vector_seeds = seed_vector.generate_post_vector_seed(ld_data, seeds=seeds)
# for i, vector in enumerate(post_vector_seeds):
# if ld_label[i] == 4:
# print vector.strip()
# remove short post
short_post_indexes = []
short_ext_word_edge = 8
for i, vec in enumerate(post_vectors):
post_id = i + 1
if len(ld_data[i].split(' ')) < short_ext_word_edge or max([float(_) for _ in vec.strip().split(' ')]) == 0:
short_post_indexes.append(post_id)
# print short_post_indexes
X = np.array(np.mat(';'.join(post_vectors)))
y = ld_label
mapping = {}
new_X = []
new_y = []
new_post_id = 1
for i in range(size):
post_id = i + 1
if post_id not in short_post_indexes:
new_X.append(X[i])
new_y.append(y[i])
mapping[new_post_id] = post_id
new_post_id += 1
# do_evaluation(new_X, new_y, output=output, kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol)
# sklearn_lp(new_X, new_y, output=output, kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol)
# print len(new_X), len(new_y)
# print len(X), len(y)
print mapping
return java_lp(new_X, new_y, mapping=mapping, output=output, kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol)
def java_lp(X, y,
mapping=None,
output=None,
kernel='knn',
gamma=None,
n_neighbors=10,
alpha=1,
max_iter=1000,
tol=0.00001):
gk_path = '/Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/tests/data/graph_knn.txt'
gl_path = '/Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/tests/data/graph_lp.txt'
lp_path = '/Users/ZwEin/job_works/StudentWork_USC-ISI/projects/WEDC/tests/data/labelprop.jar'
post_dict, top_k, training_index, training_labels, testing_index, testing_labels = knn.do_knn(X, output=gk_path, post_labels=y, n_neighbors=n_neighbors)
# print top_k, post_dict
run_lp(gk_path, gl_path, lp_path)
# y_test = testing_labels
y_test = []
y_predict = []
valid_predict_indexes = []
with open(gl_path, 'rb') as gl_file:
lines = gl_file.readlines()
for line in lines:
line = line.strip()
if not line:
continue
line = line[1:-1]
line = line.split(',')
post_id = int(line[0])
check_point = float(line[3][:-1]) + float(line[5][:-1]) + float(line[7][:-1])
# if check_point > 0:
# valid_predict_indexes.append(post_id-1)
if post_id not in training_index and check_point > 0:
valid_predict_indexes.append(post_id)
y_predict.append(int(line[1]))
if post_id in testing_index:
tmp = testing_index.index(post_id)
y_test.append(testing_labels[tmp])
# valid_predict_indexes = [mapping[_] for _ in valid_predict_indexes]
# print len(valid_predict_indexes), valid_predict_indexes
# print len(y_predict), y_predict
# print len(y_test), y_test
# """
# print 'y_predict', len(y_predict), y_predict
# print 'y_test', len(y_test), y_test
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_test, y_predict)
if accuracy > 0.5:
print accuracy, '\n'
return accuracy
print '+--------------------------------------------------------+'
print '| Report |'
print '+--------------------------------------------------------+'
print 'training size:', len(training_index)
print 'training_labels:', training_labels #, len(training_labels)
print 'training_index:', training_index #, len(training_index)
# print len(training_index), ' + ', len(testing_index)
# print testing_index
# print 'test round:', (i+1), ' with random seed: ', random_seeds[i]
# print 'training label: ', training_labels
print 'predict label: ', y_predict
print 'y_test: ', y_test
print 'graph post_id:', valid_predict_indexes
print 'original post_id:', [mapping[_] for _ in valid_predict_indexes]
print classification_report(y_test, y_predict)
print 'accuracy: ' + str(accuracy_score(y_test, y_predict))
print '\n\n'
# """
return accuracy
# do_evaluation(X, y, kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol)
def do_evaluation(X, y,
kernel='knn',
output=None,
gamma=None,
n_neighbors=10,
alpha=1,
max_iter=1000,
tol=0.00001):
# from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import random
size = len(X)
random_seeds = np.random.randint(1, 1000, size=10)
for i in range(len(random_seeds)):
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.6, random_state=random_seeds[i])
labels = np.copy(y)
tmp = np.arange(size)
np.random.shuffle(tmp)
train_test_split_rate = int(size*.9)
random_unlabeled_points = tmp[:train_test_split_rate]
labeled_points = tmp[train_test_split_rate:]
random_unlabeled_points.sort()
X_test = [X[_] for _ in range(size) if _ in random_unlabeled_points]
y_test = [y[_] for _ in range(size) if _ in random_unlabeled_points]
y_train = [y[_] for _ in range(size) if _ in labeled_points]
labels[random_unlabeled_points] = -1
label_prop_model = LabelPropagation(kernel=kernel,
gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha,
max_iter=max_iter,
tol=tol)
label_prop_model.fit(X, labels)
y_predict = label_prop_model.predict(X_test)
print '+--------------------------------------------------------+'
print '| Report |'
print '+--------------------------------------------------------+'
print 'test round:', (i+1), ' with random seed: ', random_seeds[i]
print 'training label: ', y_train
print 'training post id: ', [_+1 for _ in labeled_points]
print 'predict label: ', y_predict
print classification_report(y_test, y_predict)
print 'accuracy: ' + str(accuracy_score(y_test, y_predict))
print '\n\n'
| apache-2.0 | 1,027,601,411,627,719,000 | 34.29683 | 158 | 0.532822 | false |
mattasmith/Non-contiguous-recombination | ncr.py | 1 | 5343 | #! /usr/local/bin/python
"""Script for designing a set of non-contiguous recombination libraries for site-directed, structure-guided homologous recombination.
******************************************************************
Copyright (C) 2011 Matt Smith, California Institute of Technology
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*******************************************************************
SCHEMA and non-contiguous recombination were developed in the laboratory of Frances H. Arnold at the California Institute of Technology.
References:
Smith, M.A. et al., Chimeragenesis of distantly-related proteins by non-contiguous recombination, Protein Science 22(2):231-238 (2013).
Voigt, C.A. et al., Protein building blocks preserved by recombination, Nature Structural Biology 9(7):553-558 (2002).
Karypis, G. et al., Multilevel Hypergraph Partitioning: Applications in VLSI Domain, 34th Design and Automation Conference, 526-529, (1997).
Karypis, G. et al., Multilevel k-way Hypergraph Partitioning, 36th Design Automation Conference, 343-348, 1999.
Edgar, R.C., MUSCLE: multiple sequence alignment with high accuracy and high throughput, Nucleic Acids Research 32(5), 1792-97.
"""
from subprocess import Popen
from sys import exit
from sys import path
path.append('./tools')
import os
from make_alignment_and_contacts2 import make_alignment_and_contacts2
from run_shmetis import run_shmetis
from PDB_tools import split_into_chains
## information about non-contiguous recombination
print('\n********************* Non-contiguous recombination *********************\n')
print('Written by Matt Smith, 2011.\n')
print('SCHEMA and non-contiguous recombination were developed in the \nlaboratory of Frances H. Arnold at the California Institute of Technology.\n')
## check the files are there
if os.path.isfile('alignment.fasta') == False:
exit('Error: cannot find \'alignment.fasta\' alignment file')
if os.path.isfile('init.txt') == False:
exit('Error: cannot find \'init.txt\' setup file.')
if os.path.isdir('./tools') == False:
exit('Error: the non-contiguous recombination tools are missing')
if len([f for f in os.listdir('./tools/muscle') if 'muscle' in f])==0:
exit('Error: cannot find MUSCLE in \'tools/muscle\'')
if len([f for f in os.listdir('./tools/muscle') if 'muscle' in f])>1:
exit('Error: please provide just one MUSCLE executable in \'tools/muscle\'')
if len([f for f in os.listdir('./tools') if 'hmetis' in f])==0:
exit('Error: cannot find hmetis package in \'tools\'')
if len([f for f in os.listdir('./tools') if 'hmetis-1.5' in f])>1:
exit('Error: please provide just one hmetis package in \'tools\'')
## load in the initial file
data = [s for s in open('init.txt').read().split('\n') if (len(s)>0 and s[0]!='#')]
for i,datum in enumerate(data):
if 'Number of blocks' in datum.split(' = ')[0]:
numberofblocks_str = datum.split(' = ')[1]
if '-' in numberofblocks_str:
numberofblocks_min = int(numberofblocks_str.split('-')[0])
numberofblocks_max = int(numberofblocks_str.split('-')[1])
else:
numberofblocks_min = int(numberofblocks_str)
numberofblocks_max = int(numberofblocks_str)
if 'Find all PDB structures' in datum.split(' = ')[0]:
searchPDB = int(datum.split(' = ')[1])
# end for i, datum
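## example init.txt entries that this loop understands (values are illustrative):
##   Number of blocks = 4-8
##   Find all PDB structures = 1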
## find the muscle version
muscle_file = [f for f in os.listdir('./tools/muscle') if 'muscle' in f]
muscle_version = muscle_file[0]
## find the hmetis version
hmetis_file = [f for f in os.listdir('./tools') if 'hmetis-1.5' in f]
hmetis_version = hmetis_file[0]
## download all available structures or check user pdb files
if searchPDB == 1:
Popen('python ./tools/search_download_save.py',shell=True).wait()
else:
if os.path.isdir('./structures') == False:
exit('Error: you need to provide at least one pdb in a folder called \'structures\'')
elif len([f for f in os.listdir('./structures') if os.path.splitext(f)[-1].lower() == '.pdb'])==0:
exit('Error: there are no pdbs in \'structures\'')
else:
print('Structures provided by user:')
structurefilelist = os.listdir('./structures')
for filename in structurefilelist:
if os.path.splitext(filename)[-1].lower() == '.pdb':
print filename
split_into_chains(filename,'./structures/')
## create the contact maps - one for each parent (if the parent has a structure)
num_contact_maps = make_alignment_and_contacts2(muscle_version)
## formulate and solve with graph partitioning
print ('\nDesigning libraries...')
run_success = run_shmetis(num_contact_maps, numberofblocks_min, numberofblocks_max, hmetis_version)
## done!
| gpl-3.0 | -7,719,600,997,329,283,000 | 46.283186 | 149 | 0.678271 | false |
spectresearch/detectem | detectem/response.py | 1 | 6146 | import base64
import json
import logging
import re
import urllib.parse
from string import Template
import pkg_resources
import requests
from detectem.exceptions import SplashError
from detectem.settings import SPLASH_TIMEOUT, SPLASH_URL
from detectem.utils import docker_container
DEFAULT_CHARSET = "iso-8859-1"
ERROR_STATUS_CODES = [400, 504]
logger = logging.getLogger("detectem")
def is_url_allowed(url):
""" Return ``True`` if ``url`` is not in ``blacklist``.
:rtype: bool
"""
blacklist = [
r"\.ttf",
r"\.woff",
r"fonts\.googleapis\.com",
r"\.png",
r"\.jpe?g",
r"\.gif",
r"\.svg",
]
for ft in blacklist:
if re.search(ft, url):
return False
return True
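# Illustrative behaviour (URLs are made up): a script such as
# "http://example.com/static/app.js" is allowed, while "http://example.com/logo.png"
# or anything served from fonts.googleapis.com is rejected by the blacklist above.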
def is_valid_mimetype(response):
""" Return ``True`` if the mimetype is not blacklisted.
:rtype: bool
"""
blacklist = ["image/"]
mimetype = response.get("mimeType")
if not mimetype:
return True
for bw in blacklist:
if bw in mimetype:
return False
return True
def get_charset(response):
""" Return charset from ``response`` or default charset.
:rtype: str
"""
# Set default charset
charset = DEFAULT_CHARSET
m = re.findall(r";charset=(.*)", response.get("mimeType", ""))
if m:
charset = m[0]
return charset
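# For example, a mimeType of "text/html;charset=utf-8" yields "utf-8", while an entry
# without a charset parameter falls back to DEFAULT_CHARSET ("iso-8859-1").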
def create_lua_script(plugins):
""" Return script template filled up with plugin javascript data.
:rtype: str
"""
lua_template = pkg_resources.resource_string("detectem", "script.lua")
template = Template(lua_template.decode("utf-8"))
javascript_data = to_javascript_data(plugins)
return template.substitute(js_data=json.dumps(javascript_data))
def to_javascript_data(plugins):
"""
Return a dictionary with all JavaScript matchers. Quotes are escaped.
:rtype: dict
"""
def escape(v):
return re.sub(r'"', r'\\"', v)
def dom_matchers(p):
dom_matchers = p.get_matchers("dom")
escaped_dom_matchers = []
for dm in dom_matchers:
check_statement, version_statement = dm
escaped_dom_matchers.append(
{
"check_statement": escape(check_statement),
# Escape '' and not None
"version_statement": escape(version_statement or ""),
}
)
return escaped_dom_matchers
return [
{"name": p.name, "matchers": dom_matchers(p)}
for p in plugins.with_dom_matchers()
]
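# The returned structure looks roughly like this (plugin name and statements are
# illustrative -- they come from whatever DOM matchers the loaded plugins define):
#   [{"name": "some-plugin",
#     "matchers": [{"check_statement": "...", "version_statement": "..."}]}]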
def get_response(url, plugins, timeout=SPLASH_TIMEOUT):
"""
Return response with HAR, inline scritps and software detected by JS matchers.
:rtype: dict
"""
lua_script = create_lua_script(plugins)
lua = urllib.parse.quote_plus(lua_script)
page_url = f"{SPLASH_URL}/execute?url={url}&timeout={timeout}&lua_source={lua}"
try:
with docker_container():
logger.debug("[+] Sending request to Splash instance")
res = requests.get(page_url)
except requests.exceptions.ConnectionError:
raise SplashError("Could not connect to Splash server {}".format(SPLASH_URL))
logger.debug("[+] Response received")
json_data = res.json()
if res.status_code in ERROR_STATUS_CODES:
raise SplashError(get_splash_error(json_data))
softwares = json_data["softwares"]
scripts = json_data["scripts"].values()
har = get_valid_har(json_data["har"])
js_error = get_evaljs_error(json_data)
if js_error:
logger.debug("[+] WARNING: failed to eval JS matchers: %(n)s", {"n": js_error})
else:
logger.debug("[+] Detected %(n)d softwares from the DOM", {"n": len(softwares)})
logger.debug("[+] Detected %(n)d scripts from the DOM", {"n": len(scripts)})
logger.debug("[+] Final HAR has %(n)d valid entries", {"n": len(har)})
return {"har": har, "scripts": scripts, "softwares": softwares}
def get_splash_error(json_data):
msg = json_data["description"]
if "info" in json_data and "error" in json_data["info"]:
error = json_data["info"]["error"]
if error.startswith("http"):
msg = "Request to site failed with error code {0}".format(error)
elif error.startswith("network"):
# see http://doc.qt.io/qt-5/qnetworkreply.html
qt_errors = {
"network1": "ConnectionRefusedError",
"network2": "RemoteHostClosedError",
"network3": "HostNotFoundError",
"network4": "TimeoutError",
"network5": "OperationCanceledError",
"network6": "SslHandshakeFailedError",
}
error = qt_errors.get(error, "error code {0}".format(error))
msg = "Request to site failed with {0}".format(error)
else:
msg = "{0}: {1}".format(msg, error)
return msg
def get_evaljs_error(json_data):
error = None
if "errors" in json_data and "evaljs" in json_data["errors"]:
res = json_data["errors"]["evaljs"]
if isinstance(res, str):
m = re.search(r"'message': '(.*?)'[,}]", res)
if m:
error = bytes(m.group(1), "utf-8").decode("unicode_escape")
return error
def get_valid_har(har_data):
""" Return list of valid HAR entries.
:rtype: list
"""
new_entries = []
entries = har_data.get("log", {}).get("entries", [])
logger.debug("[+] Detected %(n)d entries in HAR", {"n": len(entries)})
for entry in entries:
url = entry["request"]["url"]
if not is_url_allowed(url):
continue
response = entry["response"]["content"]
if not is_valid_mimetype(response):
continue
if response.get("text"):
charset = get_charset(response)
response["text"] = base64.b64decode(response["text"]).decode(charset)
else:
response["text"] = ""
new_entries.append(entry)
logger.debug("[+] Added URL: %(url)s ...", {"url": url[:100]})
return new_entries
| mit | 6,745,631,188,370,809,000 | 25.606061 | 88 | 0.582981 | false |
ihoru/play_with_python | tasks/test_django/test_django/settings.py | 1 | 3502 | """
Django settings for test_django project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=0033co1(k8giglasb-&0-_5d%fbp*pfsa-u0173w4eb60clo3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quiz',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'test_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test_django',
'USER': 'ihoru',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '3306',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| mit | 7,435,446,665,208,901,000 | 26.793651 | 91 | 0.669903 | false |
SuLab/scheduled-bots | scheduled_bots/drugs/pubchem.py | 1 | 3162 | """
Note: this is a really abbreviated version of sebastian's full pubchem bot
that simply gets the pubchem ID from an inchikey
Adapted from: https://github.com/sebotic/cdk_pywrapper/blob/master/cdk_pywrapper/chemlib.py
"""
import json
import time
import requests
import wikidataintegrator.wdi_core as wdi_core
class PubChemMolecule(object):
headers = {
'accept': 'application/json',
'content-type': 'application/json',
'charset': 'utf-8'
}
base_url = 'http://pubchem.ncbi.nlm.nih.gov/rest/rdf/{}'
def __init__(self, cid=None, inchi_key=None):
if cid:
self.cid = cid
if inchi_key:
self.stdinchikey = inchi_key
if cid:
pass
elif inchi_key:
cids = self._retrieve_pubchem_cids(self.stdinchikey)
if len(cids) == 0:
raise ValueError('InChI key not found in PubChem!')
if len(cids) == 1:
self.cid = cids[0]
else:
raise ValueError('More than one result: {}'.format(cids))
@staticmethod
def _retrieve_basic_compound_info(cid):
cmpnd_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/rdf/compound/{}.json'.format(cid)
print(cmpnd_url)
# r = PubChemMolecule.s.get(cmpnd_url, headers=PubChemMolecule.headers).json()
r = requests.get(cmpnd_url, headers=PubChemMolecule.headers).json()
return r
@staticmethod
def _retrieve_pubchem_cids(ikey):
url = 'http://pubchem.ncbi.nlm.nih.gov/rest/rdf/inchikey/{}.json'.format(ikey)
try:
# r = PubChemMolecule.s.get(url, headers=PubChemMolecule.headers).json()
r = requests.get(url, headers=PubChemMolecule.headers).json()
except json.JSONDecodeError as e:
# print(e.__str__())
print('PubChem does not have this InChI key', ikey)
return []
cids = list()
if 'http://semanticscience.org/resource/is-attribute-of' in r['inchikey/{}'.format(ikey)]:
for x in r['inchikey/{}'.format(ikey)]['http://semanticscience.org/resource/is-attribute-of']:
cids.append(x['value'].split('/')[-1])
return cids
@property
def label(self):
return None
def to_wikidata(self):
refs = [[
wdi_core.WDItemID(value='Q278487', prop_nr='P248', is_reference=True), # stated in
wdi_core.WDExternalID(value=self.cid, prop_nr='P662', is_reference=True), # source element
wdi_core.WDTime(time=time.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True) # retrieved
]]
elements = {
'P662': self.cid[3:]
}
data = []
for k, v in elements.items():
if not v:
continue
print('{}:'.format(k), v)
if isinstance(v, list) or isinstance(v, set):
for x in v:
data.append(wdi_core.WDString(prop_nr=k, value=x, references=refs))
else:
data.append(wdi_core.WDString(prop_nr=k, value=v, references=refs))
return data
| mit | 5,386,451,797,409,121,000 | 30.939394 | 118 | 0.57432 | false |
protwis/protwis | similaritysearch/views.py | 1 | 5740 | from django.shortcuts import render, redirect
from django.conf import settings
from common.views import AbsReferenceSelection
from common.views import AbsSegmentSelection
#from common.views import AbsTargetSelection
from common.views import AbsTargetSelectionTable
# from common.alignment_SITE_NAME import Alignment
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
from collections import OrderedDict
class ReferenceSelection(AbsReferenceSelection):
step = 1
number_of_steps = 3
target_input = False
docs = 'sequences.html#similarity-search-gpcrdb'
buttons = {
'continue': {
'label': 'Continue to next step',
'url': '/similaritysearch/segmentselection',
'color': 'success',
},
}
class SegmentSelection(AbsSegmentSelection):
step = 2
number_of_steps = 3
docs = 'sequences.html#similarity-search-gpcrdb'
selection_boxes = OrderedDict([
('reference', True),
('segments', True),
('targets', False),
])
buttons = {
'continue': {
'label': 'Continue to next step',
'url': '/similaritysearch/targetselection',
'color': 'success',
},
}
class TargetSelection(AbsTargetSelectionTable):
step = 3
number_of_steps = 3
docs = "sequences.html#similarity-search-gpcrdb"
title = "SELECT RECEPTORS"
description = "Select receptors in the table (below) or browse the classification tree (right). You can select entire" \
+ " families or individual receptors.\n\nOnce you have selected all your receptors, click the green button."
selection_boxes = OrderedDict([
("reference", True),
("segments", True),
("targets", True),
])
buttons = {
"continue": {
"label": "Next",
"onclick": "submitSelection('/similaritysearch/render');",
"color": "success",
},
}
# class TargetSelection(AbsTargetSelection):
# step = 3
# number_of_steps = 3
# docs = 'sequences.html#similarity-search-gpcrdb'
# selection_boxes = OrderedDict([
# ('reference', True),
# ('segments', True),
# ('targets', True),
# ])
# buttons = {
# 'continue': {
# 'label': 'Show similarity',
# 'url': '/similaritysearch/render',
# 'color': 'success',
# },
# }
def render_alignment(request):
# get the user selection from session
simple_selection = request.session.get('selection', False)
if simple_selection == False or not simple_selection.targets or not simple_selection.reference:
return redirect("/similaritysearch/referenceselection")
# create an alignment object
a = Alignment()
# load data from selection into the alignment
a.load_reference_protein_from_selection(simple_selection)
a.load_proteins_from_selection(simple_selection)
a.load_segments_from_selection(simple_selection)
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
# calculate identity and similarity of each row compared to the reference
a.calculate_similarity()
num_of_sequences = len(a.proteins)
num_residue_columns = len(a.positions) + len(a.segments)
return render(request, 'similaritysearch/alignment.html', {'a': a, 'num_of_sequences': num_of_sequences,
'num_residue_columns': num_residue_columns})
def render_fasta_alignment(request):
# get the user selection from session
simple_selection = request.session.get('selection', False)
# create an alignment object
a = Alignment()
a.show_padding = False
# load data from selection into the alignment
a.load_reference_protein_from_selection(simple_selection)
a.load_proteins_from_selection(simple_selection)
a.load_segments_from_selection(simple_selection)
# build the alignment data matrix
a.build_alignment()
# calculate identity and similarity of each row compared to the reference
a.calculate_similarity()
num_of_sequences = len(a.proteins)
num_residue_columns = len(a.positions) + len(a.segments)
response = render(request, 'alignment/alignment_fasta.html', {'a': a, 'num_of_sequences': num_of_sequences,
'num_residue_columns': num_residue_columns}, content_type='text/fasta')
response['Content-Disposition'] = "attachment; filename=" + settings.SITE_TITLE + "_alignment.fasta"
return response
def render_csv_alignment(request):
# get the user selection from session
simple_selection = request.session.get('selection', False)
# create an alignment object
a = Alignment()
a.show_padding = False
# load data from selection into the alignment
a.load_reference_protein_from_selection(simple_selection)
a.load_proteins_from_selection(simple_selection)
a.load_segments_from_selection(simple_selection)
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
# calculate identity and similarity of each row compared to the reference
a.calculate_similarity()
num_of_sequences = len(a.proteins)
num_residue_columns = len(a.positions) + len(a.segments)
response = render(request, 'alignment/alignment_csv.html', {'a': a, 'num_of_sequences': num_of_sequences,
'num_residue_columns': num_residue_columns}, content_type='text/fasta')
response['Content-Disposition'] = "attachment; filename=" + settings.SITE_TITLE + "_alignment.csv"
return response
| apache-2.0 | -7,357,047,311,267,593,000 | 32.964497 | 124 | 0.669861 | false |
andreabrambilla/libres | python/tests/res/enkf/test_runpath_list_ert.py | 1 | 3354 | import unittest
import os
from res.test import ErtTestContext
from tests import ResTest
from res.enkf import RunpathList, RunpathNode, ErtRunContext
from res.enkf.enums import EnkfInitModeEnum,EnkfRunType
from ecl.util.util import BoolVector
from res.util.substitution_list import SubstitutionList
class RunpathListTestErt(ResTest):
def test_an_enkf_runpath(self):
# TODO this test is flaky and we need to figure out why. See #1370
# enkf_util_assert_buffer_type: wrong target type in file (expected:104 got:0)
test_path = self.createTestPath("local/snake_oil_field/snake_oil.ert")
with ErtTestContext("runpathlist_basic", test_path) as tc:
pass
def test_assert_export(self):
with ErtTestContext("create_runpath_export" , self.createTestPath("local/snake_oil_no_data/snake_oil.ert")) as tc:
ert = tc.getErt( )
runpath_list = ert.getRunpathList( )
self.assertFalse( os.path.isfile( runpath_list.getExportFile( ) ))
ens_size = ert.getEnsembleSize( )
runner = ert.getEnkfSimulationRunner( )
fs_manager = ert.getEnkfFsManager( )
init_fs = fs_manager.getFileSystem("init_fs")
mask = BoolVector( initial_size = 25 , default_value = True )
runpath_fmt = ert.getModelConfig().getRunpathFormat( )
subst_list = SubstitutionList( )
itr = 0
jobname_fmt = ert.getModelConfig().getJobnameFormat()
run_context1 = ErtRunContext( EnkfRunType.INIT_ONLY , init_fs, None , mask , runpath_fmt, jobname_fmt, subst_list , itr )
runner.createRunPath( run_context1 )
self.assertTrue( os.path.isfile( runpath_list.getExportFile( ) ))
self.assertEqual( "test_runpath_list.txt" , os.path.basename( runpath_list.getExportFile( ) ))
def test_assert_symlink_deleted(self):
with ErtTestContext("create_runpath_symlink_deleted" , self.createTestPath("local/snake_oil_field/snake_oil.ert")) as tc:
ert = tc.getErt( )
runpath_list = ert.getRunpathList( )
ens_size = ert.getEnsembleSize()
runner = ert.getEnkfSimulationRunner()
mask = BoolVector( initial_size = ens_size , default_value = True )
fs_manager = ert.getEnkfFsManager()
init_fs = fs_manager.getFileSystem("init_fs")
# create directory structure
runpath_fmt = ert.getModelConfig().getRunpathFormat( )
subst_list = SubstitutionList( )
itr = 0
jobname_fmt = ert.getModelConfig().getJobnameFormat()
run_context = ErtRunContext( EnkfRunType.INIT_ONLY , init_fs, None , mask , runpath_fmt, jobname_fmt, subst_list , itr )
runner.createRunPath( run_context )
# replace field file with symlink
linkpath = '%s/permx.grdcel' % str(runpath_list[0].runpath)
targetpath = '%s/permx.grdcel.target' % str(runpath_list[0].runpath)
open(targetpath, 'a').close()
os.remove(linkpath)
os.symlink(targetpath, linkpath)
# recreate directory structure
runner.createRunPath( run_context )
# ensure field symlink is replaced by file
self.assertFalse( os.path.islink(linkpath) )
| gpl-3.0 | -5,098,693,126,175,305,000 | 40.407407 | 133 | 0.637448 | false |
mattvonrocketstein/ymir | ymir/service/amazon.py | 1 | 9978 | # -*- coding: utf-8 -*-
""" ymir.service.amazon
"""
import os
import time
import boto
from fabric.colors import yellow
from ymir import util
from ymir.service.base import AbstractService
class AmazonService(AbstractService):
""" """
def __init__(self, conn=None, **kargs):
""""""
self.conn = conn or util.aws.get_conn()
super(AmazonService, self).__init__(**kargs)
def _get_instance(self, strict=False):
""" """
conn = self.conn
name = self.template_data()['name']
i = util.aws.get_instance_by_name(name, conn)
if strict and i is None:
err = "Could not acquire instance! Is the name '{0}' correct?"
err = err.format(name)
self.report(err)
raise SystemExit(1)
return i
def setup_ip(self):
""" """
self.sync_tags()
self.sync_buckets()
self.sync_eips()
super(AmazonService, self).setup_ip()
@util.declare_operation
def s3(self):
""" show summary of s3 information for this service """
buckets = self.sync_buckets(quiet=True).items()
if not buckets:
self.report("this service is not using S3 buckets")
for bname, bucket in buckets:
keys = [k for k in bucket]
self.report(" {0} ({1} items) [{2}]".format(
bname, len(keys), bucket.get_acl()))
for key in keys:
print (" {0} (size {1}) [{2}]".format(
key.name, key.size, key.get_acl()))
@property
def _s3_conn(self):
return boto.connect_s3()
@property
def _username(self):
""" username data is accessible only as a property because
it must overridden for i.e. vagrant-based services
"""
return self._service_json['username']
@property
def _pem(self):
""" pem-file is accessible only as a property because
it must overridden for i.e. vagrant-based services
"""
return util.unexpand(self._service_json['pem'])
@util.declare_operation
def sync_buckets(self, quiet=False):
report = self.report if not quiet else util.NOOP
buckets = self.template_data()['s3_buckets']
report("synchronizing s3 buckets")
if buckets:
report(' buckets to create: {0}'.format(buckets))
else:
self.report(" no s3 buckets mentioned in service-definition")
conn = self._s3_conn
tmp = {}
for name in buckets:
report(" setting up s3 bucket: {0}".format(name))
tmp[name] = conn.create_bucket(name, location=self.S3_LOCATION)
return tmp
@util.declare_operation
def sync_eips(self, quiet=False):
""" synchronizes elastic IPs with service.json data """
report = self.report if not quiet else lambda *args, **kargs: None
report("synchronizing elastic ip's")
service_instance_id = self._status()['instance'].id
eips = self.template_data()['elastic_ips']
if not eips:
report(' no elastic IPs mentioned in service-definition')
return
addresses = [x for x in self.conn.get_all_addresses()
if x.public_ip in eips]
for aws_address in addresses:
report(" Address: {0}".format(aws_address))
if aws_address.instance_id is None:
report(" -> currently unassigned. "
"associating with this instance")
aws_address.associate(instance_id=service_instance_id)
elif aws_address.instance_id == service_instance_id:
report(" -> already associated with this service")
else:
report(" -> assigned to another instance {0}! (that seems bad)".format(
aws_address.instance_id))
sync_elastic_ips = sync_eips
@util.declare_operation
@util.require_running_instance
def sync_tags(self):
""" update aws instance tags from service.json `tags` field """
self.report('updating instance tags: ')
json = self.template_data()
tags = dict(
description=json.get('service_description', ''),
org=json.get('org_name', ''),
app=json.get('app_name', ''),
env=json.get("env_name", ''),
)
for tag in json.get('tags', []):
tags[tag] = 'true'
for tag in tags:
if not tags[tag]:
tags.pop(tag)
self.report(' {0}'.format(tags.keys()))
self._instance.add_tags(tags)
@util.declare_operation
@util.require_running_instance
def terminate(self, force=False):
""" terminate this service (delete from ec2) """
instance = self._instance
self.report("{0} slated for termination.".format(instance))
if force:
return self.conn.terminate_instances(
instance_ids=[instance.id])
else:
msg = ("This will terminate the instance {0} ({1}) and can "
"involve data loss. Are you sure? [y/n] ")
answer = None
name = self.template_data()['name']
while answer not in ['y', 'n']:
answer = raw_input(msg.format(instance, name))
if answer == 'y':
self.terminate(force=True)
@util.declare_operation
@util.require_running_instance
def mosh(self):
""" connect to this service with mosh """
self.report('connecting with mosh')
service_data = self.template_data()
util.mosh(self.status()['ip'],
username=self._username,
pem=service_data['pem'])
ssh = util.require_running_instance(AbstractService.ssh)
def _status(self):
""" retrieves service status information.
use this instead of self.status() if you want to quietly
retrieve information for use without actually displaying it
"""
tdata = self._service_json # NOT template_data(), that's cyclic
if not self._status_computed and self._debug_mode:
self.report("AWS profile: {0}".format(yellow(
os.environ.get('AWS_PROFILE', 'default'))))
name = tdata['name']
# DON'T use self._get_instance(); recursion
instance = util.aws.get_instance_by_name(name, self.conn)
result = dict(
instance=None, ip=None,
private_ip=None, tags=[],
status='terminated?',)
if instance:
result.update(
dict(
instance=instance,
tags=instance.tags,
status=instance.update(),
ip=instance.ip_address,
private_ip=instance.private_ip_address,
))
self._status_computed = result
return result
@util.declare_operation
def create(self, force=False):
""" create new instance of this service ('force' defaults to False)"""
self.report('creating ec2 instance', section=True)
conn = self.conn
i = self._get_instance()
if i is not None:
msg = ' instance already exists: {0} ({1})'
msg = msg.format(i, i.update())
self.report(msg)
if force:
self.report(' force is True, terminating it & rebuilding')
util._block_while_terminating(i, conn)
# might need to block and wait here
return self.create(force=False)
self.report(' force is False, refusing to rebuild it')
return
service_data = self.template_data()
# HACK: deal with unfortunate vpc vs. ec2-classic differences
reservation_extras = service_data.get('reservation_extras', {}).copy()
# set security group stuff in reservation extras
sg_names = service_data['security_groups']
if not sg_names:
err = ('without `security_groups` in service.json, '
'cannot create instance reservation')
raise SystemExit(err)
self.report(
"service description uses {0} as a security groups".format(sg_names))
tmp = {}
sgs = dict([[sg.id, sg.name] for sg in conn.get_all_security_groups()])
for sg_name in sg_names:
if sg_name not in sgs.values():
err = "could not find {0} amongst security groups at {1}"
err = err.format(sg_names, sgs.values())
raise SystemExit(err)
else:
_id = [_id for _id in sgs if sgs[_id] == sg_name][0]
self.report(" sg '{0}' is id {1}".format(sgs[_id], _id))
tmp[_id] = sgs[_id]
reservation_extras['security_group_ids'] = tmp.keys()
reservation = conn.run_instances(
image_id=service_data['ami'],
key_name=service_data['key_name'],
instance_type=service_data['instance_type'],
**reservation_extras)
instance = reservation.instances[0]
self.report(' no instance found, creating it now.')
self.report(' reservation-id:', instance.id)
util._block_while_pending(instance)
status = instance.update()
name = self.template_data()['name']
if status == 'running':
self.report(' instance is running.')
self.report(' setting tag for "Name": {0}'.format(
name))
instance.add_tag("Name", name)
else:
self.report('Weird instance status: ', status)
return None
time.sleep(5)
self.report("Finished with creation. Now run `fab setup`")
@util.declare_operation
def shell(self):
""" """
return util.shell(
conn=self.conn,
Service=self, service=self)
| mit | 7,567,753,346,309,193,000 | 36.511278 | 89 | 0.552315 | false |
CSD-Public/stonix | src/stonix_resources/rules/BlockSystemAccounts.py | 1 | 12193 | ###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
"""
Created on Apr 2, 2013
The BlockSystemAccounts rule will search through /etc/passwd to determine if
there are any system accounts which currently allow login. If any are found
which do allow login, the fix method will append :/dev/null to the end of
the entry in /etc/passwd preventing future login from them. One exception is
the 'root' account which will not be blocked due access to it being required
by administrators in certain situations.
@author: Breen Malmberg
@change: 01/29/2014 Derek Walker revised
@change: 02/12/2014 Ekkehard Implemented self.detailedresults flow
@change: 02/12/2014 Ekkehard Implemented isapplicable
@change: 02/19/2014 Ekkehard Make sure report always runs
@change: 04/18/2014 Dave Kennel Updated to new style configuration item.
@change: 2014/10/17 Ekkehard OS X Yosemite 10.10 Update
@change: 2015/04/14 Dave Kennel Updated for new style isApplicable
@change: 2015/06/10 Breen Malmberg - updated author names; implemented correct
mac os x functionality; refactored code for readability; fixed pep8 violations
@change: 2015/08/28 Ekkehard [artf37764] : BlockSystemAccounts(40) - NCAF - OS X El Capitan 10.11
@change: 2015/11/09 Ekkehard - make eligible of OS X El Capitan
@change: 2017/07/07 Ekkehard - make eligible for macOS High Sierra 10.13
@change: 2017/11/13 Ekkehard - make eligible for OS X El Capitan 10.11+
@change: 2018/06/08 Ekkehard - make eligible for macOS Mojave 10.14
@change: 2018/10/50 Breen Malmberg - refactor of rule
@change: 2019/03/12 Ekkehard - make eligible for macOS Sierra 10.12+
@change: 2019/08/07 ekkehard - enable for macOS Catalina 10.15 only
"""
import os
import re
import traceback
from rule import Rule
from logdispatcher import LogPriority
from CommandHelper import CommandHelper
from stonixutilityfunctions import readFile, iterate
from stonixutilityfunctions import resetsecon
class BlockSystemAccounts(Rule):
"""this module ensures that no system accounts have a login shell"""
def __init__(self, config, enviro, logger, statechglogger):
"""
private method to initialize this module
:param config: configuration object instance
:param enviro: environment object instance
:param logger: logdispatcher object instance
:param statechglogger: statechglogger object instance
"""
Rule.__init__(self, config, enviro, logger, statechglogger)
self.logger = logger
self.environ = enviro
self.rulenumber = 40
self.rulename = 'BlockSystemAccounts'
self.formatDetailedResults("initialize")
self.compliant = False
self.mandatory = True
self.sethelptext()
self.rootrequired = True
datatype = 'bool'
key = 'BLOCKSYSACCOUNTS'
instructions = """If you have system accounts that need to have valid \
shells set the value of this to False, or No."""
default = True
self.applicable = {'type': 'white',
'family': ['linux', 'solaris', 'freebsd'],
'os': {'Mac OS X': ['10.15', 'r', '10.15.10']}}
self.ci = self.initCi(datatype, key, instructions,
default)
self.guidance = ['CIS', 'NSA(2.3.1.4)', 'cce-3987-5', '4525-2',
'4657-3', '4661-5', '4807-4', '4701-9', '4669-8',
'4436-2', '4815-7', '4696-1', '4216-8', '4758-9',
'4621-9', '4515-3', '4282-0', '4802-5', '4806-6',
'4471-9', '4617-7', '4418-0', '4810-8', '3955-2',
'3834-9', '4408-1', '4536-9', '4809-0', '3841-4']
self.iditerator = 0
def report(self):
"""report on the status of the system's compliance with disallowing
system accounts to log in
:return: self.compliant - boolean; True if compliant, False if not
"""
self.detailedresults = ""
self.compliant = True
acceptable_nologin_shells = ["/sbin/nologin", "/dev/null", "", "/usr/bin/false"]
self.ch = CommandHelper(self.logger)
self.corrections_needed = []
try:
system_login_shells = self.getsysloginshells()
for acc in system_login_shells:
if system_login_shells[acc] not in acceptable_nologin_shells:
self.compliant = False
self.corrections_needed.append(acc)
if self.corrections_needed:
self.detailedresults += "\nThe following system accounts can log in:\n" + "\n".join(self.corrections_needed)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.compliant = False
self.detailedresults = traceback.format_exc()
self.logger.log(LogPriority.ERROR, self.detailedresults)
self.formatDetailedResults("report", self.compliant, self.detailedresults)
self.logdispatch.log(LogPriority.INFO, self.detailedresults)
return self.compliant
def getUIDMIN(self):
"""return this system's minimum user ID start value, if configured
:return: uid_min - string; system's user id starting value
"""
uid_min = ""
logindefs = "/etc/login.defs"
try:
# get normal user uid start value
logindefscontents = readFile(logindefs, self.logger)
if logindefscontents:
for line in logindefscontents:
if re.search("^UID_MIN", line, re.IGNORECASE):
sline = line.split()
uid_min = sline[1]
if not uid_min:
self.logger.log(LogPriority.DEBUG, "Unable to determine UID_MIN")
except IndexError:
pass
except IOError:
self.logger.log(LogPriority.DEBUG, "Failed to read uid_min from file")
return uid_min
return uid_min
def getsystemaccounts(self):
"""
return a list of system accounts
:return: system_accounts_list - list of system accounts
"""
system_accounts_list = []
if self.environ.getosfamily() == "darwin":
try:
system_accounts_list = ["root", "nobody"]
get_sys_accounts_cmd = "/usr/bin/dscl . list /Users | grep -i _"
self.ch.executeCommand(get_sys_accounts_cmd)
system_accounts_list += self.ch.getOutput()
except OSError:
self.logger.log(LogPriority.DEBUG, "Failed to retrieve list of system accounts")
return system_accounts_list
else:
exclude_accounts = ["halt", "shutdown", "sync", "root"]
system_accounts_list = []
uid_min = self.getUIDMIN()
if not uid_min:
uid_min = "500"
f = open("/etc/passwd", "r")
contentlines = f.readlines()
f.close()
try:
for line in contentlines:
sline = line.split(":")
if int(sline[2]) < int(uid_min):
if sline[0] not in exclude_accounts:
system_accounts_list.append(sline[0])
except IndexError:
pass
return system_accounts_list
def getloginshell(self, account):
"""
retrieve the login shell, of the passed account, from passwd
:param account: string; name of user account to get info for
:return: loginshell - string; default login shell path for account
"""
loginshell = ""
try:
f = open("/etc/passwd", "r")
contentlines = f.readlines()
f.close()
except IOError:
self.logger.log(LogPriority.DEBUG, "Could not read from passwd file")
return loginshell
try:
for line in contentlines:
if re.search("^"+account, line, re.IGNORECASE):
sline = line.split(":")
loginshell = sline[6]
except IndexError:
pass
return loginshell
def getsysloginshells(self):
"""
return a dictionary of system accounts and their login shells
:return: system_login_shells - dictionary of system accounts and their login shells
"""
system_login_shells = {}
system_accounts = self.getsystemaccounts()
for acc in system_accounts:
system_login_shells[acc] = self.getloginshell(acc).strip()
return system_login_shells
def setdefaultloginshell(self, account, shell):
"""
set default shell for given user account
:param account: string; name of user account to set default shell for
:param shell: the type of shell to set for the given user account
"""
change_shell_cmd = "/usr/bin/chsh -s " + shell + " " + account
self.ch.executeCommand(change_shell_cmd)
def fix(self):
"""The fix method will apply the required settings to the system.
self.rulesuccess will be updated if the rule does not succeed.
:return: self.rulesuccess - boolean; True if fix succeeds, False if not
"""
self.detailedresults = ""
self.rulesuccess = True
path = "/etc/passwd"
tmppath = path + ".stonixtmp"
self.iditerator = 0
newcontentlines = []
try:
if not self.ci.getcurrvalue():
return self.rulesuccess
f = open(path, "r")
contentlines = f.readlines()
f.close()
for line in contentlines:
sline = line.split(":")
if sline[0] in self.corrections_needed:
sline[6] = "/sbin/nologin\n"
line = ":".join(sline)
newcontentlines.append(line)
tf = open(tmppath, "w")
tf.writelines(newcontentlines)
self.iditerator += 1
myid = iterate(self.iditerator, self.rulenumber)
event = {'eventtype': 'conf',
'filepath': path}
self.statechglogger.recordchgevent(myid, event)
self.statechglogger.recordfilechange(path, tmppath, myid)
os.rename(tmppath, path)
os.chown(path, 0, 0)
os.chmod(path, 420)
resetsecon(path)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.rulesuccess = False
self.detailedresults = traceback.format_exc()
self.logger.log(LogPriority.ERROR, self.detailedresults)
self.formatDetailedResults("fix", self.rulesuccess, self.detailedresults)
self.logdispatch.log(LogPriority.INFO, self.detailedresults)
return self.rulesuccess
| gpl-2.0 | 4,207,096,394,589,483,500 | 36.749226 | 124 | 0.580415 | false |
apple/llvm-project | lldb/test/API/commands/expression/import-std-module/list-dbg-info-content/TestDbgInfoContentListFromStdModule.py | 5 | 1813 | """
Test basic std::list functionality but with a declaration from
the debug info (the Foo struct) as content.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestDbgInfoContentList(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(["libc++"])
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.",
lldb.SBFileSpec("main.cpp"))
self.runCmd("settings set target.import-std-module true")
list_type = "std::list<Foo>"
size_type = list_type + "::size_type"
value_type = list_type + "::value_type"
self.expect_expr("a",
result_type=list_type,
result_children=[
ValueCheck(children=[ValueCheck(value="3")]),
ValueCheck(children=[ValueCheck(value="1")]),
ValueCheck(children=[ValueCheck(value="2")])
])
self.expect_expr("a.size()", result_type=size_type, result_value="3")
self.expect_expr("a.front().a", result_type="int", result_value="3")
self.expect_expr("a.back().a", result_type="int", result_value="2")
self.expect("expr std::reverse(a.begin(), a.end())")
self.expect_expr("a.front().a", result_type="int", result_value="2")
self.expect_expr("a.back().a", result_type="int", result_value="3")
self.expect_expr("a.begin()->a", result_type="int", result_value="2")
self.expect_expr("a.rbegin()->a", result_type="int", result_value="3")
| apache-2.0 | -4,014,391,853,952,687,600 | 37.574468 | 78 | 0.556536 | false |
nikpap/inspire-next | inspirehep/utils/cv_latex_html_text.py | 1 | 10784 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
import re
from .export import MissingRequiredFieldError, Export
from inspirehep import config
class Cv_latex_html_text(Export):
"""Class used to output CV format(html) and CV format(text)."""
def __init__(self, record, format_type, separator):
super(Cv_latex_html_text, self).__init__(record)
self.record = record
self.format_type = format_type
self.separator = separator
def format(self):
"""Return CV format export for single record."""
formats = {
'record': self._format_record,
}
return formats['record']()
def _format_record(self):
required_fields = ['title', 'author', 'arxiv']
optional_fields = ['doi', 'publi_info']
try:
return self._format_entry(required_fields, optional_fields)
except MissingRequiredFieldError as e:
raise e
def _format_entry(self, req, opt):
"""
:raises: MissingRequiredFieldError
"""
out = ''
out += self._fetch_fields(req, opt) + '%s' % self.separator
return out
def _fetch_fields(self, req_fields, opt_fields=[]):
fields = {
'title': self._get_title,
'author': self._get_author,
'arxiv': self._get_arxiv,
'doi': self._get_doi,
'publi_info': self._get_publi_info,
}
out = ''
for field in req_fields:
value = fields[field]()
if value:
out += self._format_output_row(field, value)
# RAISE EXCEPTION HERE IF REQ FIELD IS MISSING
for field in opt_fields:
value = fields[field]()
if value:
out += self._format_output_row(field, value)
return out
def _format_output_row(self, field, value):
out = ''
if field == 'title':
if self.format_type == 'cv_latex_html':
out += unicode('<a href="' + config.SERVER_NAME + '/record/' +
str(self.record['control_number']) + '">' +
value + '.</a>' + self.separator)
else:
out += u'{0}{1}'.format(value, self.separator)
elif field == 'author':
if len(value) == 1:
out += u'By {0}.{1}'.format(value[0], self.separator)
elif len(value) > 8:
if 'collaboration' in self.record:
try:
collaboration = self.record[
'collaboration'][0]['value']
if 'Collaboration' in collaboration:
out += unicode('By ' + collaboration +
'(' + value[0] + ' et al.).' +
self.separator)
else:
out += unicode('By ' + collaboration +
' Collaboration (' +
value[0] + ' et al.).' +
self.separator)
except IndexError:
pass
else:
out += u'By {0} et al..{1}'.format(value[0],
self.separator)
else:
out += u'By {0}.{1}'.format(', '.join(value), self.separator)
elif field == 'arxiv':
if self.format_type == 'cv_latex_html':
out += u'[{0}].{1}'.format(value, self.separator)
else:
out += u'{0}.{1}'.format(value, self.separator)
elif field == 'doi':
dois_splitted = value.split(',')
for k, v in enumerate(dois_splitted):
v = '<a href="http://dx.doi.org/' + v + '">' + v + '</a>'
dois_splitted[k] = v
out += u'{0}.{1}'.format(', '.join(out for out in dois_splitted),
self.separator)
elif field == 'publi_info':
out += u'{0}.{1}'.format(', '.join(out for out in value),
self.separator)
return out
def _get_author(self):
"""Return list of name(s) of the author(s)."""
re_last_first = re.compile(
r'^(?P<last>[^,]+)\s*,\s*(?P<first_names>[^\,]*)(?P<extension>\,?.*)$'
)
result = []
if 'authors' in self.record:
for author in self.record['authors']:
if 'full_name' in author and author['full_name']:
if isinstance(author['full_name'], list):
author_full_name = ''.join(full_name for full_name
in author['full_name'])
first_last_match = re_last_first.search(
author_full_name)
if first_last_match:
result.append(
first_last_match.group('first_names') +
' ' + first_last_match.
group('last') +
first_last_match.
group('extension')
)
else:
first_last_match = re_last_first.search(
author['full_name'])
if first_last_match:
result.append(
first_last_match.group('first_names') +
' ' + first_last_match.
group('last') +
first_last_match.group('extension')
)
elif 'corporate_author' in self.record:
for corp_author in self.record['corporate_author']:
if corp_author:
result.append(corp_author)
return result
def _get_title(self):
"""Return record title(s)"""
record_title = ''
if 'titles' in self.record:
if isinstance(self.record['titles'], list):
for title in self.record['titles']:
if 'title' in title:
record_title = title['title']
break
else:
record_title = self.record['titles']['title'].strip()
if isinstance(self.record['titles'], list):
for subtitle in self.record['titles']:
if 'subtitle' in subtitle and subtitle['subtitle']:
record_title += ' : ' + subtitle['subtitle']
break
else:
if 'subtitle' in self.record['titles']:
record_title += ' : ' + self.record['titles']['subtitle']
if record_title.upper() == record_title or \
record_title.find('THE') >= 0:
record_title = ' '.join([word.capitalize() for word
in record_title.split(' ')])
return record_title
def _get_publi_info(self):
result = []
if 'publication_info' in self.record:
journal_title, journal_volume, year, journal_issue, pages = \
('', '', '', '', '')
for field in self.record['publication_info']:
out = ''
if 'journal_title' in field:
if isinstance(field['journal_title'], list):
if not ('journal_volume' in field or
'journal_issue' in field or
'page_artid' in field or
'doi' in self.record):
journal_title = 'Submitted to:' +\
field['journal_title'][-1]
else:
journal_title = field['journal_title'][-1]
else:
if not ('journal_volume' in field or
'journal_issue' in field or
'page_artid' in field or
'doi' in self.record):
journal_title = 'Submitted to:' +\
field['journal_title']
else:
journal_title = field['journal_title']
if 'journal_volume' in field:
journal_volume = ' ' + field['journal_volume']
if 'year' in field:
if isinstance(field['year'], list):
year = ' (' + str(field['year'][-1]) + ')'
else:
year = ' (' + str(field['year']) + ')'
if 'journal_issue' in field:
if field['journal_issue']:
journal_issue = ' ' + \
field['journal_issue'] + ','
if 'page_artid' in field:
if field['page_artid']:
if isinstance(field['page_artid'], list):
pages = ' ' + field['page_artid'][-1]
else:
pages = ' ' + field['page_artid']
out += journal_title + journal_volume + year + \
journal_issue + pages
result.append(out)
if not result:
for field in self.record['publication_info']:
if 'pubinfo_freetext' in field and len(field) == 1:
return field['pubinfo_freetext']
return result
| gpl-2.0 | -8,740,075,710,879,147,000 | 42.659919 | 82 | 0.443064 | false |
lvapeab/nmt-keras | tests/NMT_architectures/attention_ConditionalLSTM.py | 1 | 8620 | import argparse
import os
import pytest
from tests.test_config import load_tests_params, clean_dirs
from data_engine.prepare_data import build_dataset
from nmt_keras.training import train_model
from nmt_keras.apply_model import sample_ensemble, score_corpus
def test_ConditionalLSTM_add():
params = load_tests_params()
# Current test params: Single layered LSTM - ConditionalGRU
params['BIDIRECTIONAL_ENCODER'] = True
params['N_LAYERS_ENCODER'] = 1
params['BIDIRECTIONAL_DEEP_ENCODER'] = True
params['ENCODER_RNN_TYPE'] = 'LSTM'
params['DECODER_RNN_TYPE'] = 'ConditionalLSTM'
params['N_LAYERS_DECODER'] = 1
params['ATTENTION_MODE'] = 'add'
params['REBUILD_DATASET'] = True
dataset = build_dataset(params)
params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]
params['MODEL_NAME'] = \
params['TASK_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '_' + params['MODEL_TYPE'] + \
'_src_emb_' + str(params['SOURCE_TEXT_EMBEDDING_SIZE']) + \
'_bidir_' + str(params['BIDIRECTIONAL_ENCODER']) + \
'_enc_' + params['ENCODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_ENCODER']) + '_' + str(
params['ENCODER_HIDDEN_SIZE']) + \
'_dec_' + params['DECODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_DECODER']) + '_' + str(
params['DECODER_HIDDEN_SIZE']) + params['ATTENTION_MODE'] + \
'_deepout_' + '_'.join([layer[0] for layer in params['DEEP_OUTPUT_LAYERS']]) + \
'_trg_emb_' + str(params['TARGET_TEXT_EMBEDDING_SIZE']) + \
'_' + params['OPTIMIZER'] + '_' + str(params['LR'])
# Test several NMT-Keras utilities: train, sample, sample_ensemble, score_corpus...
print("Training model")
train_model(params)
params['RELOAD'] = 1
print("Done")
parser = argparse.ArgumentParser('Parser for unit testing')
parser.dataset = os.path.join(
params['DATASET_STORE_PATH'],
'Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl')
parser.text = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
parser.splits = ['val']
parser.config = params['STORE_PATH'] + '/config.pkl'
parser.models = [params['STORE_PATH'] + '/epoch_' + str(1)]
parser.verbose = 0
parser.dest = None
parser.source = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
parser.target = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['TRG_LAN'])
parser.weights = []
parser.glossary = None
for n_best in [True, False]:
parser.n_best = n_best
print("Sampling with n_best = %s " % str(n_best))
sample_ensemble(parser, params)
print("Done")
print("Scoring corpus")
score_corpus(parser, params)
print("Done")
clean_dirs(params)
def test_ConditionalLSTM_dot():
params = load_tests_params()
# Current test params: Single layered LSTM - ConditionalGRU
params['BIDIRECTIONAL_ENCODER'] = True
params['N_LAYERS_ENCODER'] = 1
params['BIDIRECTIONAL_DEEP_ENCODER'] = True
params['ENCODER_RNN_TYPE'] = 'LSTM'
params['DECODER_RNN_TYPE'] = 'ConditionalLSTM'
params['N_LAYERS_DECODER'] = 1
params['ATTENTION_MODE'] = 'dot'
params['REBUILD_DATASET'] = True
dataset = build_dataset(params)
params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]
params['MODEL_NAME'] = \
params['TASK_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '_' + params['MODEL_TYPE'] + \
'_src_emb_' + str(params['SOURCE_TEXT_EMBEDDING_SIZE']) + \
'_bidir_' + str(params['BIDIRECTIONAL_ENCODER']) + \
'_enc_' + params['ENCODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_ENCODER']) + '_' + str(
params['ENCODER_HIDDEN_SIZE']) + \
'_dec_' + params['DECODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_DECODER']) + '_' + str(
params['DECODER_HIDDEN_SIZE']) + params['ATTENTION_MODE'] + \
'_deepout_' + '_'.join([layer[0] for layer in params['DEEP_OUTPUT_LAYERS']]) + \
'_trg_emb_' + str(params['TARGET_TEXT_EMBEDDING_SIZE']) + \
'_' + params['OPTIMIZER'] + '_' + str(params['LR'])
# Test several NMT-Keras utilities: train, sample, sample_ensemble, score_corpus...
print("Training model")
train_model(params)
params['RELOAD'] = 1
print("Done")
parser = argparse.ArgumentParser('Parser for unit testing')
parser.dataset = os.path.join(
params['DATASET_STORE_PATH'],
'Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl')
parser.text = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
parser.splits = ['val']
parser.config = params['STORE_PATH'] + '/config.pkl'
parser.models = [params['STORE_PATH'] + '/epoch_' + str(1)]
parser.verbose = 0
parser.dest = None
parser.source = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
parser.target = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['TRG_LAN'])
parser.weights = []
parser.glossary = None
for n_best in [True, False]:
parser.n_best = n_best
print("Sampling with n_best = %s " % str(n_best))
sample_ensemble(parser, params)
print("Done")
print("Scoring corpus")
score_corpus(parser, params)
print("Done")
clean_dirs(params)
def test_ConditionalLSTM_scaled():
params = load_tests_params()
# Current test params: Single layered LSTM - ConditionalGRU
params['BIDIRECTIONAL_ENCODER'] = True
params['N_LAYERS_ENCODER'] = 1
params['BIDIRECTIONAL_DEEP_ENCODER'] = True
params['ENCODER_RNN_TYPE'] = 'LSTM'
params['DECODER_RNN_TYPE'] = 'ConditionalLSTM'
params['N_LAYERS_DECODER'] = 1
params['ATTENTION_MODE'] = 'scaled-dot'
params['REBUILD_DATASET'] = True
dataset = build_dataset(params)
params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]
params['MODEL_NAME'] = \
params['TASK_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '_' + params['MODEL_TYPE'] + \
'_src_emb_' + str(params['SOURCE_TEXT_EMBEDDING_SIZE']) + \
'_bidir_' + str(params['BIDIRECTIONAL_ENCODER']) + \
'_enc_' + params['ENCODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_ENCODER']) + '_' + str(
params['ENCODER_HIDDEN_SIZE']) + \
'_dec_' + params['DECODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_DECODER']) + '_' + str(
params['DECODER_HIDDEN_SIZE']) + params['ATTENTION_MODE'] + \
'_deepout_' + '_'.join([layer[0] for layer in params['DEEP_OUTPUT_LAYERS']]) + \
'_trg_emb_' + str(params['TARGET_TEXT_EMBEDDING_SIZE']) + \
'_' + params['OPTIMIZER'] + '_' + str(params['LR'])
# Test several NMT-Keras utilities: train, sample, sample_ensemble, score_corpus...
print("Training model")
train_model(params)
params['RELOAD'] = 1
print("Done")
parser = argparse.ArgumentParser('Parser for unit testing')
parser.dataset = os.path.join(
params['DATASET_STORE_PATH'],
'Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl')
parser.text = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
parser.splits = ['val']
parser.config = params['STORE_PATH'] + '/config.pkl'
parser.models = [params['STORE_PATH'] + '/epoch_' + str(1)]
parser.verbose = 0
parser.dest = None
parser.source = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
parser.target = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['TRG_LAN'])
parser.weights = []
parser.glossary = None
for n_best in [True, False]:
parser.n_best = n_best
print("Sampling with n_best = %s " % str(n_best))
sample_ensemble(parser, params)
print("Done")
print("Scoring corpus")
score_corpus(parser, params)
print("Done")
clean_dirs(params)
if __name__ == '__main__':
pytest.main([__file__])
| mit | 6,003,570,170,736,867,000 | 42.756345 | 107 | 0.609281 | false |
mathandy/svgpathtools | svgpathtools/paths2svg.py | 1 | 19028 | """This submodule: basic tools for creating svg files from path data.
See also the document.py submodule.
"""
# External dependencies:
from __future__ import division, absolute_import, print_function
from math import ceil
from os import path as os_path, makedirs
from tempfile import gettempdir
from xml.dom.minidom import parse as md_xml_parse
from svgwrite import Drawing, text as txt
from time import time
from warnings import warn
import re
# Internal dependencies
from .path import Path, Line, is_path_segment
from .misctools import open_in_browser
# color shorthand for inputting color list as string of chars.
color_dict = {'a': 'aqua',
'b': 'blue',
'c': 'cyan',
'd': 'darkblue',
'e': '',
'f': '',
'g': 'green',
'h': '',
'i': '',
'j': '',
'k': 'black',
'l': 'lime',
'm': 'magenta',
'n': 'brown',
'o': 'orange',
'p': 'pink',
'q': 'turquoise',
'r': 'red',
's': 'salmon',
't': 'tan',
'u': 'purple',
'v': 'violet',
'w': 'white',
'x': '',
'y': 'yellow',
'z': 'azure'}
def str2colorlist(s, default_color=None):
color_list = [color_dict[ch] for ch in s]
if default_color:
for idx, c in enumerate(color_list):
if not c:
color_list[idx] = default_color
return color_list
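# Hedged usage sketch (illustrative only, not part of the public API): the
# single-character color shorthand maps e.g. 'rgb' to
# ['red', 'green', 'blue'], and characters with an empty entry in
# `color_dict` fall back to `default_color` when one is supplied.
def _example_color_shorthand():
    # 'r', 'g', and 'b' are defined in color_dict; 'x' is empty, so it
    # falls back to the (assumed) default color below.
    return str2colorlist('rgbx', default_color='black')
    # -> ['red', 'green', 'blue', 'black']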
def is3tuple(c):
return isinstance(c, tuple) and len(c) == 3
def big_bounding_box(paths_n_stuff):
"""returns minimal upright bounding box.
Args:
paths_n_stuff: iterable of Paths, Bezier path segments, and
points (given as complex numbers).
Returns:
extrema of bounding box, (xmin, xmax, ymin, ymax)
"""
bbs = []
for thing in paths_n_stuff:
if is_path_segment(thing) or isinstance(thing, Path):
bbs.append(thing.bbox())
elif isinstance(thing, complex):
bbs.append((thing.real, thing.real, thing.imag, thing.imag))
else:
try:
complexthing = complex(thing)
bbs.append((complexthing.real, complexthing.real,
complexthing.imag, complexthing.imag))
except ValueError:
raise TypeError("paths_n_stuff can only contains Path, "
"CubicBezier, QuadraticBezier, Line, "
"and complex objects.")
xmins, xmaxs, ymins, ymaxs = list(zip(*bbs))
xmin = min(xmins)
xmax = max(xmaxs)
ymin = min(ymins)
ymax = max(ymaxs)
return xmin, xmax, ymin, ymax
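# Hedged usage sketch (illustrative only): `big_bounding_box` merges the
# extrema of any mix of Path objects, bare segments, and complex points.
# The coordinates below are arbitrary example values, not library defaults.
def _example_big_bounding_box():
    path = Path(Line(0 + 0j, 10 + 5j))      # bbox (0, 10, 0, 5)
    point = 20 + 3j                         # extends xmax to 20
    return big_bounding_box([path, point])  # -> (0, 20, 0, 5)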
def disvg(paths=None, colors=None, filename=None, stroke_widths=None,
nodes=None, node_colors=None, node_radii=None,
openinbrowser=True, timestamp=None, margin_size=0.1,
mindim=600, dimensions=None, viewbox=None, text=None,
text_path=None, font_size=None, attributes=None,
svg_attributes=None, svgwrite_debug=False,
paths2Drawing=False, baseunit='px'):
"""Creates (and optionally displays) an SVG file.
REQUIRED INPUTS:
:param paths - a list of paths
OPTIONAL INPUT:
:param colors - specifies the path stroke color. By default all paths
    will be black (#000000). This parameter can be input in a few ways:
    1) a list of strings that will be input into the path element's stroke
attribute (so anything that is understood by the svg viewer).
2) a string of single character colors -- e.g. setting colors='rrr' is
equivalent to setting colors=['red', 'red', 'red'] (see the
'color_dict' dictionary above for a list of possibilities).
3) a list of rgb 3-tuples -- e.g. colors = [(255, 0, 0), ...].
:param filename - the desired location/filename of the SVG file
created (by default the SVG will be named 'disvg_output.svg' or
'disvg_output_<timestamp>.svg' and stored in the temporary
    directory returned by `tempfile.gettempdir()`). See `timestamp`
for information on the timestamp.
:param stroke_widths - a list of stroke_widths to use for paths
    (default is 0.1% of the SVG's larger dimension)
:param nodes - a list of points to draw as filled-in circles
:param node_colors - a list of colors to use for the nodes (by default
nodes will be red)
:param node_radii - a list of radii to use for the nodes (by default
    the node radius will be 0.5% of the SVG's larger dimension)
:param text - string or list of strings to be displayed
:param text_path - if text is a list, then this should be a list of
    paths (or path segments) of the same length. Note: each path must be
    long enough to display its text or the text will be cropped by the svg
viewer.
    :param font_size - a single float or a list of floats.
:param openinbrowser - Set to True to automatically open the created
SVG in the user's default web browser.
    :param timestamp - if true, then a timestamp will be
appended to the output SVG's filename. This is meant as a
workaround for issues related to rapidly opening multiple
SVGs in your browser using `disvg`. This defaults to true if
`filename is None` and false otherwise.
    :param margin_size - The minimum margin (empty area framing the
    collection of paths), given as a fraction of the content's bounding
    box, used for creating the canvas and background of the SVG.
:param mindim - The minimum dimension (height or width) of the output
SVG (default is 600).
:param dimensions - The (x,y) display dimensions of the output SVG.
I.e. this specifies the `width` and `height` SVG attributes. Note that
these also can be used to specify units other than pixels. Using this
will override the `mindim` parameter.
    :param viewbox - This specifies the coordinate system used in the svg.
    The SVG `viewBox` attribute works together with the `height` and
    `width` attributes. Using these three attributes allows for shifting
    and scaling of the SVG canvas without changing any values other
than those in `viewBox`, `height`, and `width`. `viewbox` should be
input as a 4-tuple, (min_x, min_y, width, height), or a string
"min_x min_y width height". Using this will override the `mindim`
parameter.
:param attributes - a list of dictionaries of attributes for the input
paths. Note: This will override any other conflicting settings.
    :param svg_attributes - a dictionary of attributes for the output svg.
    :param svgwrite_debug - This parameter turns on/off `svgwrite`'s
    debugging mode. By default svgwrite_debug=False, which increases
    speed and also prevents `svgwrite` from raising an error when not
all `svg_attributes` key-value pairs are understood.
:param paths2Drawing - If true, an `svgwrite.Drawing` object is
returned and no file is written. This `Drawing` can later be saved
using the `svgwrite.Drawing.save()` method.
NOTES:
* The `svg_attributes` parameter will override any other conflicting
settings.
* Any `extra` parameters that `svgwrite.Drawing()` accepts can be
controlled by passing them in through `svg_attributes`.
* The unit of length here is assumed to be pixels in all variables.
* If this function is used multiple times in quick succession to
display multiple SVGs (all using the default filename), the
svgviewer/browser will likely fail to load some of the SVGs in time.
To fix this, use the timestamp attribute, or give the files unique
names, or use a pause command (e.g. time.sleep(1)) between uses.
SEE ALSO:
* document.py
"""
_default_relative_node_radius = 5e-3
_default_relative_stroke_width = 1e-3
_default_path_color = '#000000' # black
_default_node_color = '#ff0000' # red
_default_font_size = 12
if filename is None:
timestamp = True if timestamp is None else timestamp
filename = os_path.join(gettempdir(), 'disvg_output.svg')
# append time stamp to filename
if timestamp:
fbname, fext = os_path.splitext(filename)
dirname = os_path.dirname(filename)
tstamp = str(time()).replace('.', '')
stfilename = os_path.split(fbname)[1] + '_' + tstamp + fext
filename = os_path.join(dirname, stfilename)
# check paths and colors are set
if isinstance(paths, Path) or is_path_segment(paths):
paths = [paths]
if paths:
if not colors:
colors = [_default_path_color] * len(paths)
else:
assert len(colors) == len(paths)
if isinstance(colors, str):
colors = str2colorlist(colors,
default_color=_default_path_color)
elif isinstance(colors, list):
for idx, c in enumerate(colors):
if is3tuple(c):
colors[idx] = "rgb" + str(c)
    # check nodes and node_colors are set (node_radii are set later)
if nodes:
if not node_colors:
node_colors = [_default_node_color] * len(nodes)
else:
assert len(node_colors) == len(nodes)
if isinstance(node_colors, str):
node_colors = str2colorlist(node_colors,
default_color=_default_node_color)
elif isinstance(node_colors, list):
for idx, c in enumerate(node_colors):
if is3tuple(c):
node_colors[idx] = "rgb" + str(c)
# set up the viewBox and display dimensions of the output SVG
# along the way, set stroke_widths and node_radii if not provided
assert paths or nodes
stuff2bound = []
if viewbox:
if not isinstance(viewbox, str):
viewbox = '%s %s %s %s' % viewbox
if dimensions is None:
dimensions = viewbox.split(' ')[2:4]
elif dimensions:
dimensions = tuple(map(str, dimensions))
def strip_units(s):
return re.search(r'\d*\.?\d*', s.strip()).group()
viewbox = '0 0 %s %s' % tuple(map(strip_units, dimensions))
else:
if paths:
stuff2bound += paths
if nodes:
stuff2bound += nodes
if text_path:
stuff2bound += text_path
xmin, xmax, ymin, ymax = big_bounding_box(stuff2bound)
dx = xmax - xmin
dy = ymax - ymin
if dx == 0:
dx = 1
if dy == 0:
dy = 1
# determine stroke_widths to use (if not provided) and max_stroke_width
if paths:
if not stroke_widths:
sw = max(dx, dy) * _default_relative_stroke_width
stroke_widths = [sw]*len(paths)
max_stroke_width = sw
else:
assert len(paths) == len(stroke_widths)
max_stroke_width = max(stroke_widths)
else:
max_stroke_width = 0
# determine node_radii to use (if not provided) and max_node_diameter
if nodes:
if not node_radii:
r = max(dx, dy) * _default_relative_node_radius
node_radii = [r]*len(nodes)
max_node_diameter = 2*r
else:
assert len(nodes) == len(node_radii)
max_node_diameter = 2*max(node_radii)
else:
max_node_diameter = 0
extra_space_for_style = max(max_stroke_width, max_node_diameter)
xmin -= margin_size*dx + extra_space_for_style/2
ymin -= margin_size*dy + extra_space_for_style/2
dx += 2*margin_size*dx + extra_space_for_style
dy += 2*margin_size*dy + extra_space_for_style
viewbox = "%s %s %s %s" % (xmin, ymin, dx, dy)
if mindim is None:
szx = "{}{}".format(dx, baseunit)
szy = "{}{}".format(dy, baseunit)
else:
if dx > dy:
szx = str(mindim) + baseunit
szy = str(int(ceil(mindim * dy / dx))) + baseunit
else:
szx = str(int(ceil(mindim * dx / dy))) + baseunit
szy = str(mindim) + baseunit
dimensions = szx, szy
# Create an SVG file
if svg_attributes is not None:
dimensions = (svg_attributes.get("width", dimensions[0]),
svg_attributes.get("height", dimensions[1]))
debug = svg_attributes.get("debug", svgwrite_debug)
dwg = Drawing(filename=filename, size=dimensions, debug=debug,
**svg_attributes)
else:
dwg = Drawing(filename=filename, size=dimensions, debug=svgwrite_debug,
viewBox=viewbox)
# add paths
if paths:
for i, p in enumerate(paths):
if isinstance(p, Path):
ps = p.d()
elif is_path_segment(p):
ps = Path(p).d()
else: # assume this path, p, was input as a Path d-string
ps = p
if attributes:
good_attribs = {'d': ps}
for key in attributes[i]:
val = attributes[i][key]
if key != 'd':
try:
dwg.path(ps, **{key: val})
good_attribs.update({key: val})
except Exception as e:
warn(str(e))
dwg.add(dwg.path(**good_attribs))
else:
dwg.add(dwg.path(ps, stroke=colors[i],
stroke_width=str(stroke_widths[i]),
fill='none'))
# add nodes (filled in circles)
if nodes:
for i_pt, pt in enumerate([(z.real, z.imag) for z in nodes]):
dwg.add(dwg.circle(pt, node_radii[i_pt], fill=node_colors[i_pt]))
# add texts
if text:
assert isinstance(text, str) or (isinstance(text, list) and
isinstance(text_path, list) and
len(text_path) == len(text))
if isinstance(text, str):
text = [text]
if not font_size:
font_size = [_default_font_size]
if not text_path:
pos = complex(xmin + margin_size*dx, ymin + margin_size*dy)
text_path = [Line(pos, pos + 1).d()]
else:
if font_size:
if isinstance(font_size, list):
assert len(font_size) == len(text)
else:
font_size = [font_size] * len(text)
else:
font_size = [_default_font_size] * len(text)
for idx, s in enumerate(text):
p = text_path[idx]
if isinstance(p, Path):
ps = p.d()
elif is_path_segment(p):
ps = Path(p).d()
else: # assume this path, p, was input as a Path d-string
ps = p
# paragraph = dwg.add(dwg.g(font_size=font_size[idx]))
# paragraph.add(dwg.textPath(ps, s))
pathid = 'tp' + str(idx)
dwg.defs.add(dwg.path(d=ps, id=pathid))
txter = dwg.add(dwg.text('', font_size=font_size[idx]))
txter.add(txt.TextPath('#'+pathid, s))
if paths2Drawing:
return dwg
# save svg
if not os_path.exists(os_path.dirname(filename)):
makedirs(os_path.dirname(filename))
dwg.save()
# re-open the svg, make the xml pretty, and save it again
xmlstring = md_xml_parse(filename).toprettyxml()
with open(filename, 'w') as f:
f.write(xmlstring)
# try to open in web browser
if openinbrowser:
try:
open_in_browser(filename)
except:
print("Failed to open output SVG in browser. SVG saved to:")
print(filename)
def wsvg(paths=None, colors=None, filename=None, stroke_widths=None,
nodes=None, node_colors=None, node_radii=None,
openinbrowser=False, timestamp=False, margin_size=0.1,
mindim=600, dimensions=None, viewbox=None, text=None,
text_path=None, font_size=None, attributes=None,
svg_attributes=None, svgwrite_debug=False,
paths2Drawing=False, baseunit='px'):
"""Create SVG and write to disk.
Note: This is identical to `disvg()` except that `openinbrowser`
    is false by default and an assertion error is raised if `filename`
    is None.
See `disvg()` docstring for more info.
"""
assert filename is not None
return disvg(paths, colors=colors, filename=filename,
stroke_widths=stroke_widths, nodes=nodes,
node_colors=node_colors, node_radii=node_radii,
openinbrowser=openinbrowser, timestamp=timestamp,
margin_size=margin_size, mindim=mindim,
dimensions=dimensions, viewbox=viewbox, text=text,
text_path=text_path, font_size=font_size,
attributes=attributes, svg_attributes=svg_attributes,
svgwrite_debug=svgwrite_debug,
paths2Drawing=paths2Drawing, baseunit=baseunit)
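# A usage sketch for wsvg() (illustrative, not part of the original module; it
# assumes `Line` and `wsvg` are importable from the package top level, and the
# filename below is hypothetical):
#
#     from svgpathtools import Line, wsvg
#     seg = Line(200 + 300j, 250 + 350j)
#     wsvg([seg], colors=['#0000ff'], filename='example_output.svg')
#     # writes example_output.svg to the current working directory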
def paths2Drawing(paths=None, colors=None, filename=None,
stroke_widths=None, nodes=None, node_colors=None,
node_radii=None, openinbrowser=False, timestamp=False,
margin_size=0.1, mindim=600, dimensions=None,
viewbox=None, text=None, text_path=None,
font_size=None, attributes=None, svg_attributes=None,
svgwrite_debug=False, paths2Drawing=True, baseunit='px'):
"""Create and return `svg.Drawing` object.
    Note: This is identical to `disvg()` except that `paths2Drawing`
    is true by default, so the `svgwrite` Drawing object is returned
    instead of being written to disk.
See `disvg()` docstring for more info.
"""
return disvg(paths, colors=colors, filename=filename,
stroke_widths=stroke_widths, nodes=nodes,
node_colors=node_colors, node_radii=node_radii,
openinbrowser=openinbrowser, timestamp=timestamp,
margin_size=margin_size, mindim=mindim,
dimensions=dimensions, viewbox=viewbox, text=text,
text_path=text_path, font_size=font_size,
attributes=attributes, svg_attributes=svg_attributes,
svgwrite_debug=svgwrite_debug,
paths2Drawing=paths2Drawing, baseunit=baseunit)
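# A sketch of how paths2Drawing() is typically used (illustrative, not part of
# the original module): since the svgwrite Drawing is returned instead of being
# saved, the caller can keep adding elements before writing the file. The
# `some_path` name below is hypothetical.
#
#     dwg = paths2Drawing([some_path], colors=['#000000'])
#     dwg.add(dwg.text('label', insert=(10, 20)))  # extra svgwrite element
#     dwg.save()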
| mit | -2,271,244,267,651,352,000 | 38.559252 | 79 | 0.567952 | false |
regebro/doctrine.urwid | docs/conf.py | 1 | 7899 | # -*- coding: utf-8 -*-
#
# doctrine.urwid documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 11 19:35:12 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'doctrine.urwid'
copyright = u'2015, Lennart Regebro'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'doctrineurwiddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'doctrineurwid.tex', u'doctrine.urwid Documentation',
u'Lennart Regebro', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'doctrineurwid', u'doctrine.urwid Documentation',
[u'Lennart Regebro'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'doctrineurwid', u'doctrine.urwid Documentation',
   u'Lennart Regebro', 'doctrineurwid', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | 7,486,623,617,247,903,000 | 31.506173 | 80 | 0.704773 | false |
nburn42/tensorflow | tensorflow/contrib/eager/python/datasets.py | 1 | 6160 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iteration over tf.data.Datasets when eager execution is enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.saver import BaseSaverBuilder
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
class Iterator(iterator_ops.EagerIterator, checkpointable.CheckpointableBase):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset.
NOTE: Unlike the iterator created by the
@{tf.data.Dataset.make_one_shot_iterator} method, this class enables
additional experimental functionality, such as prefetching to the GPU.
"""
def __init__(self, dataset):
"""Creates a new iterator over the given dataset.
For example:
```python
dataset = tf.data.Dataset.range(4)
for x in Iterator(dataset):
print(x)
```
Tensors produced will be placed on the device on which this iterator object
was created.
Args:
dataset: A `tf.data.Dataset` object.
Raises:
TypeError: If `dataset` is an unsupported type.
RuntimeError: When invoked without eager execution enabled.
"""
if isinstance(dataset, prefetching_ops._PrefetchToDeviceDataset): # pylint: disable=protected-access
raise TypeError(
"`tf.contrib.data.prefetch_to_device()` is not compatible with "
"`tf.contrib.eager.Iterator`. Use `for ... in dataset:` to iterate "
"over the dataset instead.")
super(Iterator, self).__init__(dataset)
if not context.context().device_spec.device_type:
is_remote_device = False
else:
is_remote_device = context.context().device_spec.device_type != "CPU"
self._buffer_resource_handle = None
if is_remote_device:
with ops.device("/device:CPU:0"):
iter_string_handle = gen_dataset_ops.iterator_to_string_handle(
self._resource)
@function.Defun(dtypes.string)
def remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, self.output_types, self.output_shapes, self.output_classes)
return remote_iterator.get_next()
remote_fn.add_to_graph(None)
target = constant_op.constant("/device:CPU:0")
with ops.device(self._device):
self._buffer_resource_handle = prefetching_ops.function_buffering_resource( # pylint: disable=line-too-long
string_arg=iter_string_handle,
f=remote_fn,
target_device=target,
buffer_size=10,
container="",
shared_name=_generate_shared_name("function_buffer_resource"))
self._buffer_resource_deleter = resource_variable_ops.EagerResourceDeleter( # pylint: disable=line-too-long
handle=self._buffer_resource_handle,
handle_device=self._device)
def _next_internal(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
# This runs in sync mode as iterators use an error status to communicate
# that there is no more data to iterate over.
# TODO(b/77291417): Fix
with context.execution_mode(context.SYNC):
if self._buffer_resource_handle is not None:
with ops.device(self._device):
ret = prefetching_ops.function_buffering_resource_get_next(
function_buffer_resource=self._buffer_resource_handle,
output_types=self._flat_output_types)
return sparse.deserialize_sparse_tensors(
nest.pack_sequence_as(self._output_types, ret), self._output_types,
self._output_shapes, self._output_classes)
else:
return super(Iterator, self)._next_internal()
# TODO(shivaniagrawal): Expose checkpointable stateful objects from dataset
# attributes(potential).
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject for saving/restoring iterator state."""
def __init__(self, iterator_resource, name):
serialized_iterator = gen_dataset_ops.serialize_iterator(
iterator_resource)
specs = [
BaseSaverBuilder.SaveSpec(serialized_iterator, "", name + "_STATE")
]
# pylint: disable=protected-access
super(Iterator._Saveable, self).__init__(iterator_resource, specs, name)
def restore(self, restored_tensors, restored_shapes):
with ops.colocate_with(self.op):
return gen_dataset_ops.deserialize_iterator(self.op,
restored_tensors[0])
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name):
return self._Saveable(self._resource, name)
return {"ITERATOR": _saveable_factory}
| apache-2.0 | -1,686,156,188,781,747,200 | 38.235669 | 116 | 0.68539 | false |
matandobr/Mobile-Security-Framework-MobSF | StaticAnalyzer/views/android/generate_downloads.py | 1 | 1944 | # -*- coding: utf_8 -*-
"""Generate Zipped downloads."""
import logging
import os
import re
import shutil
from django.conf import settings
from django.shortcuts import redirect
from MobSF.utils import print_n_send_error_response
logger = logging.getLogger(__name__)
def run(request):
"""Generate downloads for apk, java and smali."""
try:
logger.info('Generating Downloads')
md5 = request.GET['hash']
file_type = request.GET['file_type']
match = re.match('^[0-9a-f]{32}$', md5)
        # both the hash and the requested file type must be valid
        if not match or file_type not in ['apk', 'smali', 'java']:
logger.exception('Invalid options')
return print_n_send_error_response(request,
'Invalid options')
app_dir = os.path.join(settings.UPLD_DIR, md5)
file_name = ''
if file_type == 'java':
# For Java
file_name = md5 + '-java'
directory = os.path.join(app_dir, 'java_source/')
dwd_dir = os.path.join(settings.DWD_DIR, file_name)
shutil.make_archive(dwd_dir, 'zip', directory)
file_name = file_name + '.zip'
elif file_type == 'smali':
# For Smali
file_name = md5 + '-smali'
directory = os.path.join(app_dir, 'smali_source/')
dwd_dir = os.path.join(settings.DWD_DIR, file_name)
shutil.make_archive(dwd_dir, 'zip', directory)
file_name = file_name + '.zip'
elif file_type == 'apk':
file_name = md5 + '.apk'
src = os.path.join(app_dir, file_name)
dst = os.path.join(settings.DWD_DIR, file_name)
shutil.copy2(src, dst)
return redirect('/download/' + file_name)
except Exception:
logger.exception('Generating Downloads')
return print_n_send_error_response(request,
'Generating Downloads')
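# Illustrative request handled by run() (not part of the original file):
#
#     GET /generate_downloads?hash=<32 hex chars>&file_type=java
#
# zips <UPLD_DIR>/<hash>/java_source/ into <DWD_DIR>/<hash>-java.zip and then
# redirects the client to /download/<hash>-java.zip; 'smali' and 'apk' are
# handled analogously.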
| gpl-3.0 | -6,746,440,024,267,676,000 | 35.679245 | 67 | 0.55607 | false |
isislovecruft/arm | arm/controller.py | 1 | 18726 | """
Main interface loop for arm, periodically redrawing the screen and issuing
user input to the proper panels.
"""
import os
import time
import curses
import threading
import arm.arguments
import arm.menu.menu
import arm.popups
import arm.headerPanel
import arm.logPanel
import arm.configPanel
import arm.torrcPanel
import arm.graphing.graphPanel
import arm.graphing.bandwidthStats
import arm.graphing.connStats
import arm.graphing.resourceStats
import arm.connections.connPanel
import arm.util.tracker
from stem.control import State
from arm.util import panel, torConfig, torTools
from stem.util import conf, enum, log, system
ARM_CONTROLLER = None
def conf_handler(key, value):
if key == "features.redrawRate":
return max(1, value)
elif key == "features.refreshRate":
return max(0, value)
CONFIG = conf.config_dict("arm", {
"startup.events": "N3",
"startup.data_directory": "~/.arm",
"features.panels.show.graph": True,
"features.panels.show.log": True,
"features.panels.show.connection": True,
"features.panels.show.config": True,
"features.panels.show.torrc": True,
"features.redrawRate": 5,
"features.refreshRate": 5,
"features.confirmQuit": True,
"features.graph.type": 1,
"features.graph.bw.prepopulate": True,
"start_time": 0,
}, conf_handler)
GraphStat = enum.Enum("BANDWIDTH", "CONNECTIONS", "SYSTEM_RESOURCES")
# maps 'features.graph.type' config values to the initial types
GRAPH_INIT_STATS = {1: GraphStat.BANDWIDTH, 2: GraphStat.CONNECTIONS, 3: GraphStat.SYSTEM_RESOURCES}
def get_controller():
"""
Provides the arm controller instance.
"""
return ARM_CONTROLLER
def stop_controller():
"""
Halts our Controller, providing back the thread doing so.
"""
def halt_controller():
control = get_controller()
if control:
for panel_impl in control.get_daemon_panels():
panel_impl.stop()
for panel_impl in control.get_daemon_panels():
panel_impl.join()
halt_thread = threading.Thread(target = halt_controller)
halt_thread.start()
return halt_thread
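# Illustrative shutdown sequence (not part of the original file): callers that
# need to block until the daemon panels have stopped can join the returned thread.
#
#   halt_thread = stop_controller()
#   halt_thread.join()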
def init_controller(stdscr, start_time):
"""
Spawns the controller, and related panels for it.
Arguments:
stdscr - curses window
"""
global ARM_CONTROLLER
# initializes the panels
sticky_panels = [
arm.headerPanel.HeaderPanel(stdscr, start_time),
LabelPanel(stdscr),
]
page_panels, first_page_panels = [], []
# first page: graph and log
if CONFIG["features.panels.show.graph"]:
first_page_panels.append(arm.graphing.graphPanel.GraphPanel(stdscr))
if CONFIG["features.panels.show.log"]:
expanded_events = arm.arguments.expand_events(CONFIG["startup.events"])
first_page_panels.append(arm.logPanel.LogPanel(stdscr, expanded_events))
if first_page_panels:
page_panels.append(first_page_panels)
# second page: connections
if CONFIG["features.panels.show.connection"]:
page_panels.append([arm.connections.connPanel.ConnectionPanel(stdscr)])
# The DisableDebuggerAttachment will prevent our connection panel from really
# functioning. It'll have circuits, but little else. If this is the case then
# notify the user and tell them what they can do to fix it.
controller = torTools.get_conn().controller
if controller.get_conf("DisableDebuggerAttachment", None) == "1":
log.notice("Tor is preventing system utilities like netstat and lsof from working. This means that arm can't provide you with connection information. You can change this by adding 'DisableDebuggerAttachment 0' to your torrc and restarting tor. For more information see...\nhttps://trac.torproject.org/3313")
arm.util.tracker.get_connection_tracker().set_paused(True)
else:
    # Configures connection resolutions. This is paused/unpaused according to
    # whether Tor's connected or not.
controller.add_status_listener(conn_reset_listener)
tor_pid = controller.get_pid(None)
if tor_pid:
# use the tor pid to help narrow connection results
tor_cmd = system.get_name_by_pid(tor_pid)
if tor_cmd is None:
tor_cmd = "tor"
resolver = arm.util.tracker.get_connection_tracker()
log.info("Operating System: %s, Connection Resolvers: %s" % (os.uname()[0], ", ".join(resolver._resolvers)))
resolver.start()
else:
      # constructs singleton resolver and, if tor isn't connected, initializes
      # it to be paused
arm.util.tracker.get_connection_tracker().set_paused(not controller.is_alive())
# third page: config
if CONFIG["features.panels.show.config"]:
page_panels.append([arm.configPanel.ConfigPanel(stdscr, arm.configPanel.State.TOR)])
# fourth page: torrc
if CONFIG["features.panels.show.torrc"]:
page_panels.append([arm.torrcPanel.TorrcPanel(stdscr, arm.torrcPanel.Config.TORRC)])
# initializes the controller
ARM_CONTROLLER = Controller(stdscr, sticky_panels, page_panels)
# additional configuration for the graph panel
graph_panel = ARM_CONTROLLER.get_panel("graph")
if graph_panel:
# statistical monitors for graph
bw_stats = arm.graphing.bandwidthStats.BandwidthStats()
graph_panel.add_stats(GraphStat.BANDWIDTH, bw_stats)
graph_panel.add_stats(GraphStat.SYSTEM_RESOURCES, arm.graphing.resourceStats.ResourceStats())
if CONFIG["features.panels.show.connection"]:
graph_panel.add_stats(GraphStat.CONNECTIONS, arm.graphing.connStats.ConnStats())
# sets graph based on config parameter
try:
initial_stats = GRAPH_INIT_STATS.get(CONFIG["features.graph.type"])
graph_panel.set_stats(initial_stats)
except ValueError:
pass # invalid stats, maybe connections when lookups are disabled
# prepopulates bandwidth values from state file
if CONFIG["features.graph.bw.prepopulate"] and torTools.get_conn().is_alive():
is_successful = bw_stats.prepopulate_from_state()
if is_successful:
graph_panel.update_interval = 4
class LabelPanel(panel.Panel):
"""
Panel that just displays a single line of text.
"""
def __init__(self, stdscr):
panel.Panel.__init__(self, stdscr, "msg", 0, height=1)
self.msg_text = ""
self.msg_attr = curses.A_NORMAL
def set_message(self, msg, attr = None):
"""
Sets the message being displayed by the panel.
Arguments:
msg - string to be displayed
attr - attribute for the label, normal text if undefined
"""
if attr is None:
attr = curses.A_NORMAL
self.msg_text = msg
self.msg_attr = attr
def draw(self, width, height):
self.addstr(0, 0, self.msg_text, self.msg_attr)
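# Illustrative use of LabelPanel (not part of the original file): this mirrors
# what Controller.set_msg() does with the sticky "msg" panel.
#
#   msg_panel = get_controller().get_panel("msg")
#   msg_panel.set_message("Paused", curses.A_STANDOUT)
#   msg_panel.redraw(True)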
class Controller:
"""
Tracks the global state of the interface
"""
def __init__(self, stdscr, sticky_panels, page_panels):
"""
Creates a new controller instance. Panel lists are ordered as they appear,
top to bottom on the page.
Arguments:
stdscr - curses window
sticky_panels - panels shown at the top of each page
page_panels - list of pages, each being a list of the panels on it
"""
self._screen = stdscr
self._sticky_panels = sticky_panels
self._page_panels = page_panels
self._page = 0
self._is_paused = False
self._force_redraw = False
self._is_done = False
self._last_drawn = 0
self.set_msg() # initializes our control message
def get_screen(self):
"""
Provides our curses window.
"""
return self._screen
def get_page_count(self):
"""
Provides the number of pages the interface has. This may be zero if all
page panels have been disabled.
"""
return len(self._page_panels)
def get_page(self):
"""
Provides the number belonging to this page. Page numbers start at zero.
"""
return self._page
def set_page(self, page_number):
"""
Sets the selected page, raising a ValueError if the page number is invalid.
Arguments:
page_number - page number to be selected
"""
if page_number < 0 or page_number >= self.get_page_count():
raise ValueError("Invalid page number: %i" % page_number)
if page_number != self._page:
self._page = page_number
self._force_redraw = True
self.set_msg()
def next_page(self):
"""
Increments the page number.
"""
self.set_page((self._page + 1) % len(self._page_panels))
def prev_page(self):
"""
Decrements the page number.
"""
self.set_page((self._page - 1) % len(self._page_panels))
def is_paused(self):
"""
True if the interface is paused, false otherwise.
"""
return self._is_paused
def set_paused(self, is_pause):
"""
Sets the interface to be paused or unpaused.
"""
if is_pause != self._is_paused:
self._is_paused = is_pause
self._force_redraw = True
self.set_msg()
for panel_impl in self.get_all_panels():
panel_impl.set_paused(is_pause)
def get_panel(self, name):
"""
Provides the panel with the given identifier. This returns None if no such
panel exists.
Arguments:
name - name of the panel to be fetched
"""
for panel_impl in self.get_all_panels():
if panel_impl.get_name() == name:
return panel_impl
return None
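  # Illustrative lookup (not part of the original file): panels are fetched by
  # the name they were constructed with, for instance
  #   graph_panel = get_controller().get_panel("graph")  # None if that panel is disabled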
def get_sticky_panels(self):
"""
    Provides the panels visible at the top of every page.
"""
return list(self._sticky_panels)
def get_display_panels(self, page_number = None, include_sticky = True):
"""
    Provides all panels belonging to a page and sticky content above it. This
    is ordered the way they are presented (top to bottom) on the page.
Arguments:
page_number - page number of the panels to be returned, the current
page if None
include_sticky - includes sticky panels in the results if true
"""
return_page = self._page if page_number is None else page_number
if self._page_panels:
if include_sticky:
return self._sticky_panels + self._page_panels[return_page]
else:
return list(self._page_panels[return_page])
else:
return self._sticky_panels if include_sticky else []
def get_daemon_panels(self):
"""
Provides thread panels.
"""
thread_panels = []
for panel_impl in self.get_all_panels():
if isinstance(panel_impl, threading.Thread):
thread_panels.append(panel_impl)
return thread_panels
def get_all_panels(self):
"""
Provides all panels in the interface.
"""
all_panels = list(self._sticky_panels)
for page in self._page_panels:
all_panels += list(page)
return all_panels
def redraw(self, force = True):
"""
Redraws the displayed panel content.
Arguments:
      force - redraws regardless of whether it's needed if true, otherwise
              ignores the request when there aren't changes to be displayed
"""
force |= self._force_redraw
self._force_redraw = False
current_time = time.time()
if CONFIG["features.refreshRate"] != 0:
if self._last_drawn + CONFIG["features.refreshRate"] <= current_time:
force = True
display_panels = self.get_display_panels()
occupied_content = 0
for panel_impl in display_panels:
panel_impl.set_top(occupied_content)
occupied_content += panel_impl.get_height()
    # apparently curses may cache display contents unless we explicitly
# request a redraw here...
# https://trac.torproject.org/projects/tor/ticket/2830#comment:9
if force:
self._screen.clear()
for panel_impl in display_panels:
panel_impl.redraw(force)
if force:
self._last_drawn = current_time
def request_redraw(self):
"""
Requests that all content is redrawn when the interface is next rendered.
"""
self._force_redraw = True
def get_last_redraw_time(self):
"""
Provides the time when the content was last redrawn, zero if the content
has never been drawn.
"""
return self._last_drawn
def set_msg(self, msg = None, attr = None, redraw = False):
"""
    Sets the message displayed in the interface's control panel. This uses our
default prompt if no arguments are provided.
Arguments:
msg - string to be displayed
attr - attribute for the label, normal text if undefined
redraw - redraws right away if true, otherwise redraws when display
content is next normally drawn
"""
if msg is None:
msg = ""
if attr is None:
if not self._is_paused:
msg = "page %i / %i - m: menu, p: pause, h: page help, q: quit" % (self._page + 1, len(self._page_panels))
attr = curses.A_NORMAL
else:
msg = "Paused"
attr = curses.A_STANDOUT
control_panel = self.get_panel("msg")
control_panel.set_message(msg, attr)
if redraw:
control_panel.redraw(True)
else:
self._force_redraw = True
def get_data_directory(self):
"""
Provides the path where arm's resources are being placed. The path ends
with a slash and is created if it doesn't already exist.
"""
data_dir = os.path.expanduser(CONFIG["startup.data_directory"])
if not data_dir.endswith("/"):
data_dir += "/"
if not os.path.exists(data_dir):
os.makedirs(data_dir)
return data_dir
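  # Illustrative result (not part of the original file): with the default
  # "startup.data_directory" of "~/.arm" this returns something like
  # "/home/<user>/.arm/", creating the directory on first use.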
def is_done(self):
"""
True if arm should be terminated, false otherwise.
"""
return self._is_done
def quit(self):
"""
Terminates arm after the input is processed. Optionally if we're connected
    to an arm generated tor instance then this may check if that should be shut
down too.
"""
self._is_done = True
# check if the torrc has a "ARM_SHUTDOWN" comment flag, if so then shut
# down the instance
is_shutdown_flag_present = False
torrc_contents = torConfig.get_torrc().get_contents()
if torrc_contents:
for line in torrc_contents:
if "# ARM_SHUTDOWN" in line:
is_shutdown_flag_present = True
break
if is_shutdown_flag_present:
try:
torTools.get_conn().shutdown()
except IOError as exc:
arm.popups.show_msg(str(exc), 3, curses.A_BOLD)
def heartbeat_check(is_unresponsive):
"""
  Logs if it's been ten seconds since the last BW event.
Arguments:
    is_unresponsive - whether the relay has already been flagged as unresponsive
"""
conn = torTools.get_conn()
last_heartbeat = conn.controller.get_latest_heartbeat()
if conn.is_alive():
if not is_unresponsive and (time.time() - last_heartbeat) >= 10:
is_unresponsive = True
log.notice("Relay unresponsive (last heartbeat: %s)" % time.ctime(last_heartbeat))
elif is_unresponsive and (time.time() - last_heartbeat) < 10:
# really shouldn't happen (meant Tor froze for a bit)
is_unresponsive = False
log.notice("Relay resumed")
return is_unresponsive
def conn_reset_listener(controller, event_type, _):
"""
Pauses connection resolution when tor's shut down, and resumes with the new
pid if started again.
"""
resolver = arm.util.tracker.get_connection_tracker()
if resolver.is_alive():
resolver.set_paused(event_type == State.CLOSED)
if event_type in (State.INIT, State.RESET):
# Reload the torrc contents. If the torrc panel is present then it will
# do this instead since it wants to do validation and redraw _after_ the
# new contents are loaded.
if get_controller().get_panel("torrc") is None:
torConfig.get_torrc().load(True)
def start_arm(stdscr):
"""
Main draw loop context.
Arguments:
stdscr - curses window
"""
start_time = CONFIG['start_time']
init_controller(stdscr, start_time)
control = get_controller()
# provides notice about any unused config keys
for key in conf.get_config("arm").unused_keys():
log.notice("Unused configuration entry: %s" % key)
# tells daemon panels to start
for panel_impl in control.get_daemon_panels():
panel_impl.start()
# allows for background transparency
try:
curses.use_default_colors()
except curses.error:
pass
# makes the cursor invisible
try:
curses.curs_set(0)
except curses.error:
pass
# logs the initialization time
log.info("arm started (initialization took %0.3f seconds)" % (time.time() - start_time))
# main draw loop
override_key = None # uses this rather than waiting on user input
is_unresponsive = False # flag for heartbeat responsiveness check
while not control.is_done():
display_panels = control.get_display_panels()
is_unresponsive = heartbeat_check(is_unresponsive)
    # sets panel visibility
for panel_impl in control.get_all_panels():
panel_impl.set_visible(panel_impl in display_panels)
# redraws the interface if it's needed
control.redraw(False)
stdscr.refresh()
# wait for user keyboard input until timeout, unless an override was set
if override_key:
key, override_key = override_key, None
else:
curses.halfdelay(CONFIG["features.redrawRate"] * 10)
key = stdscr.getch()
if key == curses.KEY_RIGHT:
control.next_page()
elif key == curses.KEY_LEFT:
control.prev_page()
elif key == ord('p') or key == ord('P'):
control.set_paused(not control.is_paused())
elif key == ord('m') or key == ord('M'):
arm.menu.menu.show_menu()
elif key == ord('q') or key == ord('Q'):
# provides prompt to confirm that arm should exit
if CONFIG["features.confirmQuit"]:
msg = "Are you sure (q again to confirm)?"
confirmation_key = arm.popups.show_msg(msg, attr = curses.A_BOLD)
quit_confirmed = confirmation_key in (ord('q'), ord('Q'))
else:
quit_confirmed = True
if quit_confirmed:
control.quit()
elif key == ord('x') or key == ord('X'):
# provides prompt to confirm that arm should issue a sighup
msg = "This will reset Tor's internal state. Are you sure (x again to confirm)?"
confirmation_key = arm.popups.show_msg(msg, attr = curses.A_BOLD)
if confirmation_key in (ord('x'), ord('X')):
try:
torTools.get_conn().reload()
except IOError as exc:
log.error("Error detected when reloading tor: %s" % exc.strerror)
elif key == ord('h') or key == ord('H'):
override_key = arm.popups.show_help_popup()
elif key == ord('l') - 96:
# force redraw when ctrl+l is pressed
control.redraw(True)
else:
for panel_impl in display_panels:
is_keystroke_consumed = panel_impl.handle_key(key)
if is_keystroke_consumed:
break
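# Illustrative entry point (not part of the original file): start_arm expects a
# curses window, so a launcher would typically set CONFIG['start_time'] and then
# hand control to curses.
#
#   curses.wrapper(start_arm)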
| gpl-3.0 | -8,813,717,470,918,531,000 | 26.297376 | 313 | 0.659885 | false |
ty707/airflow | airflow/jobs.py | 1 | 43857 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from past.builtins import basestring
from collections import defaultdict, Counter
from datetime import datetime, timedelta
import getpass
import logging
import socket
import subprocess
import multiprocessing
import math
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index, or_
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings
from airflow import configuration as conf
from airflow.exceptions import AirflowException
from airflow.utils.state import State
from airflow.utils.db import provide_session, pessimistic_connection_handling
from airflow.utils.email import send_email
from airflow.utils.logging import LoggingMixin
from airflow.utils import asciiart
from airflow.settings import Stats
DagRun = models.DagRun
Base = models.Base
ID_LEN = models.ID_LEN
Stats = settings.Stats
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.getfqdn()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(datetime.now() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
self.logger.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
        externally. This makes it possible to monitor at the system level
        what is actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter_by(id=self.id).one()
if job.state == State.SHUTDOWN:
self.kill()
if job.latest_heartbeat:
sleep_for = self.heartrate - (
datetime.now() - job.latest_heartbeat).total_seconds()
if sleep_for > 0:
sleep(sleep_for)
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
session.close()
self.logger.debug('[heart] Boom.')
def run(self):
Stats.incr(self.__class__.__name__.lower()+'_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
Stats.incr(self.__class__.__name__.lower()+'_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
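# Illustrative subclass sketch (not part of the original file): concrete jobs
# only override _execute(); BaseJob.run() wraps it with the database state
# bookkeeping shown above.
#
#     class ExampleJob(BaseJob):
#         __mapper_args__ = {'polymorphic_identity': 'ExampleJob'}
#
#         def _execute(self):
#             self.logger.info("doing the actual work here")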
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
    task and sees if the dependencies for the next schedules are met.
If so it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
:param do_pickle: to pickle the DAG object and send over to workers
for non-local executors
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
do_pickle=False,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = min(conf.getint('scheduler', 'max_threads'), multiprocessing.cpu_count())
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
if self.max_threads > 1:
self.logger.error("Cannot use more than 1 thread when using sqlite. Setting max_threads to 1")
self.max_threads = 1
@provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
        We're assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < datetime.now():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
            # use a SQL-level OR; a plain Python `or` between SQLAlchemy
            # clauses would not be translated into the query
            .filter(or_(SlaMiss.email_sent.is_(False),
                        SlaMiss.notification_sent.is_(False)))
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
blocking_tis = ([ti for ti in blocking_tis
if ti.are_dependencies_met(session=session)])
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
            # Track whether an email or any other alert notification was sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.logger.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
email_content = """\
            Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}<code></pre>
""".format(bug=asciiart.bug, **locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
@provide_session
def schedule_dag(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval:
DagRun = models.DagRun
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < datetime.now() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = datetime.now()
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False,
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX+'%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.logger.debug("Next run date based on tasks {}"
.format(next_run_date))
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.logger.debug("Dag start date: {}. Next run date: {}"
.format(dag.start_date, next_run_date))
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= datetime.now():
next_run = dag.create_dagrun(
run_id='scheduled__' + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=datetime.now(),
state=State.RUNNING,
external_trigger=False
)
return next_run
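    # Illustrative outcome of schedule_dag() (not part of the original file): for
    # a daily DAG whose last scheduled run was 2016-01-01, the next DagRun is
    # created with run_id 'scheduled__2016-01-02T00:00:00', and only once that
    # whole period has elapsed (period_end <= now).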
def process_dag(self, dag, queue):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
DagModel = models.DagModel
session = settings.Session()
# picklin'
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
# obtain db lock
db_dag = session.query(DagModel).filter_by(
dag_id=dag.dag_id
).with_for_update().one()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (datetime.now() - last_scheduler_run).total_seconds()
if secs_since_last < self.heartrate:
# release db lock
session.commit()
session.close()
return None
# Release the db lock
# the assumption here is that process_dag will take less
# time than self.heartrate otherwise we might unlock too
        # quickly and this should be moved below, but that would increase
# the time the record is locked and is blocking for other calls.
db_dag.last_scheduler_run = datetime.now()
session.commit()
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
# do not consider runs that are executed in the future
if run.execution_date > datetime.now():
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
            # this needs a fresh session; sometimes TIs get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
# future: remove adhoc
if task.adhoc:
continue
if ti.is_runnable(flag_upstream_failed=True):
self.logger.debug('Queuing task: {}'.format(ti))
queue.put((ti.key, pickle_id))
session.close()
@provide_session
def prioritize_queued(self, session, executor, dagbag):
# Prioritizing queued task instances
pools = {p.pool: p for p in session.query(models.Pool).all()}
TI = models.TaskInstance
queued_tis = (
session.query(TI)
.filter(TI.state == State.QUEUED)
.all()
)
self.logger.info(
"Prioritizing {} queued jobs".format(len(queued_tis)))
session.expunge_all()
d = defaultdict(list)
for ti in queued_tis:
if ti.dag_id not in dagbag.dags:
self.logger.info(
"DAG no longer in dagbag, deleting {}".format(ti))
session.delete(ti)
session.commit()
elif not dagbag.dags[ti.dag_id].has_task(ti.task_id):
self.logger.info(
"Task no longer exists, deleting {}".format(ti))
session.delete(ti)
session.commit()
else:
d[ti.pool].append(ti)
dag_blacklist = set(dagbag.paused_dags())
for pool, tis in list(d.items()):
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count per run
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
else:
open_slots = pools[pool].open_slots(session=session)
queue_size = len(tis)
self.logger.info("Pool {pool} has {open_slots} slots, {queue_size} "
"task instances in queue".format(**locals()))
if open_slots <= 0:
continue
tis = sorted(
tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
for ti in tis:
if open_slots <= 0:
continue
task = None
try:
task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
except:
self.logger.error("Queued task {} seems gone".format(ti))
session.delete(ti)
session.commit()
continue
if not task:
continue
ti.task = task
# picklin'
dag = dagbag.dags[ti.dag_id]
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor,
executors.SequentialExecutor):
self.logger.info("Pickling DAG {}".format(dag))
pickle_id = dag.pickle(session).id
if dag.dag_id in dag_blacklist:
continue
if dag.concurrency_reached:
dag_blacklist.add(dag.dag_id)
continue
if ti.are_dependencies_met():
executor.queue_task_instance(ti, pickle_id=pickle_id)
open_slots -= 1
else:
session.delete(ti)
session.commit()
continue
ti.task = task
session.commit()
def _split(self, items, size):
"""
This function splits a list of items into chunks of int size.
_split([1,2,3,4,5,6], 3) becomes [[1,2,3],[4,5,6]]
"""
size = max(1, size)
return [items[i:i + size] for i in range(0, len(items), size)]
def _do_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and schedules and processes them
"""
for dag in dags:
self.logger.debug("Scheduling {}".format(dag.dag_id))
dag = dagbag.get_dag(dag.dag_id)
if not dag:
continue
try:
self.schedule_dag(dag)
self.process_dag(dag, tis_out)
self.manage_slas(dag)
except Exception as e:
self.logger.exception(e)
@provide_session
def _reset_state_for_orphaned_tasks(self, dag_run, session=None):
"""
This function checks for a DagRun if there are any tasks
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running = self.executor.running
tis = dag_run.get_task_instances(state=State.SCHEDULED, session=session)
for ti in tis:
if ti.key not in queued_tis and ti.key not in running:
ti.state = State.NONE
self.logger.debug("Rescheduling orphaned task {}".format(ti))
session.commit()
def _execute(self):
session = settings.Session()
TI = models.TaskInstance
pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
self.logger.info("Starting the scheduler")
dagbag = models.DagBag(self.subdir, sync_to_db=True)
executor = self.executor = dagbag.executor
executor.start()
# grab orphaned tasks and make sure to reset their state
active_runs = DagRun.find(
state=State.RUNNING,
external_trigger=False,
session=session
)
for dr in active_runs:
self._reset_state_for_orphaned_tasks(dr, session=session)
self.runs = 0
while not self.num_runs or self.num_runs > self.runs:
try:
loop_start_dttm = datetime.now()
try:
self.prioritize_queued(executor=executor, dagbag=dagbag)
except Exception as e:
self.logger.exception(e)
self.runs += 1
try:
if self.runs % self.refresh_dags_every == 0:
dagbag = models.DagBag(self.subdir, sync_to_db=True)
else:
dagbag.collect_dags(only_if_updated=True)
except Exception as e:
self.logger.error("Failed at reloading the dagbag. {}".format(e))
Stats.incr('dag_refresh_error', 1, 1)
sleep(5)
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values() if dag.dag_id in self.dag_ids]
else:
dags = [
dag for dag in dagbag.dags.values()
if not dag.parent_dag]
paused_dag_ids = dagbag.paused_dags()
dags = [x for x in dags if x.dag_id not in paused_dag_ids]
# dags = filter(lambda x: x.dag_id not in paused_dag_ids, dags)
self.logger.debug("Total Cores: {} Max Threads: {} DAGs:{}".
format(multiprocessing.cpu_count(),
self.max_threads,
len(dags)))
dags = self._split(dags, math.ceil(len(dags) / self.max_threads))
tis_q = multiprocessing.Queue()
jobs = [multiprocessing.Process(target=self._do_dags,
args=(dagbag, dags[i], tis_q))
for i in range(len(dags))]
self.logger.info("Starting {} scheduler jobs".format(len(jobs)))
for j in jobs:
j.start()
while any(j.is_alive() for j in jobs):
while not tis_q.empty():
ti_key, pickle_id = tis_q.get()
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
if ti.state == State.SCHEDULED:
session.commit()
self.logger.debug("Task {} was picked up by another scheduler"
.format(ti))
continue
elif ti.state is State.NONE:
ti.state = State.SCHEDULED
self.executor.queue_task_instance(ti, pickle_id=pickle_id)
session.merge(ti)
session.commit()
for j in jobs:
j.join()
self.logger.info("Done queuing tasks, calling the executor's "
"heartbeat")
duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
self.logger.info("Loop took: {} seconds".format(duration_sec))
Stats.timing("scheduler_loop", duration_sec * 1000)
try:
self.import_errors(dagbag)
except Exception as e:
self.logger.exception(e)
try:
dagbag.kill_zombies()
except Exception as e:
self.logger.exception(e)
try:
# We really just want the scheduler to never ever stop.
executor.heartbeat()
self.heartbeat()
except Exception as e:
self.logger.exception(e)
self.logger.error("Tachycardia!")
except Exception as deep_e:
self.logger.exception(deep_e)
raise
finally:
settings.Session.remove()
executor.end()
session.close()
@provide_session
def heartbeat_callback(self, session=None):
Stats.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
    A backfill job consists of a dag or subdag for a specific time range. It
    triggers a set of task instance runs, in the right order, and lasts for
    as long as it takes for the set of task instances to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
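    # Hedged usage sketch (editorial addition, not from the original source):
    # a backfill is typically built around an existing DAG object and driven
    # through the BaseJob.run() entry point defined elsewhere in this module,
    # for example:
    #
    #   job = BackfillJob(dag=dag,
    #                     start_date=datetime(2016, 1, 1),
    #                     end_date=datetime(2016, 1, 7))
    #   job.run()
    #
    # The dates above are made-up illustrations; everything else follows the
    # constructor arguments below.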
def __init__(
self,
dag, start_date=None, end_date=None, mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_dependencies=False,
ignore_first_depends_on_past=False,
pool=None,
*args, **kwargs):
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_dependencies = ignore_dependencies
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.pool = pool
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
executor_fails = Counter()
# Build a list of all instances to run
tasks_to_run = {}
failed = set()
succeeded = set()
started = set()
skipped = set()
not_ready = set()
deadlocked = set()
for task in self.dag.tasks:
if (not self.include_adhoc) and task.adhoc:
continue
start_date = start_date or task.start_date
end_date = end_date or task.end_date or datetime.now()
for dttm in self.dag.date_range(start_date, end_date=end_date):
ti = models.TaskInstance(task, dttm)
tasks_to_run[ti.key] = ti
session.merge(ti)
session.commit()
# Triggering what is ready to get triggered
while tasks_to_run and not deadlocked:
not_ready.clear()
for key, ti in list(tasks_to_run.items()):
ti.refresh_from_db(session=session, lock_for_update=True)
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
succeeded.add(key)
tasks_to_run.pop(key)
session.commit()
continue
elif ti.state == State.SKIPPED:
skipped.add(key)
tasks_to_run.pop(key)
session.commit()
continue
# Is the task runnable? -- then run it
if ti.is_queueable(
include_queued=True,
ignore_depends_on_past=ignore_depends_on_past,
flag_upstream_failed=True):
self.logger.debug('Sending {} to executor'.format(ti))
if ti.state == State.NONE:
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_dependencies=self.ignore_dependencies,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool)
started.add(key)
# Mark the task as not ready to run
elif ti.state in (State.NONE, State.UPSTREAM_FAILED):
not_ready.add(key)
session.commit()
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run, then the backfill is deadlocked
if not_ready and not_ready == set(tasks_to_run):
deadlocked.update(tasks_to_run.values())
tasks_to_run.clear()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
if key not in tasks_to_run:
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
# executor reports failure
if state == State.FAILED:
# task reports running
if ti.state == State.RUNNING:
msg = (
'Executor reports that task instance {} failed '
'although the task says it is running.'.format(key))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.error("Skipping {} ".format(key))
skipped.add(key)
tasks_to_run.pop(key)
# anything else is a failure
else:
self.logger.error("Task instance {} failed".format(key))
failed.add(key)
tasks_to_run.pop(key)
# executor reports success
elif state == State.SUCCESS:
# task reports success
if ti.state == State.SUCCESS:
self.logger.info(
'Task instance {} succeeded'.format(key))
succeeded.add(key)
tasks_to_run.pop(key)
# task reports failure
elif ti.state == State.FAILED:
self.logger.error("Task instance {} failed".format(key))
failed.add(key)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.info("Task instance {} skipped".format(key))
skipped.add(key)
tasks_to_run.pop(key)
# this probably won't ever be triggered
elif ti in not_ready:
self.logger.info(
"{} wasn't expected to run, but it did".format(ti))
# executor reports success but task does not - this is weird
elif ti.state not in (
State.SCHEDULED,
State.QUEUED,
State.UP_FOR_RETRY):
                        self.logger.error(
                            "The airflow run command failed "
                            "to report an error. This should not occur "
                            "in normal circumstances. Task state is '{}', "
"reported state is '{}'. TI is {}"
"".format(ti.state, state, ti))
# if the executor fails 3 or more times, stop trying to
# run the task
executor_fails[key] += 1
if executor_fails[key] >= 3:
msg = (
'The airflow run command failed to report an '
'error for task {} three or more times. The '
'task is being marked as failed. This is very '
'unusual and probably means that an error is '
'taking place before the task even '
'starts.'.format(key))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
msg = ' | '.join([
"[backfill progress]",
"waiting: {0}",
"succeeded: {1}",
"kicked_off: {2}",
"failed: {3}",
"skipped: {4}",
"deadlocked: {5}"
]).format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(skipped),
len(deadlocked))
self.logger.info(msg)
executor.end()
session.close()
err = ''
if failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(failed))
if deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met() != t.are_dependencies_met(
ignore_depends_on_past=True)
for t in deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks were unable to run:\n{}\n'.format(deadlocked)
if err:
raise AirflowException(err)
self.logger.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_dependencies=False,
ignore_depends_on_past=False,
force=False,
mark_success=False,
pickle_id=None,
pool=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_dependencies = ignore_dependencies
self.ignore_depends_on_past = ignore_depends_on_past
self.force = force
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
        # terminating state is used so that a job doesn't try to
# terminate multiple times
self.terminating = False
# Keeps track of the fact that the task instance has been observed
# as running at least once
self.was_running = False
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
command = self.task_instance.command(
raw=True,
ignore_dependencies=self.ignore_dependencies,
ignore_depends_on_past=self.ignore_depends_on_past,
force=self.force,
pickle_id=self.pickle_id,
mark_success=self.mark_success,
job_id=self.id,
pool=self.pool,
)
self.process = subprocess.Popen(['bash', '-c', command])
return_code = None
while return_code is None:
self.heartbeat()
return_code = self.process.poll()
def on_kill(self):
self.process.terminate()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# task is already terminating, let it breathe
return
# Suicide pill
TI = models.TaskInstance
ti = self.task_instance
state = session.query(TI.state).filter(
TI.dag_id==ti.dag_id, TI.task_id==ti.task_id,
TI.execution_date==ti.execution_date).scalar()
if state == State.RUNNING:
self.was_running = True
elif self.was_running and hasattr(self, 'process'):
logging.warning(
"State of this instance has been externally set to "
"{self.task_instance.state}. "
"Taking the poison pill. So long.".format(**locals()))
self.process.terminate()
self.terminating = True
| apache-2.0 | 6,076,973,327,566,079,000 | 37.037294 | 110 | 0.519393 | false |
frankdilo/cropper-python | cropper/cli.py | 1 | 4116 | #!/usr/bin/env python
import os
from glob import glob
import click
import shutil
from PIL import Image
from devices import DEVICES, ALL_DEVICE_NAMES
IPAD_MASTER_DIRNAME = 'iPadMaster'
IPHONE_MASTER_DIRNAME = 'iPhoneMaster'
def safe_mkdir(path):
try:
os.mkdir(path)
except OSError:
pass
def safe_mkdir_intermediate(path):
"""
    Create the parent directory of path (including any intermediate directories), without failing if it already exists.
"""
dir_path = os.path.dirname(path)
try:
os.makedirs(dir_path)
except OSError:
pass
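# Illustrative example (editorial addition; the path is a made-up assumption):
#   safe_mkdir_intermediate('/tmp/screens/en-US/shot.png')
# creates '/tmp/screens/en-US' and any missing parents, silently ignoring the
# OSError raised when the directory already exists; the file itself is not
# created.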
def transform_images(src_paths, dest_paths, resize_to, crop_margins):
for (src, dest) in zip(src_paths, dest_paths):
src_image = Image.open(src)
final_image = src_image
# resize
if resize_to:
final_image = src_image.resize(resize_to, Image.LANCZOS)
# crop
if crop_margins:
# left, upper, right, lower
cropped_size = (0 + crop_margins[0]/2, 0, resize_to[0] - crop_margins[0]/2, resize_to[1]-crop_margins[1])
final_image = final_image.crop(cropped_size)
# save
safe_mkdir_intermediate(dest)
final_image.save(dest)
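# Minimal usage sketch (editorial addition; file names and sizes are made-up):
#   transform_images(['master/shot.png'], ['out/shot.png'],
#                    resize_to=(1242, 2208), crop_margins=(40, 120))
# Each source image is resized with LANCZOS resampling and, when crop_margins
# is given, trimmed by half of crop_margins[0] from each side and by
# crop_margins[1] from the bottom before being written to the matching
# destination path.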
def group_screenshots_by_language(master_dir):
language_dirs = glob(os.path.join(master_dir, IPHONE_MASTER_DIRNAME, '*'))
language_dirs = filter(os.path.isdir, language_dirs)
supported_languages = [os.path.basename(lang_dir) for lang_dir in language_dirs]
screens_by_device = {device: glob(os.path.join(master_dir, device, '*', '*.png'))
for device in ALL_DEVICE_NAMES}
screens_by_language_and_device = {lang: {} for lang in supported_languages}
    for device, screens in screens_by_device.items():
for lang in supported_languages:
screens_by_language_and_device[lang][device] = filter(lambda path: lang in path, screens)
for lang in supported_languages:
# create top-level language directory
lang_dir = os.path.join(master_dir, lang)
safe_mkdir(lang_dir)
# create one sub-folder inside the language directory, for each device type
for device in ALL_DEVICE_NAMES:
device_subdir = os.path.join(lang_dir, device)
safe_mkdir(device_subdir)
screens_to_move = screens_by_language_and_device[lang][device]
for tomove in screens_to_move:
dest = os.path.join(device_subdir, os.path.basename(tomove))
os.rename(tomove, dest)
def rm_empty_device_folders(master_dir):
for device in ALL_DEVICE_NAMES:
dir_path = os.path.join(master_dir, device)
shutil.rmtree(dir_path)
@click.command()
@click.argument('master_dir', type=str)
def main(master_dir):
master_dir = os.path.abspath(master_dir)
iphone_images_pattern = os.path.join(master_dir, IPHONE_MASTER_DIRNAME) + '/*/*.png'
ipad_images_pattern = os.path.join(master_dir, IPAD_MASTER_DIRNAME) + '/*/*.png'
iphone_img_paths = glob(iphone_images_pattern)
ipad_img_paths = glob(ipad_images_pattern)
    if not iphone_img_paths:
        print("Error: no master iPhone images found!")
        exit(1)
    if not ipad_img_paths:
        print("Error: no master iPad images found!")
        exit(1)
# iphone screenshots
for device_name, operations in DEVICES['iPhone'].items():
dest_paths = [img_path.replace('iPhoneMaster', device_name) for img_path in iphone_img_paths]
transform_images(iphone_img_paths, dest_paths, operations['resize'], operations['crop'])
        print("{} done".format(device_name))
# ipad screenshots
for device_name, operations in DEVICES['iPad'].items():
dest_paths = [img_path.replace('iPadMaster', device_name) for img_path in ipad_img_paths]
transform_images(ipad_img_paths, dest_paths, operations['resize'], operations['crop'])
        print("{} done".format(device_name))
    print("Reorganizing languages...")
    group_screenshots_by_language(master_dir)
    print("Cleaning up...")
rm_empty_device_folders(master_dir)
if __name__ == "__main__":
    main()
| bsd-3-clause | -8,730,890,915,175,512,000 | 30.906977 | 117 | 0.646987 | false |
botherder/volatility | volatility/plugins/mac/netstat.py | 1 | 2242 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.lsof as lsof
class mac_netstat(lsof.mac_lsof):
""" Lists active per-process network connections """
def render_text(self, outfd, data):
self.table_header(outfd, [("Proto", "6"),
("Local IP", "20"),
("Local Port", "6"),
("Remote IP", "20"),
("Remote Port", "6"),
("State", "20"),
("Process", "24")])
for proc, i, fd, _path in data:
if fd.f_fglob.fg_type == 'DTYPE_SOCKET':
socket = fd.f_fglob.fg_data.dereference_as("socket")
family = socket.family
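                # Added note (assumption based on BSD/XNU socket constants):
                # family 1 is AF_UNIX, 2 is AF_INET and 30 is AF_INET6 on
                # OS X, which is why UNIX sockets are written out separately
                # from the IPv4/IPv6 table rows below.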
if family == 1:
upcb = socket.so_pcb.dereference_as("unpcb")
path = upcb.unp_addr.sun_path
outfd.write("UNIX {0}\n".format(path))
elif family in [2, 30]:
proto = socket.protocol
state = socket.state
(lip, lport, rip, rport) = socket.get_connection_info()
self.table_row(outfd, proto, lip, lport, rip, rport, state, "{}/{}".format(proc.p_comm, proc.p_pid))
| gpl-2.0 | 9,063,788,820,111,397,000 | 35.754098 | 120 | 0.544157 | false |
snakazawa/qibluemix | sample/sttproxy/sttproxy.py | 1 | 2371 | # -*- coding: utf-8 -*-
u"""
see readme.md
"""
import os
import sys
import time
from naoqi import ALBroker
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from qibluemix import STTProxy, get_logger
from qibluemix.pepper import SpeechRecognitionMemory, StreamingAudioRecorder
from qibluemix.watson import Watson
# ==== parameters ====
PEPPER_IP = "192.168.xxx.xxx"
PEPPER_PORT = 9559
EVENT_ROOT_NAME = "Bluemix/STTProxy/"  # root path in Pepper's memory used by this application
USERNAME = "********" # credentials.username (Bluemix Speech To Text)
PASSWORD = "********" # credentials.password (Bluemix Speech To Text)
URL = "https://stream.watsonplatform.net/speech-to-text/api"
CONFIDENCE = 0.2  # minimum acceptable transcription confidence (0.0-1.0); results below this threshold are ignored
# ==== /parameters ====
StreamingAudioRecorderModule = None
SpeechRecognitionMemoryModule = None
broker = None
logger = get_logger()
def main():
global SpeechRecognitionMemoryModule
global StreamingAudioRecorderModule
global broker
logger.info("init watson")
watson = Watson(USERNAME, PASSWORD, URL)
token = get_token(watson)
stream = watson.recognize_stream(token)
logger.info("init remote pepper")
broker = ALBroker("myBroker", "0.0.0.0", 0, PEPPER_IP, PEPPER_PORT)
logger.info("init StreamingAudioRecorder")
recorder = StreamingAudioRecorderModule = StreamingAudioRecorder("StreamingAudioRecorderModule")
logger.info("init SpeechRecognitionMemory")
memory = SpeechRecognitionMemoryModule = SpeechRecognitionMemory("SpeechRecognitionMemoryModule", EVENT_ROOT_NAME)
logger.info("init SpeechToTextProxy")
proxy = STTProxy(recorder, stream, memory)
proxy.init()
logger.info("ready...")
# manual(proxy, duration=10, after_wait=3)
# service
while True:
time.sleep(1)
def manual(proxy, duration=10, after_wait=3):
logger.info("start")
proxy.start()
time.sleep(duration)
logger.info("stop")
proxy.stop()
time.sleep(after_wait)
logger.info("end")
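# Editorial note: manual() above is assumed to be a debugging helper; swapping
# it in for the endless service loop in main() records audio from Pepper for
# `duration` seconds, streams it to Watson Speech to Text, then stops, e.g.:
#   manual(proxy, duration=10, after_wait=3)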
def get_token(watson):
r = watson.get_token()
if r.status_code != 200:
logger.info(r.url)
logger.info(r.status_code)
logger.info(r.text.encode('utf-8'))
exit(1)
return r.text
if __name__ == "__main__":
main()
| mit | -6,306,866,242,211,763,000 | 24.561798 | 118 | 0.687912 | false |
RudolfCardinal/crate | crate_anon/preprocess/rio_pk.py | 1 | 5802 | #!/usr/bin/env python
"""
crate_anon/preprocess/rio_pk.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal ([email protected]).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <http://www.gnu.org/licenses/>.
===============================================================================
**Details of the names of primary keys in selected RiO tables.**
"""
__SUPERSEDED = """
RIO_6_2_ATYPICAL_PKS = { # SUPERSEDED by better PK detection
# These are table: pk_field mappings for PATIENT tables, i.e. those
# containing the ClientID field, where that PK is not the default of
# SequenceID.
# -------------------------------------------------------------------------
# RiO Core
# -------------------------------------------------------------------------
# Ams*: Appointment Management System
'AmsAppointmentContactActivity': 'ActivitySequenceID',
'AmsAppointmentOtherHCP': None, # non-patient; non-unique SequenceID
# ... SequenceID is non-unique and the docs also list it as an FK;
# ActivitySequenceID this is unique and a PK
'AmsReferralDatesArchive': 'AMSSequenceID',
# ... UNVERIFIED as no rows in our data; listed as a PK and an FK
'AmsReferralListUrgency': None,
'AmsReferralListWaitingStatus': None,
'AmsStream': None, # non-patient; non-unique SequenceID
'CarePlanIndex': 'CarePlanID',
'CarePlanProblemOrder': None,
'ClientAddressMerged': None, # disused table
'ClientCareSpell': None, # CareSpellNum is usually 1 for a given ClientID
'ClientDocumentAdditionalClient': None,
'ClientFamily': None,
'ClientFamilyLink': None,
'ClientGPMerged': None,
'ClientHealthCareProvider': None,
'ClientMerge': None,
'ClientMerged': None,
'ClientName': 'ClientNameID',
'ClientOtherDetail': None, # not in docs, but looks like Core
'ClientPhoto': None,
'ClientPhotoMerged': None,
'ClientProperty': None,
'ClientPropertyMerged': None,
'ClientTelecom': 'ClientTelecomID',
'ClientUpdatePDSCache': None,
# Con*: Contracts
'Contract': 'ContractNumber',
'ConAdHocAwaitingApproval': 'SequenceNo',
'ConClientInitialBedRate': None,
'ConClinicHistory': 'SequenceNo',
'ConLeaveDiscountHistory': 'SequenceNo',
# Not documented, but looks like Core
'Deceased': None, # or possibly TrustWideID (or just ClientID!)
'DemClientDeletedDetails': None,
# EP: E-Prescribing
# ... with DA: Drug Administration
# ... with DS: Drug Service
'EPClientConditions': 'RowID',
'EPClientPrescription': 'PrescriptionID',
'EPClientSensitivities': None, # UNVERIFIED: None? Joint PK on ProdID?
'EPDiscretionaryDrugClientLink': None,
'EPVariableDosageDrugLink': 'HistoryID', # UNVERIFIED
'EPClientAllergies': 'ReactionID',
'DAConcurrencyControl': None,
'DAIPPrescription': 'PrescriptionID',
'DSBatchPatientGroups': None,
'DSMedicationBatchContinue': None,
'DSMedicationBatchLink': None,
# Ims*: Inpatient Management System
'ImsEventLeave': 'UniqueSequenceID', # SequenceID
'ImsEventMovement': None,
'ImsEventRefno': None, # Not in docs but looks like Core.
'ImsEventRefnoBAKUP': None, # [Sic.] Not in docs but looks like Core.
# LR*: Legitimate Relationships
'LRIdentifiedCache': None,
# Mes*: messaging
'MesLettersGenerated': 'Reference',
# Mnt*: Mental Health module (re MHA detention)
'MntArtAttendee': None, # SequenceID being "of person within a meeting"
'MntArtOutcome': None, # ditto
'MntArtPanel': None, # ditto
'MntArtRpts': None, # ditto
'MntArtRptsReceived': None, # ditto
'MntClientEctSection62': None,
'MntClientMedSection62': None,
'MntClientSectionDetailCareCoOrdinator': None,
'MntClientSectionDetailCourtAppearance': None,
'MntClientSectionDetailFMR': None,
'MntClientSectionReview': None,
# NDTMS*: Nation(al?) Drug Treatment Monitoring System
# SNOMED*: SNOMED
'SNOMED_Client': 'SC_ID',
# UserAssess*: user assessment (= non-core?) tables.
# See other default PK below: type12:
# -------------------------------------------------------------------------
# Non-core? No docs available.
# -------------------------------------------------------------------------
# Chd*: presumably, child development
'ChdClientDevCheckBreastFeeding': None,
# ... guess; DevChkSeqID is probably FK to ChdClientDevCheck.SequenceID
# ??? But it has q1-q30, qu2-14, home, sch, comm... assessment tool...
'CYPcurrentviewImport': None, # not TrustWideID (which is non-unique)
'GoldmineIfcMapping': None, # no idea, really, and no data to explore
'KP90ErrorLog': None,
'ReportsOutpatientWaitersHashNotSeenReferrals': None,
'ReportsOutpatientWaitersNotSeenReferrals': None,
'UserAssesstfkcsa_childprev': 'type12_RowID', # Keeping Children Safe Assessment subtable # noqa
'UserAssesstfkcsa_childs': 'type12_RowID', # Keeping Children Safe Assessment subtable # noqa
}
"""
RIO_6_2_ATYPICAL_PATIENT_ID_COLS = {
'SNOMED_Client': 'SC_ClientID',
}
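# Illustrative lookup (editorial addition; the fallback below is a hypothetical
# example, not part of this module): callers are assumed to fall back to the
# standard ClientID column when a table has no atypical mapping, e.g.
#   patient_id_col = RIO_6_2_ATYPICAL_PATIENT_ID_COLS.get(table_name, 'ClientID')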
| gpl-3.0 | -156,543,975,734,238,370 | 35.2625 | 102 | 0.638401 | false |
n9code/calm | tests/test_websocket.py | 1 | 1369 | from tornado.testing import gen_test
from tornado.websocket import WebSocketHandler
from calm.testing import CalmWebSocketTestCase
from calm import Application
from calm.ex import DefinitionError
app = Application('testws', '1')
@app.websocket('/ws')
class SomeWebSocket(WebSocketHandler):
OPEN_MESSAGE = "some open message"
def open(self):
self.write_message(self.OPEN_MESSAGE)
def on_message(self, message):
self.write_message(message)
class WebSocketTests(CalmWebSocketTestCase):
def get_calm_app(self):
global app
return app
def test_wrong_class(self):
class WrongClass(object):
pass
self.assertRaises(DefinitionError, app.websocket('/wrong_class'),
WrongClass)
self.assertRaises(DefinitionError, app.websocket('/wrong_class'),
1)
@gen_test
async def test_base_case(self):
websocket = await self.init_websocket('/ws')
msg = await websocket.read_message()
self.assertEqual(msg, SomeWebSocket.OPEN_MESSAGE)
some_msg = "some echo message"
websocket.write_message(some_msg)
msg = await websocket.read_message()
self.assertEqual(msg, some_msg)
websocket.close()
msg = await websocket.read_message()
self.assertEqual(msg, None)
| mit | 7,134,426,757,140,080,000 | 25.843137 | 73 | 0.652301 | false |
camillescott/boink | goetia/cli/cdbg_stream.py | 1 | 11576 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : cdbg_stream.py
# License: MIT
# Author : Camille Scott <[email protected]>
# Date : 11.03.2020
from goetia import libgoetia
from goetia.cdbg import (compute_connected_component_callback,
compute_unitig_fragmentation_callback,
write_cdbg_metrics_callback,
write_cdbg_callback)
from goetia.dbg import get_graph_args, process_graph_args
from goetia.parsing import get_fastx_args, iter_fastx_inputs
from goetia.processors import AsyncSequenceProcessor, at_modulo_interval
from goetia.messages import (Interval, SampleStarted, SampleFinished, Error, AllMessages)
from goetia.metadata import CUR_TIME
from goetia.serialization import cDBGSerialization
from goetia.cli.args import get_output_interval_args, print_interval_settings
from goetia.cli.runner import CommandRunner
import curio
import os
import sys
class cDBGRunner(CommandRunner):
def __init__(self, parser):
get_graph_args(parser)
get_cdbg_args(parser)
get_output_interval_args(parser)
group = get_fastx_args(parser)
group.add_argument('-o', dest='output_filename', default='/dev/stdout')
group.add_argument('-i', '--inputs', dest='inputs', nargs='+', required=True)
parser.add_argument('--echo', default=None,
help='echo all events to the given file.')
parser.add_argument('--curio-monitor', default=False, action='store_true',
help='Run curio kernel monitor for async debugging.')
parser.add_argument('--verbose', default=False, action='store_true')
super().__init__(parser)
def postprocess_args(self, args):
process_graph_args(args)
process_cdbg_args(args)
def setup(self, args):
os.makedirs(args.results_dir, exist_ok=True)
self.dbg_t = args.graph_t
self.hasher = args.hasher_t(args.ksize)
self.storage = args.storage.build(*args.storage_args)
self.dbg = args.graph_t.build(self.storage, self.hasher)
self.cdbg_t = libgoetia.cdbg.cDBG[type(self.dbg)]
self.compactor_t = libgoetia.cdbg.StreamingCompactor[type(self.dbg)]
self.compactor = self.compactor_t.Compactor.build(self.dbg)
if args.normalize:
self.file_processor = self.compactor_t.NormalizingCompactor[FastxReader].build(self.compactor,
args.normalize,
args.interval)
else:
self.file_processor = self.compactor_t.Processor.build(self.compactor,
args.interval)
# Iterator over samples (pairs or singles, depending on pairing-mode)
sample_iter = iter_fastx_inputs(args.inputs, args.pairing_mode, names=args.names)
# AsyncSequenceProcessor does event management and callback for the FileProcessors
self.processor = AsyncSequenceProcessor(self.file_processor, sample_iter, args.echo)
# Subscribe a listener to the FileProcessor producer
self.worker_listener = self.processor.add_listener('worker_q', 'cdbg.consumer')
#
# Register callbacks for data outputs.
# Track a list of files that need to be closed with a ]
# when we're done.
#
self.to_close = []
if args.track_cdbg_metrics:
self.worker_listener.on_message(Interval,
write_cdbg_metrics_callback,
self.compactor,
args.track_cdbg_metrics,
args.verbose)
self.to_close.append(args.track_cdbg_metrics)
if args.track_unitig_bp:
if args.unitig_bp_bins is None:
bins = [args.ksize, 100, 200, 500, 1000]
else:
bins = args.unitig_bp_bins
self.worker_listener.on_message(Interval,
at_modulo_interval(compute_unitig_fragmentation_callback,
modulus=args.unitig_bp_tick),
self.cdbg_t,
self.compactor.cdbg,
args.track_unitig_bp,
bins,
verbose=args.verbose)
self.to_close.append(args.track_unitig_bp)
if args.track_cdbg_components:
self.worker_listener.on_message(Interval,
at_modulo_interval(compute_connected_component_callback,
modulus=args.cdbg_components_tick),
self.cdbg_t,
self.compactor.cdbg,
args.track_cdbg_components,
args.component_sample_size,
verbose=args.verbose)
self.to_close.append(args.track_cdbg_components)
if args.save_cdbg:
for cdbg_format in args.save_cdbg_format:
self.worker_listener.on_message(Interval,
at_modulo_interval(write_cdbg_callback,
modulus=args.cdbg_tick),
args.save_cdbg,
cdbg_format,
verbose=args.verbose)
self.worker_listener.on_message(SampleFinished,
write_cdbg_callback,
args.save_cdbg,
cdbg_format,
verbose=args.verbose)
# Close all files when done
async def close_files(msg, files):
for file_name in files:
async with curio.aopen(file_name, 'a') as fp:
await fp.write('\n]\n')
self.worker_listener.on_message(SampleFinished, close_files, self.to_close)
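        # Added note (assumption drawn from the callbacks above): the tracked
        # metric files are written incrementally as JSON arrays, so appending
        # '\n]\n' on SampleFinished closes the outermost array and leaves the
        # files as valid JSON.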
#
# Regular diagnostics output
#
def info_output(msg):
info = f'{msg.msg_type}: {getattr(msg, "state", "")}'\
f'\n\tSample: {msg.sample_name}'\
f'\n\tSequences: {msg.sequence}'\
f'\n\tk-mers: {msg.t}'
if msg.msg_type == 'Error':
info += f'\n\tError: {msg.error}'
print(info, file=sys.stderr)
self.worker_listener.on_message(AllMessages, info_output)
def execute(self, args):
curio.run(self.processor.start, with_monitor=args.curio_monitor)
def teardown(self):
pass
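# Hedged usage sketch (editorial addition; the real dispatch lives in
# goetia.cli.runner.CommandRunner and may differ from this outline):
#
#   parser = argparse.ArgumentParser()
#   runner = cDBGRunner(parser)
#   args = parser.parse_args()
#   runner.postprocess_args(args)
#   runner.setup(args)
#   runner.execute(args)
#   runner.teardown()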
def get_cdbg_args(parser):
default_prefix = 'goetia.build-cdbg.' + CUR_TIME
parser.default_prefix = default_prefix
group = parser.add_argument_group('cDBG')
group.add_argument('--results-dir',
default=default_prefix)
group.add_argument('--normalize',
type=int,
nargs='?',
const=10)
group.add_argument('--save-cdbg',
metavar='PREFIX.<format>',
nargs='?',
const='goetia.cdbg.graph',
help='Save a copy of the cDBG.')
group.add_argument('--save-cdbg-format',
nargs='+',
choices=cDBGSerialization.FORMATS,
default=['gfa1'])
group.add_argument('--cdbg-tick',
type=int,
default=10,
help='Save every N interval ticks.')
group.add_argument('--track-cdbg-metrics',
metavar='FILE_NAME.json',
nargs='?',
const='goetia.cdbg.stats.json',
help='Output basic cDBG metrics.')
group.add_argument('--cdbg-metrics-tick',
type=int,
default=5,
help='Output every N interval ticks.')
group.add_argument('--track-cdbg-components',
metavar='FILE_NAME.json',
nargs='?',
const='goetia.cdbg.components.json',
help='Save the distribution of component sizes.')
group.add_argument('--component-sample-size',
type=int,
default=10000,
help='Number of components to sample for size.')
group.add_argument('--cdbg-components-tick',
type=int,
default=5,
help='Sample and save distribution every N interval ticks.')
group.add_argument('--track-unitig-bp',
metavar='FILENAME.json',
nargs='?',
const='goetia.cdbg.unitigs.bp.json',
help='Track the distribution of unitig sizes.')
group.add_argument('--unitig-bp-bins',
nargs='+',
type=int,
help='Bin sizes of distribution.')
group.add_argument('--unitig-bp-tick',
type=int,
default=10)
group.add_argument('--validate',
metavar='FILENAME.csv',
nargs='?',
const='goetia.cdbg.validation.csv')
return group
def process_cdbg_args(args):
def join(p):
return p if p is None else os.path.join(args.results_dir, p)
args.track_cdbg_stats = join(args.track_cdbg_metrics)
args.track_cdbg_components = join(args.track_cdbg_components)
args.save_cdbg = join(args.save_cdbg)
args.track_cdbg_unitig_bp = join(args.track_unitig_bp)
def print_cdbg_args(args):
print('* cDBG Params', file=sys.stderr)
print('* Directory: ', args.results_dir, file=sys.stderr)
if args.save_cdbg:
print('* Saving cDBG every {0} sequences with file prefix {1}'.format(args.coarse_interval,
args.save_cdbg),
file=sys.stderr)
print('* cDBG save formats: {0}'.format(', '.join(args.save_cdbg_format)))
if args.track_cdbg_stats:
print('* Tracking cDBG stats and reporting every {0} sequences'.format(args.fine_interval),
file=sys.stderr)
print('* Saving tracking information to', args.track_cdbg_stats, file=sys.stderr)
if args.track_cdbg_history:
print('* Tracking cDBG history and saving to', args.track_cdbg_history, file=sys.stderr)
if args.validate:
print('* cDBG will be validated on completion and results saved to', args.validate,
file=sys.stderr)
print('*', '*' * 10, '*', sep='\n', file=sys.stderr)
| mit | 4,909,020,035,630,128,000 | 41.248175 | 106 | 0.503283 | false |