| code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3-1.05M) |
|---|---|---|---|---|---|
import os
from django.http import FileResponse
from wsgiref.util import FileWrapper
from settings.static import MEDIA_URL
# from django.core.servers.basehttp import FileWrapper
from django.views.generic import TemplateView
from django.shortcuts import render_to_response, render, redirect, get_object_or_404
from django.core.mail import send_mail
from django.http import HttpResponse
from django.template import RequestContext
from django.http import HttpResponseRedirect
from pangolinfog.forms import *
# from pangolinfog.recaptcha.forms import *
from django.template.loader import get_template
from django.core.mail import EmailMessage
from django.template import Context
from product.models import Category
from product.models import Product, Accessory
from content.models import Slide
from django.core.urlresolvers import reverse_lazy
from django.views.generic import FormView
from nocaptcha_recaptcha.fields import NoReCaptchaField
def contact(request):
form_class = ContactForm
success_url = reverse_lazy('success')
args = {}
background_image = get_object_or_404(Slide, header_about=1)
args['menu'] = "contact"
categories_main_menu = Category.objects.filter(published_in_menu=1).order_by('ordering')
args['categories_main_menu'] = categories_main_menu
args['form'] = form_class
args['background_image'] = background_image
def form_valid(self, form):
return super(form_class, self).form_valid(form)
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
contact_name = request.POST.get('contact_name', '')
contact_email = request.POST.get('contact_email', '')
contact_phone = request.POST.get('contact_phone', '')
form_content = request.POST.get('content', '')
# Email the profile with the
# contact information
template = get_template('contact_template.txt')
context = Context({
'contact_name': contact_name,
'contact_email': contact_email,
'contact_phone': contact_phone,
'form_content': form_content,
})
content = template.render(context)
email = EmailMessage(
"Pangolin Fog",
content,
"Pangolin Fog" +'',
['vladimir@pangolin.com'],
headers = {'Reply-To': contact_email }
)
email.send()
return redirect(request.META.get('HTTP_REFERER', '/'))
return render(request, 'contact.html', args)
def jq_subsc(request):
return render(request, 'jq_subsc.html')
def download_file(request):
_file = 'manualtourhazer2.pdf.zip'
filename = os.path.basename(_file)
# open() works on both Python 2 and 3; the Python 2-only file() builtin was removed in Python 3
response = FileResponse(FileWrapper(open(filename, 'rb')), content_type='application/x-zip-compressed')
response['Content-Disposition'] = "attachment; filename=%s" % _file
return response
def download_mp3(request):
_file = 'Last_Summer_in_Yalta.mp3.zip'
filename = os.path.basename(_file)
# open() works on both Python 2 and 3; the Python 2-only file() builtin was removed in Python 3
response = FileResponse(FileWrapper(open(filename, 'rb')), content_type='application/x-zip-compressed')
response['Content-Disposition'] = "attachment; filename=%s" % _file
return response
def main(request):
args = {}
slides = Slide.objects.filter(published_main=1).order_by('ordering')
categories_main_menu = Category.objects.filter(published_in_menu=1).order_by('ordering')
products_main = Product.objects.filter(published_main=1)
args['products_main'] = products_main
args['categories_main_menu'] = categories_main_menu
args['slides'] = slides
return render_to_response("home.html", args)
def news(request):
args = {}
slides = Slide.objects.filter(published_portfolio=1).order_by('ordering')
news = Slide.objects.filter(published_news=1).order_by('ordering')
background_image = get_object_or_404(Slide, header_about=1)
args['news'] = news
args['menu'] = "news"
args['slides'] = slides
args['background_image'] = background_image
return render_to_response("news.html", args)
def about(request):
args = {}
slides = Slide.objects.filter(published_portfolio=1).order_by('ordering')
news = Slide.objects.filter(published_news=1).order_by('ordering')
background_image = get_object_or_404(Slide, header_about=1)
args['news'] = news
args['menu'] = "about"
args['slides'] = slides
args['background_image'] = background_image
return render_to_response("about.html", args)
| skylifewww/pangolin-fog | pangolinfog/views.py | Python | mit | 4,937 |
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import logging
class IlsaSubjectBase(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def add(self, observer):
logging.debug("In IlsaSubjectBase::register()")
logging.info(":: IlsaSubjectBase::register() not implemented")
@abc.abstractmethod
def remove(self, observer):
logging.debug("In IlsaSubjectBase::remove()")
logging.info(":: IlsaSubjectBase::remove() not implemented")
@abc.abstractmethod
def notify(self):
logging.debug("In IlsaSubjectBase::notify()")
logging.info(":: IlsaSubjectBase::notify() not implemented")
class IlsaObserverBase(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def notify(self, *args, **kwargs):
logging.debug("In IlsaObserverBase::notify()")
logging.info(":: IlsaObserverBase::notify() not implemented")
class IlsaTriggerActionBase(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def trigger(self, *args, **kwargs):
logging.debug("In IlsaTriggerActionBase::trigger()")
logging.info(":: IlsaTriggerActionBase::trigger() not implemented")
class IlsaCompositeBase(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def add(self, plugin):
logging.debug("In IlsaCompositeBase::add()")
logging.info(":: IlsaCompositeBase::add() not implemented")
@abc.abstractmethod
def remove(self, plugin):
logging.debug("In IlsaCompositeBase::remove()")
logging.info(":: IlsaCompositeBase::remove() not implemented")
@abc.abstractmethod
def childs(self, index=None):
logging.debug("In IlsaCompositeBase::childs()")
logging.info(":: IlsaCompositeBase::childs() not implemented")
@abc.abstractmethod
def lenght(self, index=None):
logging.debug("In IlsaCompositeBase::lenght()")
logging.info(":: IlsaCompositeBase::lenght() not implemented")
| aevum/moonstone | src/moonstone/ilsa/base.py | Python | lgpl-3.0 | 2,901 |
#!/usr/bin/python
# Copyright 2012 William Yu
# wyu@ateneo.edu
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a demonstration file created to show how to obtain flow
and port statistics from OpenFlow 1.0-enabled switches. The flow
statistics handler contains a summary of web-only traffic.
"""
# standard includes
from pox.core import core
from pox.lib.util import dpidToStr
import pox.openflow.libopenflow_01 as of
from pox.lib.addresses import IPAddr, EthAddr
# include as part of the betta branch
from pox.openflow.of_json import *
from pox.lib.recoco import Timer
import time
log = core.getLogger()
src_dpid = 0
dst_dpid = 0
input_pkts = 0
output_pkts = 0
def getTheTime(): # function to create a timestamp
flock = time.localtime()
then = "[%s-%s-%s" %(str(flock.tm_year),str(flock.tm_mon),str(flock.tm_mday))
if int(flock.tm_hour)<10:
hrs = "0%s" % (str(flock.tm_hour))
else:
hrs = str(flock.tm_hour)
if int(flock.tm_min)<10:
mins = "0%s" % (str(flock.tm_min))
else:
mins = str(flock.tm_min)
if int(flock.tm_sec)<10:
secs = "0%s" % (str(flock.tm_sec))
else:
secs = str(flock.tm_sec)
then +="]%s.%s.%s" % (hrs,mins,secs)
return then
# handler for timer function that sends the requests to all the
# switches connected to the controller.
def _timer_func ():
for connection in core.openflow._connections.values():
connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
log.debug("Sent %i flow/port stats request(s)", len(core.openflow._connections))
# handler to display flow statistics received in JSON format
# structure of event.stats is defined by ofp_flow_stats()
def _handle_flowstats_received (event):
#stats = flow_stats_to_list(event.stats)
#log.debug("FlowStatsReceived from %s: %s", dpidToStr(event.connection.dpid), stats)
global src_dpid, dst_dpid, input_pkts, output_pkts
print "src_dpid=", dpidToStr(src_dpid), "dst_dpid=", dpidToStr(dst_dpid)
for f in event.stats:
if f.match.dl_type==0x0800 and f.match.nw_dst==IPAddr("192.168.123.2") and f.match.nw_tos==0x64 and event.connection.dpid==src_dpid:
print "input: ", f.byte_count, f.packet_count
input_pkts = f.packet_count
if f.match.dl_type==0x0800 and f.match.nw_dst==IPAddr("192.168.123.2") and f.match.nw_tos==0x64 and event.connection.dpid==dst_dpid:
print "output: ", f.byte_count, f.packet_count
output_pkts = f.packet_count
if input_pkts !=0:
print getTheTime(), "Path Loss Rate =", (input_pkts-output_pkts)*1.0/input_pkts*100, "%"
# handler to display port statistics received in JSON format
def _handle_portstats_received (event):
#print "\n<<<STATS-REPLY: Return PORT stats for Switch", event.connection.dpid,"at ",getTheTime()
#for f in event.stats:
#if int(f.port_no)<65534:
#print " PortNo:", f.port_no, " Fwd's Pkts:", f.tx_packets, " Fwd's Bytes:", f.tx_bytes, " Rc'd Pkts:", f.rx_packets, " Rc's Bytes:", f.rx_bytes
#print " PortNo:", f.port_no, " TxDrop:", f.tx_dropped, " RxDrop:", f.rx_dropped, " TxErr:", f.tx_errors, " RxErr:", f.rx_errors, " CRC:", f.rx_crc_err, " Coll:", f.collisions
stats = flow_stats_to_list(event.stats)
log.debug("PortStatsReceived from %s: %s", dpidToStr(event.connection.dpid), stats)
def _handle_ConnectionUp (event):
global src_dpid, dst_dpid
print "ConnectionUp: ", dpidToStr(event.connection.dpid)
for m in event.connection.features.ports:
if m.name == "s0-eth0":
src_dpid = event.connection.dpid
elif m.name == "s1-eth0":
dst_dpid = event.connection.dpid
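# The four flow-mods below: two low-priority rules flood traffic arriving on
# ports 1 and 2, and two priority-10 rules steer the ToS-0x64 measurement
# traffic towards 192.168.123.2 (out port 2) and 192.168.123.1 (out port 1)
# so the flow-stats handler above can count it at both switches.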
msg = of.ofp_flow_mod()
msg.priority =1
msg.idle_timeout = 0
msg.match.in_port =1
msg.actions.append(of.ofp_action_output(port = of.OFPP_ALL))
event.connection.send(msg)
msg = of.ofp_flow_mod()
msg.priority =1
msg.idle_timeout = 0
msg.match.in_port =2
msg.actions.append(of.ofp_action_output(port = of.OFPP_ALL))
event.connection.send(msg)
msg = of.ofp_flow_mod()
msg.priority =10
msg.idle_timeout = 0
msg.hard_timeout = 0
msg.match.dl_type = 0x0800
msg.match.nw_tos = 0x64
msg.match.in_port=1
msg.match.nw_dst = "192.168.123.2"
msg.actions.append(of.ofp_action_output(port = 2))
event.connection.send(msg)
msg = of.ofp_flow_mod()
msg.priority =10
msg.idle_timeout = 0
msg.hard_timeout = 0
msg.match.dl_type = 0x0800
msg.match.nw_tos = 0x64
msg.match.nw_dst = "192.168.123.1"
msg.actions.append(of.ofp_action_output(port = 1))
event.connection.send(msg)
# main function to launch the module
def launch ():
# attach handlers to listeners
core.openflow.addListenerByName("FlowStatsReceived",
_handle_flowstats_received)
core.openflow.addListenerByName("PortStatsReceived",
_handle_portstats_received)
core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
# timer set to execute every second
Timer(1, _timer_func, recurring=True)
| kulawczukmarcin/mypox | pox/misc/flow_stats_differnet_version.py | Python | apache-2.0 | 5,602 |
import os
from django.conf import settings
IXPMGR_VERSION = os.environ["IXPMGR_VERSION"]
SECRET_KEY = "test-secret-key"
INSTALLED_APPS = [
"django_ixpmgr.IxpManagerAppConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
]
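# Usage sketch (the invocation is an assumption, not taken from this repo):
# point Django at this module when running the tests, e.g.
#   DJANGO_SETTINGS_MODULE=django_ixpmgr.tests.django_settings django-admin test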
| 20c/django-ixpmgr | src/django_ixpmgr/tests/django_settings.py | Python | apache-2.0 | 245 |
'''
Created on 11/07/2013
@author: Mads M. Pedersen (mmpe@dtu.dk)
'''
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from builtins import int
from future import standard_library
standard_library.install_aliases()
import math
from wetb.utils.cython_compile.cython_compile import cython_compile, \
cython_compile_autodeclare, cython_import
from wetb.utils.cython_compile.examples import cycheck
def pycheck(p):
for i in range(10):
for y in range(2, int(math.sqrt(p)) + 1):
if p % y == 0:
return False
return True
@cython_compile
def cycheck_compile(p):
import math
for i in range(10):
for y in range(2, int(math.sqrt(p)) + 1):
if p % y == 0:
return False
return True
@cython_compile_autodeclare
def cycheck_compile_autodeclare(p):
import math
for i in range(10):
for y in range(2, int(math.sqrt(p)) + 1):
if p % y == 0:
return False
return True
if __name__ == "__main__":
p = 17
print (pycheck(p))
cython_import('cycheck')
print (cycheck.cycheck(p))
print (cycheck.cycheck_pure(p))
print (cycheck.cycheck_cdef(p))
print (cycheck_compile(p))
print (cycheck_compile_autodeclare(p))
| madsmpedersen/MMPE | cython_compile/examples/examples.py | Python | gpl-3.0 | 1,398 |
#
# Advene: Annotate Digital Videos, Exchange on the NEt
# Copyright (C) 2008-2017 Olivier Aubert <contact@olivieraubert.net>
#
# Advene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Advene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Advene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import logging
logger = logging.getLogger(__name__)
import copy
from io import StringIO
from simpletal import simpleTAL
from simpletal import simpleTALES
from advene.model.exception import AdveneException
from . import global_methods
class AdveneTalesException(AdveneException):
pass
class AdveneTalesPathException(AdveneTalesException):
pass
class DebugLogger:
verbosity = 3
def debug (self, *args):
logger.debug(args)
def info (self, *args):
logger.info(args)
def warn (self, *args):
logger.warning(args)
def error (self, *args):
logger.error(args)
def critical (self, *args):
logger.error(args)
debuglogger_singleton = DebugLogger()
class NoCallVariable(simpleTALES.ContextVariable):
"""Not callable variable.
Used for view wrappers, that should not be called if intermediate values.
Such a value (if used in a _advene_context) can hold a callable, that will
be called only if final element in the path.
"""
def value (self, currentPath=None):
return self.ourValue
class _advene_context (simpleTALES.Context):
"""Advene specific implementation of TALES.
It is based on simpletal.simpleTALES.Context,
but whenever a path item is not resolved as an attribute nor a key of
the object to which it is applied, it is searched in a set of Methods
contained in the context."""
def __init__ (self, options):
simpleTALES.Context.__init__(self, options, allowPythonPath=True)
def wrap_method(self, method):
return simpleTALES.PathFunctionVariable(method)
def wrap_nocall(self, o):
return NoCallVariable(o)
def wrap_object(self, o):
"""Wraps an object into a ContextVariable."""
return simpleTALES.ContextVariable(o)
def addLocals (self, localVarList):
# For compatibility with code using simpletal 3.6 (migration phase)
# Pop the current locals onto the stack
self.pushLocals()
for name, value in localVarList:
self.setLocal(name, value)
def traversePathPreHook(self, obj, path):
"""Called before any TALES standard evaluation"""
#print "Prehook for %s on %s" % (obj, path)
val = None
if path in self.methods:
#print "Evaluating %s on %s" % (path, obj)
val = self.methods[path](obj, self)
# If the result is None, the method is not appliable
# and we should try other access ways (attributes,...) on the
# object
if val is None and hasattr (obj, 'getQName'):
if 'view' in self.locals:
ref = self.locals['view']
elif 'view' in self.globals:
ref = self.globals['view']
elif 'here' in self.locals:
ref = self.locals['here']
elif 'here' in self.globals:
ref = self.globals['here']
else:
ref = None
if ref is None:
ref = obj
pkg = ref.getOwnerPackage ()
ns_dict = pkg.getImports ().getInverseDict ()
ns_dict[''] = pkg.getUri (absolute=True)
val = obj.getQName (path, ns_dict, None)
return val
def traversePath (self, expr, canCall=1):
# canCall only applies to the *final* path destination, not points down the path.
# Check for and correct for trailing/leading quotes
if (expr.startswith ('"') or expr.startswith ("'")):
if (expr.endswith ('"') or expr.endswith ("'")):
expr = expr [1:-1]
else:
expr = expr [1:]
elif (expr.endswith ('"') or expr.endswith ("'")):
expr = expr [0:-1]
pathList = expr.split ('/')
path = pathList[0]
if path.startswith ('?'):
path = path[1:]
if path in self.locals:
path = self.locals[path]
if isinstance (path, simpleTALES.ContextVariable):
path = path.value()
elif callable (path):
path = path(*())
elif path in self.globals:
path = self.globals[path]
if isinstance (path, simpleTALES.ContextVariable):
path = path.value()
elif callable (path):
path = path(*())
#self.log.debug ("Dereferenced to %s" % path)
if path in self.locals:
val = self.locals[path]
elif path in self.globals:
val = self.globals[path]
else:
# If we can't find it then raise an exception
raise simpleTALES.PathNotFoundException() from None
# Advene hook: store the resolved_stack
resolved_stack = [ (path, val) ]
self.pushLocals()
self.setLocal( '__resolved_stack', resolved_stack )
index = 1
for path in pathList[1:]:
#self.log.debug ("Looking for path element %s" % path)
if path.startswith ('?'):
path = path[1:]
if path in self.locals:
path = self.locals[path]
if isinstance (path, simpleTALES.ContextVariable):
path = path.value()
elif callable (path):
path = path(*())
elif path in self.globals:
path = self.globals[path]
if isinstance (path, simpleTALES.ContextVariable):
path = path.value()
elif callable (path):
path = path(*())
#self.log.debug ("Dereferenced to %s" % path)
try:
if isinstance (val, simpleTALES.ContextVariable):
temp = val.value((index, pathList))
elif callable (val):
temp = val(*())
else:
temp = val
except simpleTALES.ContextVariable as e:
# Fast path for those functions that return values
self.popLocals()
return e.value()
# Advene hook:
val = self.traversePathPreHook (temp, path)
if val is not None:
pass
elif hasattr (temp, path):
val = getattr (temp, path)
else:
try:
val = temp[path]
except (TypeError, KeyError):
try:
val = temp[int(path)]
except:
#self.log.debug ("Not found.")
raise simpleTALES.PathNotFoundException() from None
# Advene hook: stack resolution
resolved_stack.insert(0, (path, val) )
index = index + 1
#self.log.debug ("Found value %s" % str (val))
self.popLocals()
if canCall:
try:
if isinstance (val, simpleTALES.ContextVariable):
result = val.value((index,pathList))
# Advene hook: introduced by the NoCallVariable
if callable(result):
result = val.value((index, pathList))(*())
elif callable (val):
result = val(*())
else:
result = val
except simpleTALES.ContextVariable as e:
# Fast path for those functions that return values
return e.value()
else:
if isinstance (val, simpleTALES.ContextVariable):
result = val.realValue
else:
result = val
return result
class AdveneContext(_advene_context):
@staticmethod
def defaultMethods():
return [ n
for n in dir(global_methods)
if not n.startswith('_') ]
def checkpoint(self):
"""Checkpoint locals/globals variables to preserve state
"""
self._cached_locals = copy.copy(self.locals)
self._cached_globals = copy.copy(self.globals)
def restore(self):
"""Restore locals/globals from a previous checkpoint state.
"""
self.locals = copy.copy(self._cached_locals)
self.globals = copy.copy(self._cached_globals)
def __str__ (self):
return "<pre>AdveneContext\nGlobals:\n\t%s\nLocals:\n\t%s</pre>" % (
"\n\t".join([ "%s: %s" % (k, str(v).replace("<", "<"))
for k, v in self.globals.items() ]),
"\n\t".join([ "%s: %s" % (k, str(v).replace("<", "<"))
for k, v in self.locals.items() ]))
def __init__(self, here, options=None):
"""Creates a tales.AdveneContext object, having a global symbol 'here'
with value 'here' and a global symbol 'options' where all the key-
value pairs of parameter 'options' are copied. Of course, it also
has all the standard TALES global symbols.
"""
if options is None:
options={}
_advene_context.__init__(self, dict(options)) # *copy* dict 'options'
self.methods = {}
self.addGlobal('here', here)
for dm_name in self.defaultMethods():
self.addMethod(dm_name, global_methods.__dict__[dm_name])
# FIXME: debug
self.log = debuglogger_singleton
def addMethod (self, name, function):
"""Add a new method to this context."""
# TODO: test that function is indeed a function, and that it has the
# correct signature
if True:
self.methods[name] = function
else:
raise AdveneTalesException("%s is not a valid method" % function)
def interpret (self, view_source, mimetype, stream=None):
"""
Interpret the TAL template available through the stream view_source,
with the mime-type mimetype, and print the result to the stream
"stream". The stream is returned. If stream is not given or None, a
StringIO will be created and returned.
"""
if stream is None:
stream = StringIO ()
if isinstance (view_source, str):
view_source = StringIO (view_source)
kw = {}
compiler = simpleTAL.XMLTemplateCompiler ()
compiler.log = self.log
compiler.parseTemplate (view_source)
kw["suppressXMLDeclaration"] = 1
compiler.getTemplate ().expand (context=self, outputFile=stream, outputEncoding='utf-8', **kw)
return stream
def evaluateValue(self, expr):
"""Returns the object matching the TALES expression expr applied on the
given context. If context is an instance of tales.AdveneContext, it
will be used directly. If it is another instance, a new AdveneContext
will be created with this instance as global symbol 'here'.
"""
r = None
try:
r = self.evaluate (expr)
except simpleTALES.PathNotFoundException:
raise AdveneTalesException(
'TALES expression %s returned None in context %s' %
(expr, self)) from None
except:
logger.error("Unhandled exception - please report", exc_info=True)
return r
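# --- Illustrative only, not part of Advene. ---
# Minimal sketch of evaluating a TALES path expression with AdveneContext.
# The placeholder 'here' object and the path are assumptions; real callers
# pass Advene packages, annotations, views, etc.
def _example_evaluate():
    class Here(object):
        greeting = "hello"
    context = AdveneContext(Here())
    # 'here/greeting' resolves the 'greeting' attribute of the 'here' object
    return context.evaluateValue("here/greeting")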
| oaubert/advene | lib/advene/model/tal/context.py | Python | gpl-2.0 | 12,208 |
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
# it it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['key']['name']
return numRows, numCols, key_name
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
# it it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['key']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
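# Usage sketch (the frame key is a placeholder):
#   inspect = runInspect(key='my_frame.hex')
#   missingList, labelList, numRows, numCols = infoFromInspect(inspect)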
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
# either return the first col, or the col indentified by label. the column identifed could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
# because of floor and ceil effects due we potentially lose 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
# only look at [0] for now...bit e308 numbers if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
assert self.missingList == expectedMissingList, "%s %s" % (self.MissingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
# Instance method: this was decorated with @classmethod, which broke the
# self.<attr> lookups below; the comparisons were also inverted (!=).
def check(self,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, **kwargs):
if expectedLabel is not None:
assert self.label == expectedLabel, "%s %s" % (self.label, expectedLabel)
if expectedType is not None:
assert self.type == expectedType, "%s %s" % (self.type, expectedType)
if expectedMissing is not None:
assert self.missing == expectedMissing, "%s %s" % (self.missing, expectedMissing)
if expectedDomain is not None:
assert self.domain == expectedDomain, "%s %s" % (self.domain, expectedDomain)
if expectedBinsSum is not None:
assert self.binsSum == expectedBinsSum, "%s %s" % (self.binsSum, expectedBinsSum)
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
# we need both colInndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
| bikash/h2o-dev | py2/h2o_cmd.py | Python | apache-2.0 | 16,424 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Process, Pipe
import time
from time import sleep
import random
import uuid
import logging
from tornado.websocket import websocket_connect
from tornado.ioloop import PeriodicCallback
from tornado.ioloop import IOLoop
from tornado.httpclient import HTTPClient, HTTPRequest
# use a module-level logger
mlog = logging.getLogger('loader')
class MrRobotto(Process):
def __init__(self, pipe, send_rate=10, server_address='127.0.0.1', port=9090):
super(MrRobotto, self).__init__()
self.pipe = pipe
self.loop = None
i, d = divmod(send_rate, 1)
self.send_rate = send_rate if d != 0 else send_rate-0.0001 # modify send rate so it's not an integer
self.send_delay = float(1)/self.send_rate
self.sending_enabled = True
self.nick = uuid.uuid4().hex[0:8]
self.server_address = server_address
self.port = port
self.client_id = None
self.payload_head = None
self.connected = False
self.msgs_received = 0
self.msgs_sent = 0
self.wsconn = None
self.periodic_check = None
#self.die = False
def run(self):
# first, we just wait for the connect command, ignores everything else
self.pipe.send('running')
cmd = ""
while cmd != "connect":
cmd = self.pipe.recv()
self.kick_off()
def kick_off(self):
self.loop = IOLoop.instance()
self.loop.add_callback(self.connect)
self.loop.start()
def connect(self):
http_client = HTTPClient()
mlog.debug("http://{}:{}/".format(self.server_address, str(self.port)))
url = HTTPRequest("http://{}:{}/".format(self.server_address, str(self.port)),
connect_timeout=480, request_timeout=1000)
response = http_client.fetch(url) # yes, we're doing a synchronous (blocking) call.
if response.error:
print("response = ", response)
else:
self.client_id = self.__get_ftc_id(response.headers)
if not self.client_id:
return
self.payload_head = self.client_id
self.__connect_ws()
@staticmethod
def __get_ftc_id(headers):
res = None
sch = headers['Set-Cookie']
v = sch.split(';')[0]
if 'ftc_cid' in v:
res = v.split('=')[1]
return res
def __connect_ws(self):
portstr = str(self.port)
wsuri = "ws://%s:%s/ws" % (self.server_address, portstr)
hreq = HTTPRequest(wsuri, connect_timeout=480, request_timeout=480,
headers={"Cookie": "ftc_cid=%s" % self.client_id})
websocket_connect(hreq, callback=self.handle_wsconn)
def handle_wsconn(self, conn):
self.connected = True
self.wsconn = conn.result(timeout=680)
self.periodic_check = PeriodicCallback(self.check_command, 473)
self.periodic_check.start()
self.pipe.send('connected')
def send(self):
if self.sending_enabled:
self.send_message()
self.loop.add_timeout(time.time()+self.send_delay, self.send)
def send_message(self):
if self.wsconn:
msg_str = '{"msgtype": "text", "payload": "%s-%d" , "sent_ts": %f}' % (self.payload_head,
self.msgs_sent, time.time())
self.wsconn.write_message(msg_str)
self.msgs_sent += 1
def check_command(self):
cmd = None
if self.pipe.poll():
cmd = self.pipe.recv() # check the pipe with a non-blocking call!
if cmd:
ftc = getattr(self, cmd)
if ftc is not None:
ftc()
def finish(self):
self.loop.stop()
def dummy_handler(self, message):
pass
class LoaderClient(MrRobotto):
def __init__(self, pipe, send_rate, server_address='localhost', port=9090):
super(LoaderClient, self).__init__(pipe, send_rate, server_address, port)
def handle_wsconn(self, conn):
super(LoaderClient, self).handle_wsconn(conn)
self.wsconn.on_message = self.dummy_handler
class ProbeClient(MrRobotto):
def __init__(self, pipe, send_rate, msgs_to_send=100, server_address='localhost', port=9090):
super(ProbeClient, self).__init__(pipe, send_rate, server_address, port)
self.msgs_to_send = msgs_to_send
self.msgs_received = 0
self.msgs_sent = 0
self.payload_head = None
self.received_lst = []
def handle_wsconn(self, conn):
super(ProbeClient, self).handle_wsconn(conn)
self.payload_head = "probe-msg"
def start_send_batch(self):
self.msgs_received = 0
self.msgs_sent = 0
self.received_lst = []
self.wsconn.on_message = self.handle_message # "re-connect" real handler
self.sending_enabled = True
self.loop.add_callback(self.send)
#self.send()
def handle_message(self, message):
if 'probe-msg' in message:
self.received_lst.append((time.time(), message))
self.msgs_received += 1
if self.msgs_sent >= self.msgs_to_send:
self.sending_enabled = False
self.pipe.send('done_sending_batch')
self.wsconn.on_message = self.dummy_handler # "disconnect" this handler temporarily
def report(self):
d = dict(msgs_sent=self.msgs_sent, msgs_received=self.msgs_received, received_lst=self.received_lst)
self.pipe.send(d)
class RobotLoader(object):
def __init__(self, server_address, port, num_clients, connect_rate=30, send_rate=10):
self.__server_address = server_address
self.__port = port
self.send_rate = send_rate
self.num_clients = num_clients
self.__total_num_clients = 0
self.__connect_rate = connect_rate
self.__clients_connected = 0
self.__clients_done_sending = 0
self.clients = [] # list of tuples (client, pipe)
def instantiate_clients(self):
mlog.debug("INSTANTIATE_CLIENTS")
for pc in range(0, self.num_clients):
pipe, b = Pipe()
cc = LoaderClient(pipe=b, send_rate=self.send_rate,
server_address=self.__server_address, port=self.__port)
self.clients.append((cc, pipe))
self.__total_num_clients = len(self.clients)
def start_all_clients(self):
mlog.debug("STARTING_ALL_CLIENTS")
for client, pipe in self.clients:
client.start()
# wait until we get the "running" confirmation from all clients.
for client, pipe in self.clients:
res = ""
while res != "running":
res = pipe.recv()
mlog.debug("ALL_CLIENTS_STARTED")
def connect_all_clients(self):
mlog.debug("CONNECTING_ALL_CLIENTS")
for client, pipe in self.clients:
pipe.send("connect")
sleep(float(1)/self.__connect_rate)
time_limit = time.time() + self.__total_num_clients/2.0 # seconds to wait for connection confirmation
i = 0
while self.__clients_connected < self.__total_num_clients:
client, pipe = self.clients[i]
if pipe.poll():
res = pipe.recv()
if res == "connected":
self.__clients_connected += 1
else:
i += 1
if i >= self.__total_num_clients:
i = 0
if time.time() > time_limit:
break
mlog.debug("CLIENTS_CONNECTED - NUM: %d - OF: %d" % (self.__clients_connected, self.__total_num_clients))
if self.__clients_connected != self.__total_num_clients:
mlog.debug("SOME_CLIENTS_DID_NOT_CONNECT")
def start_sending(self):
mlog.debug("CLIENTS_START_SENDING")
random.shuffle(self.clients)
for client, pipe in self.clients:
pipe.send("send")
def finish_clients(self):
mlog.debug("FINISHING_CLIENTS")
for client, pipe in self.clients:
pipe.send('finish')
mlog.debug("ALL_CLIENTS_FINISHED")
| RecNes/zo-test_proj | robot.py | Python | gpl-2.0 | 8,293 |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
felix.config
~~~~~~~~~~~~
Configuration management for Felix.
On instantiation, this module automatically parses the configuration file and
builds a singleton configuration object. That object may (once) be changed by
etcd configuration being reported back to it.
"""
import os
import ConfigParser
import logging
import socket
from calico import common
# Logger
log = logging.getLogger(__name__)
# Convert log level names into python log levels.
LOGLEVELS = {"none": None,
"debug": logging.DEBUG,
"info": logging.INFO,
"warn": logging.WARNING,
"warning": logging.WARNING,
"err": logging.ERROR,
"error": logging.ERROR,
"crit": logging.CRITICAL,
"critical": logging.CRITICAL}
# Sources of a configuration parameter. The order is highest-priority first.
DEFAULT = "Default"
ENV = "Environment variable"
FILE = "Configuration file"
GLOBAL_ETCD = "Global etcd configuration"
LOCAL_ETCD = "Host specific etcd configuration"
DEFAULT_SOURCES = [ ENV, FILE, GLOBAL_ETCD, LOCAL_ETCD ]
class ConfigException(Exception):
def __init__(self, message, parameter):
super(ConfigException, self).__init__(message)
self.message = message
self.parameter = parameter
def __str__(self):
return "%s (value %r for %s (%s), read from %r)" \
% (self.message,
self.parameter.value,
self.parameter.name,
self.parameter.description,
self.parameter.active_source)
class ConfigParameter(object):
"""
A configuration parameter. This contains the following information.
- The name of the field.
- Where the location can validly be read from
- The current value
- Where the value was read from
"""
def __init__(self, name, description, default,
sources=DEFAULT_SOURCES, value_is_int=False,
value_is_bool=False):
"""
Create a configuration parameter.
:param str description: Description for logging
:param list sources: List of valid sources to try
:param str default: Default value
:param bool value_is_int: Integer value?
"""
self.description = description
self.name = name
self.sources = sources
self.value = default
self.active_source = None
self.value_is_int = value_is_int
self.value_is_bool = value_is_bool
def set(self, value, source):
"""
Set a value of a parameter - unless already set.
:param value: value
:param source: source; for example "Configuration file /etc/felix.cfg"
"""
if self.active_source is None:
log.debug("Read value %r for %s (%s) from %r",
value,
self.name,
self.description,
source)
self.active_source = source
if self.value_is_int:
# Set value before the call to int, so the ConfigException has
# the right value if / when it goes wrong.
self.value = value
try:
self.value = int(value)
except ValueError:
raise ConfigException("Field was not integer",
self)
elif self.value_is_bool:
lower_val = str(value).lower()
log.debug("Parsing %r as a Boolean.", lower_val)
if lower_val in ("true", "1", "yes", "y", "t"):
self.value = True
elif lower_val in ("false", "0", "no", "n", "f"):
self.value = False
else:
raise ConfigException("Field was not a valid Boolean",
self)
else:
# Calling str in principle can throw an exception, but it's
# hard to see how in practice, so don't catch and wrap.
self.value = str(value)
else:
log.warning("Ignore %r value for %s (%s) - already set from %r",
source,
self.name,
self.description,
self.active_source)
class Config(object):
def __init__(self, config_path):
"""
Create a config. This reads data from the following sources.
- Environment variables
- Configuration file (/etc/calico/felix.cfg)
- per-host etcd (/calico/vX/config)
- global etcd (/calico/vX/host/<host>/config)
After object creation, the environment variables and config file have
been read, and the variables ETCD_ADDR and HOSTNAME have been set and
validated. The caller is then responsible for reading the remaining
config from etcd and calling report_etcd_config with the returned
values before the rest of the config structure can be used.
:raises EtcdException
"""
self.parameters = {}
self.add_parameter("EtcdAddr", "Address and port for etcd",
"localhost:4001", sources=[ENV, FILE])
self.add_parameter("FelixHostname", "Felix compute host hostname",
socket.gethostname(), sources=[ENV, FILE])
self.add_parameter("EtcdScheme", "Protocol type for http or https",
"http", sources=[ENV, FILE])
self.add_parameter("EtcdKeyFile", "Path to etcd key file",
"none", sources=[ENV, FILE])
self.add_parameter("EtcdCertFile", "Path to etcd certificate file",
"none", sources=[ENV, FILE])
self.add_parameter("EtcdCaFile", "Path to etcd CA certificate file",
"none", sources=[ENV, FILE])
self.add_parameter("StartupCleanupDelay",
"Delay before cleanup starts",
30, value_is_int=True)
self.add_parameter("PeriodicResyncInterval",
"How often to do cleanups, seconds",
60 * 60, value_is_int=True)
self.add_parameter("IptablesRefreshInterval",
"How often to refresh iptables state, in seconds",
60, value_is_int=True)
self.add_parameter("MetadataAddr", "Metadata IP address or hostname",
"127.0.0.1")
self.add_parameter("MetadataPort", "Metadata Port",
8775, value_is_int=True)
self.add_parameter("InterfacePrefix", "Interface name prefix", None)
self.add_parameter("DefaultEndpointToHostAction",
"Action to take for packets that arrive from"
"an endpoint to the host.", "DROP")
self.add_parameter("LogFilePath",
"Path to log file", "/var/log/calico/felix.log")
self.add_parameter("EtcdDriverLogFilePath",
"Path to log file for etcd driver",
"/var/log/calico/felix-etcd.log")
self.add_parameter("LogSeverityFile",
"Log severity for logging to file", "INFO")
self.add_parameter("LogSeveritySys",
"Log severity for logging to syslog", "ERROR")
self.add_parameter("LogSeverityScreen",
"Log severity for logging to screen", "ERROR")
self.add_parameter("IpInIpEnabled",
"IP-in-IP device support enabled", False,
value_is_bool=True)
self.add_parameter("IpInIpMtu",
"MTU to set on the IP-in-IP device", 1440,
value_is_int=True)
self.add_parameter("ReportingIntervalSecs",
"Status reporting interval in seconds",
30, value_is_int=True)
self.add_parameter("ReportingTTLSecs",
"Status report time to live in seconds",
90, value_is_int=True)
self.add_parameter("EndpointReportingEnabled",
"Whether Felix should report per-endpoint status "
"into etcd",
False, value_is_bool=True)
self.add_parameter("EndpointReportingDelaySecs",
"Minimum delay between per-endpoint status reports",
1, value_is_int=True)
self.add_parameter("MaxIpsetSize",
"Maximum size of the ipsets that Felix uses to "
"represent profile tag memberships. Should be set "
"to a value larger than the expected number of "
"IP addresses using a single tag.",
2**20, value_is_int=True)
# Read the environment variables, then the configuration file.
self._read_env_vars()
self._read_cfg_file(config_path)
self._finish_update(final=False)
def add_parameter(self, name, description, default, **kwargs):
"""
Put a parameter in the parameter dictionary.
"""
self.parameters[name] = ConfigParameter(
name, description, default, **kwargs)
def _finish_update(self, final=False):
"""
Config has been completely read. Called twice - once after reading from
environment and config file (so we should be able to access etcd), and
once after reading from etcd (so we have all the config ready to go).
Responsible for :
- storing the parameters in the relevant fields in the structure
- validating the configuration is valid (for this stage in the process)
- updating logging parameters
Note that we complete the logging even before etcd configuration
changes are read. Hence, for example, if logging to file is turned on
after reading environment variables and config file, then the log file
is created and logging to it starts - even if later on etcd
configuration turns the file off. That's because we must log if etcd
configuration load fails, and not having the log file early enough is
worse.
:param final: Have we completed (rather than just read env and config file)
"""
self.ETCD_ADDR = self.parameters["EtcdAddr"].value
self.HOSTNAME = self.parameters["FelixHostname"].value
self.ETCD_SCHEME = self.parameters["EtcdScheme"].value
self.ETCD_KEY_FILE = self.parameters["EtcdKeyFile"].value
self.ETCD_CERT_FILE = self.parameters["EtcdCertFile"].value
self.ETCD_CA_FILE = self.parameters["EtcdCaFile"].value
self.STARTUP_CLEANUP_DELAY = self.parameters["StartupCleanupDelay"].value
self.RESYNC_INTERVAL = self.parameters["PeriodicResyncInterval"].value
self.REFRESH_INTERVAL = self.parameters["IptablesRefreshInterval"].value
self.METADATA_IP = self.parameters["MetadataAddr"].value
self.METADATA_PORT = self.parameters["MetadataPort"].value
self.IFACE_PREFIX = self.parameters["InterfacePrefix"].value
self.DEFAULT_INPUT_CHAIN_ACTION = \
self.parameters["DefaultEndpointToHostAction"].value
self.LOGFILE = self.parameters["LogFilePath"].value
self.DRIVERLOGFILE = self.parameters["EtcdDriverLogFilePath"].value
self.LOGLEVFILE = self.parameters["LogSeverityFile"].value
self.LOGLEVSYS = self.parameters["LogSeveritySys"].value
self.LOGLEVSCR = self.parameters["LogSeverityScreen"].value
self.IP_IN_IP_ENABLED = self.parameters["IpInIpEnabled"].value
self.IP_IN_IP_MTU = self.parameters["IpInIpMtu"].value
self.REPORTING_INTERVAL_SECS = self.parameters["ReportingIntervalSecs"].value
self.REPORTING_TTL_SECS = self.parameters["ReportingTTLSecs"].value
self.REPORT_ENDPOINT_STATUS = \
self.parameters["EndpointReportingEnabled"].value
self.ENDPOINT_REPORT_DELAY = \
self.parameters["EndpointReportingDelaySecs"].value
self.MAX_IPSET_SIZE = self.parameters["MaxIpsetSize"].value
self._validate_cfg(final=final)
# Update logging.
common.complete_logging(self.LOGFILE,
self.LOGLEVFILE,
self.LOGLEVSYS,
self.LOGLEVSCR,
gevent_in_use=True)
if final:
# Log configuration - the whole lot of it.
for name, parameter in self.parameters.iteritems():
log.info("Parameter %s (%s) has value %r read from %s",
name,
parameter.description,
parameter.value,
parameter.active_source)
def _read_env_vars(self):
"""
Read all of the variables from the environment.
"""
for name, parameter in self.parameters.iteritems():
# All currently defined config parameters have ENV as a valid
# source.
assert(ENV in parameter.sources)
# ENV is the first source, so we can assert that using defaults.
assert(parameter.active_source is None)
env_var = ("FELIX_%s" % name).upper()
if env_var in os.environ:
parameter.set(os.environ[env_var],
"Environment variable %s" % env_var)
def _read_cfg_file(self, config_file):
parser = ConfigParser.ConfigParser()
parser.read(config_file)
cfg_dict = {}
# Build up the cfg dictionary from the file.
for section in parser.sections():
cfg_dict.update(dict(parser.items(section)))
source = "Configuration file %s" % config_file
for name, parameter in self.parameters.iteritems():
# Config parameters are lower-cased by ConfigParser
name = name.lower()
if FILE in parameter.sources and name in cfg_dict:
# This can validly be read from file.
parameter.set(cfg_dict.pop(name), source)
self._warn_unused_cfg(cfg_dict, source)
def report_etcd_config(self, host_dict, global_dict):
"""
Report configuration parameters read from etcd to the config
component. This must be called only once, after configuration is
initially read and before the config structure is used (except for
ETCD_ADDR and HOSTNAME).
:param host_dict: Dictionary of etcd parameters
:param global_dict: Dictionary of global parameters
:raises ConfigException
"""
log.debug("Configuration reported from etcd")
for source, cfg_dict in ((LOCAL_ETCD, host_dict),
(GLOBAL_ETCD, global_dict)):
for name, parameter in self.parameters.iteritems():
if source in parameter.sources and name in cfg_dict:
parameter.set(cfg_dict.pop(name), source)
self._warn_unused_cfg(cfg_dict, source)
self._finish_update(final=True)
def _validate_cfg(self, final=True):
"""
Firewall that the config is not invalid. Called twice, once when
environment variables and config file have been read, and once
after those plus the etcd configuration have been read.
:param final: Is this after final etcd config has been read?
:raises ConfigException
"""
fields = self.ETCD_ADDR.split(":")
if len(fields) != 2:
raise ConfigException("Invalid format for field - must be "
"hostname:port", self.parameters["EtcdAddr"])
self._validate_addr("EtcdAddr", fields[0])
try:
int(fields[1])
except ValueError:
raise ConfigException("Invalid port in field",
self.parameters["EtcdAddr"])
# Set default or python None value for each etcd "none" config value
if self.ETCD_SCHEME.lower() == "none":
self.ETCD_SCHEME = "http"
if self.ETCD_KEY_FILE.lower() == "none":
self.ETCD_KEY_FILE = None
if self.ETCD_CERT_FILE.lower() == "none":
self.ETCD_CERT_FILE = None
if self.ETCD_CA_FILE == "none":
self.ETCD_CA_FILE = None
if self.ETCD_SCHEME == "https":
# key and certificate must be both specified or both not specified
if bool(self.ETCD_KEY_FILE) != bool(self.ETCD_CERT_FILE):
if not self.ETCD_KEY_FILE:
raise ConfigException("Missing etcd key file. Key and "
"certificate must both be specified "
"or both be blank.",
self.parameters["EtcdKeyFile"])
else:
raise ConfigException("Missing etcd certificate. Key and "
"certificate must both be specified "
"or both be blank.",
self.parameters["EtcdCertFile"])
# Make sure etcd key and certificate are readable
if self.ETCD_KEY_FILE and self.ETCD_CERT_FILE:
if not (os.path.isfile(self.ETCD_KEY_FILE) and
os.access(self.ETCD_KEY_FILE, os.R_OK)):
raise ConfigException("Cannot read key file. Key file "
"must be a readable path.",
self.parameters["EtcdKeyFile"])
if not (os.path.isfile(self.ETCD_CERT_FILE) and
os.access(self.ETCD_CERT_FILE, os.R_OK)):
raise ConfigException("Cannot read cert file. Cert file "
"must be a readable path.",
self.parameters["EtcdCertFile"])
# If Certificate Authority cert provided, check it's readable
if (self.ETCD_CA_FILE and
not (os.path.isfile(self.ETCD_CA_FILE) and
os.access(self.ETCD_CA_FILE, os.R_OK))):
raise ConfigException("Missing CA certificate or file is "
"unreadable. Value must be readable "
"file path.",
self.parameters["EtcdCaFile"])
elif self.ETCD_SCHEME != "http":
raise ConfigException("Invalid protocol scheme. Value must be one "
"of: \"\", \"http\", \"https\".",
self.parameters["EtcdScheme"])
try:
self.LOGLEVFILE = LOGLEVELS[self.LOGLEVFILE.lower()]
except KeyError:
raise ConfigException("Invalid log level",
self.parameters["LogSeverityFile"])
try:
self.LOGLEVSYS = LOGLEVELS[self.LOGLEVSYS.lower()]
except KeyError:
raise ConfigException("Invalid log level",
self.parameters["LogSeveritySys"])
try:
self.LOGLEVSCR = LOGLEVELS[self.LOGLEVSCR.lower()]
except KeyError:
raise ConfigException("Invalid log level",
self.parameters["LogSeverityScreen"])
# Log files may be "None" (the literal string, case insensitive). In
# this case no log file should be written.
if self.LOGFILE.lower() == "none":
self.LOGFILE = None
if self.DRIVERLOGFILE.lower() == "none":
self.DRIVERLOGFILE = None
if self.METADATA_IP.lower() == "none":
# Metadata is not required.
self.METADATA_IP = None
self.METADATA_PORT = None
else:
# Metadata must be supplied as IP or address, but we store as IP
self.METADATA_IP = self._validate_addr("MetadataAddr",
self.METADATA_IP)
if not common.validate_port(self.METADATA_PORT):
raise ConfigException("Invalid field value",
self.parameters["MetadataPort"])
if self.DEFAULT_INPUT_CHAIN_ACTION not in ("DROP", "RETURN", "ACCEPT"):
raise ConfigException(
"Invalid field value",
self.parameters["DefaultEndpointToHostAction"]
)
# For non-positive time values of reporting interval we set both
# interval and ttl to 0 - i.e. status reporting is disabled.
if self.REPORTING_INTERVAL_SECS <= 0:
log.warning("Reporting disabled.")
self.REPORTING_TTL_SECS = 0
self.REPORTING_INTERVAL_SECS = 0
# Ensure the TTL is longer than the reporting interval, defaulting
# it if not.
        if (self.REPORTING_TTL_SECS <= self.REPORTING_INTERVAL_SECS or
                self.REPORTING_TTL_SECS == 0):
            self.REPORTING_TTL_SECS = self.REPORTING_INTERVAL_SECS * 5/2
            log.warning("Reporting TTL set to %s.", self.REPORTING_TTL_SECS)
if self.ENDPOINT_REPORT_DELAY < 0:
log.warning("Endpoint status delay is negative, defaulting to 1.")
self.ENDPOINT_REPORT_DELAY = 1
if self.MAX_IPSET_SIZE <= 0:
log.warning("Max ipset size is non-positive, defaulting to 2^20.")
self.MAX_IPSET_SIZE = 2**20
if not final:
# Do not check that unset parameters are defaulted; we have more
# config to read.
return
for parameter in self.parameters.itervalues():
if parameter.value is None:
# No value, not even a default
raise ConfigException("Missing undefaulted value",
parameter)
def _warn_unused_cfg(self, cfg_dict, source):
# Warn about any unexpected items - i.e. ones we have not used.
        for key, value in cfg_dict.iteritems():
            log.warning("Got unexpected config item %s=%s", key, value)
def _validate_addr(self, name, addr):
"""
        Validate an address, returning the IP address it resolves to. If the
        address cannot be resolved, a ConfigException is raised.
Parameters :
- name of the field, for use in logging
- address to resolve
"""
try:
stripped_addr = addr.strip()
if not stripped_addr:
raise ConfigException("Blank value",
self.parameters[name])
            return socket.gethostbyname(stripped_addr)
except socket.gaierror:
raise ConfigException("Invalid or unresolvable value",
self.parameters[name])
| alexhersh/calico | calico/felix/config.py | Python | apache-2.0 | 23,990 |
# Copyright (c) 2017 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gio
from lollypop.define import PROXY_BUS, PROXY_PATH, PROXY_INTERFACE, Lp
class DBusHelper:
"""
    Simple helper for DBus
"""
def __init__(self):
pass
def call(self, call, args, callback, data):
"""
Call function
@param call as str
@param args as GLib.Variant()/None
@param callback as function
@param data
"""
try:
bus = Lp().get_dbus_connection()
Gio.DBusProxy.new(bus, Gio.DBusProxyFlags.NONE, None,
PROXY_BUS,
PROXY_PATH,
PROXY_INTERFACE, None,
self.__on_get_proxy, call, args, callback, data)
except Exception as e:
print("DBusHelper::call():", e)
#######################
# PRIVATE #
#######################
def __on_get_proxy(self, source, result, call, args, callback, data):
"""
Launch call and connect it to callback
@param source as GObject.Object
@param result as Gio.AsyncResult
@param call as str
@param args as GLib.Variant()/None
@param callback as function
@param data
"""
try:
proxy = source.new_finish(result)
proxy.call(call, args, Gio.DBusCallFlags.NO_AUTO_START,
500, None, callback, data)
except Exception as e:
print("DBusHelper::__on_get_proxy():", e)
| gnumdk/lollypop | lollypop/helper_dbus.py | Python | gpl-3.0 | 2,279 |
#!/afs/bx.psu.edu/project/pythons/py2.7-linux-x86_64-ucs4/bin/python2.7
"""
Print the number of bases in a nib file.
usage: %prog nib_file
"""
from bx.seq import nib as seq_nib
import sys
nib = seq_nib.NibFile( file( sys.argv[1] ) )
print nib.length
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/bx_python-0.7.2-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/nib_length.py | Python | gpl-3.0 | 254 |
import os
from os import environ
import dj_database_url
from boto.mturk import qualification
import otree.settings
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# the environment variable OTREE_PRODUCTION controls whether Django runs in
# DEBUG mode. If OTREE_PRODUCTION==1, then DEBUG=False
if environ.get('OTREE_PRODUCTION') not in {None, '', '0'}:
DEBUG = False
else:
DEBUG = True
# don't share this with anybody.
SECRET_KEY = '14g=w*lzyd_tigp!%$o6gy)ocjsgxq3xn#-7+sa%f7-77==m)f'
DATABASES = {
'default': dj_database_url.config(
# Rather than hardcoding the DB parameters here,
# it's recommended to set the DATABASE_URL environment variable.
# This will allow you to use SQLite locally, and postgres/mysql
# on the server
# Examples:
# export DATABASE_URL=postgres://USER:PASSWORD@HOST:PORT/NAME
# export DATABASE_URL=mysql://USER:PASSWORD@HOST:PORT/NAME
# fall back to SQLite if the DATABASE_URL env var is missing
default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
)
}
# AUTH_LEVEL:
# this setting controls which parts of your site are freely accessible,
# and which are password protected:
# - If it's not set (the default), then the whole site is freely accessible.
# - If you are launching a study and want visitors to only be able to
# play your app if you provided them with a start link, set it to STUDY.
# - If you would like to put your site online in public demo mode where
# anybody can play a demo version of your game, but not access the rest
# of the admin interface, set it to DEMO.
# for flexibility, you can set it in the environment variable OTREE_AUTH_LEVEL
AUTH_LEVEL = environ.get('OTREE_AUTH_LEVEL')
ADMIN_USERNAME = 'admin'
# for security, best to set admin password in an environment variable
ADMIN_PASSWORD = environ.get('OTREE_ADMIN_PASSWORD')
# setting for integration with AWS Mturk
AWS_ACCESS_KEY_ID = environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = environ.get('AWS_SECRET_ACCESS_KEY')
# e.g. EUR, CAD, GBP, CHF, CNY, JPY
REAL_WORLD_CURRENCY_CODE = 'USD'
USE_POINTS = True
# e.g. en, de, fr, it, ja, zh-hans
# see: https://docs.djangoproject.com/en/1.9/topics/i18n/#term-language-code
LANGUAGE_CODE = 'en'
# if an app is included in SESSION_CONFIGS, you don't need to list it here
INSTALLED_APPS = ['otree']
# SENTRY_DSN = ''
DEMO_PAGE_INTRO_TEXT = """
<ul>
<li>
<a href="https://github.com/oTree-org/otree" target="_blank">
oTree on GitHub
</a>.
</li>
<li>
<a href="http://www.otree.org/" target="_blank">
oTree homepage
</a>.
</li>
</ul>
<p>
Here are various games implemented with oTree. These games are all open
source, and you can modify them as you wish.
</p>
"""
ROOMS = [
{
'name': 'econ101',
'display_name': 'Econ 101 class',
'participant_label_file': '_rooms/econ101.txt',
},
{
'name': 'live_demo',
'display_name': 'Room for live demo (no participant labels)',
},
]
# from here on are qualifications requirements for workers
# see description for requirements on Amazon Mechanical Turk website:
# http://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_QualificationRequirementDataStructureArticle.html
# and also in docs for boto:
# https://boto.readthedocs.org/en/latest/ref/mturk.html?highlight=mturk#module-boto.mturk.qualification
mturk_hit_settings = {
'keywords': ['easy', 'bonus', 'choice', 'study'],
'title': 'Title for your experiment',
'description': 'Description for your experiment',
'frame_height': 500,
'preview_template': 'global/MTurkPreview.html',
'minutes_allotted_per_assignment': 60,
'expiration_hours': 7*24, # 7 days
#'grant_qualification_id': 'YOUR_QUALIFICATION_ID_HERE',# to prevent retakes
'qualification_requirements': [
# qualification.LocaleRequirement("EqualTo", "US"),
# qualification.PercentAssignmentsApprovedRequirement("GreaterThanOrEqualTo", 50),
# qualification.NumberHitsApprovedRequirement("GreaterThanOrEqualTo", 5),
# qualification.Requirement('YOUR_QUALIFICATION_ID_HERE', 'DoesNotExist')
]
}
# if you set a property in SESSION_CONFIG_DEFAULTS, it will be inherited by all configs
# in SESSION_CONFIGS, except those that explicitly override it.
# the session config can be accessed from methods in your apps as self.session.config,
# e.g. self.session.config['participation_fee']
SESSION_CONFIG_DEFAULTS = {
'real_world_currency_per_point': 0.00,
'participation_fee': 0.00,
'doc': "",
'mturk_hit_settings': mturk_hit_settings,
}
SESSION_CONFIGS = [
{
'name': 'XP_vol',
'display_name': "XP Expurgement",
'num_demo_participants': 7,
'app_sequence': ['Holt_Laury_Inequity_aversion','Modified_Dictator_Game','Modified_Ultimatum_Game','XP_vol'],
},
{
'name': 'XP_vol_update',
'display_name': "XP Expurgement _update",
'num_demo_participants': 7,
'app_sequence': ['Holt_Laury_Inequity_aversion', 'Modified_Dictator_Game', 'Modified_Ultimatum_Game', 'XP_vol_update'],
},
]
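# Additional configs can override any of the defaults above; a hypothetical
# entry (names and values for illustration only) could look like:
# {
#     'name': 'my_session',
#     'display_name': "My session",
#     'num_demo_participants': 2,
#     'app_sequence': ['my_app'],
#     'participation_fee': 1.00,  # overrides SESSION_CONFIG_DEFAULTS
# },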
# anything you put after the below line will override
# oTree's default settings. Use with caution.
otree.settings.augment_settings(globals())
| anthropo-lab/XP | XP Expurgement/settings.py | Python | gpl-3.0 | 5,367 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s -train CORPUS -output VECTORS -size SIZE -window WINDOW
-cbow CBOW -sample SAMPLE -hs HS -negative NEGATIVE -threads THREADS -iter ITER
-min_count MIN-COUNT -alpha ALPHA -binary BINARY -accuracy FILE
Trains a neural embedding model on text file CORPUS.
Parameters essentially reproduce those used by the original C tool
(see https://code.google.com/archive/p/word2vec/).
Parameters for training:
-train <file>
Use text data from <file> to train the model
-output <file>
Use <file> to save the resulting word vectors / word clusters
-size <int>
Set size of word vectors; default is 100
-window <int>
Set max skip length between words; default is 5
-sample <float>
Set threshold for occurrence of words. Those that appear with higher frequency in the training data
will be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)
-hs <int>
Use Hierarchical Softmax; default is 0 (not used)
-negative <int>
Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)
-threads <int>
Use <int> threads (default 3)
-iter <int>
Run more training iterations (default 5)
-min_count <int>
This will discard words that appear less than <int> times; default is 5
-alpha <float>
Set the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW
-binary <int>
        Save the resulting vectors in binary mode; default is 0 (off)
-cbow <int>
Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)
-accuracy <file>
Compute accuracy of the resulting model analogical inference power on questions file <file>
See an example of questions file at https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt
Example: python -m gensim.scripts.word2vec_standalone -train data.txt -output vec.txt -size 200 -sample 1e-4 -binary 0 -iter 3
"""
import logging
import os.path
import sys
import argparse
from numpy import seterr
logger = logging.getLogger(__name__)
from gensim.models.word2vec import Word2Vec, LineSentence # avoid referencing __main__ in pickle
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO)
logger.info("running %s", " ".join(sys.argv))
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument("-sample", help="Set threshold for occurrence of words. Those that appear with higher frequency in the training data will be randomly down-sampled; "
"default is 1e-3, useful range is (0, 1e-5)", type=float, default=1e-3)
parser.add_argument("-hs", help="Use Hierarchical Softmax; default is 0 (not used)", type=int, default=0, choices=[0, 1])
parser.add_argument("-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)", type=int, default=5)
parser.add_argument("-threads", help="Use THREADS threads (default 3)", type=int, default=3)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument("-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5", type=int, default=5)
parser.add_argument("-alpha", help="Set the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW", type=float)
parser.add_argument("-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)", type=int, default=1, choices=[0, 1])
parser.add_argument("-binary", help="Save the resulting vectors in binary mode; default is 0 (off)", type=int, default=0, choices=[0, 1])
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
if args.cbow == 0:
skipgram = 1
if not args.alpha:
args.alpha = 0.025
else:
skipgram = 0
if not args.alpha:
args.alpha = 0.05
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, alpha=args.alpha, sg=skipgram,
hs=args.hs, negative=args.negative, cbow_mean=1, iter=args.iter)
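    # After training, the model can be queried in the usual gensim way, e.g.
    # (hypothetical query word, assuming it occurs in the training corpus):
    #   model.most_similar('king')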
if args.output:
outfile = args.output
model.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train.split('.')[0]
model.save(outfile + '.model')
if args.binary == 1:
model.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
questions_file = args.accuracy
model.accuracy(questions_file)
logger.info("finished running %s", program)
| TaddyLab/gensim | gensim/scripts/word2vec_standalone.py | Python | gpl-3.0 | 5,951 |
import random
from stocker.SSP.investors.base_investor import BaseInvestor
from stocker.common.events import EventStockOrderNew, EventStockTransaction
from stocker.common.orders import OrderBuy, OrderSell
"""
Sample XML
<Investor module="stocker.SSP.investors.random_investor" class="RandomInvestor">
<init_cash type="int">1000</init_cash>
<buy_threshold type="float">0.6</buy_threshold>
<sell_threshold type="float">0.6</sell_threshold>
<report_path type="str">c:\code\stocker_data\inv1.stm</report_path>
</Investor>
"""
class RandomInvestor(BaseInvestor):
init_cash = 1000
buy_threshold = 0.5
sell_threshold = 0.5
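    # Note on the thresholds: a response is only generated when
    # random.random() exceeds the relevant threshold, so values closer to 1.0
    # make this investor react to fewer of the observed orders.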
def prepare(self):
self.stockbroker.transfer_cash(self, self.init_cash)
def process(self, event):
if isinstance(event, EventStockOrderNew):
order = event.order
            if isinstance(order, OrderBuy) and random.random() > self.sell_threshold:
                self.__process_buy_order(order)
            elif isinstance(order, OrderSell) and random.random() > self.buy_threshold:
                self.__process_sell_order(order)
elif isinstance(event, EventStockTransaction):
if hasattr(event.buy_order, 'investor') and event.buy_order.investor == self:
#print "bought!", event.buy_order
pass
elif hasattr(event.sell_order, 'investor') and event.sell_order.investor == self:
#print "sold!", event.sell_order
pass
def __process_buy_order(self, order):
"""Responds on buy orders"""
        if self.account.shares[order.company_id] <= order.amount:
            # we don't hold enough shares of this company to respond
            return
new_order = OrderSell(order.company_id, order.amount, order.limit_price, order.expiration_date)
self.stockbroker.new_order(new_order, self)
def __process_sell_order(self, order):
"""Responds on sell orders"""
if self.account.cash < order.amount * order.limit_price:
            # we don't have enough money
return
new_order = OrderBuy(order.company_id, order.amount, order.limit_price, order.expiration_date)
self.stockbroker.new_order(new_order, self)
| donpiekarz/Stocker | stocker/SSP/investors/random_investor.py | Python | gpl-3.0 | 2,212 |
import os
import sys
import re
import locale
import mimetypes
import psutil
import time
import base64
from Crypto.Cipher import AES
from Crypto import Random
from nxdrive.logging_config import get_logger
NUXEO_DRIVE_FOLDER_NAME = 'Nuxeo Drive'
log = get_logger(__name__)
WIN32_SUFFIX = os.path.join('library.zip', 'nxdrive')
OSX_SUFFIX = "Contents/Resources/lib/python2.7/site-packages.zip/nxdrive"
ENCODING = locale.getpreferredencoding()
DEFAULT_ENCODING = 'utf-8'
WIN32_PATCHED_MIME_TYPES = {
'image/pjpeg': 'image/jpeg',
'image/x-png': 'image/png',
'image/bmp': 'image/x-ms-bmp',
'audio/x-mpg': 'audio/mpeg',
'video/x-mpeg2a': 'video/mpeg',
'application/x-javascript': 'application/javascript',
'application/x-mspowerpoint.12':
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
}
def current_milli_time():
return int(round(time.time() * 1000))
def normalized_path(path):
"""Return absolute, normalized file path."""
if isinstance(path, bytes):
# Decode path with local encoding when not already decoded explicitly
# by the caller
path = path.decode(ENCODING)
# XXX: we could os.path.normcase as well under Windows but it might be the
# source of unexpected troubles so not doing it for now.
return os.path.normpath(os.path.abspath(os.path.expanduser(path)))
def safe_long_path(path):
"""Utility to prefix path with the long path marker for Windows
http://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath
"""
if sys.platform == 'win32':
if isinstance(path, bytes):
# Decode path with local encoding when not already decoded
# explicitly by the caller
path = unicode(path.decode(ENCODING))
path = u"\\\\?\\" + path
return path
def path_join(parent, child):
if parent == '/':
return '/' + child
return parent + '/' + child
def default_nuxeo_drive_folder():
"""Find a reasonable location for the root Nuxeo Drive folder
This folder is user specific, typically under the home folder.
Under Windows, try to locate My Documents as a home folder, using the
win32com shell API if allowed, else falling back on a manual detection.
Note that we need to decode the path returned by os.path.expanduser with
the local encoding because the value of the HOME environment variable is
read as a byte string. Using os.path.expanduser(u'~') fails if the home
path contains non ASCII characters since Unicode coercion attempts to
decode the byte string as an ASCII string.
"""
if sys.platform == "win32":
from win32com.shell import shell, shellcon
try:
my_documents = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL,
None, 0)
except:
# In some cases (not really sure how this happens) the current user
# is not allowed to access its 'My Documents' folder path through
# the win32com shell API, which raises the following error:
# com_error: (-2147024891, 'Access is denied.', None, None)
# We noticed that in this case the 'Location' tab is missing in the
# Properties window of 'My Documents' accessed through the
# Explorer.
# So let's fall back on a manual (and poor) detection.
# WARNING: it's important to check 'Documents' first as under
# Windows 7 there also exists a 'My Documents' folder invisible in
# the Explorer and cmd / powershell but visible from Python.
# First try regular location for documents under Windows 7 and up
log.debug("Access denied to win32com shell API: SHGetFolderPath,"
" falling back on manual detection of My Documents")
my_documents = os.path.expanduser(r'~\Documents')
my_documents = unicode(my_documents.decode(ENCODING))
if not os.path.exists(my_documents):
# Compatibility for Windows XP
my_documents = os.path.expanduser(r'~\My Documents')
my_documents = unicode(my_documents.decode(ENCODING))
if os.path.exists(my_documents):
nuxeo_drive_folder = os.path.join(my_documents,
NUXEO_DRIVE_FOLDER_NAME)
log.info("Will use '%s' as default Nuxeo Drive folder location"
" under Windows", nuxeo_drive_folder)
return nuxeo_drive_folder
# Fall back on home folder otherwise
user_home = os.path.expanduser('~')
user_home = unicode(user_home.decode(ENCODING))
nuxeo_drive_folder = os.path.join(user_home, NUXEO_DRIVE_FOLDER_NAME)
log.info("Will use '%s' as default Nuxeo Drive folder location",
nuxeo_drive_folder)
return nuxeo_drive_folder
def find_resource_dir(directory, default_path):
"""Find the FS path of a directory in various OS binary packages"""
import nxdrive
nxdrive_path = os.path.dirname(nxdrive.__file__)
app_resources = '/Contents/Resources/'
cxfreeze_suffix = os.path.join('library.zip', 'nxdrive')
dir_path = default_path
if app_resources in nxdrive_path:
# OSX frozen distribution, bundled as an app
dir_path = re.sub(app_resources + ".*", app_resources + directory,
nxdrive_path)
elif nxdrive_path.endswith(cxfreeze_suffix):
# cx_Freeze frozen distribution of nxdrive, data is out of the zip
dir_path = nxdrive_path.replace(cxfreeze_suffix, directory)
if not os.path.exists(dir_path):
log.warning("Could not find the resource directory at: %s",
dir_path)
return None
return dir_path
def find_exe_path():
"""Introspect the Python runtime to find the frozen Windows exe"""
import nxdrive
nxdrive_path = os.path.realpath(os.path.dirname(nxdrive.__file__))
log.trace("nxdrive_path: %s", nxdrive_path)
# Detect frozen win32 executable under Windows
if nxdrive_path.endswith(WIN32_SUFFIX):
log.trace("Detected frozen win32 executable under Windows")
exe_path = nxdrive_path.replace(WIN32_SUFFIX, 'ndrivew.exe')
if os.path.exists(exe_path):
log.trace("Returning exe path: %s", exe_path)
return exe_path
# Detect OSX frozen app
if nxdrive_path.endswith(OSX_SUFFIX):
log.trace("Detected OS X frozen app")
exe_path = nxdrive_path.replace(OSX_SUFFIX,
"Contents/MacOS/ndrive")
if os.path.exists(exe_path):
log.trace("Returning exe path: %s", exe_path)
return exe_path
# Fall-back to the regular method that should work both the ndrive script
exe_path = sys.argv[0]
log.trace("Returning default exe path: %s", exe_path)
return exe_path
def force_decode(string, codecs=['utf8', 'cp1252']):
for codec in codecs:
try:
return string.decode(codec)
except:
pass
log.debug("Cannot decode string '%s' with any of the given codecs: %r",
string, codecs)
return ''
def encrypt(plaintext, secret, lazy=True):
"""Symetric encryption using AES"""
secret = _lazysecret(secret) if lazy else secret
iv = Random.new().read(AES.block_size)
encobj = AES.new(secret, AES.MODE_CFB, iv)
return base64.b64encode(iv + encobj.encrypt(plaintext))
def decrypt(ciphertext, secret, lazy=True):
"""Symetric decryption using AES"""
secret = _lazysecret(secret) if lazy else secret
ciphertext = base64.b64decode(ciphertext)
iv = ciphertext[:AES.block_size]
ciphertext = ciphertext[AES.block_size:]
    # Don't fail on decrypt; return None if the ciphertext cannot be decrypted
try:
encobj = AES.new(secret, AES.MODE_CFB, iv)
return encobj.decrypt(ciphertext)
except:
return None
def _lazysecret(secret, blocksize=32, padding='}'):
"""Pad secret if not legal AES block size (16, 24, 32)"""
if len(secret) > blocksize:
return secret[:-(len(secret) - blocksize)]
if not len(secret) in (16, 24, 32):
return secret + (blocksize - len(secret)) * padding
return secret
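# Round-trip sketch (the secret and plaintext values are hypothetical):
#   token = encrypt("device-password", "my-passphrase")
#   decrypt(token, "my-passphrase")   # -> "device-password"
# _lazysecret() pads or truncates the passphrase to a legal AES key length, so
# arbitrary-length secrets are accepted when lazy=True.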
def guess_mime_type(filename):
mime_type, _ = mimetypes.guess_type(filename)
if mime_type:
if sys.platform == 'win32':
# Patch bad Windows MIME types
# See https://jira.nuxeo.com/browse/NXP-11660
# and http://bugs.python.org/issue15207
mime_type = _patch_win32_mime_type(mime_type)
log.trace("Guessed mime type '%s' for '%s'", mime_type, filename)
return mime_type
else:
log.trace("Could not guess mime type for '%s', returing"
" 'application/octet-stream'", filename)
return "application/octet-stream"
def _patch_win32_mime_type(mime_type):
patched_mime_type = WIN32_PATCHED_MIME_TYPES.get(mime_type)
return patched_mime_type if patched_mime_type else mime_type
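# Examples (results come from the standard mimetypes table, so they can vary
# slightly by platform):
#   guess_mime_type("report.pdf")   # -> "application/pdf"
#   guess_mime_type("archive.xyz")  # -> "application/octet-stream" (fallback)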
def deprecated(func):
""""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
def new_func(*args, **kwargs):
log.warning("Call to deprecated function {}.".format(func.__name__))
return func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
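# Usage sketch (hypothetical function name):
#   @deprecated
#   def old_helper():
#       ...
# Each call to old_helper() then logs a deprecation warning before delegating
# to the original implementation.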
class ServerLoader(object):
def __init__(self, remote_client, local_client):
self._remote_client = remote_client
self._local_client = local_client
def sync(self, remote_uid, local):
childs = self._local_client.get_children_info(local)
rchilds = self._remote_client.get_children_info(remote_uid)
existing_childs = dict()
for child in rchilds:
path = os.path.join(local, child.name)
existing_childs[path] = child
for child in childs:
child_uid = None
if not child.path in existing_childs:
if child.folderish:
print "Making folder: %s" % child.path
child_uid = self._remote_client.make_folder(remote_uid, child.name)
else:
print "Making file: %s" % child.path
self._remote_client.stream_file(remote_uid,
self._local_client._abspath(child.path))
else:
child_uid = existing_childs[child.path].uid
if child.folderish:
self.sync(child_uid, child.path)
class PidLockFile(object):
""" This class handle the pid lock file"""
def __init__(self, folder, key):
self.folder = folder
self.key = key
self.locked = False
def _get_sync_pid_filepath(self, process_name=None):
if process_name is None:
process_name = self.key
return os.path.join(self.folder,
'nxdrive_%s.pid' % process_name)
def unlock(self):
if not self.locked:
return
# Clean pid file
pid_filepath = self._get_sync_pid_filepath()
try:
os.unlink(pid_filepath)
except Exception, e:
log.warning("Failed to remove stalled pid file: %s"
" for stopped process %d: %r", pid_filepath,
os.getpid(), e)
def check_running(self, process_name=None):
"""Check whether another sync process is already runnning
If nxdrive.pid file already exists and the pid points to a running
nxdrive program then return the pid. Return None otherwise.
"""
if process_name is None:
process_name = self.key
pid_filepath = self._get_sync_pid_filepath(process_name=process_name)
if os.path.exists(pid_filepath):
with open(safe_long_path(pid_filepath), 'rb') as f:
                pid = None  # stays None if the pid file is empty or unparseable
try:
pid = int(f.read().strip())
_ = psutil.Process(pid)
# TODO https://jira.nuxeo.com/browse/NXDRIVE-26: Check if
# we can skip the process name verif as it can be
# overridden
return pid
except (ValueError, psutil.NoSuchProcess):
pass
# This is a pid file that is empty or pointing to either a
# stopped process or a non-nxdrive process: let's delete it if
# possible
try:
os.unlink(pid_filepath)
if pid is None:
msg = "Removed old empty pid file: %s" % pid_filepath
else:
msg = ("Removed old pid file: %s for stopped process"
" %d" % (pid_filepath, pid))
log.info(msg)
except Exception, e:
if pid is None:
msg = ("Failed to remove empty stalled pid file: %s:"
" %r" % (pid_filepath, e))
else:
msg = ("Failed to remove stalled pid file: %s for"
" stopped process %d: %r"
% (pid_filepath, pid, e))
log.warning(msg)
self.locked = True
return None
def lock(self):
pid = self.check_running(process_name=self.key)
if pid is not None:
log.warning(
"%s process with pid %d already running.",
self.key, pid)
return pid
# Write the pid of this process
pid_filepath = self._get_sync_pid_filepath(process_name=self.key)
pid = os.getpid()
with open(safe_long_path(pid_filepath), 'wb') as f:
f.write(str(pid))
return None
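# Typical usage sketch (the folder and key are hypothetical):
#   lock_file = PidLockFile("/tmp", "sync")
#   if lock_file.lock() is None:      # None means the lock was acquired
#       try:
#           pass  # ... do the synchronized work ...
#       finally:
#           lock_file.unlock()
# lock() returns the other process's pid when an instance is already running.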
| IsaacYangSLA/nuxeo-drive | nuxeo-drive-client/nxdrive/utils.py | Python | lgpl-2.1 | 14,052 |
#This script is used to create a timer when making a video in the Blender VSE.
#When executed, the script grabs every marker on the timeline and groups them
#in pairs by name. Markers should be named in the pattern <sectionname>.Start
#and <sectionname>.End and there can be no more than two markers per section.
#Every section should have an associated text strip with the naming pattern
#<sectionname>.Timer
#WARNING: Each *.Start marker should be created before its associated *.End
#marker. Otherwise they will appear to the script in reverse order and the
#timer for that section will not work.
import bpy
scene = bpy.data.scenes['Scene']
marks = []
st = -1
nm = ''
for marker in scene.timeline_markers:
i = marker.name.find('Start')
if i != -1:
st = marker.frame
nm = marker.name
else:
i = marker.name.find('End')
if i != -1:
nm = marker.name[:i]
marks.append((nm, st, marker.frame))
st = 0
nm = ''
else:
print('Unknown label: ' + marker.name)
for i in marks:
print(i)
def frame_step(scene):
for item in marks:
if scene.frame_current >= item[1] and scene.frame_current < item[2]:
obj = scene.sequence_editor.sequences_all[item[0] + 'Timer']
fps = scene.render.fps / scene.render.fps_base # actual framerate
cur_frame = scene.frame_current - item[1]
obj.text = '{0:.3f}'.format(cur_frame/fps)
break
bpy.app.handlers.frame_change_pre.append(frame_step)
bpy.app.handlers.render_pre.append(frame_step) | grimlock-/batscriptcrazy | py/blender/TimerScript.py | Python | mit | 1,598 |
'''
Author: Juan Jose Conti <jjconti@gmail.com>
This module can parse Python files looking for uses of the % operator.
The % operator is mainly used for string formatting operations.
Use:
python wrapstrings.py file.py funcName
'''
import ast
class RewriteMod(ast.NodeTransformer):
def visit_BinOp(self, node):
        '''
        Mod is a binary operator.
        A BinOp instance has left, op and right attributes.
        If node.op is an instance of Mod and node.left is an instance of Str,
        take node.left and replace it with a call to the wrapper function.
        '''
if isinstance(node.op, ast.Mod) and (changeall or isinstance(node.left, ast.Str)):
node.left = ast.Call(func=ast.Name(id=funcname), args=[node.left])
return node
return node
if __name__ == '__main__':
import sys
filename = sys.argv[1]
funcname = sys.argv[2]
if len(sys.argv) > 3:
changeall = True
else:
changeall = False
a = ast.parse(open(filename).read())
RewriteMod().visit(a)
"http://dev.pocoo.org/hg/sandbox/file/868ea20c2c1d/ast/"
import codegen
print codegen.to_source(a)
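# Illustrative effect, assuming the wrapper function is called "taint"
# (hypothetical name):
#   python wrapstrings.py module.py taint
# would rewrite an expression such as
#   "Hello %s" % name
# into
#   taint("Hello %s") % name
# With a third command-line argument present, every left operand of % is
# wrapped, not only string literals.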
| jjconti/taint-mode-py | wrapstrings/wrapstrings.py | Python | gpl-3.0 | 1,189 |
"""
Created on 06 Nov 2020
@author: Jade Page (jade.page@southcoastscience.com)
"""
from collections import OrderedDict
from scs_core.data.json import PersistentJSONable
# --------------------------------------------------------------------------------------------------------------------
class UptimeList(PersistentJSONable):
"""
classdocs
"""
__FILENAME = "device_uptime_list"
@classmethod
def persistence_location(cls):
return cls.aws_dir(), cls.__FILENAME
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict, skeleton=False):
if not jdict:
return None
uptime_list = jdict.get('uptime_list')
return UptimeList(uptime_list)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, uptime_list):
"""
Constructor
"""
super().__init__()
self.__uptime_list = uptime_list
# ----------------------------------------------------------------------------------------------------------------
@property
def uptime_list(self):
return self.__uptime_list
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['uptime_list'] = self.__uptime_list
return jdict
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "UptimeList:{uptime_list:%s}" % \
self.uptime_list
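# Round-trip sketch (the payload values are hypothetical):
#   jdict = {'uptime_list': ["scs-bgx-401: up 10 days", "scs-bgx-402: up 3 days"]}
#   uptimes = UptimeList.construct_from_jdict(jdict)
#   uptimes.as_json()      # -> OrderedDict([('uptime_list', [...])])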
| south-coast-science/scs_core | src/scs_core/aws/data/uptime_list.py | Python | mit | 1,793 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from copy import copy
from werkzeug.exceptions import NotFound
from MaKaC.review import Abstract
from MaKaC.webinterface.rh.conferenceBase import RHFileBase
from MaKaC.webinterface.rh.base import RHDisplayBaseProtected
from MaKaC.errors import NotFoundError, AccessError
from MaKaC.registration import Registrant
from MaKaC.conference import Reviewing, LocalFile
from MaKaC.webinterface.rh.contribMod import RCContributionPaperReviewingStaff
from MaKaC.i18n import _
from indico.web.flask.util import send_file
class RHFileAccess(RHFileBase, RHDisplayBaseProtected):
def _checkParams( self, params ):
try:
RHFileBase._checkParams( self, params )
except:
raise NotFoundError("The file you tried to access does not exist.")
def _checkProtection( self ):
if isinstance(self._file.getOwner(), Reviewing):
selfcopy = copy(self)
selfcopy._target = self._file.getOwner().getContribution()
if not (RCContributionPaperReviewingStaff.hasRights(selfcopy) or \
selfcopy._target.canUserSubmit(self.getAW().getUser()) or \
self._target.canModify( self.getAW() )):
raise AccessError()
elif isinstance(self._file.getOwner(), Registrant) and \
not self._file.getOwner().canUserModify(self.getAW().getUser()):
raise AccessError(_("Access to this resource is forbidden."))
elif isinstance(self._file.getOwner(), Abstract):
RHDisplayBaseProtected._checkProtection(self)
else:
# superseded by attachments
raise NotFound
def _process(self):
assert isinstance(self._file, LocalFile)
return send_file(self._file.getFileName(), self._file.getFilePath(), self._file.getFileType(),
self._file.getCreationDate())
| XeCycle/indico | indico/MaKaC/webinterface/rh/fileAccess.py | Python | gpl-3.0 | 2,592 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The PoissonLogNormalQuadratureCompound distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
__all__ = [
"PoissonLogNormalQuadratureCompound",
]
class PoissonLogNormalQuadratureCompound(distribution_lib.Distribution):
"""`PoissonLogNormalQuadratureCompound` distribution.
The `PoissonLogNormalQuadratureCompound` is an approximation to a
Poisson-LogNormal [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,
```none
p(k|loc, scale)
= int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)
= int_{R} dz ((lambda(z) sqrt(2) scale)
* exp(-z**2) / (lambda(z) sqrt(2 pi) sigma)
* Poisson(k | lambda(z)))
= int_{R} dz exp(-z**2) / sqrt(pi) Poisson(k | lambda(z))
approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }
```
where `lambda(z) = exp(sqrt(2) scale z + loc)` and the `prob,grid` terms
are from [numerical quadrature](
https://en.wikipedia.org/wiki/Numerical_integration) (default:
[Gauss--Hermite quadrature](
https://en.wikipedia.org/wiki/Gauss%E2%80%93Hermite_quadrature)). Note that
the second line made the substitution:
`z(l) = (log(l) - loc) / (sqrt(2) scale)` which implies `lambda(z)` [above]
and `dl = sqrt(2) scale lambda(z) dz`
In the non-approximation case, a draw from the LogNormal prior represents the
Poisson rate parameter. Unfortunately, the non-approximate distribution lacks
an analytical probability density function (pdf). Therefore the
`PoissonLogNormalQuadratureCompound` class implements an approximation based
on [numerical quadrature](
https://en.wikipedia.org/wiki/Numerical_integration) (default:
[Gauss--Hermite quadrature](
https://en.wikipedia.org/wiki/Gauss%E2%80%93Hermite_quadrature)).
Note: although the `PoissonLogNormalQuadratureCompound` is approximately the
Poisson-LogNormal compound distribution, it is itself a valid distribution.
Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are
all mutually consistent.
#### Mathematical Details
The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal
[compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution). Using
variable-substitution and [numerical quadrature](
https://en.wikipedia.org/wiki/Numerical_integration) (default:
[Gauss--Hermite quadrature](
https://en.wikipedia.org/wiki/Gauss%E2%80%93Hermite_quadrature)) we can
redefine the distribution to be a parameter-less convex combination of `deg`
different Poisson samples.
That is, defined over positive integers, this distribution is parameterized
by a (batch of) `loc` and `scale` scalars.
The probability density function (pdf) is,
```none
pdf(k | loc, scale, deg)
= sum{ prob[d] Poisson(k | lambda=exp(sqrt(2) scale grid[d] + loc))
: d=0, ..., deg-1 }
```
where, [e.g., `grid, w = numpy.polynomial.hermite.hermgauss(deg)`](
https://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.polynomial.hermite.hermgauss.html)
and `prob = w / sqrt(pi)`.
#### Examples
```python
tfd = tf.contrib.distributions
# Create two batches of PoissonLogNormalQuadratureCompounds, one with
# prior `loc = 0.` and another with `loc = 1.` In both cases `scale = 1.`
pln = tfd.PoissonLogNormalQuadratureCompound(
loc=[0., -0.5],
scale=1.,
quadrature_grid_and_probs=(
np.polynomial.hermite.hermgauss(deg=10)),
validate_args=True)
"""
def __init__(self,
loc,
scale,
quadrature_grid_and_probs=None,
validate_args=False,
allow_nan_stats=True,
name="PoissonLogNormalQuadratureCompound"):
"""Constructs the PoissonLogNormalQuadratureCompound on `R**k`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight. When `None`, defaults to:
`np.polynomial.hermite.hermgauss(deg=8)`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc.dtype != scale[0].dtype`.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, scale]):
loc = ops.convert_to_tensor(loc, name="loc")
self._loc = loc
scale = ops.convert_to_tensor(scale, name="scale")
self._scale = scale
dtype = loc.dtype.base_dtype
if dtype != scale.dtype.base_dtype:
raise TypeError(
"loc.dtype(\"{}\") does not match scale.dtype(\"{}\")".format(
loc.dtype.name, scale.dtype.name))
grid, probs = distribution_util.process_quadrature_grid_and_probs(
quadrature_grid_and_probs, dtype, validate_args)
self._quadrature_grid = grid
self._quadrature_probs = probs
self._quadrature_size = distribution_util.dimension_size(probs, axis=0)
self._mixture_distribution = categorical_lib.Categorical(
logits=math_ops.log(self._quadrature_probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
self._log_rate = (loc[..., array_ops.newaxis]
+ np.sqrt(2.) * scale[..., array_ops.newaxis] * grid)
self._distribution = poisson_lib.Poisson(
log_rate=self._log_rate,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
super(PoissonLogNormalQuadratureCompound, self).__init__(
dtype=dtype,
reparameterization_type=distribution_lib.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[loc, scale],
name=name)
@property
def mixture_distribution(self):
"""Distribution which randomly selects a Poisson with Gauss-Hermite rate."""
return self._mixture_distribution
@property
def distribution(self):
"""Base Poisson parameterized by a Gauss-Hermite grid of rates."""
return self._distribution
@property
def loc(self):
"""Location parameter of the LogNormal prior."""
return self._loc
@property
def scale(self):
"""Scale parameter of the LogNormal prior."""
return self._scale
@property
def quadrature_grid(self):
"""Quadrature grid points."""
return self._quadrature_grid
@property
def quadrature_probs(self):
"""Quadrature normalized weights."""
return self._quadrature_probs
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc),
array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.shape,
self.scale.shape)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
batch_size = (np.prod(self.batch_shape.as_list(), dtype=np.int32)
if self.batch_shape.is_fully_defined()
else math_ops.reduce_prod(self.batch_shape_tensor()))
ids = self._mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
[batch_size])),
seed=distribution_util.gen_new_seed(
seed, "poisson_lognormal_quadrature_compound"))
# Stride `quadrature_size` for `batch_size` number of times.
offset = math_ops.range(start=0,
limit=batch_size * self._quadrature_size,
delta=self._quadrature_size,
dtype=ids.dtype)
ids += offset
rate = array_ops.gather(
array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
rate = array_ops.reshape(
rate, shape=concat_vectors([n], self.batch_shape_tensor()))
return random_ops.random_poisson(
lam=rate, shape=[], dtype=self.dtype, seed=seed)
def _log_prob(self, x):
return math_ops.reduce_logsumexp(
(self.mixture_distribution.logits
+ self.distribution.log_prob(x[..., array_ops.newaxis])),
axis=-1)
def _mean(self):
return math_ops.exp(
math_ops.reduce_logsumexp(
self.mixture_distribution.logits + self._log_rate,
axis=-1))
def _variance(self):
return math_ops.exp(self._log_variance())
def _stddev(self):
return math_ops.exp(0.5 * self._log_variance())
def _log_variance(self):
# Following calculation is based on law of total variance:
#
# Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
#
# where,
#
# Z|v ~ interpolate_affine[v](distribution)
# V ~ mixture_distribution
#
# thus,
#
# E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
# Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
v = array_ops.stack([
# log(self.distribution.variance()) = log(Var[d]) = log(rate[d])
self._log_rate,
# log((Mean[d] - Mean)**2)
2. * math_ops.log(
math_ops.abs(self.distribution.mean()
- self._mean()[..., array_ops.newaxis])),
], axis=-1)
return math_ops.reduce_logsumexp(
self.mixture_distribution.logits[..., array_ops.newaxis] + v,
axis=[-2, -1])
def static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
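# Quadrature sketch mirroring the class docstring (uses only the numpy import
# above; degree 8 matches the documented default):
#   grid, w = np.polynomial.hermite.hermgauss(8)
#   probs = w / np.sqrt(np.pi)   # normalized quadrature weights
# Passing (grid, w) as `quadrature_grid_and_probs` reproduces the default
# behaviour; per the constructor docstring, possibly-unnormalized weights are
# also accepted.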
| Kongsea/tensorflow | tensorflow/contrib/distributions/python/ops/poisson_lognormal.py | Python | apache-2.0 | 12,323 |
"""
Support for inheritance of fields down an XBlock hierarchy.
"""
from __future__ import absolute_import
from datetime import datetime
from django.conf import settings
from pytz import UTC
from xmodule.partitions.partitions import UserPartition
from xblock.fields import Scope, Boolean, String, Float, XBlockMixin, Dict, Integer, List
from xblock.runtime import KeyValueStore, KvsFieldData
from xmodule.fields import Date, Timedelta
from ..course_metadata_utils import DEFAULT_START_DATE
# Make '_' a no-op so we can scrape strings
# Using lambda instead of `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class UserPartitionList(List):
"""Special List class for listing UserPartitions"""
def from_json(self, values):
return [UserPartition.from_json(v) for v in values]
def to_json(self, values):
return [user_partition.to_json()
for user_partition in values]
class InheritanceMixin(XBlockMixin):
"""Field definitions for inheritable fields."""
graded = Boolean(
help="Whether this module contributes to the final course grade",
scope=Scope.settings,
default=False,
)
start = Date(
help="Start time when this module is visible",
default=DEFAULT_START_DATE,
scope=Scope.settings
)
due = Date(
display_name=_("Due Date"),
help=_("Enter the default date by which problems are due."),
scope=Scope.settings,
)
visible_to_staff_only = Boolean(
help=_("If true, can be seen only by course staff, regardless of start date."),
default=False,
scope=Scope.settings,
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_("Enter the method by which this course is edited (\"XML\" or \"Studio\")."),
default="Studio",
scope=Scope.settings,
deprecated=True # Deprecated because user would not change away from Studio within Studio.
)
giturl = String(
display_name=_("GIT URL"),
help=_("Enter the URL for the course data GIT repository."),
scope=Scope.settings
)
xqa_key = String(
display_name=_("XQA Key"),
help=_("This setting is not currently supported."), scope=Scope.settings,
deprecated=True
)
annotation_storage_url = String(
help=_("Enter the location of the annotation storage server. The textannotation, videoannotation, and imageannotation advanced modules require this setting."),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("URL for Annotation Storage")
)
annotation_token_secret = String(
help=_("Enter the secret string for annotation storage. The textannotation, videoannotation, and imageannotation advanced modules require this string."),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
graceperiod = Timedelta(
help="Amount of time after the due date that submissions will be accepted",
scope=Scope.settings,
)
group_access = Dict(
help=_("Enter the ids for the content groups this problem belongs to."),
scope=Scope.settings,
)
showanswer = String(
display_name=_("Show Answer"),
help=_(
'Specify when the Show Answer button appears for each problem. '
'Valid values are "always", "answered", "attempted", "closed", '
'"finished", "past_due", "correct_or_past_due", and "never".'
),
scope=Scope.settings,
default="finished",
)
rerandomize = String(
display_name=_("Randomization"),
help=_(
'Specify the default for how often variable values in a problem are randomized. '
'This setting should be set to \"never\" unless you plan to provide a Python '
'script to identify and randomize values in most of the problems in your course. '
'Valid values are \"always\", \"onreset\", \"never\", and \"per_student\".'
),
scope=Scope.settings,
default="never",
)
days_early_for_beta = Float(
display_name=_("Days Early for Beta Users"),
help=_("Enter the number of days before the start date that beta users can access the course."),
scope=Scope.settings,
default=None,
)
static_asset_path = String(
display_name=_("Static Asset Path"),
help=_("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."),
scope=Scope.settings,
default='',
)
text_customization = Dict(
display_name=_("Text Customization"),
help=_("Enter string customization substitutions for particular locations."),
scope=Scope.settings,
)
use_latex_compiler = Boolean(
display_name=_("Enable LaTeX Compiler"),
help=_("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."),
default=False,
scope=Scope.settings
)
max_attempts = Integer(
display_name=_("Maximum Attempts"),
help=_("Enter the maximum number of times a student can try to answer problems. By default, Maximum Attempts is set to null, meaning that students have an unlimited number of attempts for problems. You can override this course-wide setting for individual problems. However, if the course-wide setting is a specific number, you cannot set the Maximum Attempts for individual problems to unlimited."),
values={"min": 0}, scope=Scope.settings
)
matlab_api_key = String(
display_name=_("Matlab API key"),
help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
"This key is granted for exclusive use in this course for the specified duration. "
"Do not share the API key with other courses. Notify MathWorks immediately "
"if you believe the key is exposed or compromised. To obtain a key for your course, "
"or to report an issue, please contact moocsupport@mathworks.com"),
scope=Scope.settings
)
# This is should be scoped to content, but since it's defined in the policy
# file, it is currently scoped to settings.
user_partitions = UserPartitionList(
display_name=_("Group Configurations"),
help=_("Enter the configurations that govern how students are grouped together."),
default=[],
scope=Scope.settings
)
video_speed_optimizations = Boolean(
display_name=_("Enable video caching system"),
help=_("Enter true or false. If true, video caching will be used for HTML5 videos."),
default=True,
scope=Scope.settings
)
video_bumper = Dict(
display_name=_("Video Pre-Roll"),
help=_(
"""Identify a video, 5-10 seconds in length, to play before course videos. Enter the video ID from"""
""" the Video Uploads page and one or more transcript files in the following format:"""
""" {"video_id": "ID", "transcripts": {"language": "/static/filename.srt"}}."""
""" For example, an entry for a video with two transcripts looks like this:"""
""" {"video_id": "77cef264-d6f5-4cf2-ad9d-0178ab8c77be","""
""" "transcripts": {"en": "/static/DemoX-D01_1.srt", "uk": "/static/DemoX-D01_1_uk.srt"}}"""
),
scope=Scope.settings
)
reset_key = "DEFAULT_SHOW_RESET_BUTTON"
default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False
show_reset_button = Boolean(
display_name=_("Show Reset Button for Problems"),
help=_("Enter true or false. If true, problems in the course default to always displaying a 'Reset' button. You can "
"override this in each problem's settings. All existing problems are affected when this course-wide setting is changed."),
scope=Scope.settings,
default=default_reset_button
)
edxnotes = Boolean(
display_name=_("Enable Student Notes"),
help=_("Enter true or false. If true, students can use the Student Notes feature."),
default=False,
scope=Scope.settings
)
edxnotes_visibility = Boolean(
display_name="Student Notes Visibility",
help=_("Indicates whether Student Notes are visible in the course. "
"Students can also show or hide their notes in the courseware."),
default=True,
scope=Scope.user_info
)
in_entrance_exam = Boolean(
display_name=_("Tag this module as part of an Entrance Exam section"),
help=_("Enter true or false. If true, answer submissions for problem modules will be "
"considered in the Entrance Exam scoring/gating algorithm."),
scope=Scope.settings,
default=False
)
def compute_inherited_metadata(descriptor):
"""Given a descriptor, traverse all of its descendants and do metadata
inheritance. Should be called on a CourseDescriptor after importing a
course.
NOTE: This means that there is no such thing as lazy loading at the
moment--this accesses all the children."""
if descriptor.has_children:
parent_metadata = descriptor.xblock_kvs.inherited_settings.copy()
# add any of descriptor's explicitly set fields to the inheriting list
for field in InheritanceMixin.fields.values():
if field.is_set_on(descriptor):
# inherited_settings values are json repr
parent_metadata[field.name] = field.read_json(descriptor)
for child in descriptor.get_children():
inherit_metadata(child, parent_metadata)
compute_inherited_metadata(child)
def inherit_metadata(descriptor, inherited_data):
"""
Updates this module with metadata inherited from a containing module.
Only metadata specified in self.inheritable_metadata will
be inherited
`inherited_data`: A dictionary mapping field names to the values that
they should inherit
"""
try:
descriptor.xblock_kvs.inherited_settings = inherited_data
except AttributeError: # the kvs doesn't have inherited_settings probably b/c it's an error module
pass
def own_metadata(module):
"""
Return a JSON-friendly dictionary that contains only non-inherited field
keys, mapped to their serialized values
"""
return module.get_explicitly_set_fields_by_scope(Scope.settings)
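# Illustrative sketch (not part of the original module): typical use of the two
# helpers above after a course import. `course_descriptor` is a hypothetical
# CourseDescriptor-like block obtained from a modulestore, and `.location` is
# assumed to be its usual usage-key attribute.
def _example_apply_inheritance(course_descriptor):
    """Apply inheritance to a freshly imported course tree and collect each
    child's explicitly set Scope.settings fields, keyed by location."""
    compute_inherited_metadata(course_descriptor)
    return {
        child.location: own_metadata(child)
        for child in course_descriptor.get_children()
    }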
class InheritingFieldData(KvsFieldData):
"""A `FieldData` implementation that can inherit value from parents to children."""
def __init__(self, inheritable_names, **kwargs):
"""
`inheritable_names` is a list of names that can be inherited from
parents.
"""
super(InheritingFieldData, self).__init__(**kwargs)
self.inheritable_names = set(inheritable_names)
def default(self, block, name):
"""
The default for an inheritable name is found on a parent.
"""
if name in self.inheritable_names:
# Walk up the content tree to find the first ancestor
# that this field is set on. Use the field from the current
# block so that if it has a different default than the root
# node of the tree, the block's default will be used.
field = block.fields[name]
ancestor = block.get_parent()
while ancestor is not None:
if field.is_set_on(ancestor):
return field.read_json(ancestor)
else:
ancestor = ancestor.get_parent()
return super(InheritingFieldData, self).default(block, name)
def inheriting_field_data(kvs):
"""Create an InheritanceFieldData that inherits the names in InheritanceMixin."""
return InheritingFieldData(
inheritable_names=InheritanceMixin.fields.keys(),
kvs=kvs,
)
class InheritanceKeyValueStore(KeyValueStore):
"""
Common superclass for kvs's which know about inheritance of settings. Offers simple
dict-based storage of fields and lookup of inherited values.
Note: inherited_settings is a dict of key to json values (internal xblock field repr)
"""
def __init__(self, initial_values=None, inherited_settings=None):
super(InheritanceKeyValueStore, self).__init__()
self.inherited_settings = inherited_settings or {}
self._fields = initial_values or {}
def get(self, key):
return self._fields[key.field_name]
def set(self, key, value):
# xml backed courses are read-only, but they do have some computed fields
self._fields[key.field_name] = value
def delete(self, key):
del self._fields[key.field_name]
def has(self, key):
return key.field_name in self._fields
def default(self, key):
"""
Check to see if the default should be from inheritance. If not
inheriting, this will raise KeyError which will cause the caller to use
the field's global default.
"""
return self.inherited_settings[key.field_name]
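# Illustrative sketch (not part of the original module): build an in-memory
# InheritanceKeyValueStore and expose it through InheritingFieldData via the
# helper above. The field names and values here are assumptions chosen only
# for demonstration.
def _example_inheriting_field_data():
    kvs = InheritanceKeyValueStore(
        initial_values={'display_name': 'Example Section'},
        inherited_settings={'graceperiod': '1 day 12 hours'},
    )
    # A block whose kvs lacks a value for an inheritable field falls back to
    # the inherited json value, or walks up get_parent() when wrapped in
    # InheritingFieldData.
    return inheriting_field_data(kvs)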
| hamzehd/edx-platform | common/lib/xmodule/xmodule/modulestore/inheritance.py | Python | agpl-3.0 | 13,415 |
#QLinearGradient myGradient;
#QPen myPen;
#QPolygonF myPolygon;
#QPainterPath myPath;
#myPath.addPolygon(myPolygon);
#QPainter painter(this);
#painter.setBrush(myGradient);
#painter.setPen(myPen);
#painter.drawPath(myPath);
import math
from PyQt5 import QtCore, QtGui, QtWidgets
class ArrowItem(QtWidgets.QGraphicsItem):
def definePath(self):
poligonArrow=QtGui.QPolygonF()
poligonArrow.append(QtCore.QPointF(0.0, 5.0))
poligonArrow.append(QtCore.QPointF(60.0, 5.0))
poligonArrow.append(QtCore.QPointF(60.0, 10.0))
poligonArrow.append(QtCore.QPointF(80.0, 0.0))
poligonArrow.append(QtCore.QPointF(60.0, -10.0))
poligonArrow.append(QtCore.QPointF(60.0, -5.0))
poligonArrow.append(QtCore.QPointF(0.0, -5.0))
poligonArrow.append(QtCore.QPointF(0.0, 5.0))
arrowPath=QtGui.QPainterPath()
arrowPath.addPolygon(poligonArrow)
return arrowPath
    def boundingRect(self):
        """
        Override of the Qt boundingRect method (the item's bounding rectangle).
        """
        return QtCore.QRectF(-1, -250, 80, 50)
    def paint(self, painter, option, widget):
        """
        Override of the Qt paint method.
        """
painter.setPen(QtGui.QPen(QtGui.QColor(79, 106, 25)))
painter.setBrush(QtGui.QColor(122, 163, 39))
painter.drawPath(self.definePath())
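# Illustrative sketch (not part of the original module): display the arrow in a
# minimal QGraphicsView. This is only meaningful where a Qt GUI session can be
# created.
def _example_show_arrow():
    import sys
    app = QtWidgets.QApplication(sys.argv)
    scene = QtWidgets.QGraphicsScene()
    scene.addItem(ArrowItem())
    view = QtWidgets.QGraphicsView(scene)
    view.show()
    return app.exec_()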
| chiamingyen/PythonCAD_py3 | Interface/Entity/arrowitem.py | Python | gpl-2.0 | 1,388 |
from plugininterface import basePlugin
class test_plugin(basePlugin):
def plugin_init(self):
# The Constructor of the plugin
pass
def plugin_loaded(self):
# This function will be called if all plugins are loaded.
pass
def plugin_exploit_modes_requested(self, langClass, isSystem, isUnix):
        # This method will be called just before the user gets the 'available attack' screen.
# You can see that we get the
# * langClass (which represents the current language of the script)
# * A boolean value 'isSystem' which tells us if we can inject system commands.
# * And another boolean 'isUnix' which will be true if it's a unix-like system and false if it's Windows.
        # We should return a list of tuples, each containing a label and a unique callback string.
ret = []
#print "Language: " + langClass.getName()
if (isSystem):
attack = ("Show some info", "example.sysinfo")
ret.append(attack)
return(ret)
def plugin_callback_handler(self, callbackstring, haxhelper):
# This function will be launched if the user selected one of your attacks.
# The two params you receive here are:
# * callbackstring - The string you have defined in plugin_exploit_modes_requested.
# * haxhelper - A little class which makes it very easy to send an injected command.
if (callbackstring == "example.sysinfo"):
print haxhelper.isUnix()
print haxhelper.isWindows()
print haxhelper.getLangName()
print haxhelper.canExecuteSystemCommands()
print haxhelper.concatCommands(("ver", "echo %USERNAME%"))
if (haxhelper.isUnix()):
# We are in unix
print haxhelper.executeSystemCommand("cat /proc/cpuinfo")
print haxhelper.executeSystemCommand("uname -a")
else:
# We are in Windows
print haxhelper.executeSystemCommand("ver")
| crunchsec/fimap | src/plugins/test_plugin/test_plugin.py | Python | gpl-2.0 | 2,173 |
# Copyright 2012 Nicira Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Brad Hall, Nicira Networks, Inc.
import logging
import unittest
from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import NvpPlugin
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("test_check")
class NvpTests(unittest.TestCase):
def setUp(self):
self.quantum = NvpPlugin()
def tearDown(self):
pass
# These nvplib functions will throw an exception if the check fails
def test_check_default_transport_zone(self):
nvplib.check_default_transport_zone(self.quantum.controller)
| savi-dev/quantum | quantum/plugins/nicira/nicira_nvp_plugin/tests/test_check.py | Python | apache-2.0 | 1,232 |
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import os
import unittest
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import FragmentCatalog, BuildFragmentCatalog
from rdkit.six.moves import cPickle
def feq(n1,n2,tol=1e-4):
return abs(n1-n2)<tol
class TestCase(unittest.TestCase):
def setUp(self) :
self.smiList = ["S(SC1=NC2=CC=CC=C2S1)C3=NC4=C(S3)C=CC=C4","CC1=CC(=O)C=CC1=O",
"OC1=C(Cl)C=C(C=C1[N+]([O-])=O)[N+]([O-])=O",
"[O-][N+](=O)C1=CNC(=N)S1", "NC1=CC2=C(C=C1)C(=O)C3=C(C=CC=C3)C2=O",
"OC(=O)C1=C(C=CC=C1)C2=C3C=CC(=O)C(=C3OC4=C2C=CC(=C4Br)O)Br",
"CN(C)C1=C(Cl)C(=O)C2=C(C=CC=C2)C1=O",
"CC1=C(C2=C(C=C1)C(=O)C3=CC=CC=C3C2=O)[N+]([O-])=O",
"CC(=NO)C(C)=NO"]
self.smiList2 = ['OCCC','CCC','C=CC','OC=CC','CC(O)C',
'C=C(O)C','OCCCC','CC(O)CC','C=CCC','CC=CC',
'OC=CCC','CC=C(O)C','OCC=CC','C=C(O)CC',
'C=CC(O)C','C=CCCO',
]
self.list2Acts = [1,0,0,1,1,1,1,1,0,0,1,1,1,1,1,1]
self.list2Obls = [(0,1,2),(1,3),(1,4,5),(1,6,7),(0,8),(0,6,9),(0,1,2,3,10),
(0,1,2,8,11),(1,3,4,5,12),(1,4,5,13),(1,3,6,7,14),(0,1,6,7,9,15)]
ffile = os.path.join(RDConfig.RDDataDir,'FunctionalGroups.txt')
self.catParams = FragmentCatalog.FragCatParams(1,6,ffile)
self.fragCat = FragmentCatalog.FragCatalog(self.catParams)
self.fgen = FragmentCatalog.FragCatGenerator()
def _fillCat(self,smilList):
for smi in self.smiList2:
mol = Chem.MolFromSmiles(smi)
self.fgen.AddFragsFromMol(mol,self.fragCat)
def _testBits(self,fragCat):
fpgen = FragmentCatalog.FragFPGenerator()
obits = [3,2,3,3,2,3,5,5,5,4,5,6]
obls = self.list2Obls
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2),
',',0,-1,0)
i = 0
for mol in suppl:
fp = fpgen.GetFPForMol(mol, fragCat)
if i < len(obits):
smi = Chem.MolToSmiles(mol)
assert fp.GetNumOnBits()==obits[i],'%s: %s'%(smi,str(fp.GetOnBits()))
obl = fp.GetOnBits()
if i < len(obls):
assert tuple(obl)==obls[i],'%s: %s'%(smi,obl)
i+=1
def test1CatGen(self) :
self._fillCat(self.smiList2)
assert self.fragCat.GetNumEntries()==21
assert self.fragCat.GetFPLength()==21
self._testBits(self.fragCat)
def test2CatStringPickle(self):
self._fillCat(self.smiList2)
# test non-binary pickle:
cat2 = cPickle.loads(cPickle.dumps(self.fragCat))
assert cat2.GetNumEntries()==21
assert cat2.GetFPLength()==21
self._testBits(cat2)
# test binary pickle:
cat2 = cPickle.loads(cPickle.dumps(self.fragCat,1))
assert cat2.GetNumEntries()==21
assert cat2.GetFPLength()==21
self._testBits(cat2)
def test3CatFilePickle(self):
with open(os.path.join(RDConfig.RDCodeDir,'Chem',
'test_data','simple_catalog.pkl'),
'rb') as pklFile:
cat = cPickle.load(pklFile, encoding='bytes')
assert cat.GetNumEntries()==21
assert cat.GetFPLength()==21
self._testBits(cat)
def test4CatGuts(self):
self._fillCat(self.smiList2)
assert self.fragCat.GetNumEntries()==21
assert self.fragCat.GetFPLength()==21
#
# FIX: (Issue 162)
# bits like 11 and 15 are questionable here because the underlying
# fragments are symmetrical, so they can generate one of two
# text representations (i.e. there is nothing to distinguish
# between 'CC<-O>CC' and 'CCC<-O>C').
# This ought to eventually be cleaned up.
descrs = [(0,'CC<-O>',1,(34,)),
(1,'CC',1,()),
(2,'CCC<-O>',2,(34,)),
(3,'CCC',2,()),
(4,'C=C',1,()),
(5,'C=CC',2,()),
(6,'C=C<-O>',1,(34,)),
(7,'C<-O>=CC',2,(34,)),
(8,'CC<-O>C',2,(34,)),
(9,'C=C<-O>C',2,(34,)),
(10,'CCCC<-O>',3,(34,)),
(11,'CCC<-O>C',3,(34,)),
(12,'C=CCC',3,()),
(13,'CC=CC',3,()),
(14,'C<-O>=CCC',3,(34,)),
(15,'CC<-O>=CC',3,(34,)),
(16,'C=CC<-O>',2,(34,)),
]
for i in range(len(descrs)):
id,d,order,ids=descrs[i]
descr = self.fragCat.GetBitDescription(id)
assert descr == d,'%d: %s != %s'%(id,descr,d)
assert self.fragCat.GetBitOrder(id)==order
assert tuple(self.fragCat.GetBitFuncGroupIds(id)) == \
ids,'%d: %s != %s'%(id,
str(self.fragCat.GetBitFuncGroupIds(id)),
str(ids))
def _test5MoreComplex(self):
lastIdx = 0
ranges = {}
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList),
',',0,-1,0)
i = 0
for mol in suppl:
nEnt = self.fgen.AddFragsFromMol(mol,self.fragCat)
ranges[i] = range(lastIdx,lastIdx+nEnt)
lastIdx+=nEnt
i+=1
# now make sure that those bits are contained in the signatures:
fpgen = FragmentCatalog.FragFPGenerator()
i = 0
for mol in suppl:
fp = fpgen.GetFPForMol(mol,self.fragCat)
for bit in ranges[i]:
assert fp[bit],'%s: %s'%(Chem.MolToSmiles(mol),str(bit))
i += 1
def test6Builder(self):
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2),
',',0,-1,0)
cat = BuildFragmentCatalog.BuildCatalog(suppl,minPath=1,reportFreq=20)
assert cat.GetNumEntries()==21
assert cat.GetFPLength()==21
self._testBits(cat)
def test7ScoreMolecules(self):
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2),
',',0,-1,0)
cat = BuildFragmentCatalog.BuildCatalog(suppl,minPath=1,reportFreq=20)
assert cat.GetNumEntries()==21
assert cat.GetFPLength()==21
scores,obls = BuildFragmentCatalog.ScoreMolecules(suppl,cat,acts=self.list2Acts,
reportFreq=20)
for i in range(len(self.list2Obls)):
assert tuple(obls[i])==self.list2Obls[i],'%d: %s != %s'%(i,str(obls[i]),
str(self.list2Obls[i]))
scores2 = BuildFragmentCatalog.ScoreFromLists(obls,suppl,cat,acts=self.list2Acts,
reportFreq=20)
for i in range(len(scores)):
assert (scores[i]==scores2[i]).all(),'%d: %s != %s'%(i,str(scores[i]),str(scores2[i]))
def test8MolRanks(self):
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2),
',',0,-1,0)
cat = BuildFragmentCatalog.BuildCatalog(suppl,minPath=1,reportFreq=20)
assert cat.GetNumEntries()==21
assert cat.GetFPLength()==21
# new InfoGain ranking:
bitInfo,fps = BuildFragmentCatalog.CalcGains(suppl,cat,topN=10,acts=self.list2Acts,
reportFreq=20,biasList=(1,))
entry = bitInfo[0]
assert int(entry[0])==0
assert cat.GetBitDescription(int(entry[0]))=='CC<-O>'
assert feq(entry[1],0.4669)
entry = bitInfo[1]
assert int(entry[0]) in (2,6)
txt = cat.GetBitDescription(int(entry[0]))
self.assertTrue( txt in ('CCC<-O>','C=C<-O>'), txt)
assert feq(entry[1],0.1611)
entry = bitInfo[6]
assert int(entry[0])==16
assert cat.GetBitDescription(int(entry[0]))=='C=CC<-O>'
assert feq(entry[1],0.0560)
# standard InfoGain ranking:
bitInfo,fps = BuildFragmentCatalog.CalcGains(suppl,cat,topN=10,acts=self.list2Acts,
reportFreq=20)
entry = bitInfo[0]
assert int(entry[0])==0
assert cat.GetBitDescription(int(entry[0]))=='CC<-O>'
assert feq(entry[1],0.4669)
entry = bitInfo[1]
assert int(entry[0])==5
assert cat.GetBitDescription(int(entry[0]))=='C=CC'
assert feq(entry[1],0.2057)
def test9Issue116(self):
smiList = ['Cc1ccccc1']
suppl = Chem.SmilesMolSupplierFromText('\n'.join(smiList),
',',0,-1,0)
cat = BuildFragmentCatalog.BuildCatalog(suppl,minPath=2,maxPath=2)
assert cat.GetFPLength()==2
assert cat.GetBitDescription(0)=='ccC'
fpgen = FragmentCatalog.FragFPGenerator()
mol = Chem.MolFromSmiles('Cc1ccccc1')
fp = fpgen.GetFPForMol(mol,cat)
assert fp[0]
assert fp[1]
mol = Chem.MolFromSmiles('c1ccccc1-c1ccccc1')
fp = fpgen.GetFPForMol(mol,cat)
assert not fp[0]
assert fp[1]
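# Illustrative sketch (not part of the original tests): build a one-molecule
# fragment catalog and return its fragment fingerprint, mirroring the calls
# exercised in setUp()/_fillCat() above. The default SMILES is arbitrary.
def _exampleFragmentFingerprint(smiles='OCCC'):
  ffile = os.path.join(RDConfig.RDDataDir, 'FunctionalGroups.txt')
  params = FragmentCatalog.FragCatParams(1, 6, ffile)
  cat = FragmentCatalog.FragCatalog(params)
  gen = FragmentCatalog.FragCatGenerator()
  mol = Chem.MolFromSmiles(smiles)
  gen.AddFragsFromMol(mol, cat)
  fpgen = FragmentCatalog.FragFPGenerator()
  return fpgen.GetFPForMol(mol, cat)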
if __name__ == '__main__':
unittest.main()
| AlexanderSavelyev/rdkit | rdkit/Chem/UnitTestCatalog.py | Python | bsd-3-clause | 8,988 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2017 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Perform (or assist with) cleaning operations.
"""
from __future__ import absolute_import, print_function
from bleachbit import _, expanduser, expandvars
from bleachbit.FileUtilities import children_in_directory
from bleachbit.Options import options
from bleachbit import Command, FileUtilities, Memory, Special, GuiBasic
import glob
import logging
import os.path
import re
import sys
import warnings
import subprocess
if 'posix' == os.name:
from bleachbit import Unix
elif 'nt' == os.name:
from bleachbit import Windows
# Suppress GTK warning messages while running in CLI #34
warnings.simplefilter("ignore", Warning)
try:
import gtk
HAVE_GTK = True
except:
HAVE_GTK = False
# a module-level variable for holding cleaners
backends = {}
class Cleaner:
"""Base class for a cleaner"""
def __init__(self):
self.actions = []
self.id = None
self.description = None
self.name = None
self.options = {}
self.running = []
self.warnings = {}
def add_action(self, option_id, action):
"""Register 'action' (instance of class Action) to be executed
for ''option_id'. The actions must implement list_files and
other_cleanup()"""
self.actions += ((option_id, action), )
def add_option(self, option_id, name, description):
"""Register option (such as 'cache')"""
self.options[option_id] = (name, description)
def add_running(self, detection_type, pathname):
"""Add a way to detect this program is currently running"""
self.running += ((detection_type, pathname), )
def auto_hide(self):
"""Return boolean whether it is OK to automatically hide this
cleaner"""
for (option_id, __name) in self.get_options():
try:
for cmd in self.get_commands(option_id):
for dummy in cmd.execute(False):
return False
for ds in self.get_deep_scan(option_id):
if isinstance(ds, dict):
return False
except Exception as e:
logger = logging.getLogger(__name__)
logger.exception('exception in auto_hide(), cleaner=%s, option=%s',
self.name, option_id)
return True
def get_commands(self, option_id):
"""Get list of Command instances for option 'option_id'"""
for action in self.actions:
if option_id == action[0]:
for cmd in action[1].get_commands():
yield cmd
if option_id not in self.options:
raise RuntimeError("Unknown option '%s'" % option_id)
def get_deep_scan(self, option_id):
"""Get dictionary used to build a deep scan"""
for action in self.actions:
if option_id == action[0]:
for ds in action[1].get_deep_scan():
yield ds
if option_id not in self.options:
raise RuntimeError("Unknown option '%s'" % option_id)
def get_description(self):
"""Brief description of the cleaner"""
return self.description
def get_id(self):
"""Return the unique name of this cleaner"""
return self.id
def get_name(self):
"""Return the human name of this cleaner"""
return self.name
def get_option_descriptions(self):
"""Yield the names and descriptions of each option in a 2-tuple"""
if self.options:
for key in sorted(self.options.keys()):
yield (self.options[key][0], self.options[key][1])
def get_options(self):
"""Return user-configurable options in 2-tuple (id, name)"""
if self.options:
for key in sorted(self.options.keys()):
yield (key, self.options[key][0])
def get_warning(self, option_id):
"""Return a warning as string."""
if option_id in self.warnings:
return self.warnings[option_id]
else:
return None
def is_running(self):
"""Return whether the program is currently running"""
resp_cli=""
logger = logging.getLogger(__name__)
for running in self.running:
test = running[0]
pathname = running[1]
if 'exe' == test and 'posix' == os.name:
if Unix.is_running(pathname):
#print "debug: process '%s' is running" % pathname
logger.debug("Debug: process '%s' is running", pathname)
if options.get("close_run"):
if not subprocess.mswindows:
#print "debug: Closing process '%s'" % pathname
if "--preset" in sys.argv:
resp_cli = raw_input("Do you Want BleachBit to Close " + pathname + " y/n : ")
else:
resp = GuiBasic.message_dialog(None,"Do you Want BleachBit to Close " + pathname,gtk.MESSAGE_WARNING, gtk.BUTTONS_YES_NO)
if gtk.RESPONSE_YES == resp or resp_cli.lower() == "y":
                                    # user agreed, so close the process
logger.debug("Debug: Closing process '%s'",pathname)
subprocess.check_output(["killall", "-9", pathname])
if not Unix.is_running(pathname):
logger.debug("Debug: Closing process '%s' successful",pathname)
return False
return True
elif 'exe' == test and 'nt' == os.name:
if Windows.is_process_running(pathname):
#print "debug: process '%s' is running" % pathname
logger.debug("Debug: process '%s' is running", pathname)
if options.get("close_run"):
if subprocess.mswindows:
#print "debug: Closing process '%s'" % pathname
if "--preset" in sys.argv:
resp_cli = raw_input("Do you Want BleachBit to Close " + pathname + " y/n : ")
else:
resp = GuiBasic.message_dialog(None,"Do you Want BleachBit to Close " + pathname,gtk.MESSAGE_WARNING, gtk.BUTTONS_YES_NO)
if gtk.RESPONSE_YES == resp or resp_cli.lower() == "y":
logger.debug("debug: Closing process '%s'",pathname)
subprocess.check_output(["taskkill", "/IM", pathname])
if not Windows.is_process_running(pathname):
logger.debug("debug: Closing process '%s' successful",pathname)
return False
logger.debug("process '%s' is running", pathname)
return True
elif 'pathname' == test:
expanded = expanduser(expandvars(pathname))
for globbed in glob.iglob(expanded):
if os.path.exists(globbed):
logger.debug("file '%s' exists indicating '%s' is running", self.name)
return True
else:
raise RuntimeError(
"Unknown running-detection test '%s'" % test)
return False
def is_usable(self):
"""Return whether the cleaner is usable (has actions)"""
return len(self.actions) > 0
def set_warning(self, option_id, description):
"""Set a warning to be displayed when option is selected interactively"""
self.warnings[option_id] = description
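# Illustrative sketch (not part of the original module): the smallest useful
# Cleaner subclass. It registers one option and yields Delete commands for a
# hypothetical per-user cache directory; the path is an assumption chosen only
# for demonstration.
class _ExampleCleaner(Cleaner):
    """Demonstration cleaner for a fictional application."""
    def __init__(self):
        Cleaner.__init__(self)
        self.id = 'example'
        self.name = 'Example'
        self.description = 'Demonstration cleaner'
        self.add_option('cache', 'Cache', 'Delete the example cache')
    def get_commands(self, option_id):
        if 'cache' == option_id:
            dirname = expanduser('~/.cache/example-app')
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)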
class Firefox(Cleaner):
"""Mozilla Firefox"""
def __init__(self):
Cleaner.__init__(self)
self.add_option('backup', _('Backup files'), _(
'Delete the backup files'))
self.add_option('cache', _('Cache'), _(
'Delete the web cache, which reduces time to display revisited pages'))
self.add_option('cookies', _('Cookies'), _(
'Delete cookies, which contain information such as web site preferences, authentication, and tracking identification'))
self.add_option(
'crash_reports', _('Crash reports'), _('Delete the files'))
# TRANSLATORS: DOM = Document Object Model.
self.add_option('dom', _('DOM Storage'), _('Delete HTML5 cookies'))
self.add_option('download_history', _(
'Download history'), _('List of files downloaded'))
self.add_option('forms', _('Form history'), _(
'A history of forms entered in web sites and in the Search bar'))
self.add_option('session_restore', _('Session restore'), _(
'Loads the initial session after the browser closes or crashes'))
self.add_option('site_preferences', _(
'Site preferences'), _('Settings for individual sites'))
self.add_option('passwords', _('Passwords'), _(
'A database of usernames and passwords as well as a list of sites that should not store passwords'))
self.set_warning(
'passwords', _('This option will delete your saved passwords.'))
self.add_option(
'url_history', _('URL history'), _('List of visited web pages'))
self.add_option('vacuum', _('Vacuum'), _(
'Clean database fragmentation to reduce space and improve speed without removing any data'))
if 'posix' == os.name:
self.profile_dir = "~/.mozilla/firefox*/*.default*/"
self.add_running('exe', 'firefox')
self.add_running('exe', 'firefox-bin')
self.add_running('pathname', self.profile_dir + 'lock')
elif 'nt' == os.name:
self.profile_dir = "$USERPROFILE\\Application Data\\Mozilla\\Firefox\\Profiles\\*.default*\\"
self.add_running('exe', 'firefox.exe')
self.description = _("Web browser")
self.id = 'firefox'
self.name = "Firefox"
def get_commands(self, option_id):
files = []
# backup files
if 'backup' == option_id:
bookmark_bu_dir = os.path.join(self.profile_dir, 'bookmarkbackups')
files += FileUtilities.expand_glob_join(bookmark_bu_dir, "*.json")
files += FileUtilities.expand_glob_join(
bookmark_bu_dir, "*.jsonlz4")
# browser cache
cache_base = None
if 'posix' == os.name:
cache_base = self.profile_dir
elif 'nt' == os.name:
cache_base = "$localappdata\\Mozilla\\Firefox\\Profiles\\*.default*"
if 'cache' == option_id:
dirs = FileUtilities.expand_glob_join(cache_base, "Cache*")
dirs += FileUtilities.expand_glob_join(cache_base, "OfflineCache")
if 'nt' == os.name:
dirs += FileUtilities.expand_glob_join(
cache_base, "jumpListCache") # Windows 8
if 'posix' == os.name:
# This path is whitelisted under the System - Cache cleaner,
# so it can be cleaned here.
dirs += [expanduser('~/.cache/mozilla')]
for dirname in dirs:
for filename in children_in_directory(dirname, False):
yield Command.Delete(filename)
# Necko Predictive Network Actions
# https://wiki.mozilla.org/Privacy/Reviews/Necko
files += FileUtilities.expand_glob_join(
self.profile_dir, "netpredictions.sqlite")
# cookies
if 'cookies' == option_id:
files += FileUtilities.expand_glob_join(
self.profile_dir, "cookies.txt")
files += FileUtilities.expand_glob_join(
self.profile_dir, "cookies.sqlite")
# crash reports
if 'posix' == os.name:
crashdir = expanduser("~/.mozilla/firefox/Crash Reports")
if 'nt' == os.name:
crashdir = expandvars(
"$USERPROFILE\\Application Data\\Mozilla\\Firefox\\Crash Reports")
if 'crash_reports' == option_id:
for filename in children_in_directory(crashdir, False):
files += [filename]
files += FileUtilities.expand_glob_join(
self.profile_dir, "minidumps/*.dmp")
# DOM storage
if 'dom' == option_id:
files += FileUtilities.expand_glob_join(
self.profile_dir, "webappsstore.sqlite")
# download history
if 'download_history' == option_id:
# Firefox version 1
files += FileUtilities.expand_glob_join(
self.profile_dir, "downloads.rdf")
# Firefox version 3
files += FileUtilities.expand_glob_join(
self.profile_dir, "downloads.sqlite")
# forms
if 'forms' == option_id:
files += FileUtilities.expand_glob_join(
self.profile_dir, "formhistory.dat")
files += FileUtilities.expand_glob_join(
self.profile_dir, "formhistory.sqlite")
# passwords
if 'passwords' == option_id:
# http://kb.mozillazine.org/Password_Manager
files += FileUtilities.expand_glob_join(
self.profile_dir, "signons.txt")
files += FileUtilities.expand_glob_join(
self.profile_dir, "signons[2-3].txt")
files += FileUtilities.expand_glob_join(
self.profile_dir, "signons.sqlite")
files += FileUtilities.expand_glob_join(
self.profile_dir, "logins.json")
# session restore
if 'session_restore' == option_id:
# Names include sessionstore.js, sessionstore.bak,
# sessionstore.bak-20140715214327, sessionstore-1.js
files += FileUtilities.expand_glob_join(
self.profile_dir, "sessionstore*.js")
files += FileUtilities.expand_glob_join(
self.profile_dir, "sessionstore.bak*")
ss_bu_dir = os.path.join(self.profile_dir, 'sessionstore-backups')
files += FileUtilities.expand_glob_join(
ss_bu_dir, 'previous.js')
files += FileUtilities.expand_glob_join(
ss_bu_dir, 'upgrade.js-20*')
files += FileUtilities.expand_glob_join(
ss_bu_dir, 'recovery.js')
files += FileUtilities.expand_glob_join(
ss_bu_dir, 'recovery.bak')
# site-specific preferences
if 'site_preferences' == option_id:
files += FileUtilities.expand_glob_join(
self.profile_dir, "content-prefs.sqlite")
# URL history
if 'url_history' == option_id:
# Firefox version 1
files += FileUtilities.expand_glob_join(
self.profile_dir, "history.dat")
# Firefox 21 on Windows
if 'nt' == os.name:
files += FileUtilities.expand_glob_join(
cache_base, "thumbnails/*.png")
# see also function other_cleanup()
# finish
for filename in files:
yield Command.Delete(filename)
# URL history
if 'url_history' == option_id:
for path in FileUtilities.expand_glob_join(self.profile_dir, "places.sqlite"):
yield Command.Function(path,
Special.delete_mozilla_url_history,
_('Delete the usage history'))
# vacuum
if 'vacuum' == option_id:
paths = []
paths += FileUtilities.expand_glob_join(
self.profile_dir, "*.sqlite")
if not cache_base == self.profile_dir:
paths += FileUtilities.expand_glob_join(cache_base, "*.sqlite")
for path in paths:
yield Command.Function(path,
FileUtilities.vacuum_sqlite3, _("Vacuum"))
class OpenOfficeOrg(Cleaner):
"""Delete OpenOffice.org cache"""
def __init__(self):
Cleaner.__init__(self)
self.options = {}
self.add_option('cache', _('Cache'), _('Delete the cache'))
self.add_option('recent_documents', _('Most recently used'), _(
"Delete the list of recently used documents"))
self.id = 'openofficeorg'
self.name = 'OpenOffice.org'
self.description = _("Office suite")
# reference: http://katana.oooninja.com/w/editions_of_openoffice.org
if 'posix' == os.name:
self.prefixes = ["~/.ooo-2.0", "~/.openoffice.org2",
"~/.openoffice.org2.0", "~/.openoffice.org/3"]
self.prefixes += ["~/.ooo-dev3"]
if 'nt' == os.name:
self.prefixes = [
"$APPDATA\\OpenOffice.org\\3", "$APPDATA\\OpenOffice.org2"]
def get_commands(self, option_id):
# paths for which to run expand_glob_join
egj = []
if 'recent_documents' == option_id:
egj.append(
"user/registry/data/org/openoffice/Office/Histories.xcu")
egj.append(
"user/registry/cache/org.openoffice.Office.Histories.dat")
if 'recent_documents' == option_id and not 'cache' == option_id:
egj.append("user/registry/cache/org.openoffice.Office.Common.dat")
for egj_ in egj:
for prefix in self.prefixes:
for path in FileUtilities.expand_glob_join(prefix, egj_):
if 'nt' == os.name:
path = os.path.normpath(path)
if os.path.lexists(path):
yield Command.Delete(path)
if 'cache' == option_id:
dirs = []
for prefix in self.prefixes:
dirs += FileUtilities.expand_glob_join(
prefix, "user/registry/cache/")
for dirname in dirs:
if 'nt' == os.name:
dirname = os.path.normpath(dirname)
for filename in children_in_directory(dirname, False):
yield Command.Delete(filename)
if 'recent_documents' == option_id:
for prefix in self.prefixes:
for path in FileUtilities.expand_glob_join(prefix, "user/registry/data/org/openoffice/Office/Common.xcu"):
if os.path.lexists(path):
yield Command.Function(path,
Special.delete_ooo_history,
_('Delete the usage history'))
# ~/.openoffice.org/3/user/registrymodifications.xcu
# Apache OpenOffice.org 3.4.1 from openoffice.org on Ubuntu 13.04
# %AppData%\OpenOffice.org\3\user\registrymodifications.xcu
# Apache OpenOffice.org 3.4.1 from openoffice.org on Windows XP
for path in FileUtilities.expand_glob_join(prefix, "user/registrymodifications.xcu"):
if os.path.lexists(path):
yield Command.Function(path,
Special.delete_office_registrymodifications,
_('Delete the usage history'))
class System(Cleaner):
"""Clean the system in general"""
def __init__(self):
Cleaner.__init__(self)
#
# options for Linux and BSD
#
if 'posix' == os.name:
# TRANSLATORS: desktop entries are .desktop files in Linux that
# make up the application menu (the menu that shows BleachBit,
# Firefox, and others. The .desktop files also associate file
# types, so clicking on an .html file in Nautilus brings up
# Firefox.
# More information:
# http://standards.freedesktop.org/menu-spec/latest/index.html#introduction
self.add_option('desktop_entry', _('Broken desktop files'), _(
'Delete broken application menu entries and file associations'))
self.add_option('cache', _('Cache'), _('Delete the cache'))
# TRANSLATORS: Localizations are files supporting specific
# languages, so applications appear in Spanish, etc.
self.add_option('localizations', _('Localizations'), _(
'Delete files for unwanted languages'))
self.set_warning(
'localizations', _("Configure this option in the preferences."))
# TRANSLATORS: 'Rotated logs' refers to old system log files.
# Linux systems often have a scheduled job to rotate the logs
# which means compress all except the newest log and then delete
# the oldest log. You could translate this 'old logs.'
self.add_option(
'rotated_logs', _('Rotated logs'), _('Delete old system logs'))
self.add_option('recent_documents', _('Recent documents list'), _(
'Delete the list of recently used documents'))
self.add_option('trash', _('Trash'), _('Empty the trash'))
#
# options just for Linux
#
if sys.platform.startswith('linux'):
self.add_option('memory', _('Memory'),
# TRANSLATORS: 'free' means 'unallocated'
_('Wipe the swap and free memory'))
self.set_warning(
'memory', _('This option is experimental and may cause system problems.'))
#
# options just for Microsoft Windows
#
if 'nt' == os.name:
self.add_option('logs', _('Logs'), _('Delete the logs'))
self.add_option(
'memory_dump', _('Memory dump'), _('Delete the file memory.dmp'))
self.add_option('muicache', 'MUICache', _('Delete the cache'))
# TRANSLATORS: Prefetch is Microsoft Windows jargon.
self.add_option('prefetch', _('Prefetch'), _('Delete the cache'))
self.add_option(
'recycle_bin', _('Recycle bin'), _('Empty the recycle bin'))
# TRANSLATORS: 'Update' is a noun, and 'Update uninstallers' is an option to delete
# the uninstallers for software updates.
self.add_option('updates', _('Update uninstallers'), _(
'Delete uninstallers for Microsoft updates including hotfixes, service packs, and Internet Explorer updates'))
#
# options for GTK+
#
if HAVE_GTK:
self.add_option('clipboard', _('Clipboard'), _(
'The desktop environment\'s clipboard used for copy and paste operations'))
#
# options common to all platforms
#
# TRANSLATORS: "Custom" is an option allowing the user to specify which
# files and folders will be erased.
self.add_option('custom', _('Custom'), _(
'Delete user-specified files and folders'))
# TRANSLATORS: 'free' means 'unallocated'
self.add_option('free_disk_space', _('Free disk space'),
# TRANSLATORS: 'free' means 'unallocated'
_('Overwrite free disk space to hide deleted files'))
self.set_warning('free_disk_space', _('This option is very slow.'))
self.add_option(
'tmp', _('Temporary files'), _('Delete the temporary files'))
self.description = _("The system in general")
self.id = 'system'
self.name = _("System")
def get_commands(self, option_id):
# This variable will collect fully expanded file names, and
# at the end of this function, they will be checked they exist
# and processed through Command.Delete().
files = []
# cache
if 'posix' == os.name and 'cache' == option_id:
dirname = expanduser("~/.cache/")
for filename in children_in_directory(dirname, True):
if self.whitelisted(filename):
continue
files += [filename]
# custom
if 'custom' == option_id:
for (c_type, c_path) in options.get_custom_paths():
if 'file' == c_type:
files += [c_path]
elif 'folder' == c_type:
files += [c_path]
for path in children_in_directory(c_path, True):
files += [path]
else:
raise RuntimeError(
'custom folder has invalid type %s' % c_type)
# menu
menu_dirs = ['~/.local/share/applications',
'~/.config/autostart',
'~/.gnome/apps/',
'~/.gnome2/panel2.d/default/launchers',
'~/.gnome2/vfolders/applications/',
'~/.kde/share/apps/RecentDocuments/',
'~/.kde/share/mimelnk',
'~/.kde/share/mimelnk/application/ram.desktop',
'~/.kde2/share/mimelnk/application/',
'~/.kde2/share/applnk']
if 'posix' == os.name and 'desktop_entry' == option_id:
for dirname in menu_dirs:
for filename in [fn for fn in children_in_directory(dirname, False)
if fn.endswith('.desktop')]:
if Unix.is_broken_xdg_desktop(filename):
yield Command.Delete(filename)
# unwanted locales
if 'posix' == os.name and 'localizations' == option_id:
for path in Unix.locales.localization_paths(locales_to_keep=options.get_languages()):
if os.path.isdir(path):
for f in FileUtilities.children_in_directory(path, True):
yield Command.Delete(f)
yield Command.Delete(path)
# Windows logs
if 'nt' == os.name and 'logs' == option_id:
paths = (
'$ALLUSERSPROFILE\\Application Data\\Microsoft\\Dr Watson\\*.log',
'$ALLUSERSPROFILE\\Application Data\\Microsoft\\Dr Watson\\user.dmp',
'$LocalAppData\\Microsoft\\Windows\\WER\\ReportArchive\\*\\*',
                '$LocalAppData\\Microsoft\\Windows\\WER\\ReportQueue\\*\\*',
'$programdata\\Microsoft\\Windows\\WER\\ReportArchive\\*\\*',
'$programdata\\Microsoft\\Windows\\WER\\ReportQueue\\*\\*',
'$localappdata\\Microsoft\\Internet Explorer\\brndlog.bak',
'$localappdata\\Microsoft\\Internet Explorer\\brndlog.txt',
'$windir\\*.log',
'$windir\\imsins.BAK',
'$windir\\OEWABLog.txt',
'$windir\\SchedLgU.txt',
'$windir\\ntbtlog.txt',
'$windir\\setuplog.txt',
'$windir\\REGLOCS.OLD',
'$windir\\Debug\\*.log',
'$windir\\Debug\\Setup\\UpdSh.log',
'$windir\\Debug\\UserMode\\*.log',
'$windir\\Debug\\UserMode\\ChkAcc.bak',
'$windir\\Debug\\UserMode\\userenv.bak',
                '$windir\\Microsoft.NET\\Framework\\*\\*.log',
'$windir\\pchealth\\helpctr\\Logs\\hcupdate.log',
'$windir\\security\\logs\\*.log',
'$windir\\security\\logs\\*.old',
'$windir\\SoftwareDistribution\\*.log',
'$windir\\SoftwareDistribution\\DataStore\\Logs\\*',
'$windir\\system32\\TZLog.log',
'$windir\\system32\\config\\systemprofile\\Application Data\\Microsoft\\Internet Explorer\\brndlog.bak',
'$windir\\system32\\config\\systemprofile\\Application Data\\Microsoft\\Internet Explorer\\brndlog.txt',
'$windir\\system32\\LogFiles\\AIT\\AitEventLog.etl.???',
'$windir\\system32\\LogFiles\\Firewall\\pfirewall.log*',
'$windir\\system32\\LogFiles\\Scm\\SCM.EVM*',
'$windir\\system32\\LogFiles\\WMI\\Terminal*.etl',
                '$windir\\system32\\LogFiles\\WMI\\RTBackup\\EtwRT.*etl',
'$windir\\system32\\wbem\\Logs\\*.lo_',
'$windir\\system32\\wbem\\Logs\\*.log', )
for path in paths:
expanded = expandvars(path)
for globbed in glob.iglob(expanded):
files += [globbed]
# memory
if sys.platform.startswith('linux') and 'memory' == option_id:
yield Command.Function(None, Memory.wipe_memory, _('Memory'))
# memory dump
# how to manually create this file
# http://www.pctools.com/guides/registry/detail/856/
if 'nt' == os.name and 'memory_dump' == option_id:
fname = expandvars('$windir\\memory.dmp')
if os.path.exists(fname):
files += [fname]
for fname in glob.iglob(expandvars('$windir\\Minidump\\*.dmp')):
files += [fname]
# most recently used documents list
if 'posix' == os.name and 'recent_documents' == option_id:
files += [expanduser("~/.recently-used")]
# GNOME 2.26 (as seen on Ubuntu 9.04) will retain the list
# in memory if it is simply deleted, so it must be shredded
# (or at least truncated).
#
# GNOME 2.28.1 (Ubuntu 9.10) and 2.30 (10.04) do not re-read
# the file after truncation, but do re-read it after
# shredding.
#
# https://bugzilla.gnome.org/show_bug.cgi?id=591404
def gtk_purge_items():
"""Purge GTK items"""
gtk.RecentManager().purge_items()
yield 0
for pathname in ["~/.recently-used.xbel", "~/.local/share/recently-used.xbel"]:
pathname = expanduser(pathname)
if os.path.lexists(pathname):
yield Command.Shred(pathname)
if HAVE_GTK:
# Use the Function to skip when in preview mode
yield Command.Function(None, gtk_purge_items, _('Recent documents list'))
if 'posix' == os.name and 'rotated_logs' == option_id:
for path in Unix.rotated_logs():
yield Command.Delete(path)
# temporary files
if 'posix' == os.name and 'tmp' == option_id:
dirnames = ['/tmp', '/var/tmp']
for dirname in dirnames:
for path in children_in_directory(dirname, True):
is_open = FileUtilities.openfiles.is_open(path)
ok = not is_open and os.path.isfile(path) and \
not os.path.islink(path) and \
FileUtilities.ego_owner(path) and \
not self.whitelisted(path)
if ok:
yield Command.Delete(path)
# temporary files
if 'nt' == os.name and 'tmp' == option_id:
dirname = expandvars(
"$USERPROFILE\\Local Settings\\Temp\\")
# whitelist the folder %TEMP%\Low but not its contents
# https://bugs.launchpad.net/bleachbit/+bug/1421726
low = os.path.join(dirname, 'low').lower()
for filename in children_in_directory(dirname, True):
if not low == filename.lower():
yield Command.Delete(filename)
dirname = expandvars("$windir\\temp\\")
for filename in children_in_directory(dirname, True):
yield Command.Delete(filename)
# trash
if 'posix' == os.name and 'trash' == option_id:
dirname = expanduser("~/.Trash")
for filename in children_in_directory(dirname, False):
yield Command.Delete(filename)
# fixme http://www.ramendik.ru/docs/trashspec.html
# http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html
# ~/.local/share/Trash
# * GNOME 2.22, Fedora 9
# * KDE 4.1.3, Ubuntu 8.10
dirname = expanduser("~/.local/share/Trash/files")
for filename in children_in_directory(dirname, True):
yield Command.Delete(filename)
dirname = expanduser("~/.local/share/Trash/info")
for filename in children_in_directory(dirname, True):
yield Command.Delete(filename)
dirname = expanduser("~/.local/share/Trash/expunged")
# desrt@irc.gimpnet.org tells me that the trash
        # backend puts files in here temporarily, but in some situations
# the files are stuck.
for filename in children_in_directory(dirname, True):
yield Command.Delete(filename)
# clipboard
if HAVE_GTK and 'clipboard' == option_id:
def clear_clipboard():
gtk.gdk.threads_enter()
clipboard = gtk.clipboard_get()
clipboard.set_text("")
gtk.gdk.threads_leave()
return 0
yield Command.Function(None, clear_clipboard, _('Clipboard'))
# overwrite free space
shred_drives = options.get_list('shred_drives')
if 'free_disk_space' == option_id and shred_drives:
for pathname in shred_drives:
# TRANSLATORS: 'Free' means 'unallocated.'
# %s expands to a path such as C:\ or /tmp/
display = _("Overwrite free disk space %s") % pathname
def wipe_path_func():
for ret in FileUtilities.wipe_path(pathname, idle=True):
# Yield control to GTK idle because this process
# is very slow. Also display progress.
yield ret
yield 0
yield Command.Function(None, wipe_path_func, display)
# MUICache
if 'nt' == os.name and 'muicache' == option_id:
keys = (
'HKCU\\Software\\Microsoft\\Windows\\ShellNoRoam\\MUICache',
'HKCU\\Software\\Classes\\Local Settings\\Software\\Microsoft\\Windows\\Shell\\MuiCache')
for key in keys:
yield Command.Winreg(key, None)
# prefetch
if 'nt' == os.name and 'prefetch' == option_id:
for path in glob.iglob(expandvars('$windir\\Prefetch\\*.pf')):
yield Command.Delete(path)
# recycle bin
if 'nt' == os.name and 'recycle_bin' == option_id:
# This method allows shredding
recycled_any = False
for path in Windows.get_recycle_bin():
recycled_any = True
yield Command.Delete(path)
# If there were any files deleted, Windows XP will show the
# wrong icon for the recycle bin indicating it is not empty.
# The icon will be incorrect until logging in to Windows again
# or until it is emptied using the Windows API call for emptying
# the recycle bin.
# Windows 10 refreshes the recycle bin icon when the user
# opens the recycle bin folder.
# This is a hack to refresh the icon.
def empty_recycle_bin_func():
import tempfile
tmpdir = tempfile.mkdtemp()
Windows.move_to_recycle_bin(tmpdir)
try:
Windows.empty_recycle_bin(None, True)
except:
logging.getLogger(__name__).info('error in empty_recycle_bin()', exc_info=True)
yield 0
# Using the Function Command prevents emptying the recycle bin
# when in preview mode.
if recycled_any:
yield Command.Function(None, empty_recycle_bin_func, _('Empty the recycle bin'))
# Windows Updates
if 'nt' == os.name and 'updates' == option_id:
for wu in Windows.delete_updates():
yield wu
# return queued files
for filename in files:
if os.path.lexists(filename):
yield Command.Delete(filename)
def whitelisted(self, pathname):
"""Return boolean whether file is whitelisted"""
regexes = [
'^/tmp/.X0-lock$',
'^/tmp/.truecrypt_aux_mnt.*/(control|volume)$',
'^/tmp/.vbox-[^/]+-ipc/lock$',
'^/tmp/.wine-[0-9]+/server-.*/lock$',
'^/tmp/gconfd-[^/]+/lock/ior$',
'^/tmp/fsa/', # fsarchiver
'^/tmp/kde-',
'^/tmp/kdesudo-',
'^/tmp/ksocket-',
'^/tmp/orbit-[^/]+/bonobo-activation-register[a-z0-9-]*.lock$',
'^/tmp/orbit-[^/]+/bonobo-activation-server-[a-z0-9-]*ior$',
'^/tmp/pulse-[^/]+/pid$',
'^/var/tmp/kdecache-',
'^' + expanduser('~/.cache/wallpaper/'),
# Clean Firefox cache from Firefox cleaner (LP#1295826)
'^' + expanduser('~/.cache/mozilla'),
# Clean Google Chrome cache from Google Chrome cleaner (LP#656104)
'^' + expanduser('~/.cache/google-chrome'),
'^' + expanduser('~/.cache/gnome-control-center/'),
# iBus Pinyin
# https://bugs.launchpad.net/bleachbit/+bug/1538919
'^' + expanduser('~/.cache/ibus/')]
for regex in regexes:
if re.match(regex, pathname) is not None:
return True
return False
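# Illustrative sketch (not part of the original module): the whitelist above
# protects live lock files from the 'cache'/'tmp' options. The sample paths
# below are assumptions used only to show the expected outcomes.
def _example_whitelist_check():
    system = System()
    return (
        system.whitelisted('/tmp/.X0-lock'),        # True: active X lock file is protected
        system.whitelisted('/tmp/stale-download'),  # False: eligible for cleaning
    )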
def register_cleaners():
"""Register all known cleaners: system, CleanerML, and Winapp2"""
global backends
# wipe out any registrations
# Because this is a global variable, cannot use backends = {}
backends.clear()
# initialize "hard coded" (non-CleanerML) backends
backends["firefox"] = Firefox()
backends["openofficeorg"] = OpenOfficeOrg()
backends["system"] = System()
# register CleanerML cleaners
from bleachbit import CleanerML
CleanerML.load_cleaners()
# register Winapp2.ini cleaners
if 'nt' == os.name:
from bleachbit import Winapp
Winapp.load_cleaners()
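# Illustrative sketch (not part of the original module): the usual startup
# sequence is to register every cleaner once and then look cleaners up by id
# in the module-level `backends` dictionary.
def _example_lookup_system_cleaner():
    register_cleaners()
    return backends.get('system')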
def create_simple_cleaner(paths):
"""Shred arbitrary files (used in CLI and GUI)"""
cleaner = Cleaner()
cleaner.add_option(option_id='files', name='', description='')
cleaner.name = _("System") # shows up in progress bar
from bleachbit import Action
class CustomFileAction(Action.ActionProvider):
action_key = '__customfileaction'
def get_commands(self):
for path in paths:
if not isinstance(path, (str, unicode)):
raise RuntimeError(
'expected path as string but got %s' % str(path))
if not os.path.isabs(path):
path = os.path.abspath(path)
if os.path.isdir(path):
for child in children_in_directory(path, True):
yield Command.Shred(child)
yield Command.Shred(path)
else:
yield Command.Shred(path)
provider = CustomFileAction(None)
cleaner.add_action('files', provider)
return cleaner
def create_wipe_cleaner(path):
"""Wipe free disk space of arbitrary paths (used in GUI)"""
cleaner = Cleaner()
cleaner.add_option(
option_id='free_disk_space', name='', description='')
cleaner.name = ''
# create a temporary cleaner object
display = _("Overwrite free disk space %s") % path
def wipe_path_func():
for ret in FileUtilities.wipe_path(path, idle=True):
yield ret
yield 0
from bleachbit import Action
class CustomWipeAction(Action.ActionProvider):
action_key = '__customwipeaction'
def get_commands(self):
yield Command.Function(None, wipe_path_func, display)
provider = CustomWipeAction(None)
cleaner.add_action('free_disk_space', provider)
return cleaner
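# Illustrative sketch (not part of the original module): preview what
# create_simple_cleaner() would shred without deleting anything, mirroring the
# cmd.execute(False) preview convention used in Cleaner.auto_hide() above.
def _example_preview_shred(paths):
    cleaner = create_simple_cleaner(paths)
    for cmd in cleaner.get_commands('files'):
        for result in cmd.execute(False):
            yield result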
| brahmastra2016/bleachbit | bleachbit/Cleaner.py | Python | gpl-3.0 | 41,217 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import tf_export
# The default learning rates are a historical artifact of the initial
# implementation.
_DNN_LEARNING_RATE = 0.001
_LINEAR_LEARNING_RATE = 0.005
def _check_no_sync_replicas_optimizer(optimizer):
if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
raise ValueError(
'SyncReplicasOptimizer does not support multi optimizers case. '
'Therefore, it is not supported in DNNLinearCombined model. '
'If you want to use this optimizer, please use either DNN or Linear '
'model.')
def _linear_learning_rate(num_linear_feature_columns):
"""Returns the default learning rate of the linear model.
The calculation is a historical artifact of this initial implementation, but
has proven a reasonable choice.
Args:
num_linear_feature_columns: The number of feature columns of the linear
model.
Returns:
A float.
"""
default_learning_rate = 1. / math.sqrt(num_linear_feature_columns)
return min(_LINEAR_LEARNING_RATE, default_learning_rate)
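# Worked example (illustrative): with 100 linear feature columns the default is
# min(0.005, 1/sqrt(100)) = min(0.005, 0.1) = 0.005, while with 100,000 columns
# it shrinks to min(0.005, 1/sqrt(100000)) ~= 0.0032.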
def _add_layer_summary(value, tag):
summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))
summary.histogram('%s/activation' % tag, value)
def _dnn_linear_combined_model_fn(features,
labels,
mode,
head,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
input_layer_partitioner=None,
config=None):
"""Deep Neural Net and Linear combined model_fn.
Args:
features: dict of `Tensor`.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
`int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `Head` instance.
linear_feature_columns: An iterable containing all the feature columns used
by the Linear model.
linear_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the Linear model. Defaults to the Ftrl
optimizer.
dnn_feature_columns: An iterable containing all the feature columns used by
the DNN model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN model. Defaults to the Adagrad
optimizer.
dnn_hidden_units: List of hidden units per DNN layer.
dnn_activation_fn: Activation function applied to each DNN layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability we will drop out a given DNN
coordinate.
input_layer_partitioner: Partitioner for input layer.
config: `RunConfig` object to configure the runtime settings.
Returns:
An `EstimatorSpec` instance.
Raises:
ValueError: If both `linear_feature_columns` and `dnn_features_columns`
are empty at the same time, or `input_layer_partitioner` is missing,
or features has the wrong type.
"""
if not isinstance(features, dict):
raise ValueError('features should be a dictionary of `Tensor`s. '
'Given type: {}'.format(type(features)))
if not linear_feature_columns and not dnn_feature_columns:
raise ValueError(
'Either linear_feature_columns or dnn_feature_columns must be defined.')
num_ps_replicas = config.num_ps_replicas if config else 0
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
# Build DNN Logits.
dnn_parent_scope = 'dnn'
if not dnn_feature_columns:
dnn_logits = None
else:
dnn_optimizer = optimizers.get_optimizer_instance(
dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)
_check_no_sync_replicas_optimizer(dnn_optimizer)
if not dnn_hidden_units:
raise ValueError(
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified.')
dnn_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas))
with variable_scope.variable_scope(
dnn_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=dnn_partitioner):
dnn_logit_fn = dnn._dnn_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
hidden_units=dnn_hidden_units,
feature_columns=dnn_feature_columns,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner)
dnn_logits = dnn_logit_fn(features=features, mode=mode)
linear_parent_scope = 'linear'
if not linear_feature_columns:
linear_logits = None
else:
linear_optimizer = optimizers.get_optimizer_instance(
linear_optimizer,
learning_rate=_linear_learning_rate(len(linear_feature_columns)))
_check_no_sync_replicas_optimizer(linear_optimizer)
with variable_scope.variable_scope(
linear_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner) as scope:
logit_fn = linear._linear_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
feature_columns=linear_feature_columns)
linear_logits = logit_fn(features=features)
_add_layer_summary(linear_logits, scope.name)
# Combine logits and build full model.
if dnn_logits is not None and linear_logits is not None:
logits = dnn_logits + linear_logits
elif dnn_logits is not None:
logits = dnn_logits
else:
logits = linear_logits
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
train_ops = []
global_step = training_util.get_global_step()
if dnn_logits is not None:
train_ops.append(
dnn_optimizer.minimize(
loss,
var_list=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES,
scope=dnn_parent_scope)))
if linear_logits is not None:
train_ops.append(
linear_optimizer.minimize(
loss,
var_list=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES,
scope=linear_parent_scope)))
train_op = control_flow_ops.group(*train_ops)
with ops.control_dependencies([train_op]):
return distribute_lib.increment_var(global_step)
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
@tf_export('estimator.DNNLinearCombinedClassifier')
class DNNLinearCombinedClassifier(estimator.Estimator):
"""An estimator for TensorFlow Linear and DNN joined classification models.
Note: This estimator is also known as wide-n-deep.
Example:
```python
numeric_feature = numeric_column(...)
categorical_column_a = categorical_column_with_hash_bucket(...)
categorical_column_b = categorical_column_with_hash_bucket(...)
categorical_feature_a_x_categorical_feature_b = crossed_column(...)
categorical_feature_a_emb = embedding_column(
categorical_column=categorical_feature_a, ...)
categorical_feature_b_emb = embedding_column(
categorical_id_column=categorical_feature_b, ...)
estimator = DNNLinearCombinedClassifier(
# wide settings
linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[
categorical_feature_a_emb, categorical_feature_b_emb,
numeric_feature],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
# warm-start settings
warm_start_from="/path/to/checkpoint/dir")
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
# It is same for FtrlOptimizer.
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using softmax cross entropy.
@compatibility(eager)
Estimators are not compatible with eager execution.
@end_compatibility
"""
def __init__(self,
model_dir=None,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
input_layer_partitioner=None,
config=None,
warm_start_from=None,
loss_reduction=losses.Reduction.SUM):
"""Initializes a DNNLinearCombinedClassifier instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Defaults to FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Defaults to Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
n_classes: Number of label classes. Defaults to 2, namely binary
classification. Must be > 1.
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining the feature column that
        represents weights. It is used to down-weight or boost examples during
        training, and is multiplied by the loss of the example. If it is a
        string, it is used as a key to fetch the weight tensor from `features`.
        If it is a `_NumericColumn`, the raw tensor is fetched by key
        `weight_column.key`, then `weight_column.normalizer_fn` is applied to
        it to obtain the weight tensor.
      label_vocabulary: A list of strings representing possible label values.
        If given, labels must be of string type and take values from
        `label_vocabulary`. If it is not given, labels must already be encoded
        as an integer or float within [0, 1] for `n_classes=2`, or as integer
        values in {0, 1, ..., n_classes-1} for `n_classes` > 2. An error is
        raised if no vocabulary is provided and the labels are strings.
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
warm_start_from: A string filepath to a checkpoint to warm-start from, or
a `WarmStartSettings` object to fully configure warm-starting. If the
string filepath is provided instead of a `WarmStartSettings`, then all
weights are warm-started, and it is assumed that vocabularies and Tensor
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
        empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = (
list(linear_feature_columns) + list(dnn_feature_columns))
if not self._feature_columns:
raise ValueError('Either linear_feature_columns or dnn_feature_columns '
'must be defined.')
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
def _model_fn(features, labels, mode, config):
"""Call the _dnn_linear_combined_model_fn."""
return _dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNLinearCombinedClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
warm_start_from=warm_start_from)
@tf_export('estimator.DNNLinearCombinedRegressor')
class DNNLinearCombinedRegressor(estimator.Estimator):
"""An estimator for TensorFlow Linear and DNN joined models for regression.
Note: This estimator is also known as wide-n-deep.
Example:
```python
numeric_feature = numeric_column(...)
  categorical_feature_a = categorical_column_with_hash_bucket(...)
  categorical_feature_b = categorical_column_with_hash_bucket(...)
categorical_feature_a_x_categorical_feature_b = crossed_column(...)
categorical_feature_a_emb = embedding_column(
categorical_column=categorical_feature_a, ...)
categorical_feature_b_emb = embedding_column(
categorical_column=categorical_feature_b, ...)
estimator = DNNLinearCombinedRegressor(
# wide settings
linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[
categorical_feature_a_emb, categorical_feature_b_emb,
numeric_feature],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
# warm-start settings
warm_start_from="/path/to/checkpoint/dir")
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
  # It is the same for FtrlOptimizer.
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
  Input of `train` and `evaluate` should have the following features,
  otherwise there will be a `KeyError`:
* for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
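  An `input_fn` of the same shape as the sketch in
  `DNNLinearCombinedClassifier` above applies here as well, except that labels
  are float `Tensor`s of shape `[batch_size, label_dimension]`.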
Loss is calculated by using mean squared error.
@compatibility(eager)
Estimators are not compatible with eager execution.
@end_compatibility
"""
def __init__(self,
model_dir=None,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
label_dimension=1,
weight_column=None,
input_layer_partitioner=None,
config=None,
warm_start_from=None,
loss_reduction=losses.Reduction.SUM):
"""Initializes a DNNLinearCombinedRegressor instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Defaults to FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Defaults to Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining the feature column that
        represents weights. It is used to down-weight or boost examples during
        training, and is multiplied by the loss of the example. If it is a
        string, it is used as a key to fetch the weight tensor from `features`.
        If it is a `_NumericColumn`, the raw tensor is fetched by key
        `weight_column.key`, then `weight_column.normalizer_fn` is applied to
        it to obtain the weight tensor.
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
warm_start_from: A string filepath to a checkpoint to warm-start from, or
a `WarmStartSettings` object to fully configure warm-starting. If the
string filepath is provided instead of a `WarmStartSettings`, then all
weights are warm-started, and it is assumed that vocabularies and Tensor
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
        empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = (
list(linear_feature_columns) + list(dnn_feature_columns))
if not self._feature_columns:
raise ValueError('Either linear_feature_columns or dnn_feature_columns '
'must be defined.')
def _model_fn(features, labels, mode, config):
"""Call the _dnn_linear_combined_model_fn."""
return _dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head_lib. # pylint: disable=protected-access
_regression_head_with_mean_squared_error_loss(
label_dimension=label_dimension, weight_column=weight_column,
loss_reduction=loss_reduction),
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNLinearCombinedRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
warm_start_from=warm_start_from)
| eaplatanios/tensorflow | tensorflow/python/estimator/canned/dnn_linear_combined.py | Python | apache-2.0 | 24,368 |
# -*- coding: utf-8 -*-
# Copyright 2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
from psycopg2 import IntegrityError
class TestMedicalDrugRoute(TransactionCase):
def setUp(self):
super(TestMedicalDrugRoute, self).setUp()
self.drug_route_perfusion = self.env.ref('medical_medicament.route_33')
self.drug_route_oral = self.env.ref('medical_medicament.route_34')
def test_name_unique(self):
""" Validate drug route unique name sql constraint """
with self.assertRaises(IntegrityError):
self.drug_route_perfusion.name = self.drug_route_oral.name
def test_code_unique(self):
""" Validate drug route unique code sql constraint """
with self.assertRaises(IntegrityError):
self.drug_route_perfusion.code = self.drug_route_oral.code
| laslabs/vertical-medical | medical_medicament/tests/test_medical_drug_route.py | Python | agpl-3.0 | 909 |
"""Edit details of an image."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier')
@click.option('--add/--remove', default=True,
              help="Add (default) or remove the given datacenters")
@click.argument('locations', nargs=-1, required=True)
@environment.pass_env
def cli(env, identifier, add, locations):
"""Add/Remove datacenter of an image."""
image_mgr = SoftLayer.ImageManager(env.client)
image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
if add:
result = image_mgr.add_locations(image_id, locations)
else:
result = image_mgr.remove_locations(image_id, locations)
env.fout(result)
| softlayer/softlayer-python | SoftLayer/CLI/image/datacenter.py | Python | mit | 793 |
import pyknow
| cfernandezmanci/tfi-SistemasExpertos | testpyknow.py | Python | unlicense | 14 |
# IMPORTANT: don't change the "currentScriptVerison" variable name or version format. The Travis script depends on it looking like this
# (major, minor, revision, development)
# example dev version: (1, 2, 3, "beta.4")
# example release version: (2, 3, 4)
currentScriptVersion = (2, 1, 1)
legacyScriptVersion = (2, 0, 2, "legacy")
| neverhood311/Stop-motion-OBJ | src/version.py | Python | gpl-3.0 | 331 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import types
import re
import random
from configman import Namespace, RequiredConfig
from collector.lib.ver_tools import normalize
Compiled_Regular_Expression_Type = type(re.compile(''))
#--------------------------------------------------------------------------
ACCEPT = 0 # save and process
DEFER = 1 # save but don't process
DISCARD = 2 # tell client to go away and not come back
IGNORE = 3 # ignore this submission entirely
#==============================================================================
class LegacyThrottler(RequiredConfig):
required_config = Namespace()
required_config.add_option(
'throttle_conditions',
doc='the throttling rules',
default=[
# drop the browser side of all multi submission hang crashes
("*", '''lambda d: "HangID" in d
and d.get("ProcessType", "browser") == "browser"''', None),
# 100% of crashes with comments
("Comments", '''lambda x: x''', 100),
# 100% of all aurora, beta, esr channels
("ReleaseChannel", '''lambda x: x in ("aurora", "beta", "esr")''', 100),
# 100% of all crashes that report as being nightly
("ReleaseChannel", '''lambda x: x.startswith('nightly')''', 100),
# 10% of Firefox
("ProductName", 'Firefox', 10),
# 100% of Fennec
("ProductName", 'Fennec', 100),
# 100% of all alpha, beta or special
("Version", r'''re.compile(r'\..*?[a-zA-Z]+')''', 100),
# 100% of Thunderbird & SeaMonkey
("ProductName", '''lambda x: x[0] in "TSC"''', 100),
# reject everything else
(None, True, 0)
],
from_string_converter=eval
)
required_config.add_option(
'never_discard',
doc='ignore the Thottleable protocol',
default=True
)
required_config.add_option(
'minimal_version_for_understanding_refusal',
doc='ignore the Thottleable protocol',
default={'Firefox': '3.5.4'},
from_string_converter=eval
)
#--------------------------------------------------------------------------
def __init__(self, config):
self.config = config
self.processed_throttle_conditions = \
self.preprocess_throttle_conditions(
config.throttle_conditions
)
#--------------------------------------------------------------------------
@staticmethod
def regexp_handler_factory(regexp):
        def regexp_handler(x):
            return regexp.search(x)
        return regexp_handler
#--------------------------------------------------------------------------
@staticmethod
def bool_handler_factory(a_bool):
def bool_handler(dummy):
return a_bool
return bool_handler
#--------------------------------------------------------------------------
@staticmethod
def generic_handler_factory(an_object):
def generic_handler(x):
return an_object == x
return generic_handler
#--------------------------------------------------------------------------
def preprocess_throttle_conditions(self, original_throttle_conditions):
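        # Normalise each (key, condition, percentage) rule so the condition is
        # always callable: compiled regexes, booleans, python source strings
        # and plain literals are each wrapped in a matching handler; e.g. the
        # default rule ('ProductName', 'Firefox', 10) ends up with an equality
        # test against the string 'Firefox'.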
new_throttle_conditions = []
for key, condition_str, percentage in original_throttle_conditions:
#print "preprocessing %s %s %d" % (key, condition, percentage)
if isinstance(condition_str, basestring):
try:
condition = eval(condition_str)
self.config.logger.info(
'%s interprets "%s" as python code' %
(self.__class__, condition_str)
)
except Exception:
self.config.logger.info(
'%s interprets "%s" as a literal for an equality test' %
(self.__class__, condition_str)
)
condition = condition_str
else:
condition = condition_str
if isinstance(condition, Compiled_Regular_Expression_Type):
#print "reg exp"
new_condition = self.regexp_handler_factory(condition)
#print newCondition
elif isinstance(condition, bool):
#print "bool"
new_condition = self.bool_handler_factory(condition)
#print newCondition
elif isinstance(condition, types.FunctionType):
new_condition = condition
else:
new_condition = self.generic_handler_factory(condition)
new_throttle_conditions.append((key, new_condition, percentage))
return new_throttle_conditions
#--------------------------------------------------------------------------
def understands_refusal(self, raw_crash):
try:
return normalize(raw_crash['Version']) >= normalize(
self.config.minimal_version_for_understanding_refusal[
raw_crash['ProductName']
])
except KeyError:
return False
#--------------------------------------------------------------------------
def apply_throttle_conditions(self, raw_crash):
"""cycle through the throttle conditions until one matches or we fall
off the end of the list.
returns a tuple of the form (
result:boolean - True: reject; False: accept; None: ignore,
percentage:float
)
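        Illustrative example (hypothetical crash): with the default
        throttle_conditions, a raw_crash of
        {'ProductName': 'Firefox', 'Version': '45.0'} falls through to the
        ('ProductName', 'Firefox', 10) rule, so this method returns
        (random_real_percent > 10, 10) - i.e. roughly 10% of such crashes are
        accepted and the rest are deferred or discarded by the caller.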
"""
#print processed_throttle_conditions
for key, condition, percentage in self.processed_throttle_conditions:
throttle_match = False
try:
if key == '*':
throttle_match = condition(raw_crash)
else:
throttle_match = condition(raw_crash[key])
except KeyError:
                if key is None:
throttle_match = condition(None)
else:
#this key is not present in the jsonData - skip
continue
except IndexError:
pass
if throttle_match: # we've got a condition match - apply percent
if percentage is None:
return None, None
random_real_percent = random.random() * 100.0
return random_real_percent > percentage, percentage
# nothing matched, reject
return True, 0
#--------------------------------------------------------------------------
def throttle(self, raw_crash):
throttle_result, percentage = self.apply_throttle_conditions(raw_crash)
if throttle_result is None:
self.config.logger.debug(
"ignoring %s %s",
raw_crash.ProductName,
raw_crash.Version
)
return IGNORE, percentage
if throttle_result: # we're rejecting
#logger.debug('yes, throttle this one')
if (self.understands_refusal(raw_crash)
and not self.config.never_discard):
self.config.logger.debug(
"discarding %s %s",
raw_crash.ProductName,
raw_crash.Version
)
return DISCARD, percentage
else:
self.config.logger.debug(
"deferring %s %s",
raw_crash.ProductName,
raw_crash.Version
)
return DEFER, percentage
else: # we're accepting
self.config.logger.debug(
"not throttled %s %s",
raw_crash.ProductName,
raw_crash.Version
)
return ACCEPT, percentage
| willkg/socorro-collector | collector/throttler.py | Python | mpl-2.0 | 7,973 |
# ravencollectd - ravencollectd.py
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License is applicable.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# Authors:
# rub-a-dub-dub @ github
#
# About this plugin:
# This is a plugin for collectd using its Python interface to read data from
# a Rainforest Automation USB dongle (the RAVEn RFA-Z106).
#
# collectd:
# http://collectd.org
# collectd-python:
# http://collectd.org/documentation/manpages/collectd-python.5.shtml
# Rainforest Automation RAVEn RFA-Z106:
# http://rainforestautomation.com/rfa-z106-raven/
#
import sys
import time
import re
import xml.etree.ElementTree as ET
import serial
import collectd
serDevice = "/dev/ttyUSB0"
ser = None
reStartTag = re.compile('^<[a-zA-Z0-9]+>')
reEndTag = re.compile('^<\/[a-zA-Z0-9]+>')
def config_plugin(conf):
'''This will configure the plugin with the serial device name'''
global serDevice
for node in conf.children:
key = node.key.lower()
val = node.values[0]
if key == 'device':
serDevice = val
else:
collectd.warning("ravencollectd: Unknown config key: %s." % key)
continue
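# Example collectd.conf snippet for loading this plugin (paths illustrative):
#   <Plugin python>
#       ModulePath "/usr/lib/collectd/python"
#       Import "ravencollectd"
#       <Module ravencollectd>
#           device "/dev/ttyUSB0"
#       </Module>
#   </Plugin>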
def close_plugin():
'''This will clean up all opened connections'''
global ser
if ser is not None:
ser.close()
collectd.info("ravencollectd: Serial port closed.")
else:
collectd.debug("ravencollectd: Asking to close serial port, but it was never open.")
def initialise_plugin():
'''This function opens the serial port looking for a RAVEn. Returns True if successful, False otherwise.'''
global ser
try:
ser = serial.Serial(serDevice, 115200, serial.EIGHTBITS, serial.PARITY_NONE, timeout=0.5)
ser.close()
ser.open()
ser.flushInput()
ser.flushOutput()
collectd.info("ravencollectd: Connected to: " + ser.portstr)
return True
except Exception as e:
collectd.error("ravencollectd: Cannot open serial port: " + str(e))
return False
def isReady():
'''This function is used to check if this object has been initialised correctly and is ready to process data'''
global ser
return (ser is not None)
def getInstantDemandKWh(xmltree):
'''Returns a single float value for the Demand from an Instantaneous Demand response from RAVEn'''
# Get the Instantaneous Demand
fDemand = float(int(xmltree.find('Demand').text,16))
fResult = calculateRAVEnNumber(xmltree, fDemand)
return fResult
def calculateRAVEnNumber(xmltree, value):
'''Calculates a float value from RAVEn using Multiplier and Divisor in XML response'''
# Get calculation parameters from XML - Multiplier, Divisor
fDivisor = float(int(xmltree.find('Divisor').text,16))
fMultiplier = float(int(xmltree.find('Multiplier').text,16))
if (fMultiplier > 0 and fDivisor > 0):
fResult = float( (value * fMultiplier) / fDivisor)
elif (fMultiplier > 0):
fResult = float(value * fMultiplier)
else: # (Divisor > 0) or anything else
fResult = float(value / fDivisor)
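    # Example (illustrative): a raw value of 399 with Multiplier=1 and
    # Divisor=1000 gives fResult = 0.399 (kW); the *1000 scaling below then
    # returns 399, i.e. the demand expressed in watts.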
return fResult*1000
def write_to_collectd(dataPt):
'''This actually writes the data to collectd'''
val = collectd.Values(plugin='ravencollectd',type='gauge')
val.type_instance = 'instantdemand'
val.plugin_instance = 'raven'
val.dispatch(values=[dataPt])
def read_data():
'''This function will read from the serial device, process the data and write to collectd'''
global ser
if isReady():
# begin listening to RAVEn
rawxml = ""
while True:
# wait for /n terminated line on serial port (up to timeout)
rawline = ser.readline()
# remove null bytes that creep in immediately after connecting
rawline = rawline.strip('\0')
# only bother if this isn't a blank line
if len(rawline) > 0:
# start tag
if reStartTag.match(rawline):
rawxml = rawline
collectd.debug("ravencollectd: Start XML Tag found: " + rawline)
# end tag
elif reEndTag.match(rawline):
rawxml = rawxml + rawline
collectd.debug("End XML Tag Fragment found: " + rawline)
try:
xmltree = ET.fromstring(rawxml)
if xmltree.tag == 'InstantaneousDemand':
write_to_collectd(getInstantDemandKWh(xmltree))
# collectd.debug(getInstantDemandKWh(xmltree))
else:
# collectd.info("ravencollectd: Unrecognised (not implemented) XML Fragment")
# collectd.info(rawxml)
pass
except Exception as e:
collectd.warning("ravencollectd: Exception triggered: " + str(e))
# reset rawxml
rawxml = ""
return
                # if it starts with a space, it's inside the fragment,
                # so accumulate it into the current XML buffer
                else:
                    rawxml = rawxml + rawline
                    # collectd.debug("ravencollectd: Normal inner XML Fragment: " + str(rawxml))
else:
pass
else:
collectd.warning("ravencollectd: Was asked to begin reading/writing data without opening connections.")
collectd.register_init(initialise_plugin)
collectd.register_config(config_plugin)
collectd.register_read(read_data)
collectd.register_shutdown(close_plugin)
| rub-a-dub-dub/raven-collectd | ravencollectd.py | Python | gpl-2.0 | 6,152 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Use the Leaf algorithm to remove redundancy.
The algorithm is published in the following paper:
1. Bull, S. C., Muldoon, M. R. & Doig, A. J. Maximising the Size of Non-Redundant Protein Datasets Using Graph Theory. PLoS One 8, (2013).
Simon Bull provides a Python 3 implementation, hosted at https://github.com/SimonCB765/Leaf
This implementation is for Python 2.7.
This script reads a multiple alignment file in FASTA format, computes pairwise similarities, and removes redundancy according to a similarity cutoff.
You can choose to use igraph to plot the network.
usage: python leaf.py test.fa
"""
import sys
import os
import igraph
def read_msa(msa_f):
with open(msa_f) as o_f:
lines = o_f.readlines()
lines = [line.rstrip('\r\n') for line in lines if line]
pro_line_num = [i for i, line in enumerate(
lines) if '>' in line] + [len(lines)]
seqs = [lines[n:pro_line_num[i + 1]]
for i, n in enumerate(pro_line_num[:-1])]
seqs = [(seq[0].split()[0][1:], ''.join(seq[1:])) for seq in seqs]
return seqs
def get_pim(seqs):
def pim(seq1, seq2):
identity = len([i for i, s in enumerate(seq1) if s == seq2[i]])
return identity * 1.0 / len(seq1)
scores = []
seqlen = len(seqs)
for i in range(seqlen):
score_i = []
for j in range(seqlen):
if j < i:
score_i.append(scores[j][i])
elif j > i:
score_i.append(pim(seqs[i][1], seqs[j][1]))
else:
score_i.append(1.0)
scores.append(score_i)
return scores
def leaf(labels, similarities, cutoff, filename):
matrix = [map(lambda x: 1 if x > cutoff else 0, row)
for row in similarities]
for i in range(len(matrix)):
matrix[i][i] = 0
# use igraph to plot the initial network
graph = igraph.Graph.Adjacency(matrix, mode='undirected')
igraph.plot(graph, filename + '.png', vertex_label=range(len(labels)))
adjlist = [[i for i,n in enumerate(row ) if n] for row in matrix]
neighbors = []
remove = []
for i,a in enumerate(adjlist):
print '{0}:{1},'.format(i,a)
# transform adjlist to set
neighbors = [set(n) for i, n in enumerate(adjlist)]
# detect possible max clique
max_neighbors = max(len(l) for l in neighbors)
# the possible clique size is 2 to max_neighbors+1, so the possible
# neighborsize is 1 to max_neighbors
for clique_num in range(1, max_neighbors + 1):
nodes_index = set([i for i, l in enumerate(
neighbors) if len(l) == clique_num])
for i in nodes_index:
if not i in remove: # do not compute removed vertex
# a clique is set of vertex connecting to each other
nodesofinterest = neighbors[i].union([i])
print 'initial nodesofinterest: ',nodesofinterest
if set.intersection(*[neighbors[i].union([i]) for i in nodesofinterest]) == nodesofinterest:
print 'clique nodesofinterest: ',nodesofinterest
# detect vertex without linking to outside vertex
in_clique = [i for i in nodesofinterest if not neighbors[
i].union([i]).difference(nodesofinterest)]
# keep one of the vertex without linking to outside vertex,
# remove rest
if in_clique:
print 'in_clique: ',in_clique
keep = [in_clique[0]]
print 'keep: ',keep
remove_iter = nodesofinterest.difference(set(keep))
print 'remove_iter: ',remove_iter
for r in remove_iter:
if not r in remove: # do not compute removed vertex
print 'remove: ',r
for i in range(len(neighbors)):
if r in neighbors[i]:
neighbors[i].remove(r)
remove += remove_iter
print 'after leaf: ',neighbors
nr_matrix = [matrix[i] for i in range(len(matrix)) if not i in remove]
nr_matrix = [[row[i] for i in range(
len(matrix)) if not i in remove] for row in nr_matrix]
graph = igraph.Graph.Adjacency(nr_matrix, mode='undirected')
nr_labels = [i for i in range(len(matrix)) if not i in remove]
igraph.plot(graph, filename + '_leaf.png', vertex_label=nr_labels)
# continue to remove the one with most neighbors until no vertex has
# neighbors, removed vertex is not considered
while max([len(r) for i, r in enumerate(neighbors) if not i in remove]) > 0:
max_index = max([(len(r), i) for i, r in enumerate(neighbors) if not i in remove])[1]
print 'remove: ',max_index
remove.append(max_index)
for i in set(range(len(neighbors))).difference(set(remove)): # do not compute remove vertex
if max_index in neighbors[i]:
neighbors[i].remove(max_index)
print 'final remove: ',remove
nr_matrix = [matrix[i] for i in range(len(matrix)) if not i in remove]
nr_matrix = [[row[i] for i in range(
len(matrix)) if not i in remove] for row in nr_matrix]
nr_labels = [i for i in range(len(matrix)) if not i in remove]
# plot non-redundant notwork
graph = igraph.Graph.Adjacency(nr_matrix, mode='undirected')
igraph.plot(graph, filename + '_nr.png', vertex_label=nr_labels)
nr_similarities = [similarities[i] for i in range(len(similarities)) if not i in remove]
nr_similarities = [[row[i] for i in range(
len(similarities)) if not i in remove] for row in nr_similarities]
nr_labels = [labels[i] for i in range(len(similarities)) if not i in remove]
return nr_labels, nr_similarities
def main():
seqs = read_msa(sys.argv[-1])
filename = os.path.splitext(os.path.split(sys.argv[-1])[1])[0]
seqnames = [seq[0] for seq in seqs]
similarities = get_pim(seqs)
for cutoff in [0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95]:
# for cutoff in [0.8]:
nr_names,nr_similarities = leaf(seqnames, similarities, cutoff, filename+'_'+str(cutoff))
nr_seqs = [seq for seq in seqs if seq[0] in nr_names]
with open(filename+'_nr_seqs_'+str(cutoff)+'.fas','w') as w_f:
for pro,seq in nr_seqs:
print >> w_f,'>{0}'.format(pro)
print >> w_f,'{0}'.format(seq)
if __name__ == "__main__":
main()
| lituan/tools | leaf/leaf.py | Python | cc0-1.0 | 6,618 |
# Copyright (C) 2015 Optiv, Inc. (brad.spengler@optiv.com)
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class StealthWebHistory(Signature):
name = "stealth_webhistory"
description = "Clears web history"
severity = 3
categories = ["stealth"]
authors = ["Optiv"]
minimum = "1.2"
def run(self):
file_indicators = [
".*\\\\History\\\\History\.IE5\\\\.*",
".*\\\\Temporary\\\\ Internet\\ Files\\\\Content\.IE5\\\\.*",
]
if self.results["target"]["category"] == "file":
file_indicators.append(".*\\\\Cookies\\\\.*")
found_cleaner = False
for indicator in file_indicators:
file_match = self.check_delete_file(pattern=indicator, regex=True, all=True)
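            # more than 10 matching deletions are required before the
            # signature fires (presumably to avoid flagging routine,
            # small-scale cache cleanup)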
if file_match and len(file_match) > 10:
for match in file_match:
self.data.append({"file" : match })
found_cleaner = True
return found_cleaner
| lixiangning888/whole_project | modules/signatures_orginal_20151110/stealth_webhistory.py | Python | lgpl-3.0 | 1,097 |
"""
Universe Release Versioning - Version Information
"""
import functools
import sys
from universe.dev import DevelopmentMetadata
@functools.total_ordering
class VersionInfo(tuple):
"""Provide information about a version
This subclasses a tuple to be compliant with other expectations of
``__version_info__``
"""
def __new__(cls, args, **kwargs):
if not isinstance(args, tuple):
args = (args,)
return tuple.__new__(cls, (args,))
def __init__(self, args, **kwargs):
if not isinstance(args, tuple):
args = (args,)
self.branch = None
self.dev = None
try:
self.major = int(args[0])
except ValueError:
branches = str(args[0]).split('-')
point_info = branches.pop(0).split('.')
self.major = int(point_info[0])
self.minor = int(point_info[1])
self.true_patch = self.patch = int(point_info[2])
if self.patch == 0:
try:
self.true_patch = int(branches.pop(0))
except (IndexError, ValueError):
raise ValueError(
'Development version requires original patch after'
)
try:
self.dev = DevelopmentMetadata(branches.pop(0))
except IndexError:
raise ValueError('Development metadata must be present')
if branches:
raise ValueError('Patch cannot be 0 in non-tip')
if branches:
self.branch = VersionInfo('-'.join(branches))
else:
self.minor = int(args[1])
self.true_patch = self.patch = int(args[2])
if kwargs.get('branch'):
self.branch = VersionInfo(kwargs['branch'])
if kwargs.get('dev'):
self.dev = DevelopmentMetadata(kwargs['dev'])
self.patch = 0
elif self.true_patch == 0:
raise ValueError('Patch cannot be 0 in true tip')
def __getitem__(self, key):
return (self.major, self.minor, self.patch)[key]
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def __lt__(self, other):
return self[:3] < other[:3]
def __eq__(self, other):
return self[:3] == other[:3]
def __str__(self):
return str(self[:3])
def __repr__(self):
return repr(self[:3])
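# Example (illustrative): VersionInfo('1.2.3') behaves as the tuple (1, 2, 3)
# for comparisons and slicing; module_version_info() below builds the same
# object from a module's __version__ string.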
def module_version_info(module):
"""Gets a ``VersionInfo`` object from this module. It expects to see
a ``__version__`` attribute with the respective string
"""
return VersionInfo(module.__version__)
def package_version_info(package):
"""Gets a ``VersionInfo`` object from this package name. It expects to
see a ``__version__`` attribute with the respective string.
"""
return module_version_info(sys.modules[package])
| Alphadelta14/universe | universe/info.py | Python | mit | 2,927 |
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.common.provider import cleanup_vm
from cfme.services.catalogs import cloud_catalog_item as cct
from cfme.automate.service_dialogs import ServiceDialog
from cfme.services.catalogs.catalog import Catalog
from cfme.services.catalogs.service_catalogs import ServiceCatalogs
from cfme.services import requests
from cfme.web_ui import flash
from utils import testgen
from utils.log import logger
from utils.wait import wait_for
pytestmark = [
pytest.mark.usefixtures("logged_in"),
pytest.mark.meta(server_roles="+automate"),
pytest.mark.ignore_stream("5.2")
]
def pytest_generate_tests(metafunc):
# Filter out providers without templates defined
argnames, argvalues, idlist = testgen.cloud_providers(metafunc, 'provisioning')
new_argvalues = []
new_idlist = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(zip(argnames, argvalue_tuple))
if not args['provisioning']:
# Don't know what type of instance to provision, move on
continue
# required keys should be a subset of the dict keys set
if not {'image'}.issubset(args['provisioning'].viewkeys()):
# Need image for image -> instance provisioning
continue
new_idlist.append(idlist[i])
new_argvalues.append([args[argname] for argname in argnames])
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.yield_fixture(scope="function")
def dialog():
dialog = "dialog_" + fauxfactory.gen_alphanumeric()
element_data = dict(
ele_label="ele_" + fauxfactory.gen_alphanumeric(),
ele_name=fauxfactory.gen_alphanumeric(),
ele_desc="my ele desc",
choose_type="Text Box",
default_text_box="default value"
)
service_dialog = ServiceDialog(label=dialog, description="my dialog",
submit=True, cancel=True,
tab_label="tab_" + fauxfactory.gen_alphanumeric(),
tab_desc="my tab desc",
box_label="box_" + fauxfactory.gen_alphanumeric(),
box_desc="my box desc")
service_dialog.create(element_data)
flash.assert_success_message('Dialog "%s" was added' % dialog)
yield dialog
@pytest.yield_fixture(scope="function")
def catalog():
cat_name = "cat_" + fauxfactory.gen_alphanumeric()
catalog = Catalog(name=cat_name, description="my catalog")
catalog.create()
yield catalog
@pytest.mark.meta(blockers=1242706)
def test_cloud_catalog_item(setup_provider, provider, provisioning, dialog, catalog, request):
"""Tests cloud catalog item
Metadata:
test_flag: provision
"""
vm_name = 'test_servicecatalog-%s' % fauxfactory.gen_alphanumeric()
request.addfinalizer(lambda: cleanup_vm(vm_name + "_0001", provider))
image = provisioning['image']['name']
item_name = fauxfactory.gen_alphanumeric()
cloud_catalog_item = cct.Instance(
item_type=provisioning['item_type'],
name=item_name,
description="my catalog",
display_in=True,
catalog=catalog.name,
dialog=dialog,
catalog_name=image,
vm_name=vm_name,
instance_type=provisioning['instance_type'],
availability_zone=provisioning['availability_zone'],
cloud_tenant=provisioning['cloud_tenant'],
cloud_network=provisioning['cloud_network'],
security_groups=[provisioning['security_group']],
provider_mgmt=provider.mgmt,
provider=provider.name,
guest_keypair=provisioning['guest_keypair'])
cloud_catalog_item.create()
service_catalogs = ServiceCatalogs("service_name")
service_catalogs.order(catalog.name, cloud_catalog_item)
flash.assert_no_errors()
logger.info('Waiting for cfme provision request for service %s' % item_name)
row_description = item_name
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells, True],
fail_func=requests.reload, num_sec=1000, delay=20)
assert row.last_message.text == 'Request complete'
| thom-at-redhat/cfme_tests | cfme/tests/services/test_cloud_service_catalogs.py | Python | gpl-2.0 | 4,272 |
import math
import operator as op
class ColumnExpression:
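    """A lazily evaluated column expression.
    A ColumnExpression records an operator and its arguments; calling the
    expression with a frame (any mapping from column name to value, e.g. a
    dict of pandas Series) evaluates it.
    Example (column names and frame are illustrative):
        price = ColumnExpression('price')
        qty = ColumnExpression('qty')
        total = price * qty + 1.0
        total({'price': 2.0, 'qty': 3.0})  # -> 7.0
    """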
def __init__(self, *args):
if len(args) < 1:
raise ValueError
elif len(args) == 1:
self.operator = None
self.args = args[0]
else:
self.operator = args[0]
self.args = args[1:]
assert callable(self.operator)
def __repr__(self):
if self.operator is not None:
return repr(self.operator) + repr(self.args)
else:
return 'Column(' + repr(self.args) + ')'
def __str__(self):
if self.operator is not None:
return str(self.operator) + str(self.args)
else:
return 'Column(' + str(self.args) + ')'
def __call__(self, frame):
if self.operator is None:
return frame[self.args]
else:
return self.operator(*(a(frame) if isinstance(a, ColumnExpression) else a for a in self.args))
def call(self, func):
"""Apply a function of one argument to the column expression
"""
return ColumnExpression(func, self)
def _binop_right(self, operator, y):
return ColumnExpression(operator, self, y)
def _binop_left(self, operator, x):
return ColumnExpression(operator, x, self)
def __add__(self, y):
return self._binop_right(op.add, y)
def __radd__(self, x):
return self._binop_left(op.add, x)
def __sub__(self, y):
return self._binop_right(op.sub, y)
def __rsub__(self, x):
return self._binop_left(op.sub, x)
def __mul__(self, y):
return self._binop_right(op.mul, y)
def __rmul__(self, x):
return self._binop_left(op.mul, x)
def __truediv__(self, y):
return self._binop_right(op.truediv, y)
def __rtruediv__(self, x):
return self._binop_left(op.truediv, x)
def __floordiv__(self, y):
return self._binop_right(op.floordiv, y)
def __rfloordiv__(self, x):
return self._binop_left(op.floordiv, x)
def __mod__(self, y):
return self._binop_right(op.mod, y)
def __rmod__(self, x):
return self._binop_left(op.mod, x)
def __divmod__(self, y):
return self._binop_right(divmod, y)
def __rdivmod__(self, x):
return self._binop_left(divmod, x)
def __pow__(self, y):
return self._binop_right(op.pow, y)
def __rpow__(self, x):
return self._binop_left(op.pow, x)
def __lshift__(self, y):
return self._binop_right(op.lshift, y)
def __rlshift__(self, x):
return self._binop_left(op.lshift, x)
def __rshift__(self, y):
return self._binop_right(op.rshift, y)
def __rrshift__(self, x):
return self._binop_left(op.rshift, x)
def __and__(self, y):
return self._binop_right(op.and_, y)
def __rand__(self, x):
return self._binop_left(op.and_, x)
def __xor__(self, y):
return self._binop_right(op.xor, y)
def __rxor__(self, x):
return self._binop_left(op.xor, x)
def __or__(self, y):
return self._binop_right(op.or_, y)
def __ror__(self, x):
return self._binop_left(op.or_, x)
def __neg__(self):
return ColumnExpression(op.neg, self)
def __pos__(self):
return ColumnExpression(op.pos, self)
def __abs__(self):
return ColumnExpression(op.abs, self)
def __invert__(self):
return ColumnExpression(op.invert, self)
def __complex__(self):
return ColumnExpression(complex, self)
def __int__(self):
return ColumnExpression(int, self)
def __float__(self):
return ColumnExpression(float, self)
def __round__(self, n=None):
return ColumnExpression(round, self)
def __ceil__(self):
return ColumnExpression(math.ceil, self)
def __floor__(self):
return ColumnExpression(math.floor, self)
def __trunc__(self):
return ColumnExpression(math.trunc, self)
def __eq__(self, y):
return self._binop_right(op.eq, y)
def __ne__(self, y):
return self._binop_right(op.ne, y)
def __lt__(self, y):
return self._binop_right(op.lt, y)
def __le__(self, y):
return self._binop_right(op.le, y)
def __gt__(self, y):
return self._binop_right(op.gt, y)
def __ge__(self, y):
return self._binop_right(op.ge, y)
| DGrady/dataframe | dataframe/column_expression.py | Python | mit | 4,428 |
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""No-op authorization plugin allowing boto anonymous access.
This allows users to use gsutil for accessing publicly readable buckets and
objects without first signing up for an account.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from boto.auth_handler import AuthHandler
class NoOpAuth(AuthHandler):
"""No-op authorization plugin class."""
capability = ['hmac-v4-s3', 's3']
def __init__(self, path, config, provider):
pass
def add_auth(self, http_request):
pass
| catapult-project/catapult | third_party/gsutil/gslib/no_op_auth_plugin.py | Python | bsd-3-clause | 1,217 |
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
# pylint: disable=no-self-use
# pylint: disable=too-few-public-methods
from caffe.proto import caffe_pb2
from caffe import Net
from google.protobuf import text_format
# pylint: disable=invalid-name
# pylint: disable=no-member
LayerType = caffe_pb2.LayerParameter.LayerType
EltwiseOp = caffe_pb2.EltwiseParameter.EltwiseOp
PoolMethod = caffe_pb2.PoolingParameter.PoolMethod
DBType = caffe_pb2.DataParameter.DB
# pylint: enable=invalid-name
# pylint: enable=no-member
class NetworkBuilder(object):
def __init__(self, training_batch_size=20, testing_batch_size=20, **kwargs):
self.training_batch_size = training_batch_size
self.testing_batch_size = testing_batch_size
self.other_args = kwargs
def _make_inception(self, network, x1x1, x3x3r, x3x3, x5x5r, x5x5, proj,
name_generator):
"""Make Inception submodule."""
layers = []
split = self._make_split_layer(network)
layers.append(split)
context1 = self._make_conv_layer(network, kernel_size=1, num_output=x1x1,
bias_value=0)
layers.append(context1)
relu1 = self._make_relu_layer(network)
layers.append(relu1)
context2a = self._make_conv_layer(network, kernel_size=1, num_output=x3x3r,
bias_value=0)
layers.append(context2a)
relu2a = self._make_relu_layer(network)
layers.append(relu2a)
context2b = self._make_conv_layer(network, kernel_size=3, num_output=x3x3,
pad=1)
layers.append(context2b)
relu2b = self._make_relu_layer(network)
layers.append(relu2b)
context3a = self._make_conv_layer(network, kernel_size=1, num_output=x5x5r,
bias_value=0)
layers.append(context3a)
relu3a = self._make_relu_layer(network)
layers.append(relu3a)
context3b = self._make_conv_layer(network, kernel_size=5, num_output=x5x5,
pad=2)
layers.append(context3b)
relu3b = self._make_relu_layer(network)
layers.append(relu3b)
context4a = self._make_maxpool_layer(network, kernel_size=3)
layers.append(context4a)
relu4a = self._make_relu_layer(network)
layers.append(relu4a)
context4b = self._make_conv_layer(network, kernel_size=1, num_output=proj,
pad=1, bias_value=0)
layers.append(context4b)
relu4b = self._make_relu_layer(network)
layers.append(relu4b)
concat = self._make_concat_layer(network)
layers.append(concat)
connections = [
(split.name, (split.top, context1.bottom)),
(split.name, (split.top, context2a.bottom)),
(split.name, (split.top, context3a.bottom)),
(split.name, (split.top, context4a.bottom)),
(context2a.name,
(context2a.top, relu2a.bottom, relu2a.top, context2b.bottom)),
(context3a.name,
(context3a.top, relu3a.bottom, relu3a.top, context3b.bottom)),
(context4a.name,
(context4a.top, relu4a.bottom, relu4a.top, context4b.bottom)),
(context1.name, (context1.top, relu1.bottom, relu1.top, concat.bottom)),
(context2b.name,
(context2b.top, relu2b.bottom, relu2b.top, concat.bottom)),
(context3b.name,
(context3b.top, relu3b.bottom, relu3b.top, concat.bottom)),
(context4b.name,
(context4b.top, relu4b.bottom, relu4b.top, concat.bottom)),
]
for connection in connections:
self._tie(connection, name_generator)
return layers
def _make_prod_layer(self, network, coeff=None):
layer = network.layers.add()
layer.name = 'prod'
layer.type = LayerType.Value('ELTWISE')
params = layer.eltwise_param
params.operation = EltwiseOp.Value('PROD')
if coeff:
for c in coeff:
params.coeff.append(c)
return layer
def _make_sum_layer(self, network, coeff=None):
layer = network.layers.add()
layer.name = 'sum'
layer.type = LayerType.Value('ELTWISE')
params = layer.eltwise_param
params.operation = EltwiseOp.Value('SUM')
if coeff:
for c in coeff:
params.coeff.append(c)
return layer
def _make_upsampling_layer(self, network, stride):
layer = network.layers.add()
layer.name = 'upsample'
layer.type = LayerType.Value('UPSAMPLING')
params = layer.upsampling_param
params.kernel_size = stride
return layer
def _make_folding_layer(self, network, channels, height, width, prefix=''):
layer = network.layers.add()
layer.name = '%sfolding' % (prefix)
layer.type = LayerType.Value('FOLDING')
params = layer.folding_param
params.channels_folded = channels
params.height_folded = height
params.width_folded = width
return layer
def _make_conv_layer(self, network, kernel_size, num_output, stride=1, pad=0,
bias_value=0.1, shared_name=None, wtype='xavier', std=0.01):
"""Make convolution layer."""
layer = network.layers.add()
layer.name = 'conv_%dx%d_%d' % (kernel_size, kernel_size, stride)
layer.type = LayerType.Value('CONVOLUTION')
params = layer.convolution_param
params.num_output = num_output
params.kernel_size = kernel_size
params.stride = stride
params.pad = pad
weight_filler = params.weight_filler
weight_filler.type = wtype
if weight_filler.type == 'gaussian':
weight_filler.mean = 0
weight_filler.std = std
bias_filler = params.bias_filler
bias_filler.type = 'constant'
bias_filler.value = bias_value
layer.blobs_lr.append(1)
layer.blobs_lr.append(2)
layer.weight_decay.append(1)
layer.weight_decay.append(0)
if shared_name:
layer.param.append('%s_w' % shared_name)
layer.param.append('%s_b' % shared_name)
return layer
def _make_maxpool_layer(self, network, kernel_size, stride=1):
"""Make max pooling layer."""
layer = network.layers.add()
layer.name = 'maxpool_%dx%d_%d' % (kernel_size, kernel_size, stride)
layer.type = LayerType.Value('POOLING')
params = layer.pooling_param
params.pool = PoolMethod.Value('MAX')
params.kernel_size = kernel_size
params.stride = stride
return layer
def _make_avgpool_layer(self, network, kernel_size, stride=1):
"""Make average pooling layer."""
layer = network.layers.add()
layer.name = 'avgpool_%dx%d_%d' % (kernel_size, kernel_size, stride)
layer.type = LayerType.Value('POOLING')
params = layer.pooling_param
params.pool = PoolMethod.Value('AVE')
params.kernel_size = kernel_size
params.stride = stride
return layer
def _make_lrn_layer(self, network, name='lrn'):
"""Make local response normalization layer."""
layer = network.layers.add()
layer.name = name
layer.type = LayerType.Value('LRN')
params = layer.lrn_param
params.local_size = 5
params.alpha = 0.0001
params.beta = 0.75
return layer
def _make_concat_layer(self, network, dim=1):
"""Make depth concatenation layer."""
layer = network.layers.add()
layer.name = 'concat'
layer.type = LayerType.Value('CONCAT')
params = layer.concat_param
params.concat_dim = dim
return layer
def _make_dropout_layer(self, network, dropout_ratio=0.5):
"""Make dropout layer."""
layer = network.layers.add()
layer.name = 'dropout'
layer.type = LayerType.Value('DROPOUT')
params = layer.dropout_param
params.dropout_ratio = dropout_ratio
return layer
def _make_tensor_layer(self, network, num_output, weight_lr=1,
bias_lr=2, bias_value=0.1, prefix='',
shared_name=None,
wtype='xavier', std=0.01):
"""Make tensor product layer."""
layer = network.layers.add()
layer.name = '%stensor_product' % prefix
layer.type = LayerType.Value('TENSOR_PRODUCT')
params = layer.inner_product_param
params.num_output = num_output
weight_filler = params.weight_filler
weight_filler.type = wtype
if wtype == 'gaussian':
weight_filler.mean = 0
weight_filler.std = std
bias_filler = params.bias_filler
bias_filler.type = 'constant'
bias_filler.value = bias_value
layer.blobs_lr.append(weight_lr)
layer.blobs_lr.append(bias_lr)
layer.weight_decay.append(1)
layer.weight_decay.append(0)
if shared_name:
layer.param.append('%s_w' % shared_name)
layer.param.append('%s_b' % shared_name)
return layer
def _make_inner_product_layer(self, network, num_output, weight_lr=1,
bias_lr=2, bias_value=0.1, prefix='',
shared_name=None,
wtype='xavier', std=0.01):
"""Make inner product layer."""
layer = network.layers.add()
layer.name = '%sinner_product' % prefix
layer.type = LayerType.Value('INNER_PRODUCT')
params = layer.inner_product_param
params.num_output = num_output
weight_filler = params.weight_filler
weight_filler.type = wtype
if wtype == 'gaussian':
weight_filler.mean = 0
weight_filler.std = std
bias_filler = params.bias_filler
bias_filler.type = 'constant'
bias_filler.value = bias_value
layer.blobs_lr.append(weight_lr)
layer.blobs_lr.append(bias_lr)
layer.weight_decay.append(1)
layer.weight_decay.append(0)
if shared_name:
layer.param.append('%s_w' % shared_name)
layer.param.append('%s_b' % shared_name)
return layer
def _make_split_layer(self, network):
"""Make split layer."""
layer = network.layers.add()
layer.name = 'split'
layer.type = LayerType.Value('SPLIT')
return layer
def _make_relu_layer(self, network):
"""Make ReLU layer."""
layer = network.layers.add()
layer.name = 'relu'
layer.type = LayerType.Value('RELU')
return layer
def _tie(self, layers, name_generator):
"""Generate a named connection between layer endpoints."""
name = 'ep_%s_%d' % (layers[0], name_generator.next())
for layer in layers[1]:
layer.append(name)
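  # e.g. self._tie(('conv1', (conv1.top, relu1.bottom)), gen) generates the
  # blob name 'ep_conv1_0' (for the first id yielded by gen) and appends it to
  # both conv1.top and relu1.bottom, wiring the two layers together.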
def _connection_name_generator(self):
"""Generate a unique id."""
index = 0
while True:
yield index
index += 1
def _build_rnn_network(self, wtype='xavier', std=0.01, batchsize=100, numstep=24):
network = caffe_pb2.NetParameter()
network.force_backward = True
network.name = 'rotation_rnn'
network.input.append('images')
network.input_dim.append(batchsize)
network.input_dim.append(3)
network.input_dim.append(64)
network.input_dim.append(64)
for t in range(numstep):
network.input.append('rotations%d' % t)
network.input_dim.append(batchsize)
network.input_dim.append(3)
network.input_dim.append(1)
network.input_dim.append(1)
layers = []
name_generator = self._connection_name_generator()
tensor_view = []
relu2_view = []
relu2_view_split = []
concat = []
dec_fc1 = []
dec_relu1 = []
dec_fc2 = []
dec_relu2 = []
dec_relu2_split = []
dec_img_fc1 = []
dec_img_relu1 = []
dec_img_fold = []
dec_img_up1 = []
dec_img_conv1 = []
dec_img_relu2 = []
dec_img_up2 = []
dec_img_conv2 = []
dec_img_relu3 = []
dec_img_up3 = []
dec_img_conv3 = []
dec_mask_fc1 = []
dec_mask_relu1 = []
dec_mask_fold = []
dec_mask_up1 = []
dec_mask_conv1 = []
dec_mask_relu2 = []
dec_mask_up2 = []
dec_mask_conv2 = []
dec_mask_relu3 = []
dec_mask_up3 = []
dec_mask_conv3 = []
conv1 = self._make_conv_layer(network, kernel_size=5, stride=2, pad=2, num_output=64, shared_name='conv1')
conv1.bottom.append('images')
relu1 = self._make_relu_layer(network)
conv2 = self._make_conv_layer(network, kernel_size=5, stride=2, pad=2, num_output=128, shared_name='conv2')
relu2 = self._make_relu_layer(network)
conv3 = self._make_conv_layer(network, kernel_size=5, stride=2, pad=2, num_output=256, shared_name='conv3')
relu3 = self._make_relu_layer(network)
fc1 = self._make_inner_product_layer(network, num_output=1024, shared_name='fc1')
relu4 = self._make_relu_layer(network)
fc2 = self._make_inner_product_layer(network, num_output=1024, shared_name='fc2')
relu5 = self._make_relu_layer(network)
enc_split = self._make_split_layer(network)
fc1_id = self._make_inner_product_layer(network, num_output=512, shared_name='fc1_id')
relu1_id = self._make_relu_layer(network)
id_split = self._make_split_layer(network)
fc1_view = self._make_inner_product_layer(network, num_output=512, shared_name='fc1_view')
relu1_view = self._make_relu_layer(network)
tensor_view.append(self._make_tensor_layer(network, num_output=512, shared_name='tensor_view'))
tensor_view[-1].bottom.append('rotations0')
relu2_view.append(self._make_relu_layer(network))
relu2_view_split.append(self._make_split_layer(network))
connections = []
connections.append((conv1.name, (conv1.top, relu1.bottom, relu1.top, conv2.bottom)))
connections.append((conv2.name, (conv2.top, relu2.bottom, relu2.top, conv3.bottom)))
connections.append((conv3.name, (conv3.top, relu3.bottom, relu3.top, fc1.bottom)))
connections.append((fc1.name, (fc1.top, relu4.bottom, relu4.top, fc2.bottom)))
connections.append((fc2.name, (fc2.top, relu5.bottom)))
connections.append((relu5.name, (relu5.top, enc_split.bottom)))
connections.append((enc_split.name, (enc_split.top, fc1_id.bottom)))
connections.append((fc1_id.name, (fc1_id.top, relu1_id.bottom, relu1_id.top, id_split.bottom)))
connections.append((enc_split.name, (enc_split.top, fc1_view.bottom)))
connections.append((fc1_view.name, (fc1_view.top, relu1_view.bottom, relu1_view.top, tensor_view[-1].bottom)))
for t in range(numstep):
# Action.
if t > 0:
tensor_view.append(self._make_tensor_layer(network, num_output=512, shared_name='tensor_view'))
tensor_view[-1].bottom.append('rotations%d' % t)
relu2_view.append(self._make_relu_layer(network))
relu2_view_split.append(self._make_split_layer(network))
# Decoder.
concat.append(self._make_concat_layer(network))
dec_fc1.append(self._make_inner_product_layer(network, num_output=1024, shared_name='dec_fc1'))
dec_relu1.append(self._make_relu_layer(network))
dec_fc2.append(self._make_inner_product_layer(network, num_output=1024, shared_name='dec_fc2'))
dec_relu2.append(self._make_relu_layer(network))
dec_relu2_split.append(self._make_split_layer(network))
# Dec img path.
dec_img_fc1.append(self._make_inner_product_layer(network, num_output=16384, shared_name='dec_img_fc1'))
dec_img_relu1.append(self._make_relu_layer(network))
dec_img_fold.append(self._make_folding_layer(network,256,8,8))
dec_img_up1.append(self._make_upsampling_layer(network,stride=2))
dec_img_conv1.append(self._make_conv_layer(network, kernel_size=5, stride=1, pad=2, num_output=128, shared_name='dec_img_conv1'))
dec_img_relu2.append(self._make_relu_layer(network))
dec_img_up2.append(self._make_upsampling_layer(network,stride=2))
dec_img_conv2.append(self._make_conv_layer(network, kernel_size=5, stride=1, pad=2, num_output=64, shared_name='dec_img_conv2'))
dec_img_relu3.append(self._make_relu_layer(network))
dec_img_up3.append(self._make_upsampling_layer(network,stride=2))
dec_img_conv3.append(self._make_conv_layer(network, kernel_size=5, stride=1, pad=2, num_output=3, shared_name='dec_img_conv3'))
# Dec mask path.
dec_mask_fc1.append(self._make_inner_product_layer(network, num_output=8192, shared_name='dec_mask_fc1'))
dec_mask_relu1.append(self._make_relu_layer(network))
dec_mask_fold.append(self._make_folding_layer(network,128,8,8))
dec_mask_up1.append(self._make_upsampling_layer(network,stride=2))
dec_mask_conv1.append(self._make_conv_layer(network, kernel_size=5, stride=1, pad=2, num_output=64, shared_name='dec_mask_conv1'))
dec_mask_relu2.append(self._make_relu_layer(network))
dec_mask_up2.append(self._make_upsampling_layer(network,stride=2))
dec_mask_conv2.append(self._make_conv_layer(network, kernel_size=5, stride=1, pad=2, num_output=32, shared_name='dec_mask_conv2'))
dec_mask_relu3.append(self._make_relu_layer(network))
dec_mask_up3.append(self._make_upsampling_layer(network,stride=2))
dec_mask_conv3.append(self._make_conv_layer(network, kernel_size=5, stride=1, pad=2, num_output=1, shared_name='dec_mask_conv3'))
# dec connections.
if t > 0:
connections.append((relu2_view_split[-2].name, (relu2_view_split[-2].top, tensor_view[-1].bottom)))
connections.append((tensor_view[-1].name, (tensor_view[-1].top, relu2_view[-1].bottom)))
connections.append((relu2_view[-1].name, (relu2_view[-1].top, relu2_view_split[-1].bottom)))
connections.append((id_split.name, (id_split.top, concat[-1].bottom)))
connections.append((relu2_view_split[-1].name, (relu2_view_split[-1].top, concat[-1].bottom)))
connections.append((concat[-1].name, (concat[-1].top, dec_fc1[-1].bottom)))
connections.append((dec_fc1[-1].name, (dec_fc1[-1].top, dec_relu1[-1].bottom, dec_relu1[-1].top, dec_fc2[-1].bottom)))
connections.append((dec_fc2[-1].name, (dec_fc2[-1].top, dec_relu2[-1].bottom)))
connections.append((dec_relu2[-1].name, (dec_relu2[-1].top, dec_relu2_split[-1].bottom)))
# dec image connections.
connections.append((dec_relu2_split[-1].name, (dec_relu2_split[-1].top, dec_img_fc1[-1].bottom)))
connections.append((dec_img_fc1[-1].name, (dec_img_fc1[-1].top, dec_img_relu1[-1].bottom, dec_img_relu1[-1].top, dec_img_fold[-1].bottom)))
connections.append((dec_img_fold[-1].name, (dec_img_fold[-1].top, dec_img_up1[-1].bottom)))
connections.append((dec_img_up1[-1].name, (dec_img_up1[-1].top, dec_img_conv1[-1].bottom)))
connections.append((dec_img_conv1[-1].name, (dec_img_conv1[-1].top, dec_img_relu2[-1].bottom, dec_img_relu2[-1].top, dec_img_up2[-1].bottom)))
connections.append((dec_img_up2[-1].name, (dec_img_up2[-1].top, dec_img_conv2[-1].bottom)))
connections.append((dec_img_conv2[-1].name, (dec_img_conv2[-1].top, dec_img_relu3[-1].bottom, dec_img_relu3[-1].top, dec_img_up3[-1].bottom)))
connections.append((dec_img_up3[-1].name, (dec_img_up3[-1].top, dec_img_conv3[-1].bottom)))
# dec mask connections.
connections.append((dec_relu2_split[-1].name, (dec_relu2_split[-1].top, dec_mask_fc1[-1].bottom)))
connections.append((dec_mask_fc1[-1].name, (dec_mask_fc1[-1].top, dec_mask_relu1[-1].bottom, dec_mask_relu1[-1].top, dec_mask_fold[-1].bottom)))
connections.append((dec_mask_fold[-1].name, (dec_mask_fold[-1].top, dec_mask_up1[-1].bottom)))
connections.append((dec_mask_up1[-1].name, (dec_mask_up1[-1].top, dec_mask_conv1[-1].bottom)))
connections.append((dec_mask_conv1[-1].name, (dec_mask_conv1[-1].top, dec_mask_relu2[-1].bottom, dec_mask_relu2[-1].top, dec_mask_up2[-1].bottom)))
connections.append((dec_mask_up2[-1].name, (dec_mask_up2[-1].top, dec_mask_conv2[-1].bottom)))
connections.append((dec_mask_conv2[-1].name, (dec_mask_conv2[-1].top, dec_mask_relu3[-1].bottom, dec_mask_relu3[-1].top, dec_mask_up3[-1].bottom)))
connections.append((dec_mask_up3[-1].name, (dec_mask_up3[-1].top, dec_mask_conv3[-1].bottom)))
layers = [ conv1, relu1, conv2, relu2, conv3, relu3, fc1, relu4, fc2, relu5, enc_split, fc1_id, relu1_id, id_split ]
layers += tensor_view
layers += relu2_view
layers += relu2_view_split
layers += concat
layers += dec_fc1
layers += dec_relu1
layers += dec_fc2
layers += dec_relu2
layers += dec_relu2_split
layers += dec_img_fc1
layers += dec_img_relu1
layers += dec_img_fold
layers += dec_img_up1
layers += dec_img_conv1
layers += dec_img_relu2
layers += dec_img_up2
layers += dec_img_conv2
layers += dec_img_relu3
layers += dec_img_up3
layers += dec_img_conv3
layers += dec_mask_fc1
layers += dec_mask_relu1
layers += dec_mask_fold
layers += dec_mask_up1
layers += dec_mask_conv1
layers += dec_mask_relu2
layers += dec_mask_up2
layers += dec_mask_conv2
layers += dec_mask_relu3
layers += dec_mask_up3
layers += dec_mask_conv3
final_img_concat = self._make_concat_layer(network)
for idx,l in enumerate(dec_img_conv3):
l.name = 't%d_%s' % (idx,l.name)
connections.append((l.name, (l.top, final_img_concat.bottom)))
final_img_concat.top.append('images_concat')
final_img_concat.loss_weight.append(10.0)
final_mask_concat = self._make_concat_layer(network)
for idx,l in enumerate(dec_mask_conv3):
l.name = 't%d_%s' % (idx,l.name)
connections.append((l.name, (l.top, final_mask_concat.bottom)))
final_mask_concat.top.append('masks_concat')
final_mask_concat.loss_weight.append(1.0)
layers += [ final_img_concat, final_mask_concat ]
# make connections.
for connection in connections:
self._tie(connection, name_generator)
for l in tensor_view[0:]:
tmp = reversed(l.bottom)
l.ClearField('bottom')
l.bottom.extend(tmp)
    # Now that connections are tied, make each layer name unique by appending its position index.
for pos, layer in enumerate(layers):
layer.name += '_%d' % pos
return network
def build_network(self, netname, batchsize=100, numstep=2):
"""main method."""
if netname == 'rnn':
network = self._build_rnn_network(batchsize=batchsize, numstep=numstep)
else:
print('unknown netname: %s' % netname)
return
network_filename = '%s.prototxt' % netname
print network
with open(network_filename, 'w') as network_file:
network_file.write(text_format.MessageToString(network))
return Net(network_filename)
if __name__ == '__main__':
__Network_builder__ = NetworkBuilder()
__Network_builder__.build_network(netname='rnn', batchsize=31, numstep=4)
| wasidennis/ObjectFlow | caffe-cedn-dev/python/generate_rnn.py | Python | mit | 22,308 |
# This file is part of fedmsg.
# Copyright (C) 2012 - 2014 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
""" Test config. """
import os
import socket
import random
SEP = os.path.sep
here = os.getcwd()
hostname = socket.gethostname().split('.', 1)[0]
ssl_enabled_for_tests = True
try:
import M2Crypto
import m2ext
except ImportError:
ssl_enabled_for_tests = False
# Pick random ports for the tests so travis-ci doesn't flip out.
port = random.randint(4000, 20000)
gpg_key_unittest = 'FBDA 92E4 338D FFD9 EB83 F8F6 3FBD B725 DA19 B4EC'
gpg_key_main = 'FBDA 92E4 338D FFD9 EB83 F8F6 3FBD B725 DA19 B4EC'
config = dict(
topic_prefix="org.fedoraproject",
topic_prefix_re="^org\.fedoraproject\.(dev|stg|prod)",
endpoints={
"unittest.%s" % hostname: [
"tcp://*:%i" % (port + 1),
"tcp://*:%i" % (port + 2),
],
"twisted.%s" % hostname: [
"tcp://*:%i" % (port + 3),
],
"__main__.%s" % hostname: [
"tcp://*:%i" % (port + 4),
"tcp://*:%i" % (port + 5),
"tcp://*:%i" % (port + 6),
"tcp://*:%i" % (port + 7),
"tcp://*:%i" % (port + 8),
"tcp://*:%i" % (port + 9),
],
"unittest2.%s" % hostname: [
"tcp://*:%i" % (port + 10),
"tcp://*:%i" % (port + 11),
],
# Guarantee that we don't fall over with a bogus endpoint.
"blah.%s": "tcp://www.flugle.horn:88",
},
relay_inbound=["tcp://127.0.0.1:%i" % (port - 1)],
replay_endpoints={
'unittest.%s' % hostname: "tcp://127.0.0.1:%i" % (port + 1),
'unittest2.%s' % hostname: "tcp://127.0.0.1:%i" % (port + 2),
},
persistent_store=None,
environment="dev",
high_water_mark=0,
io_threads=1,
irc=[],
zmq_enabled=True,
zmq_strict=False,
zmq_reconnect_ivl=100,
zmq_reconnect_ivl_max=1000,
# SSL stuff.
sign_messages=ssl_enabled_for_tests,
validate_signatures=ssl_enabled_for_tests,
ssldir=SEP.join([here, 'test_certs/keys']),
crl_location="http://threebean.org/fedmsg-tests/crl.pem",
crl_cache="/tmp/crl.pem",
crl_cache_expiry=10,
certnames={
"unittest.%s" % hostname: "shell-app01.phx2.fedoraproject.org",
"unittest2.%s" % hostname: "shell-app01.phx2.fedoraproject.org",
"__main__.%s" % hostname: "shell-app01.phx2.fedoraproject.org",
# In prod/stg, map hostname to the name of the cert in ssldir.
# Unfortunately, we can't use socket.getfqdn()
#"app01.stg": "app01.stg.phx2.fedoraproject.org",
},
gpg_keys={
"unittest.%s" % hostname: gpg_key_unittest,
"unittest2.%s" % hostname: gpg_key_unittest,
"__main__.%s" % hostname: gpg_key_main,
}
)
| cicku/fedmsg | fedmsg/tests/fedmsg-test-config.py | Python | lgpl-2.1 | 3,504 |
#! /usr/bin/env python3
# Copyright (c) 2015 - thewisenerd <thewisenerd@protonmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import argparse
import json
import signal
import sys
from operator import itemgetter, attrgetter
import __cfg
import __pymoviedb
from __pymoviedb import __pymoviedb_init, __pymoviedb_check, __pymoviedb_do
from __helpers import _cfg_list_file, _cfg_err_file
def sigint_handler(signum, frame):
    # sort movies back by their 'base' field
n = sorted(__pymoviedb.movies.values(), key=itemgetter('base'))
__pymoviedb.movies = {}
for v in n:
__pymoviedb.movies[v['imdbID']] = v
    # write movies
with open(_cfg_list_file(), "w") as f:
json.dump(n, f, indent=2)
# write err
with open(_cfg_err_file(), "w") as f:
f.writelines(sorted(__pymoviedb.err_lines))
# exit gracefully.
exit()
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTSTP, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
if (__name__ == "__main__"):
global args
parser = argparse.ArgumentParser()
parser.add_argument("action", help="action", choices=["init", "check", "do"])
parser.add_argument("-v", "--verbose", help="be more verbose", action="store_true")
args = parser.parse_args()
if args.verbose:
__cfg.__verbose = True
if args.action == "init":
__pymoviedb_init()
elif args.action == "check":
__pymoviedb_check()
elif args.action == "do":
__pymoviedb_do()
exit()
| thewisenerd/pymoviedb | src/pymoviedb/__main__.py | Python | gpl-2.0 | 2,130 |
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base
from os_windows.utils import hostutilsv2
class HostUtilsV2TestCase(base.BaseTestCase):
"""Unit tests for the Hyper-V hostutilsv2 class."""
def setUp(self):
self._hostutils = hostutilsv2.HostUtilsV2()
self._hostutils._conn_cimv2 = mock.MagicMock()
self._hostutils._conn_virt = mock.MagicMock()
super(HostUtilsV2TestCase, self).setUp()
| cloudbase/os-windows | os_windows/tests/utils/test_hostutilsv2.py | Python | apache-2.0 | 1,056 |
# -*- coding: utf-8 -*-
"""Fake data generator.
To use:
1. Install fake-factory.
pip install fake-factory
2. Create your OSF user account
3. Run the script, passing in your username (email).
::
python3 -m scripts.create_fakes --user fred@cos.io
This will create 3 fake public projects, each with 3 fake contributors (with
you as the creator).
To create a project with a complex component structure, pass in a list representing the depth you would
like each component to contain.
Examples:
python3 -m scripts.create_fakes -u fred@cos --components '[1, 1, 1, 1]' --nprojects 1
...will create a project with 4 components.
python3 -m scripts.create_fakes -u fred@cos --components '4' --nprojects 1
...will create a project with a series of components, 4 levels deep.
python3 -m scripts.create_fakes -u fred@cos --components '[1, [1, 1]]' --nprojects 1
...will create a project with two top level components, and one with a depth of 2 components.
python3 -m scripts.create_fakes -u fred@cos --nprojects 3 --preprint True
...will create 3 preprints with the default provider osf
python3 -m scripts.create_fakes -u fred@cos --nprojects 3 --preprint True --preprintprovider osf,test_provider
...will create 3 preprints with the providers osf and test_provider
"""
from __future__ import print_function, absolute_import
import ast
import sys
import mock
import argparse
import logging
import django
import pytz
from faker import Factory
from faker.providers import BaseProvider
django.setup()
from framework.auth import Auth
from osf_tests.factories import UserFactory, ProjectFactory, NodeFactory, RegistrationFactory, PreprintFactory, PreprintProviderFactory, fake_email
from osf import models
from website.app import init_app
class Sciencer(BaseProvider):
# Science term Faker Provider created by @csheldonhess
# https://github.com/csheldonhess/FakeConsumer/blob/master/faker/providers/science.py
word_list = ('abiosis', 'abrade', 'absorption', 'acceleration', 'accumulation',
'acid', 'acidic', 'activist', 'adaptation', 'agonistic', 'agrarian', 'airborne',
'alchemist', 'alignment', 'allele', 'alluvial', 'alveoli', 'ambiparous',
'amphibian', 'amplitude', 'analysis', 'ancestor', 'anodize', 'anomaly',
'anther', 'antigen', 'apiary', 'apparatus', 'application', 'approximation',
'aquatic', 'aquifer', 'arboreal', 'archaeology', 'artery', 'assessment',
'asteroid', 'atmosphere', 'atomic', 'atrophy', 'attenuate', 'aven', 'aviary',
'axis', 'bacteria', 'balance', 'bases', 'biome', 'biosphere', 'black hole',
'blight', 'buoyancy', 'calcium', 'canopy', 'capacity', 'capillary', 'carapace',
'carcinogen', 'catalyst', 'cauldron', 'celestial', 'cells', 'centigrade',
'centimeter', 'centrifugal', 'chemical reaction', 'chemicals', 'chemistry',
'chlorophyll', 'choked', 'chromosome', 'chronic', 'churn', 'classification',
'climate', 'cloud', 'comet', 'composition', 'compound', 'compression',
'condensation', 'conditions', 'conduction', 'conductivity', 'conservation',
'constant', 'constellation', 'continental', 'convection', 'convention', 'cool',
'core', 'cosmic', 'crater', 'creature', 'crepuscular', 'crystals', 'cycle', 'cytoplasm',
'dampness', 'data', 'decay', 'decibel', 'deciduous', 'defoliate', 'density',
'denude', 'dependency', 'deposits', 'depth', 'desiccant', 'detritus',
'development', 'digestible', 'diluted', 'direction', 'disappearance', 'discovery',
'dislodge', 'displace', 'dissection', 'dissolution', 'dissolve', 'distance',
'diurnal', 'diverse', 'doldrums', 'dynamics', 'earthquake', 'eclipse', 'ecology',
'ecosystem', 'electricity', 'elements', 'elevation', 'embryo', 'endangered',
'endocrine', 'energy', 'entropy', 'environment', 'enzyme', 'epidermis', 'epoch',
'equilibrium', 'equine', 'erosion', 'essential', 'estuary', 'ethical', 'evaporation',
'event', 'evidence', 'evolution', 'examination', 'existence', 'expansion',
'experiment', 'exploration ', 'extinction', 'extreme', 'facet', 'fault', 'fauna',
'feldspar', 'fermenting', 'fission', 'fissure', 'flora', 'flourish', 'flowstone',
'foliage', 'food chain', 'forage', 'force', 'forecast', 'forensics', 'formations',
'fossil fuel', 'frequency', 'friction', 'fungi', 'fusion', 'galaxy', 'gastric',
'geo-science', 'geothermal', 'germination', 'gestation', 'global', 'gravitation',
'green', 'greenhouse effect', 'grotto', 'groundwater', 'habitat', 'heat', 'heavens',
'hemisphere', 'hemoglobin', 'herpetologist', 'hormones', 'host', 'humidity', 'hyaline',
'hydrogen', 'hydrology', 'hypothesis', 'ichthyology', 'illumination', 'imagination',
'impact of', 'impulse', 'incandescent', 'indigenous', 'inertia', 'inevitable', 'inherit',
'inquiry', 'insoluble', 'instinct', 'instruments', 'integrity', 'intelligence',
'interacts with', 'interdependence', 'interplanetary', 'invertebrate', 'investigation',
'invisible', 'ions', 'irradiate', 'isobar', 'isotope', 'joule', 'jungle', 'jurassic',
'jutting', 'kilometer', 'kinetics', 'kingdom', 'knot', 'laser', 'latitude', 'lava',
'lethal', 'life', 'lift', 'light', 'limestone', 'lipid', 'lithosphere', 'load',
'lodestone', 'luminous', 'luster', 'magma', 'magnet', 'magnetism', 'mangrove', 'mantle',
'marine', 'marsh', 'mass', 'matter', 'measurements', 'mechanical', 'meiosis', 'meridian',
'metamorphosis', 'meteor', 'microbes', 'microcosm', 'migration', 'millennia', 'minerals',
'modulate', 'moisture', 'molecule', 'molten', 'monograph', 'monolith', 'motion',
'movement', 'mutant', 'mutation', 'mysterious', 'natural', 'navigable', 'navigation',
'negligence', 'nervous system', 'nesting', 'neutrons', 'niche', 'nocturnal',
'nuclear energy', 'numerous', 'nurture', 'obsidian', 'ocean', 'oceanography', 'omnivorous',
'oolites (cave pearls)', 'opaque', 'orbit', 'organ', 'organism', 'ornithology',
'osmosis', 'oxygen', 'paleontology', 'parallax', 'particle', 'penumbra',
'percolate', 'permafrost', 'permutation', 'petrify', 'petrograph', 'phenomena',
'physical property', 'planetary', 'plasma', 'polar', 'pole', 'pollination',
'polymer', 'population', 'precipitation', 'predator', 'prehensile', 'preservation',
'preserve', 'pressure', 'primate', 'pristine', 'probe', 'process', 'propagation',
'properties', 'protected', 'proton', 'pulley', 'qualitative data', 'quantum', 'quark',
'quarry', 'radiation', 'radioactivity', 'rain forest', 'ratio', 'reaction', 'reagent',
'realm', 'redwoods', 'reeds', 'reflection', 'refraction', 'relationships between', 'reptile',
'research', 'resistance', 'resonate', 'rookery', 'rubble', 'runoff', 'salinity', 'sandbar',
'satellite', 'saturation', 'scientific investigation', 'scientist\'s', 'sea floor', 'season',
'sedentary', 'sediment', 'sedimentary', 'seepage', 'seismic', 'sensors', 'shard',
'similarity', 'solar', 'soluble', 'solvent', 'sonic', 'sound', 'source', 'species',
'spectacular', 'spectrum', 'speed', 'sphere', 'spring', 'stage', 'stalactite',
'stalagmites', 'stimulus', 'substance', 'subterranean', 'sulfuric acid', 'surface',
'survival', 'swamp', 'sylvan', 'symbiosis', 'symbol', 'synergy', 'synthesis', 'taiga',
'taxidermy', 'technology', 'tectonics', 'temperate', 'temperature', 'terrestrial',
'thermals', 'thermometer', 'thrust', 'torque', 'toxin', 'trade winds', 'pterodactyl',
'transformation tremors', 'tropical', 'umbra', 'unbelievable', 'underwater', 'unearth',
'unique', 'unite', 'unity', 'universal', 'unpredictable', 'unusual', 'ursine', 'vacuole',
'valuable', 'vapor', 'variable', 'variety', 'vast', 'velocity', 'ventifact', 'verdant',
'vespiary', 'viable', 'vibration', 'virus', 'viscosity', 'visible', 'vista', 'vital',
'vitreous', 'volt', 'volume', 'vulpine', 'wave', 'wax', 'weather', 'westerlies', 'wetlands',
'whitewater', 'xeriscape', 'xylem', 'yield', 'zero-impact', 'zone', 'zygote', 'achieving',
'acquisition of', 'an alternative', 'analysis of', 'approach toward', 'area', 'aspects of',
'assessment of', 'assuming', 'authority', 'available', 'benefit of', 'circumstantial',
'commentary', 'components', 'concept of', 'consistent', 'corresponding', 'criteria',
'data', 'deduction', 'demonstrating', 'derived', 'distribution', 'dominant', 'elements',
'equation', 'estimate', 'evaluation', 'factors', 'features', 'final', 'function',
'initial', 'instance ', 'interpretation of', 'maintaining ', 'method', 'perceived',
'percent', 'period', 'positive', 'potential', 'previous', 'primary', 'principle',
'procedure', 'process', 'range', 'region', 'relevant', 'required', 'research',
'resources', 'response', 'role', 'section', 'select', 'significant ', 'similar',
'source', 'specific', 'strategies', 'structure', 'theory', 'transfer', 'variables',
'corvidae', 'passerine', 'Pica pica', 'Chinchilla lanigera', 'Nymphicus hollandicus',
'Melopsittacus undulatus', )
def science_word(cls):
"""
:example 'Lorem'
"""
return cls.random_element(cls.word_list)
def science_words(cls, nb=3):
"""
Generate an array of random words
:example array('Lorem', 'ipsum', 'dolor')
:param nb how many words to return
"""
return [cls.science_word() for _ in range(0, nb)]
def science_sentence(cls, nb_words=6, variable_nb_words=True):
"""
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words around how many words the sentence should contain
:param variable_nb_words set to false if you want exactly $nbWords returned,
otherwise $nbWords may vary by +/-40% with a minimum of 1
"""
if nb_words <= 0:
return ''
if variable_nb_words:
nb_words = cls.randomize_nb_elements(nb_words)
words = cls.science_words(nb_words)
words[0] = words[0].title()
return ' '.join(words) + '.'
def science_sentences(cls, nb=3):
"""
Generate an array of sentences
:example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')
:param nb how many sentences to return
:return list
"""
return [cls.science_sentence() for _ in range(0, nb)]
def science_paragraph(cls, nb_sentences=3, variable_nb_sentences=True):
"""
Generate a single paragraph
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param nb_sentences around how many sentences the paragraph should contain
:param variable_nb_sentences set to false if you want exactly $nbSentences returned,
otherwise $nbSentences may vary by +/-40% with a minimum of 1
:return string
"""
if nb_sentences <= 0:
return ''
if variable_nb_sentences:
nb_sentences = cls.randomize_nb_elements(nb_sentences)
return ' '.join(cls.science_sentences(nb_sentences))
def science_paragraphs(cls, nb=3):
"""
Generate an array of paragraphs
:example array($paragraph1, $paragraph2, $paragraph3)
:param nb how many paragraphs to return
:return array
"""
return [cls.science_paragraph() for _ in range(0, nb)]
def science_text(cls, max_nb_chars=200):
"""
Generate a text string.
Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs.
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param max_nb_chars Maximum number of characters the text should contain (minimum 5)
:return string
"""
text = []
if max_nb_chars < 5:
raise ValueError('text() can only generate text of at least 5 characters')
if max_nb_chars < 25:
# join words
while not text:
size = 0
# determine how many words are needed to reach the $max_nb_chars once;
while size < max_nb_chars:
word = (' ' if size else '') + cls.science_word()
text.append(word)
size += len(word)
text.pop()
text[0] = text[0][0].upper() + text[0][1:]
last_index = len(text) - 1
text[last_index] += '.'
elif max_nb_chars < 100:
# join sentences
while not text:
size = 0
# determine how many sentences are needed to reach the $max_nb_chars once
while size < max_nb_chars:
sentence = (' ' if size else '') + cls.science_sentence()
text.append(sentence)
size += len(sentence)
text.pop()
else:
# join paragraphs
while not text:
size = 0
# determine how many paragraphs are needed to reach the $max_nb_chars once
while size < max_nb_chars:
paragraph = ('\n' if size else '') + cls.science_paragraph()
text.append(paragraph)
size += len(paragraph)
text.pop()
return ''.join(text)
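# Illustrative note (added for clarity, not part of the original script): science_text()
# picks its building blocks from the requested length, given the thresholds above:
#   fake.science_text(max_nb_chars=20)  -> a few joined words, capitalised, ending in '.'
#   fake.science_text(max_nb_chars=80)  -> one or more science_sentence() results
#   fake.science_text(max_nb_chars=400) -> newline-joined science_paragraph() results
# where `fake` is the faker.Factory instance with the Sciencer provider registered below.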
logger = logging.getLogger('create_fakes')
SILENT_LOGGERS = [
'factory',
'website.mails',
]
for logger_name in SILENT_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.CRITICAL)
fake = Factory.create()
fake.add_provider(Sciencer)
def create_fake_user():
email = fake_email()
name = fake.name()
user = UserFactory(username=email, fullname=name,
is_registered=True, emails=[email],
date_registered=fake.date_time(tzinfo=pytz.UTC),
)
user.set_password('faker123')
user.save()
logger.info('Created user: {0} <{1}>'.format(user.fullname, user.username))
return user
def parse_args():
parser = argparse.ArgumentParser(description='Create fake data.')
parser.add_argument('-u', '--user', dest='user', required=True)
parser.add_argument('--nusers', dest='n_users', type=int, default=3)
parser.add_argument('--nprojects', dest='n_projects', type=int, default=3)
parser.add_argument('-c', '--components', dest='n_components', type=evaluate_argument, default='0')
parser.add_argument('-p', '--privacy', dest='privacy', type=str, default='private', choices=['public', 'private'])
parser.add_argument('-n', '--name', dest='name', type=str, default=None)
parser.add_argument('-t', '--tags', dest='n_tags', type=int, default=5)
parser.add_argument('--presentation', dest='presentation_name', type=str, default=None)
parser.add_argument('-r', '--registration', dest='is_registration', type=bool, default=False)
parser.add_argument('-pre', '--preprint', dest='is_preprint', type=bool, default=False)
parser.add_argument('-preprovider', '--preprintprovider', dest='preprint_provider', type=str, default=None)
return parser.parse_args()
def evaluate_argument(string):
return ast.literal_eval(string)
def create_fake_project(creator, n_users, privacy, n_components, name, n_tags, presentation_name, is_registration, is_preprint, preprint_provider):
auth = Auth(user=creator)
project_title = name if name else fake.science_sentence()
if is_preprint:
provider = None
if preprint_provider:
try:
                provider = models.PreprintProvider.objects.get(_id=preprint_provider)
except models.PreprintProvider.DoesNotExist:
pass
if not provider:
provider = PreprintProviderFactory(name=fake.science_word())
privacy = 'public'
mock_change_identifier_preprints = mock.patch('website.identifiers.client.CrossRefClient.update_identifier')
mock_change_identifier_preprints.start()
project = PreprintFactory(title=project_title, description=fake.science_paragraph(), creator=creator, provider=provider)
node = project.node
elif is_registration:
project = RegistrationFactory(title=project_title, description=fake.science_paragraph(), creator=creator)
node = project
else:
project = ProjectFactory(title=project_title, description=fake.science_paragraph(), creator=creator)
node = project
node.set_privacy(privacy)
for _ in range(n_users):
contrib = create_fake_user()
node.add_contributor(contrib, auth=auth)
if isinstance(n_components, int):
for _ in range(n_components):
NodeFactory(parent=node, title=fake.science_sentence(), description=fake.science_paragraph(),
creator=creator)
elif isinstance(n_components, list):
render_generations_from_node_structure_list(node, creator, n_components)
for _ in range(n_tags):
node.add_tag(fake.science_word(), auth=auth)
if presentation_name is not None:
node.add_tag(presentation_name, auth=auth)
node.add_tag('poster', auth=auth)
node.save()
project.save()
logger.info('Created project: {0}'.format(node.title))
return project
def render_generations_from_parent(parent, creator, num_generations):
current_gen = parent
for generation in range(0, num_generations):
next_gen = NodeFactory(
parent=current_gen,
creator=creator,
title=fake.science_sentence(),
description=fake.science_paragraph()
)
current_gen = next_gen
return current_gen
def render_generations_from_node_structure_list(parent, creator, node_structure_list):
new_parent = None
for node_number in node_structure_list:
if isinstance(node_number, list):
render_generations_from_node_structure_list(new_parent or parent, creator, node_number)
else:
new_parent = render_generations_from_parent(parent, creator, node_number)
return new_parent
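# Reading note (added for clarity; describes the code above as written): each integer N in
# the --components structure list builds a chain of N nested components under the current
# parent via render_generations_from_parent(), while a nested list recurses using the
# deepest component created so far (new_parent) as its attachment point. Under that
# reading, '[2, [1]]' yields project -> c1 -> c2 with one further component under c2.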
def main():
args = parse_args()
creator = models.OSFUser.objects.get(username=args.user)
for i in range(args.n_projects):
name = args.name + str(i) if args.name else ''
create_fake_project(creator, args.n_users, args.privacy, args.n_components, name, args.n_tags,
args.presentation_name, args.is_registration, args.is_preprint, args.preprint_provider)
print('Created {n} fake projects.'.format(n=args.n_projects))
sys.exit(0)
if __name__ == '__main__':
init_app(set_backends=True, routes=False)
main()
| Johnetordoff/osf.io | scripts/create_fakes.py | Python | apache-2.0 | 19,763 |
import sys
import os
from os.path import dirname, join
from flask import Flask, request, abort
app = Flask(__name__)
config = {
'working_directory': os.getcwd(),
'always_regenerate': True}
@app.route('/')
def provide_briefcast():
from briefscaster import briefcast
url_root = request.url_root
items = briefcast.find_briefs(config['working_directory'])
rss_string = briefcast.create_feed(
items,
url_root)
return app.response_class(rss_string,
mimetype='application/briefcast')
@app.route('/brieflist/<key>')
def brieflist(key):
from briefscaster import briefcast
briefs_cache = briefcast.get_briefs_cache()
    if key not in briefs_cache:
abort(404)
if config['always_regenerate']:
briefcast.create_brieflist(briefs_cache[key]['bs_filename'])
filename = briefs_cache[key]['filename']
with open(filename) as f:
return app.response_class(f.read(), mimetype='application/brief')
def main():
try:
config['working_directory'] = sys.argv[1]
except IndexError:
pass
print 'briefs-caster - Serving up some fine briefs for you\n'
print 'Open http://<IP_ADDRESS>:5000 from the Briefs app\n'
print 'CTRL-C to exit the server'
app.run('0.0.0.0')
def get_briefs_utils():
"""
    Tries to provide executable bs and compact-briefs utilities
"""
local_bs = join(dirname(__file__), 'bin', 'bc-bs')
local_compact_briefs = join(dirname(__file__), 'bin', 'bc-compact-briefs')
if os.access(local_bs, os.X_OK) and \
os.access(local_compact_briefs, os.X_OK):
# The local versions are executable, we will use those
return (local_bs, local_compact_briefs,)
else:
# Assume that briefs-caster has been installed with easy_install or pip
# and guess that they are on the path
return ('bc-bs', 'bc-compact-briefs',)
if __name__ == '__main__':
get_briefs_utils()
main()
| robmadole/briefs-caster | src/briefscaster/__init__.py | Python | bsd-3-clause | 1,980 |
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Francesco Evangelista
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse
import datetime
import os
import re
import subprocess
import sys
from os import environ, listdir
from os.path import isfile, join
vmd_cube_help = """vmd_cube is a script to render cube files with vmd.
To generate cube files with Psi4 add the command cubeprop() at the end of your input file."""
vmd_exe = ""
vmd_script_name = "vmd_mo_script.vmd"
vmd_template = """#
# VMD script to plot MOs from cube files
#
# Load the molecule and change the atom style
mol load cube PARAM_CUBEFILE.cube
mol modcolor 0 PARAM_CUBENUM Element
mol modstyle 0 PARAM_CUBENUM Licorice 0.110000 10.000000 10.000000
#mol modstyle 0 PARAM_CUBENUM CPK 0.400000 0.40000 30.000000 16.000000
# Define the material
material change ambient Opaque 0.310000
material change diffuse Opaque 0.720000
material change specular Opaque 0.500000
material change shininess Opaque 0.480000
material change opacity Opaque 1.000000
material change outline Opaque 0.000000
material change outlinewidth Opaque 0.000000
material change transmode Opaque 0.000000
material change specular Opaque 0.750000
material change ambient EdgyShiny 0.310000
material change diffuse EdgyShiny 0.720000
material change shininess EdgyShiny 1.0000
material change opacity EdgyShiny PARAM_OPACITY
# Customize atom colors
color Element C silver
color Element H white
# Rotate and translate the molecule
rotate x by PARAM_RX
rotate y by PARAM_RY
rotate z by PARAM_RZ
translate by PARAM_TX PARAM_TY PARAM_TZ
scale by PARAM_SCALE
# Eliminate the axis and perfect the view
axes location Off
display projection Orthographic
display depthcue off
display resize PARAM_IMAGEW PARAM_IMAGEH
color Display Background white"""
vmd_template_surface = """#
# Add the surfaces
mol color ColorID PARAM_SURF1ID
mol representation Isosurface PARAM_ISOVALUE1 0 0 0 1 1
mol selection all
mol material EdgyShiny
mol addrep PARAM_CUBENUM
mol color ColorID PARAM_SURF2ID
mol representation Isosurface PARAM_ISOVALUE2 0 0 0 1 1
mol selection all
mol material EdgyShiny
mol addrep PARAM_CUBENUM
"""
vmd_template_interactive = """#
# Disable rendering
mol off PARAM_CUBENUM
"""
vmd_template_render = """
# Render
render TachyonInternal PARAM_CUBEFILE.tga
mol delete PARAM_CUBENUM
"""
vmd_template_rotate = """
light 1 off
light 0 rot y 30.0
light 0 rot x -30.0
"""
default_path = os.getcwd()
# Default parameters
options = {"SURF1ID" : [None,"Surface1 Color Id"],
"SURF2ID" : [None,"Surface2 Color Id"],
"ISOVALUE1" : [None,"Isosurface1 Value"],
"ISOVALUE2" : [None,"Isosurface2 Value"],
"RX" : [None,"X-axis Rotation"],
"RY" : [None,"Y-axis Rotation"],
"RZ" : [None,"Z-axis Rotation"],
"TX" : [None,"X-axis Translation"],
"TY" : [None,"Y-axis Translation"],
"TZ" : [None,"Z-axis Translation"],
"OPACITY" : [None,"Opacity"],
"CUBEDIR" : [None,"Cubefile Directory"],
"SCALE" : [None,"Scaling Factor"],
"MONTAGE" : [None,"Montage"],
"LABEL_MOS" : [None,"Label MOs"],
"FONTSIZE" : [None,"Font size"],
"IMAGEW" : [None,"Image width"],
"IMAGEH" : [None,"Image height"],
"VMDPATH" : [None,"VMD Path"],
"INTERACTIVE": [None,"Interactive Mode"],
"GZIP" : [None,"Gzip Cube Files"]}
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def multigsub(subs, text):
    """Apply every regex substitution in subs ({pattern: replacement}) to text."""
    for pattern, repl in subs.items():
        text = re.sub(pattern, repl, text)
    return text
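# Usage sketch (illustrative): multigsub({" ": r"\ "}, "/Applications/VMD 1.9.app")
# returns "/Applications/VMD\ 1.9.app"; find_vmd() below uses exactly this to escape
# spaces in VMDPATH before the path is spliced into a shell command.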
def find_vmd(options):
    vmdpath = environ.get('VMDPATH')
    if vmdpath:
        # Escape spaces so the path survives the shell command built later.
        vmdpath = multigsub({" ": r"\ "}, vmdpath)
        options["VMDPATH"][0] = vmdpath
    else:
        print("Please set the VMDPATH environment variable to the path of VMD.")
exit(1)
def save_setup_command(argv):
file_name = join(default_path, 'vmd_cube_command')
f = open(file_name, 'w')
f.write('# setup command was executed '+datetime.datetime.now().strftime("%d-%B-%Y %H:%M:%S"+"\n"))
f.write(" ".join(argv[:])+"\n")
f.close()
def read_options(options):
parser = argparse.ArgumentParser(description=vmd_cube_help)
parser.add_argument('data', metavar='<cubefile dir>', type=str, nargs='?',default=".",
help='The directory containing the cube files.')
parser.add_argument('--color1', metavar='<integer>', type=int, nargs='?',default=3,
help='the color ID of surface 1 (integer, default = 3)')
parser.add_argument('--color2', metavar='<integer>', type=int, nargs='?',default=23,
help='the color ID of surface 2 (integer, default = 23)')
parser.add_argument('--iso', metavar='<isovalue>', type=float, nargs='?',default=0.05,
help='the isosurface value (float, default = 0.05)')
parser.add_argument('--rx', metavar='<angle>', type=float, nargs='?',default=30.0,
help='the x-axis rotation angle (float, default = 30.0)')
parser.add_argument('--ry', metavar='<angle>', type=float, nargs='?',default=40.0,
help='the y-axis rotation angle (float, default = 40.0)')
parser.add_argument('--rz', metavar='<angle>', type=float, nargs='?',default=15.0,
help='the z-axis rotation angle (float, default = 15.0)')
parser.add_argument('--tx', metavar='<length>', type=float, nargs='?',default=0.0,
help='the x-axis translation (float, default = 0.0)')
parser.add_argument('--ty', metavar='<length>', type=float, nargs='?',default=0.0,
help='the y-axis translation (float, default = 0.0)')
parser.add_argument('--tz', metavar='<length>', type=float, nargs='?',default=0.0,
help='the z-axis translation (float, default = 0.0)')
parser.add_argument('--opacity', metavar='<opacity>', type=float, nargs='?',default=1.0,
help='opacity of the isosurface (float, default = 1.0)')
parser.add_argument('--scale', metavar='<factor>', type=float, nargs='?',default=1.0,
help='the scaling factor (float, default = 1.0)')
parser.add_argument('--no-montage', action="store_true",
help='call montage to combine images. (string, default = false)')
parser.add_argument('--no-labels', action="store_true",
help='do not add labels to images. (string, default = false)')
parser.add_argument('--imagesize', metavar='<integer>', type=int, nargs='?',default=250,
help='the size of each image (integer, default = 250)')
parser.add_argument('--imagew', metavar='<integer>', type=int, nargs='?',default=250,
help='the width of images (integer, default = 250)')
parser.add_argument('--imageh', metavar='<integer>', type=int, nargs='?',default=250,
help='the height of images (integer, default = 250)')
parser.add_argument('--fontsize', metavar='<integer>', type=int, nargs='?',default=20,
help='the font size (integer, default = 20)')
parser.add_argument('--interactive', action="store_true",
help='run in interactive mode (default = false)')
parser.add_argument('--gzip', action="store_true",
help='gzip cube files (default = false)')
parser.add_argument('--national_scheme', action="store_true",
help='use a red/blue color scheme. (string, default = false)')
parser.add_argument('--silver_scheme', action="store_true",
help='use a gray/white color scheme. (string, default = false)')
parser.add_argument('--bright_scheme', action="store_true",
help='use a soft yellow/blue color scheme. (string, default = false)')
parser.add_argument('--electron_scheme', action="store_true",
help='use a purple/green color scheme. (string, default = false)')
args = parser.parse_args()
options["CUBEDIR"][0] = str(args.data)
options["SURF1ID"][0] = str(args.color1)
options["SURF2ID"][0] = str(args.color2)
options["ISOVALUE1"][0] = str(args.iso)
options["ISOVALUE2"][0] = str(-args.iso)
options["RX"][0] = str(args.rx)
options["RY"][0] = str(args.ry)
options["RZ"][0] = str(args.rz)
options["TX"][0] = str(args.tx)
options["TY"][0] = str(args.ty)
options["TZ"][0] = str(args.tz)
options["OPACITY"][0] = str(args.opacity)
options["SCALE"][0] = str(args.scale)
options["LABEL_MOS"][0] = str(not args.no_labels)
options["MONTAGE"][0] = str(not args.no_montage)
options["FONTSIZE"][0] = str(args.fontsize)
options["IMAGEW"][0] = str(args.imagew)
options["IMAGEH"][0] = str(args.imageh)
options["INTERACTIVE"][0] = str(args.interactive)
options["GZIP"][0] = str(args.gzip)
if args.national_scheme:
options["SURF1ID"][0] = '23'
options["SURF2ID"][0] = '30'
if args.silver_scheme:
options["SURF1ID"][0] = '2'
options["SURF2ID"][0] = '8'
if args.electron_scheme:
options["SURF1ID"][0] = '13'
options["SURF2ID"][0] = '12'
if args.bright_scheme:
options["SURF1ID"][0] = '32'
options["SURF2ID"][0] = '22'
print("Parameters:")
sorted_parameters = sorted(options.keys())
for k in sorted_parameters:
print(" %-20s %s" % (options[k][1],options[k][0]))
def find_cubes(options):
    # Find all the cube files in a given directory
    sorted_files = []
zipped_files = []
for f in listdir(options["CUBEDIR"][0]):
if "\'" in f:
nf = f.replace("\'", "p")
os.rename(f,nf)
f = nf
if "\"" in f:
nf = f.replace("\"", "pp")
os.rename(f,nf)
f = nf
if f[-5:] == '.cube':
sorted_files.append(f)
elif f[-8:] == '.cube.gz':
            # remember the decompressed name; the actual gunzip happens below
            sorted_files.append(f[:-3])
zipped_files.append(f)
if len(zipped_files) > 0:
print("\nDecompressing gzipped cube files")
FNULL = open(os.devnull, 'w')
subprocess.call(("gzip -d %s" % " ".join(zipped_files)),stdout=FNULL, shell=True)
options["GZIP"][0] = 'True'
return sorted(sorted_files)
def write_and_run_vmd_script(options,cube_files):
vmd_script = open(vmd_script_name,"w+")
vmd_script.write(vmd_template_rotate)
# Define a map that contains all the values of the VMD parameters
replacement_map = {}
    for k, v in options.items():
key = "PARAM_" + k.upper()
replacement_map[key] = v[0]
for n,f in enumerate(cube_files):
replacement_map["PARAM_CUBENUM"] = "%03d" % n
replacement_map["PARAM_CUBEFILE"] = options["CUBEDIR"][0] + "/" + f[:-5]
vmd_script_surface = multigsub(replacement_map,vmd_template_surface)
vmd_script_head = multigsub(replacement_map,vmd_template)
if options["INTERACTIVE"][0] == 'True':
vmd_script_render = multigsub(replacement_map,vmd_template_interactive)
else:
vmd_script_render = multigsub(replacement_map,vmd_template_render)
vmd_script.write(vmd_script_head + "\n" + vmd_script_surface + "\n" + vmd_script_render)
if options["INTERACTIVE"][0] == 'False':
vmd_script.write("quit")
vmd_script.close()
# Call VMD in text mode
FNULL = open(os.devnull, 'w')
subprocess.call(("%s -dispdev text -e %s" % (options["VMDPATH"][0],vmd_script_name)),stdout=FNULL, shell=True)
else:
vmd_script.close()
# Call VMD in graphic mode
FNULL = open(os.devnull, 'w')
subprocess.call(("%s -e %s" % (options["VMDPATH"][0],vmd_script_name)),stdout=FNULL, shell=True)
def call_montage(options,cube_files):
if options["MONTAGE"][0] == 'True':
# Optionally, combine all figures into one image using montage
montage_exe = which("montage")
if montage_exe:
alpha_mos = []
beta_mos = []
densities = []
basis_functions = []
for f in cube_files:
tga_file = f[:-5] + ".tga"
if "Psi_a" in f:
alpha_mos.append(tga_file)
if "Psi_b" in f:
beta_mos.append(tga_file)
if "D" in f:
densities.append(tga_file)
if "Phi" in f:
basis_functions.append(tga_file)
# Sort the MOs
sorted_mos = []
            for mo_set in [alpha_mos, beta_mos]:
                sorted_set = []
                for s in mo_set:
                    s_split = s.split('_')
                    # Sort by orbital number, keeping the original filename so beta
                    # MOs are not re-labelled with a "Psi_a" prefix.
                    sorted_set.append((int(s_split[2]), s))
sorted_set = sorted(sorted_set)
sorted_mos.append([s[1] for s in sorted_set])
os.chdir(options["CUBEDIR"][0])
# Add labels
if options["LABEL_MOS"][0] == 'True':
for f in sorted_mos[0]:
f_split = f.split('_')
label = r'%s\ \(%s\)' % (f_split[3][:-4],f_split[2])
subprocess.call(("montage -pointsize %s -label %s %s -geometry '%sx%s+0+0>' %s" %
(options["FONTSIZE"][0],label,f,options["IMAGEW"][0],options["IMAGEH"][0],f)), shell=True)
# Combine together in one image
if len(alpha_mos) > 0:
subprocess.call(("%s %s -geometry +2+2 AlphaMOs.tga" % (montage_exe," ".join(sorted_mos[0]))), shell=True)
if len(beta_mos) > 0:
subprocess.call(("%s %s -geometry +2+2 BetaMOs.tga" % (montage_exe," ".join(sorted_mos[1]))), shell=True)
if len(densities) > 0:
subprocess.call(("%s %s -geometry +2+2 Densities.tga" % (montage_exe," ".join(densities))), shell=True)
if len(basis_functions) > 0:
subprocess.call(("%s %s -geometry +2+2 BasisFunctions.tga" % (montage_exe," ".join(basis_functions))), shell=True)
def zip_files(cube_files,options):
"""Gzip cube files if requested or necessary."""
if options["GZIP"][0] == 'True':
print("\nCompressing cube files")
FNULL = open(os.devnull, 'w')
subprocess.call(("gzip %s" % " ".join(cube_files)),stdout=FNULL, shell=True)
def main(argv):
find_vmd(options)
read_options(options)
save_setup_command(argv)
cube_files = find_cubes(options)
write_and_run_vmd_script(options,cube_files)
call_montage(options,cube_files)
zip_files(cube_files,options)
if __name__ == '__main__':
main(sys.argv)
| psi4/psi4 | psi4/share/psi4/scripts/vmd_cube.py | Python | lgpl-3.0 | 16,500 |
##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declaration of toolchains.linalg namespace.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
from pkgutil import extend_path
# we're not the only ones in this namespace
__path__ = extend_path(__path__, __name__) #@ReservedAssignment
| pneerincx/easybuild-framework | easybuild/toolchains/linalg/__init__.py | Python | gpl-2.0 | 1,340 |
# URL extractor
# Copyright 2004, Paul McGuire
from pyparsing import Literal,Suppress,CharsNotIn,CaselessLiteral,\
Word,dblQuotedString,alphanums,SkipTo
import urllib.request, urllib.parse, urllib.error
import pprint
# Define the pyparsing grammar for a URL, that is:
# URLlink ::= <a href= URL>linkText</a>
# URL ::= doubleQuotedString | alphanumericWordPath
# Note that whitespace may appear just about anywhere in the link. Note also
# that it is not necessary to explicitly show this in the pyparsing grammar; by default,
# pyparsing skips over whitespace between tokens.
linkOpenTag = (Literal("<") + "a" + "href" + "=").suppress() + \
( dblQuotedString | Word(alphanums+"/") ) + \
Suppress(">")
linkCloseTag = Literal("<") + "/" + CaselessLiteral("a") + ">"
link = linkOpenTag + SkipTo(linkCloseTag) + linkCloseTag.suppress()
# Go get some HTML with some links in it.
serverListPage = urllib.request.urlopen( "http://www.yahoo.com" )
htmlText = serverListPage.read().decode("utf-8", errors="replace")  # decode bytes so pyparsing scans a str
serverListPage.close()
# scanString is a generator that loops through the input htmlText, and for each
# match yields the tokens and start and end locations (for this application, we are
# not interested in the start and end values).
for toks,strt,end in link.scanString(htmlText):
print(toks.asList())
# Rerun scanString, but this time create a dict of text:URL key-value pairs.
# Need to reverse the tokens returned by link, using a parse action.
link.setParseAction( lambda st,loc,toks: [ toks[1], toks[0] ] )
# Create dictionary from list comprehension, assembled from each pair of tokens returned
# from a matched URL.
pprint.pprint(
dict( [ toks for toks,strt,end in link.scanString(htmlText) ] )
)
| miguelalexanderdiaz/lenguajes_project | pyparsing-2.0.2/examples/urlExtractor.py | Python | gpl-2.0 | 1,794 |
# -*- coding: utf-8 -*-
GIORNI_SETTIMANA = ['LU', 'MA', 'ME', 'GI', 'VE', 'SA', 'DO']
GIORNI_SETTIMANA_FULL = ['lunedì', 'martedì', 'mercoledì', 'giovedì', 'venerdì', 'sabato', 'domenica']
TIME_TOLERANCE_MIN = 5 # show also rides scheduled 5 min ago
MAX_FERMATE_NEAR_LOCATION = 5
MAX_PERCORSI = 8
PATH_FERMATA_PROXIMITY_THRESHOLD = 1.0
DAY_START_HOUR = 6
MIN_TO_SWITCH_TO_NEXT_HOUR = 52 # if it's 13.52 don't show 13
NOTIFICATION_MODE_NONE = "NONE"
NOTIFICATION_MODE_ALL = "ALL"
NOTIFICATION_MODE_PERCORSI = "PERCORSI"
DEFAULT_NOTIFICATIONS_MODE = NOTIFICATION_MODE_ALL
NOTIFICATIONS_MODES = [NOTIFICATION_MODE_ALL, NOTIFICATION_MODE_PERCORSI, NOTIFICATION_MODE_NONE]
# should follow same order as in main.NOTIFICHE_BUTTONS
PERCORSO_COMMAND_PREFIX = '/percorso_'
COST_PER_0_KM = 1.0
COST_PER_10_KM = 1.5
def compute_cost(distance):
return COST_PER_0_KM + (COST_PER_10_KM-COST_PER_0_KM)/10*distance
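# Worked example (illustrative): with the default constants above the fare grows
# linearly with distance, cost(d) = 1.0 + (1.5 - 1.0) / 10 * d, so
#   compute_cost(0)  -> 1.0
#   compute_cost(4)  -> 1.2
#   compute_cost(10) -> 1.5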
def getCommand(prefix, suffix, escapeMarkdown=True):
import utility
result = "{}{}".format(prefix, suffix)
if escapeMarkdown:
return utility.escapeMarkdown(result)
return result
def getIndexFromCommand(command, prefix):
import utility
index = command[len(prefix):]
if utility.representsInt(index):
return int(index)
return None
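# Usage sketch (illustrative; assumes the project's local `utility` module is importable,
# since getCommand/getIndexFromCommand import it):
#   getCommand(PERCORSO_COMMAND_PREFIX, 3, escapeMarkdown=False)  -> '/percorso_3'
#   getIndexFromCommand('/percorso_3', PERCORSO_COMMAND_PREFIX)   -> 3
# With escapeMarkdown=True the result is additionally passed through
# utility.escapeMarkdown() before being returned.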
| kercos/PickMeUp | params.py | Python | apache-2.0 | 1,293 |
#!/usr/bin/env python2
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
"""
urllib2 caching handler
Modified from http://code.activestate.com/recipes/491261/
"""
from __future__ import with_statement
__author__ = "dbr/Ben"
__version__ = "1.9"
import os
import time
import errno
import httplib
import urllib2
import StringIO
from hashlib import md5
from threading import RLock
cache_lock = RLock()
def locked_function(origfunc):
"""Decorator to execute function under lock"""
def wrapped(*args, **kwargs):
cache_lock.acquire()
try:
return origfunc(*args, **kwargs)
finally:
cache_lock.release()
return wrapped
def calculate_cache_path(cache_location, url):
"""Checks if [cache_location]/[hash_of_url].headers and .body exist
"""
thumb = md5(url).hexdigest()
header = os.path.join(cache_location, thumb + ".headers")
body = os.path.join(cache_location, thumb + ".body")
return header, body
def check_cache_time(path, max_age):
"""Checks if a file has been created/modified in the [last max_age] seconds.
False means the file is too old (or doesn't exist), True means it is
up-to-date and valid"""
if not os.path.isfile(path):
return False
cache_modified_time = os.stat(path).st_mtime
time_now = time.time()
if cache_modified_time < time_now - max_age:
# Cache is old
return False
else:
return True
@locked_function
def exists_in_cache(cache_location, url, max_age):
"""Returns if header AND body cache file exist (and are up-to-date)"""
hpath, bpath = calculate_cache_path(cache_location, url)
if os.path.exists(hpath) and os.path.exists(bpath):
return(
check_cache_time(hpath, max_age)
and check_cache_time(bpath, max_age)
)
else:
# File does not exist
return False
@locked_function
def store_in_cache(cache_location, url, response):
"""Tries to store response in cache."""
hpath, bpath = calculate_cache_path(cache_location, url)
try:
outf = open(hpath, "wb")
headers = str(response.info())
outf.write(headers)
outf.close()
outf = open(bpath, "wb")
outf.write(response.read())
outf.close()
except IOError:
return True
else:
return False
@locked_function
def delete_from_cache(cache_location, url):
"""Deletes a response in cache."""
hpath, bpath = calculate_cache_path(cache_location, url)
try:
if os.path.exists(hpath):
os.remove(hpath)
if os.path.exists(bpath):
os.remove(bpath)
except IOError:
return True
else:
return False
class CacheHandler(urllib2.BaseHandler):
"""Stores responses in a persistant on-disk cache.
If a subsequent GET request is made for the same URL, the stored
response is returned, saving time, resources and bandwidth
"""
@locked_function
def __init__(self, cache_location, max_age = 21600):
"""The location of the cache directory"""
self.max_age = max_age
self.cache_location = cache_location
if not os.path.exists(self.cache_location):
try:
os.mkdir(self.cache_location)
except OSError, e:
if e.errno == errno.EEXIST and os.path.isdir(self.cache_location):
# File exists, and it's a directory,
# another process beat us to creating this dir, that's OK.
pass
else:
# Our target dir is already a file, or different error,
# relay the error!
raise
def default_open(self, request):
"""Handles GET requests, if the response is cached it returns it
"""
if request.get_method() != "GET":
return None # let the next handler try to handle the request
if exists_in_cache(
self.cache_location, request.get_full_url(), self.max_age
):
return CachedResponse(
self.cache_location,
request.get_full_url(),
set_cache_header = True
)
else:
return None
def http_response(self, request, response):
"""Gets a HTTP response, if it was a GET request and the status code
starts with 2 (200 OK etc) it caches it and returns a CachedResponse
"""
if (request.get_method() == "GET"
and str(response.code).startswith("2")
):
if 'x-local-cache' not in response.info():
# Response is not cached
set_cache_header = store_in_cache(
self.cache_location,
request.get_full_url(),
response
)
else:
set_cache_header = True
return CachedResponse(
self.cache_location,
request.get_full_url(),
set_cache_header = set_cache_header
)
else:
return response
class CachedResponse(StringIO.StringIO):
"""An urllib2.response-like object for cached responses.
To determine if a response is cached or coming directly from
the network, check the x-local-cache header rather than the object type.
"""
@locked_function
def __init__(self, cache_location, url, set_cache_header=True):
self.cache_location = cache_location
hpath, bpath = calculate_cache_path(cache_location, url)
StringIO.StringIO.__init__(self, file(bpath, "rb").read())
self.url = url
self.code = 200
self.msg = "OK"
headerbuf = file(hpath, "rb").read()
if set_cache_header:
headerbuf += "x-local-cache: %s\r\n" % (bpath)
self.headers = httplib.HTTPMessage(StringIO.StringIO(headerbuf))
def info(self):
"""Returns headers
"""
return self.headers
def geturl(self):
"""Returns original URL
"""
return self.url
@locked_function
def recache(self):
new_request = urllib2.urlopen(self.url)
set_cache_header = store_in_cache(
self.cache_location,
new_request.url,
new_request
)
CachedResponse.__init__(self, self.cache_location, self.url, True)
@locked_function
def delete_cache(self):
delete_from_cache(
self.cache_location,
self.url
)
if __name__ == "__main__":
def main():
"""Quick test/example of CacheHandler"""
opener = urllib2.build_opener(CacheHandler("/tmp/"))
response = opener.open("http://google.com")
print response.headers
print "Response:", response.read()
response.recache()
print response.headers
print "After recache:", response.read()
# Test usage in threads
from threading import Thread
class CacheThreadTest(Thread):
lastdata = None
def run(self):
req = opener.open("http://google.com")
newdata = req.read()
if self.lastdata is None:
self.lastdata = newdata
assert self.lastdata == newdata, "Data was not consistent, uhoh"
req.recache()
threads = [CacheThreadTest() for x in range(50)]
print "Starting threads"
[t.start() for t in threads]
print "..done"
print "Joining threads"
[t.join() for t in threads]
print "..done"
main()
| badloop/SickRage | lib/tvdb_api/tvdb_cache.py | Python | gpl-3.0 | 7,781 |
#!/usr/bin/env python2
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import lmdb
import logging
import numpy as np
import os
import PIL.Image
import Queue
import sys
import threading
# Add path for Origae-6 package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import origae.config # noqa
from origae import extensions, log # noqa
from origae.job import Job # noqa
# Import origae.config first to set the path to Caffe
import caffe.io # noqa
import caffe_pb2 # noqa
logger = logging.getLogger('origae.tools.create_dataset')
class DbWriter(threading.Thread):
"""
Abstract class for writing to databases
"""
def __init__(self, output_dir, total_batches):
self._dir = output_dir
self.write_queue = Queue.Queue(10)
# sequence number
self.seqn = 0
self.total_batches = total_batches
self.processed_batches = 0
self.done = False
threading.Thread.__init__(self)
def write_batch_threadsafe(self, batch):
"""
This function writes a batch of data into the database
This may be called from multiple threads
"""
self.write_queue.put(batch)
def set_done(self):
"""
Instructs writer thread to complete after queue becomes empty
"""
self.done = True
def run(self):
"""
DB Writer thread entry point
"""
while True:
try:
batch = self.write_queue.get(timeout=0.1)
except Queue.Empty:
if self.done:
# break out of main loop and terminate
break
else:
# just keep looping
continue
self.write_batch_threadunsafe(batch)
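# Design note on DbWriter (added for clarity): this is a single-consumer queue -- any
# number of encoder threads enqueue encoded batches via write_batch_threadsafe(), while
# the one writer thread drains the queue in run() and performs the thread-unsafe database
# writes, so all LMDB transactions are serialised in a single thread.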
class LmdbWriter(DbWriter):
def __init__(self,
dataset_dir,
stage,
feature_encoding,
label_encoding,
**kwargs):
self.stage = stage
db_dir = os.path.join(dataset_dir, stage)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
super(LmdbWriter, self).__init__(dataset_dir, **kwargs)
# create LMDB for features
self.feature_db = self.create_lmdb("features")
# will create LMDB for labels later if necessary
self.label_db = None
# encoding
self.feature_encoding = feature_encoding
self.label_encoding = label_encoding
def create_lmdb(self, db_type):
sub_dir = os.path.join(self.stage, db_type)
db_dir = os.path.join(self._dir, sub_dir)
db = lmdb.open(
db_dir,
map_async=True,
max_dbs=0)
logger.info('Created %s db for stage %s in %s' % (db_type,
self.stage,
sub_dir))
return db
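    # Resulting on-disk layout (illustrative): <dataset_dir>/<stage>/features and,
    # if any labels are written, <dataset_dir>/<stage>/labels, each an LMDB directory.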
def array_to_datum(self, data, scalar_label, encoding):
if data.ndim != 3:
raise ValueError('Invalid number of dimensions: %d' % data.ndim)
if encoding == 'none':
if data.shape[0] == 3:
# RGB to BGR
# XXX see issue #59
data = data[[2, 1, 0], ...]
datum = caffe.io.array_to_datum(data, scalar_label)
else:
# Transpose to (height, width, channel)
data = data.transpose((1, 2, 0))
datum = caffe_pb2.Datum()
datum.height = data.shape[0]
datum.width = data.shape[1]
datum.channels = data.shape[2]
datum.label = scalar_label
if data.shape[2] == 1:
# grayscale
data = data[:, :, 0]
s = StringIO()
if encoding == 'png':
PIL.Image.fromarray(data).save(s, format='PNG')
elif encoding == 'jpg':
PIL.Image.fromarray(data).save(s, format='JPEG', quality=90)
else:
raise ValueError('Invalid encoding type')
datum.data = s.getvalue()
datum.encoded = True
return datum
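    # Encoding summary (added for readability): with encoding == 'none' the raw (C, H, W)
    # array is stored via caffe.io.array_to_datum (channels flipped RGB -> BGR); with
    # 'png' or 'jpg' the array is transposed to (H, W, C), compressed with PIL, and
    # stored as an encoded Datum instead.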
def write_batch(self, batch):
"""
encode data into datum objects
this may be called from multiple encoder threads
"""
datums = []
for (feature, label) in batch:
# restrict features to 3D data (Caffe Datum objects)
if feature.ndim != 3:
raise ValueError("LMDB/Caffe expect 3D data - ndim=%d" % feature.ndim)
# restrict labels to 3D data (Caffe Datum objects) or scalars
if not (label.ndim == 3 or label.size == 1):
raise ValueError("LMDB/Caffe expect 3D or scalar label - ndim=%d" % label.ndim)
if label.size > 1:
label_datum = self.array_to_datum(
label,
0,
self.label_encoding)
# setting label to 0 - it will be unused as there is
# a dedicated label DB
label = 0
else:
label = label[0]
label_datum = None
feature_datum = self.array_to_datum(
feature,
label,
self.feature_encoding)
datums.append(
(feature_datum.SerializeToString(),
label_datum.SerializeToString() if label_datum else None))
self.write_batch_threadsafe(datums)
def write_batch_threadunsafe(self, batch):
"""
        Write batch to DB, this must only be called from the writer thread
"""
feature_datums = []
label_datums = []
for (feature, label) in batch:
key = "%09d" % self.seqn
if label is not None:
if self.label_db is None:
self.label_db = self.create_lmdb("labels")
label_datums.append((key, label))
feature_datums.append((key, feature))
self.seqn += 1
self.write_datums(self.feature_db, feature_datums)
if len(label_datums) > 0:
self.write_datums(self.label_db, label_datums)
self.processed_batches += 1
logger.info('Processed %d/%d' % (self.processed_batches, self.total_batches))
def write_datums(self, db, batch):
try:
with db.begin(write=True) as lmdb_txn:
for key, datum in batch:
lmdb_txn.put(key, datum)
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit * 2
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0, 87):
raise ValueError('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
self.write_datums(db, batch)
class Encoder(threading.Thread):
def __init__(self, queue, writer, extension, error_queue, force_same_shape):
self.extension = extension
self.queue = queue
self.writer = writer
self.label_shape = None
self.feature_shape = None
self.feature_sum = None
self.processed_count = 0
self.sample_count = 0
self.error_queue = error_queue
self.force_same_shape = force_same_shape
threading.Thread.__init__(self)
def run(self):
data = []
while True:
# get entry ID
# don't block- if the queue is empty then we're done
try:
batch = self.queue.get_nowait()
except Queue.Empty:
# break out of main loop and terminate
break
try:
data = []
for entry_id in batch:
# call into extension to format entry into number arrays
entry_value = self.extension.encode_entry(entry_id)
# entry_value is either a list of (feature, label) tuples
# or a single tuple
if not isinstance(entry_value, list):
entry_value = [entry_value] # convert to list
for feature, label in entry_value:
# check feature and label shapes
if self.feature_shape is None:
self.feature_shape = feature.shape
if self.label_shape is None:
self.label_shape = label.shape
if self.force_same_shape:
if self.feature_shape != feature.shape:
raise ValueError("Feature shape mismatch (last:%s, previous:%s)"
% (repr(feature.shape), repr(self.feature_shape)))
if self.label_shape != label.shape:
raise ValueError("Label shape mismatch (last:%s, previous:%s)"
% (repr(label.shape), repr(self.label_shape)))
if self.feature_sum is None:
self.feature_sum = np.zeros(self.feature_shape, dtype=np.float64)
# accumulate sum for mean file calculation
self.feature_sum += feature
# aggregate data
data.append((feature, label))
self.sample_count += 1
self.processed_count += 1
                if len(data) > 0:
# write data
self.writer.write_batch(data)
except Exception as e:
self.error_queue.put('%s: %s' % (type(e).__name__, e.message))
raise
class DbCreator(object):
def create_db(self,
extension,
stage,
dataset_dir,
batch_size,
num_threads,
feature_encoding,
label_encoding,
force_same_shape):
# retrieve itemized list of entries
entry_ids = extension.itemize_entries(stage)
entry_count = len(entry_ids)
if entry_count > 0:
# create a queue to write errors to
error_queue = Queue.Queue()
# create and fill encoder queue
encoder_queue = Queue.Queue()
batch_indices = xrange(0, len(entry_ids), batch_size)
for batch in [entry_ids[start:start+batch_size] for start in batch_indices]:
# queue this batch
encoder_queue.put(batch)
# create db writer
writer = LmdbWriter(
dataset_dir,
stage,
total_batches=len(batch_indices),
feature_encoding=feature_encoding,
label_encoding=label_encoding)
writer.daemon = True
writer.start()
# create encoder threads
encoders = []
for _ in xrange(num_threads):
encoder = Encoder(encoder_queue, writer, extension, error_queue, force_same_shape)
encoder.daemon = True
encoder.start()
encoders.append(encoder)
# wait for all encoder threads to complete and aggregate data
feature_sum = None
processed_count = 0
sample_count = 0
feature_shape = None
label_shape = None
for encoder in encoders:
encoder.join()
# catch errors that may have occurred in reader thread
if not error_queue.empty():
while not error_queue.empty():
err = error_queue.get()
logger.error(err)
raise Exception(err)
if feature_shape is None:
feature_shape = encoder.feature_shape
logger.info('Feature shape for stage %s: %s' % (stage, repr(feature_shape)))
if label_shape is None:
label_shape = encoder.label_shape
logger.info('Label shape for stage %s: %s' % (stage, repr(label_shape)))
if force_same_shape:
if encoder.feature_shape and feature_shape != encoder.feature_shape:
raise ValueError("Feature shape mismatch (last:%s, previous:%s)"
% (repr(feature_shape), repr(encoder.feature_shape)))
if encoder.label_shape and label_shape != encoder.label_shape:
raise ValueError("Label shape mismatch (last:%s, previous:%s)"
% (repr(label_shape), repr(encoder.label_shape)))
if feature_sum is None:
feature_sum = encoder.feature_sum
elif encoder.feature_sum is not None:
feature_sum += encoder.feature_sum
processed_count += encoder.processed_count
sample_count += encoder.sample_count
# write mean file
if feature_sum is not None:
self.save_mean(feature_sum, sample_count, dataset_dir, stage)
# wait for writer thread to complete
writer.set_done()
writer.join()
if processed_count != entry_count:
# TODO: handle this more gracefully
raise ValueError('Number of processed entries (%d) does not match entry count (%d)'
% (processed_count, entry_count))
logger.info('Found %d entries for stage %s' % (sample_count, stage))
def save_mean(self, feature_sum, entry_count, dataset_dir, stage):
"""
Save mean to file
"""
data = np.around(feature_sum / entry_count).astype(np.uint8)
mean_file = os.path.join(stage, 'mean.binaryproto')
# Transform to caffe's format requirements
if data.ndim == 3:
if data.shape[0] == 3:
# channel swap
# XXX see issue #59
data = data[[2, 1, 0], ...]
elif data.ndim == 2:
# Add a channels axis
data = data[np.newaxis, :, :]
blob = caffe_pb2.BlobProto()
blob.num = 1
blob.channels, blob.height, blob.width = data.shape
blob.data.extend(data.astype(float).flat)
with open(os.path.join(dataset_dir, mean_file), 'wb') as outfile:
outfile.write(blob.SerializeToString())
logger.info('Created mean file for stage %s in %s' % (stage, mean_file))
def create_generic_db(jobs_dir, dataset_id, stage):
"""
Create a generic DB
"""
# job directory defaults to that defined in Origae-6 config
if jobs_dir == 'none':
jobs_dir = origae.config.config_value('jobs_dir')
# load dataset job
dataset_dir = os.path.join(jobs_dir, dataset_id)
if not os.path.isdir(dataset_dir):
raise IOError("Dataset dir %s does not exist" % dataset_dir)
dataset = Job.load(dataset_dir)
# create instance of extension
extension_id = dataset.extension_id
extension_class = extensions.data.get_extension(extension_id)
extension = extension_class(**dataset.extension_userdata)
# encoding
feature_encoding = dataset.feature_encoding
label_encoding = dataset.label_encoding
batch_size = dataset.batch_size
num_threads = dataset.num_threads
force_same_shape = dataset.force_same_shape
# create main DB creator object and execute main method
db_creator = DbCreator()
db_creator.create_db(
extension,
stage,
dataset_dir,
batch_size,
num_threads,
feature_encoding,
label_encoding,
force_same_shape)
logger.info('Generic DB creation Done')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='DB creation tool - Origae-6')
# Positional arguments
parser.add_argument(
'dataset',
help='Dataset Job ID')
# Optional arguments
parser.add_argument(
'-j',
'--jobs_dir',
default='none',
help='Jobs directory (default: from Origae-6 config)',
)
parser.add_argument(
'-s',
'--stage',
default='train',
help='Stage (train, val, test)',
)
args = vars(parser.parse_args())
try:
create_generic_db(
args['jobs_dir'],
args['dataset'],
args['stage']
)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e.message))
raise
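
# Usage sketch, not part of the original tool. The job ID and jobs directory
# below are placeholders; only the flags defined by the argparse setup above
# are assumed to exist.
#   python create_generic_db.py <dataset-job-id> -j /path/to/jobs -s train
#   python create_generic_db.py <dataset-job-id> -s val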
| winnerineast/Origae-6 | origae/tools/create_generic_db.py | Python | gpl-3.0 | 17,266 |
#!/usr/bin/python
class PWMObject():
    """Wraps a single channel of a PWM driver that exposes setPWM(channel, on_tick, off_tick)."""
    def __init__(self, pwm, channel):
        self.pwm = pwm
        self.channel = channel
def pwm_on(self, time_on):
self.pwm.setPWM(self.channel, 0, time_on)
def pwm_on_advanced(self, time_off, time_on):
self.pwm.setPWM(self.channel, time_off, time_on)
def pwm_off(self):
        self.pwm.setPWM(self.channel, 0, 0)
| crtgregoric/RPi-Robot | pwm_objects/pwm_object.py | Python | gpl-3.0 | 389 |
import pickle
import bpy
from mathutils import *
def create_camera_path(filename, camname='Camera'):
cam = bpy.data.objects[camname] # Get first camera
cam.scale = Vector((1,1,1))
mat = cam.matrix_world
####
with open(filename, 'rb') as infile:
X,Y,T,O = pickle.load(infile)
mesh = bpy.data.meshes.new(name='CameraPath')
mesh.from_pydata(O, [(i,i+1) for i in range(len(O)-1)], [])
mesh.update()
obj = bpy.data.objects.new('CameraPath', mesh)
obj.matrix_world = cam.matrix_world
bpy.context.scene.objects.link(obj)
obj.select = True
def create_mem_curve(filename, camname='Camera', scale=1.0):
cam = bpy.data.objects[camname] # Get first camera
cam.scale = Vector((1,1,1))
mat = cam.matrix_world
####
with open(filename, 'rb') as infile:
X,Y,T,O = pickle.load(infile)
verts = []
mY = max(Y)
sY = [y/mY * scale for y in Y]
for o,m in zip(O,sY):
verts.append(o)
x,y,z = o
verts.append((x,y+m,z))
faces = []
for i in range(len(O)-1):
j = i*2
faces.append((j+0,j+2,j+3,j+1))
mesh = bpy.data.meshes.new(name='CameraPath')
mesh.from_pydata(verts, [], faces)
mesh.update()
obj = bpy.data.objects.new('CameraPath', mesh)
obj.matrix_world = cam.matrix_world
bpy.context.scene.objects.link(obj)
obj.select = True
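
# Usage sketch (hypothetical pickle path; assumes the file stores the same
# (X, Y, T, O) tuple unpickled above), run from Blender's Python console:
#   create_camera_path('/tmp/camera_trace.pkl', camname='Camera')
#   create_mem_curve('/tmp/camera_trace.pkl', camname='Camera', scale=2.0)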
| ginkgo/micropolis | tools/blender_graph.py | Python | gpl-3.0 | 1,455 |
# Generated by Django 2.2.24 on 2021-10-02 14:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0072_product_order_index'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='order_index',
),
]
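
# Illustrative only (not part of the generated file): the migration is applied
# or reverted with Django's standard commands.
#   python manage.py migrate products 0073_remove_product_order_index
#   python manage.py migrate products 0072_product_order_index   # roll back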
| flavoi/diventi | diventi/products/migrations/0073_remove_product_order_index.py | Python | apache-2.0 | 337 |
#!/usr/bin/env python
"""
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete as many transactions as you like
(ie, buy one and sell one share of the stock multiple times).
However, you may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
"""
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
        Idea: if we buy the stock on day i and the price increases on day i + 1,
        we sell on day i + 1 and immediately buy again at that price; otherwise we wait.
        Summing every positive day-to-day difference gives the maximum profit.
Time: O(n)
"""
        if prices is None or len(prices) == 0:
return 0
profit = 0
for i in range(len(prices) - 1):
if prices[i + 1] > prices[i]:
profit += prices[i + 1] - prices[i]
return profit
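
# Quick sanity check (hypothetical inputs, not part of the original file):
# buy at 1, sell at 5, buy at 3, sell at 6 -> (5 - 1) + (6 - 3) = 7.
#   assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 7
#   assert Solution().maxProfit([7, 6, 4, 3, 1]) == 0  # prices only fall, never buy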
| weixsong/algorithm | leetcode/122.py | Python | mit | 1,001 |
#
# Documentation build configuration file, created by sphinx-quickstart
#
# This file is execfile()d with the current directory set to its containing dir
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../.."))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.1"
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named "sphinx.ext.*")
# or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinxcontrib.bibtex",
"sphinx.ext.mathjax",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "The General Theory of Relativity"
copyright = "2019-, Albert Einstein"
# The version info for the project you"re documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = "0.0"
# The full version, including alpha/beta/rc tags.
# release = "0.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ""
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%d %B %Y"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents
# default_role = None
# If true, "()" will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ["src."]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "haiku"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not "", a "Last updated on:" timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = ".html"
# Output file base name for HTML help builder.
htmlhelp_basename = "somedoc"
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
"papersize": "a4paper",
# The font size ("10pt", "11pt" or "12pt").
"pointsize": "11pt",
# Remove the "Release ..." subtitle from the LaTeX frontpage.
"releasename": ""
# Additional stuff for the LaTeX preamble.
# "preamble": "",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
# Usually the list will only consist of one tuple for the master file.
latex_documents = [
(
"index",
"project_documentation.tex",
"""Documentation of the The General Theory of Relativity
project""",
"Albert Einstein",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ""
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
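
# Illustrative build commands for this configuration; the source and output
# directories are assumptions, not taken from the project.
#   sphinx-build -b html  src/documentation _build/html
#   sphinx-build -b latex src/documentation _build/latex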
| hmgaudecker/econ-project-templates | docs/bld/example/python/python_example/src/documentation/conf.py | Python | bsd-3-clause | 7,153 |
"""
Django settings for life project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n!)-@x$)!mqxu@#p37f(a7+83pr*@o(ud8dq75x=iy*0^@uwt9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'emails'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'life.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates/'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'life.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| sehgalvibhor/my_story | life/life/settings.py | Python | mit | 3205 |
from .constants import *
from .validators import *
from .protofeed import *
from .main import *
__version__ = "2.2.0"
| araichev/make_gtfs | make_gtfs/__init__.py | Python | mit | 119 |
"""Manipulate differences, differentials and characteristics."""
| ranea/ArxPy | arxpy/differential/__init__.py | Python | mit | 65 |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
# use Django-tagging for tags. If Django-tagging cannot be found, create our own
# I did not author this little snippet, I found it somewhere on the web,
# and cannot remember where exactly it was.
try:
from tagging.fields import TagField
tagfield_help_text = 'Separate tags with spaces, put quotes around multiple-word tags.'
except ImportError:
class TagField(models.CharField):
def __init__(self, **kwargs):
default_kwargs = {'max_length': 255, 'blank': True}
default_kwargs.update(kwargs)
super(TagField, self).__init__(**default_kwargs)
def get_internal_type(self):
return 'CharField'
tagfield_help_text = 'Django-tagging was not found, tags will be treated as plain text.'
# End tagging snippet
class VideoCategory(models.Model):
""" A model to help categorize videos """
title = models.CharField(max_length=255)
slug = models.SlugField(
unique=True,
help_text="A url friendly slug for the category",
)
description = models.TextField(null=True, blank=True)
class Meta:
verbose_name_plural = "Video Categories"
def __unicode__(self):
return "%s" % self.title
@models.permalink
def get_absolute_url(self):
return ('videostream_category_detail', [self.slug])
class Video(models.Model):
"""
This is our Base Video Class, with fields that will be available
to all other Video models.
"""
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True,
help_text="A url friendly slug for the video clip.")
description = models.TextField(null=True, blank=True)
tags = TagField(help_text=tagfield_help_text)
categories = models.ManyToManyField(VideoCategory)
allow_comments = models.BooleanField(default=False)
## TODO:
## In future we may want to allow for more control over publication
is_public = models.BooleanField(default=False)
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
publish_date = models.DateTimeField(null=True, blank=True)
author = models.ForeignKey(User, null=True, blank=True)
class Meta:
ordering = ('-publish_date', '-created_date')
get_latest_by = 'publish_date'
def __unicode__(self):
return "%s" % self.title
@models.permalink
def get_absolute_url(self):
return ('videostream_video_detail', (), {
'year': self.publish_date.strftime("%Y"),
'month': self.publish_date.strftime("%b"),
'day': self.publish_date.strftime("%d"),
'slug': self.slug
})
def save(self, *args, **kwargs):
self.modified_date = datetime.now()
        if self.publish_date is None and self.is_public:
self.publish_date = datetime.now()
super(Video, self).save(*args, **kwargs)
class BasicVideo(Video):
"""
This is our basic HTML5 Video type. BasicVideo can have more than
one HTML5 Video as a 'video type'. This allows us to create different
    video formats, one for each video type.
"""
pass
class HTML5Video(models.Model):
OGG = 0
WEBM = 1
MP4 = 2
FLASH = 3
VIDEO_TYPE = (
(OGG, 'video/ogg'),
(WEBM, 'video/webm'),
(MP4, 'video/mp4'),
(FLASH, 'video/flv'),
)
video_type = models.IntegerField(
choices=VIDEO_TYPE,
default=WEBM,
help_text="The Video type"
)
video_file = models.FileField(
upload_to="videos/html5/",
help_text="The file you wish to upload. Make sure that it's the correct format.",
)
# Allow for multiple video types for a single video
basic_video = models.ForeignKey(BasicVideo)
class Meta:
verbose_name = "Html 5 Video"
verbose_name_plural = "Html 5 Videos"
class EmbedVideo(Video):
video_url = models.URLField(null=True, blank=True)
video_code = models.TextField(
null=True,
blank=True,
help_text="Use the video embed code instead of the url if your frontend does not support embedding with the URL only."
)
class FlashVideo(Video):
"""
This model is what was once called "VideoStream". Since we want to support
videos from other sources as well, this model was renamed to FlashVideo.
"""
original_file = models.FileField(
upload_to="videos/flash/source/",
null=True,
blank=True,
help_text="Make sure that the video you are uploading has a audo bitrate of at least 16. The encoding wont function on a lower audio bitrate."
)
flv_file = models.FileField(
upload_to="videos/flash/flv/",
null=True,
blank=True,
help_text="If you already have an encoded flash video, upload it here (no encoding needed)."
)
thumbnail = models.ImageField(
blank=True,
null=True,
upload_to="videos/flash/thumbnails/",
help_text="If you uploaded a flv clip that was already encoded, you will need to upload a thumbnail as well. If you are planning use django-video to encode, you dont have to upload a thumbnail, as django-video will create it for you"
)
# This option allows us to specify whether we need to encode the clip
encode = models.BooleanField(
default=False,
help_text="Encode or Re-Encode the clip. If you only wanted to change some information on the item, and do not want to encode the clip again, make sure this option is not selected."
)
def get_player_size(self):
""" this method returns the styles for the player size """
size = getattr(settings, 'VIDEOSTREAM_SIZE', '320x240').split('x')
return "width: %spx; height: %spx;" % (size[0], size[1])
| andrewebdev/django-video | src/videostream/models.py | Python | bsd-3-clause | 5,986 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
from sqlalchemy import distinct
from stalker import db, Version
class RepresentationManager(object):
"""Manages Task/Version :class:`.Representation`\ s.
RepresentationManager manages all these different representations as one
    and supplies easy switching, including on-load switching, between different
    representations.
"""
pass
class Representation(object):
"""A single representation of a Version.
    A representation is basically a Version instance; what matters is the
    content of that Version instance. It can be a hi-res polygonal model, a
    delayed-load archive or Arnold scene source file, or a geometry with only
    one bounding box.
In Anima Pipeline, different representations are managed through the
Version.take_name attribute. So if the base take name for a given Version
    is **Main** then **Main@BBox**, **Main@ASS** or **Main@GPU** are
    considered its other representations (see ``repr_separator`` below).
    This is done that way to allow easy creation of different representations,
    that is, without using any special script or attribute, while staying
    flexible enough.
"""
base_repr_name = 'Base'
repr_separator = '@'
def __init__(self, version=None):
self._version = None
self.version = version
def _validate_version(self, version):
"""Validates the given version value
:param version: :class:`.Version`
:return: :class:`.Version`
"""
if version is not None:
from stalker import Version
if not isinstance(version, Version):
raise TypeError(
'%(class)s.version should be a '
'stalker.models.version.Version instance, not '
'%(version_class)s' %
{
'class': self.__class__.__name__,
'version_class': version.__class__.__name__
}
)
return version
@property
def version(self):
"""getter for the _version attribute
:return:
"""
return self._version
@version.setter
def version(self, version):
"""getter for the _version attribute
:return:
"""
self._version = self._validate_version(version)
def has_any_repr(self):
"""Returns true or false depending if the version has any
representation or not
:returns: bool
"""
return len(self.list_all()) > 1
def has_repr(self, repr_name):
"""Returns True or False depending on that this reference has a
Representation with the given name
:param str repr_name: The desired representation name
:return:
"""
return self.find(repr_name) is not None
def is_repr(self, repr_name=''):
"""Returns a bool value depending if the version is the requested
representation in its representation series.
:param str repr_name: Representation name
:return:
"""
base_take_name = self.get_base_take_name(self.version)
if repr_name != self.base_repr_name and repr_name != base_take_name:
resolved_repr_name = \
'%s%s%s' % (base_take_name, self.repr_separator, repr_name)
else:
resolved_repr_name = base_take_name
return self.version.take_name == resolved_repr_name
def is_base(self):
"""Returns a bool value depending if the version is the base of its
representations series.
:return: bool
"""
base_take_name = self.get_base_take_name(self.version)
return self.version.take_name == base_take_name
@classmethod
def get_base_take_name(cls, version):
"""Returns the base take_name for the related version
:return: str
"""
# find the base repr name from the current version
take_name = ''
if isinstance(version, Version):
take_name = version.take_name
elif isinstance(version, str):
take_name = version
if cls.repr_separator in take_name:
# it is a repr
base_repr_take_name = take_name.split(cls.repr_separator)[0]
else:
# it is the base repr
base_repr_take_name = take_name
return base_repr_take_name
def list_all(self):
"""lists other representations
"""
base_take_name = self.get_base_take_name(self.version)
# find any version that starts with the base_repr_name
# under the same task
take_names = map(
lambda x: x[0],
db.DBSession.query(distinct(Version.take_name))
.filter(Version.task == self.version.task)
.all()
)
take_names.sort()
repr_names = []
for take_name in take_names:
if take_name.startswith(base_take_name):
if take_name != base_take_name:
repr_names.append(
take_name[len(base_take_name) +
len(self.repr_separator):]
)
else:
repr_names.append(self.base_repr_name)
return repr_names
def find(self, repr_name=''):
"""returns the Version instance with the given representation name.
        :param repr_name: The take name of the desired representation.
:return: :class:`.Version`
"""
base_take_name = self.get_base_take_name(self.version)
if repr_name == self.base_repr_name:
take_name = base_take_name
else:
take_name = '%s%s%s' % (
base_take_name, self.repr_separator, repr_name
)
return Version.query\
.filter_by(task=self.version.task)\
.filter_by(take_name=take_name)\
.filter_by(is_published=True)\
.order_by(Version.version_number.desc())\
.first()
@property
def repr(self):
"""returns the current representation name
"""
if not self.version:
return None
take_name = self.version.take_name
if self.repr_separator in take_name:
# it is a repr
repr_name = take_name.split(self.repr_separator)[1]
else:
# it is the base repr
repr_name = self.base_repr_name
return repr_name
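
# Usage sketch (the Version instance and take names are hypothetical):
#   rep = Representation(version=some_version)   # e.g. a Version with take_name 'Main'
#   rep.list_all()                               # e.g. ['Base', 'BBox', 'GPU']
#   rep.find('BBox')                             # latest published 'Main@BBox' Version
#   rep.is_base()                                # True for the 'Main' take itself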
| sergeneren/anima | anima/repr.py | Python | bsd-2-clause | 6,665 |
class SampleSource(object):
pass
| akloster/pysiology | pysiology/sample_source.py | Python | bsd-2-clause | 38 |
# This is a randomized test that generates different pathnames every
# time it is invoked, and tests the encoding of those pathnames.
#
# It uses a simple probabilistic model to generate valid pathnames
# that have proven likely to expose bugs and divergent behaviour in
# different encoding implementations.
from mercurial import parsers
from mercurial import store
import binascii, itertools, math, os, random, sys, time
import collections
if sys.version_info[:2] < (2, 6):
sys.exit(0)
validchars = set(map(chr, range(0, 256)))
alphanum = range(ord('A'), ord('Z'))
for c in '\0/':
validchars.remove(c)
winreserved = ('aux con prn nul'.split() +
['com%d' % i for i in xrange(1, 10)] +
['lpt%d' % i for i in xrange(1, 10)])
def casecombinations(names):
'''Build all case-diddled combinations of names.'''
combos = set()
for r in names:
for i in xrange(len(r) + 1):
for c in itertools.combinations(xrange(len(r)), i):
d = r
for j in c:
d = ''.join((d[:j], d[j].upper(), d[j + 1:]))
combos.add(d)
return sorted(combos)
def buildprobtable(fp, cmd='hg manifest tip'):
'''Construct and print a table of probabilities for path name
components. The numbers are percentages.'''
counts = collections.defaultdict(lambda: 0)
for line in os.popen(cmd).read().splitlines():
if line[-2:] in ('.i', '.d'):
line = line[:-2]
if line.startswith('data/'):
line = line[5:]
for c in line:
counts[c] += 1
for c in '\r/\n':
counts.pop(c, None)
t = sum(counts.itervalues()) / 100.0
fp.write('probtable = (')
for i, (k, v) in enumerate(sorted(counts.iteritems(), key=lambda x: x[1],
reverse=True)):
if (i % 5) == 0:
fp.write('\n ')
vt = v / t
if vt < 0.0005:
break
fp.write('(%r, %.03f), ' % (k, vt))
fp.write('\n )\n')
# A table of character frequencies (as percentages), gleaned by
# looking at filelog names from a real-world, very large repo.
probtable = (
('t', 9.828), ('e', 9.042), ('s', 8.011), ('a', 6.801), ('i', 6.618),
('g', 5.053), ('r', 5.030), ('o', 4.887), ('p', 4.363), ('n', 4.258),
('l', 3.830), ('h', 3.693), ('_', 3.659), ('.', 3.377), ('m', 3.194),
('u', 2.364), ('d', 2.296), ('c', 2.163), ('b', 1.739), ('f', 1.625),
('6', 0.666), ('j', 0.610), ('y', 0.554), ('x', 0.487), ('w', 0.477),
('k', 0.476), ('v', 0.473), ('3', 0.336), ('1', 0.335), ('2', 0.326),
('4', 0.310), ('5', 0.305), ('9', 0.302), ('8', 0.300), ('7', 0.299),
('q', 0.298), ('0', 0.250), ('z', 0.223), ('-', 0.118), ('C', 0.095),
('T', 0.087), ('F', 0.085), ('B', 0.077), ('S', 0.076), ('P', 0.076),
('L', 0.059), ('A', 0.058), ('N', 0.051), ('D', 0.049), ('M', 0.046),
('E', 0.039), ('I', 0.035), ('R', 0.035), ('G', 0.028), ('U', 0.026),
('W', 0.025), ('O', 0.017), ('V', 0.015), ('H', 0.013), ('Q', 0.011),
('J', 0.007), ('K', 0.005), ('+', 0.004), ('X', 0.003), ('Y', 0.001),
)
for c, _ in probtable:
validchars.remove(c)
validchars = list(validchars)
def pickfrom(rng, table):
c = 0
r = rng.random() * sum(i[1] for i in table)
for i, p in table:
c += p
if c >= r:
return i
reservedcombos = casecombinations(winreserved)
# The first component of a name following a slash.
firsttable = (
(lambda rng: pickfrom(rng, probtable), 90),
(lambda rng: rng.choice(validchars), 5),
(lambda rng: rng.choice(reservedcombos), 5),
)
# Components of a name following the first.
resttable = firsttable[:-1]
# Special suffixes.
internalsuffixcombos = casecombinations('.hg .i .d'.split())
# The last component of a path, before a slash or at the end of a name.
lasttable = resttable + (
(lambda rng: '', 95),
(lambda rng: rng.choice(internalsuffixcombos), 5),
)
def makepart(rng, k):
'''Construct a part of a pathname, without slashes.'''
p = pickfrom(rng, firsttable)(rng)
l = len(p)
ps = [p]
while l <= k:
p = pickfrom(rng, resttable)(rng)
l += len(p)
ps.append(p)
ps.append(pickfrom(rng, lasttable)(rng))
return ''.join(ps)
def makepath(rng, j, k):
'''Construct a complete pathname.'''
return ('data/' + '/'.join(makepart(rng, k) for _ in xrange(j)) +
rng.choice(['.d', '.i']))
def genpath(rng, count):
'''Generate random pathnames with gradually increasing lengths.'''
mink, maxk = 1, 4096
def steps():
x, k = 0, mink
for i in xrange(count):
yield mink + int(round(math.sqrt((maxk - mink) * float(i) / count)))
for k in steps():
x = rng.randint(1, k)
y = rng.randint(1, k)
yield makepath(rng, x, y)
def runtests(rng, seed, count):
nerrs = 0
for p in genpath(rng, count):
h = store._pathencode(p) # uses C implementation, if available
r = store._hybridencode(p, True) # reference implementation in Python
if h != r:
if nerrs == 0:
print >> sys.stderr, 'seed:', hex(seed)[:-1]
print >> sys.stderr, "\np: '%s'" % p.encode("string_escape")
print >> sys.stderr, "h: '%s'" % h.encode("string_escape")
print >> sys.stderr, "r: '%s'" % r.encode("string_escape")
nerrs += 1
return nerrs
def main():
import getopt
# Empirically observed to take about a second to run
count = 100
seed = None
opts, args = getopt.getopt(sys.argv[1:], 'c:s:',
['build', 'count=', 'seed='])
for o, a in opts:
if o in ('-c', '--count'):
count = int(a)
elif o in ('-s', '--seed'):
seed = long(a, base=0) # accepts base 10 or 16 strings
elif o == '--build':
buildprobtable(sys.stdout,
'find .hg/store/data -type f && '
'cat .hg/store/fncache 2>/dev/null')
sys.exit(0)
if seed is None:
try:
seed = long(binascii.hexlify(os.urandom(16)), 16)
except AttributeError:
seed = long(time.time() * 1000)
rng = random.Random(seed)
if runtests(rng, seed, count):
sys.exit(1)
if __name__ == '__main__':
main()
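
# Example invocations (seed and count values are illustrative); the flags come
# from the getopt handling in main() above:
#   python test-pathencode.py -c 500          # test 500 generated paths
#   python test-pathencode.py -s 0x1234abcd   # reproduce a run from a reported seed
#   python test-pathencode.py --build         # regenerate the character probability table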
| iaddict/mercurial.rb | vendor/mercurial/tests/test-pathencode.py | Python | mit | 6,431 |
import json
import unittest
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.core import exceptions
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from django_webtest import WebTest
from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents
from . import models
User = get_user_model()
class LocationModelTests(unittest.TestCase):
def setUp(self):
self.location = models.Location.objects.create(
title='Test Location',
slug='test-location',
map_description='Location for testing',
map_center_description='Level 5, 48 Chippen St, Chippendale, NSW',
)
def tearDown(self):
models.Location.objects.all().delete()
def test_clean(self):
# Map center is required
with self.assertRaises(exceptions.ValidationError):
models.Location.objects.create(
title='Test Location 1',
slug='test-location-1',
).clean()
# Can create location with map center
models.Location.objects.create(
title='Test Location 2',
slug='test-location-2',
map_center_description='somewhere',
).clean()
# Can create location with lat/long center
models.Location.objects.create(
title='Test Location 3',
slug='test-location-3',
map_center_lat='-33.85',
map_center_long='151.17',
).clean()
# Cannot create location with center description and lat/long values
with self.assertRaises(exceptions.ValidationError):
models.Location.objects.create(
title='Test Location 1',
slug='test-location-1',
map_center_description='somewhere',
map_center_lat='-33.85',
).clean()
with self.assertRaises(exceptions.ValidationError):
models.Location.objects.create(
title='Test Location 1',
slug='test-location-1',
map_center_description='somewhere',
map_center_long='-33.85',
).clean()
# Must include both lat and long fields if either are set
with self.assertRaises(exceptions.ValidationError):
models.Location.objects.create(
title='Test Location 1',
slug='test-location-1',
map_center_lat='-33.85',
).clean()
with self.assertRaises(exceptions.ValidationError):
models.Location.objects.create(
title='Test Location 1',
slug='test-location-1',
map_center_long='-33.85',
).clean()
def test_location_has_absolute_url(self):
self.assertEqual(
'/location/test-location/',
self.location.get_absolute_url())
def test_render_map(self):
self.assertEquals(
'<div id="{container_id}" class="google-map"></div>'
'<script>'
' gkGoogleMaps = window.gkGoogleMaps || [];'
' gkGoogleMaps.push({data});'
'</script>'.format(
container_id=self.location.get_map_element_id(),
data=json.dumps(self.location.get_map_data()),
),
self.location.render_map())
def test_get_map_data(self):
self.assertEquals(
{
'containerSelector': '#' + self.location.get_map_element_id(),
'center': self.location.map_center_description,
'marker': self.location.map_marker_description or self.location.map_center_description,
'zoom': self.location.map_zoom,
'href': self.location.get_map_href(),
'key': getattr(settings, 'GOOGLE_MAPS_API_KEY', ''),
'description': [
line for line in self.location.map_description.splitlines()
if line
],
},
self.location.get_map_data()
)
def test_get_map_href(self):
# Location has map center description, no map marker
self.assertEqual(
'//maps.google.com/maps?'
'q=Level+5%2C+48+Chippen+St%2C+Chippendale%2C+NSW',
self.location.get_map_href())
# Location with map center lat/long
self.location.map_center_description = ''
self.location.map_center_lat = '100.1234'
self.location.map_center_long = '100.2345'
self.location.save()
self.assertEqual(
'//maps.google.com/maps?ll=100.1234%2C100.2345',
self.location.get_map_href())
def test_get_map_element_id(self):
self.assertEqual(
'google-map-%d' % id(self.location),
self.location.get_map_element_id())
class LocationViewsTests(WebTest):
def setUp(self):
self.location = models.Location.objects.create(
title='Test Location',
slug='test-location',
map_description='Location for testing',
map_center_description='Level 5, 48 Chippen St, Chippendale, NSW',
)
def test_list_view(self):
listing_url = reverse('icekit_plugins_location_index')
# Location not yet published, does not appear in listing
response = self.app.get(listing_url)
response.mustcontain(no=[
self.location.get_absolute_url(),
self.location.title
])
# Published Location does appear in listing
self.location.publish()
response = self.app.get(listing_url)
response.mustcontain(
self.location.get_absolute_url(),
self.location.title
)
def test_detail_view(self):
# Location not yet published, detail page is not available
response = self.app.get(
self.location.get_absolute_url(),
expect_errors=True)
self.assertEqual(404, response.status_code)
# Published Location page is available
self.location.publish()
response = self.app.get(
self.location.get_published().get_absolute_url())
response.mustcontain(
self.location.title
)
class LocationItemTests(WebTest):
def setUp(self):
self.location = models.Location.objects.create(
title='Test Location',
slug='test-location',
map_description='Location for testing',
map_center_description='Level 5, 48 Chippen St, Chippendale, NSW',
)
self.layout = G(
Layout,
template_name='icekit/layouts/default.html',
)
self.layout.content_types.add(
ContentType.objects.get_for_model(LayoutPage))
self.layout.save()
self.staff_user = User.objects.create(
email='test@test.com',
is_staff=True,
is_active=True,
is_superuser=True,
)
self.page = LayoutPage()
self.page.title = 'Test Page'
self.page.slug = 'test-page'
self.page.parent_site = Site.objects.first()
self.page.layout = self.layout
self.page.author = self.staff_user
self.page.save()
self.location_item = fluent_contents.create_content_instance(
models.LocationItem,
self.page,
location=self.location,
)
self.page.publish()
def test_unpublished_location_does_not_render(self):
response = self.app.get(self.page.get_published().get_absolute_url())
# Must *not* contain, hence `no` kwarg
response.mustcontain(no=[
self.location.get_absolute_url(),
self.location.title
])
def test_published_location_renders(self):
self.location.publish()
response = self.app.get(self.page.get_published().get_absolute_url())
response.mustcontain(
self.location.get_absolute_url(),
self.location.title,
)
| ic-labs/django-icekit | icekit/plugins/location/tests.py | Python | mit | 8,271 |
__author__ = 'sebastians'
# The function provided here can be used if you get no image output from cmd.png (either no picture at all or a black one).
# It can also be used if you experience segmentation faults with cmd.ray
from pymol import cmd
import os
def png_workaround(filepath, width=1024, height=768):
"""Workaround for (a) severe bug(s) in PyMOL preventing ray-traced images to be produced in command-line mode.
Use this function in case neither cmd.ray() or cmd.png() work.
"""
cmd.set('ray_trace_frames', 1) # Frames are raytraced before saving an image.
cmd.viewport(width, height) # Set resolution
### Workaround for raytracing in command-line mode
cmd.mpng(filepath, 1, 1) # Use batch png mode with 1 frame only
cmd.mplay() # cmd.mpng needs the animation to 'run'
os.rename("".join([filepath[:-4], '0001.png']), "".join([filepath[:-4], '.png'])) # Remove frame number in filename | ssalentin/pymol-animations | special-topics/workaround-png.py | Python | mit | 923 |
# -*- coding: utf8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating QFP/GullWings models in X3D format.
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
#
# Dimensions are from Jedec MS-026D document.
## file of parametric definitions
import collections
from collections import namedtuple
footprints_dir="TerminalBlock_Altech.pretty"
##enabling optional/default values to None
def namedtuple_with_defaults(typename, field_names, default_values=()):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
Params = namedtuple_with_defaults("Params", [
'manufacture', # Manufacture
'serie', # ModelName
'W', # package width
'H', # package height
    'WD',               # > Y distance from pin center to package edge
    'A1',               # package board separation
'pin_number', # Pin number serie
'PE', # Distance from edge to pin
'PS', # Pin distance
'PD', # Pin diameter
'PL', # Pin length
'PF', # Pin form
'SW', # Blender width
'rotation', # Rotation
'body_color_key', # Body colour
'pin_color_key', # Pin colour
'dest_dir_prefix' # Destination directory
])
all_params = {
'AK300': Params( # ModelName
#
#
#
        manufacture = 'Altech',     # Manufacturer name
serie = 'AK300', # Model name
W = 12.5, # Package width
H = 12.5, # Package height
        WD = 6.5,                   # > Y distance from pin center to package edge
        A1 = 0.1,                   # package board separation
pin_number = [ 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24], # Which pin configuration
PE = 2.5, # Distance from edge to pin
PS = 5.0, # Distance between pins
PD = [1.0, 0.8], # Pin size, [1.0] diameter 1 mm, [1.0, 0.8] rectangle 1.0x0.8
PL = 4.5, # Pin length
PF = 'rect', # Pin form 'round' or 'rect'
SW = 2.7, # Blender width
rotation = 0, # Rotation if required
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
dest_dir_prefix = 'TerminalBlock_Altech.3dshapes' # Destination directory
),
}
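
# Usage sketch (illustrative): generator scripts are expected to index
# all_params by series name; fields not set in Params(...) default to None via
# namedtuple_with_defaults.
#   params = all_params['AK300']
#   params.PS, params.PF, params.W   # 5.0, 'rect', 12.5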
| poeschlr/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/Altech/cq_parameters.py | Python | gpl-2.0 | 2,670 |
"""empty message
Revision ID: fccffb2e4db6
Revises: 0745219a2583
Create Date: 2018-03-05 15:32:55.741303
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'fccffb2e4db6'
down_revision = '0745219a2583'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('area', sa.Column('x_position', sa.Float(), nullable=True))
op.add_column('area', sa.Column('y_position', sa.Float(), nullable=True))
op.add_column('area', sa.Column('z_position', sa.Float(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('area', 'z_position')
op.drop_column('area', 'y_position')
op.drop_column('area', 'x_position')
# ### end Alembic commands ###
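
# Illustrative only: with Alembic (or a wrapper such as Flask-Migrate), this
# revision is applied or reverted by id, e.g.
#   alembic upgrade fccffb2e4db6
#   alembic downgrade 0745219a2583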
| WildflowerSchools/sensei | migrations/versions/fccffb2e4db6_.py | Python | mit | 934 |
import sys
import pickle
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from subprocess import Popen, PIPE
from collections import defaultdict
# tree() gives an arbitrarily nested defaultdict: d = tree(); d['a']['b']['c'] = 1 needs no intermediate setup
def tree(): return defaultdict(tree)
def run_command_with_params_and_get_output(command, args):
# Compile first
Popen(["make", command+"_compile"] + args, stdout=PIPE).communicate()[0]
print(" ".join(["make", command+"_run"] + args))
ret_val = Popen(["make", command+"_run"] + args, stdout=PIPE).communicate()[0].strip().split()
print(ret_val)
return ret_val
def get_values(v, command_range, epoch_range, batch_size_range, thread_range, rank_range, sync_range):
values = []
for c in command_range:
for e in epoch_range:
for b in batch_size_range:
for t in thread_range:
for r in rank_range:
for s in sync_range:
values.append(v[c][e][b][t][r][s])
return values
def draw_epoch_loss_graph(should_load_from_file, epoch_range, batch_size_range, thread_range, rank_range, sync_range, commands, gammas):
total_iter = len(batch_size_range) * len(thread_range) * len(rank_range) * len(sync_range) * len(commands)
cur_iter = 0
loss_values = tree()
overall_time_values = tree()
gradient_time_values = tree()
if not should_load_from_file:
for b in batch_size_range:
for t in thread_range:
for r in rank_range:
for s in sync_range:
for ii, c in enumerate(commands):
print("Iteration %d of %d" % (cur_iter, total_iter))
cur_iter += 1
output = run_command_with_params_and_get_output(c, ["N_EPOCHS="+str(epoch_range), "BATCH_SIZE="+str(b), "NTHREAD="+str(t), "RLENGTH="+str(r), "SHOULD_SYNC="+\
str(s), "SHOULD_PRINT_LOSS_TIME_EVERY_EPOCH=1", "START_GAMMA="+str(gammas[ii])])
values = [float(x) for x in output]
losses = [values[i] for i in range(0, len(values), 3)]
overall_times = [values[i] for i in range(1, len(values), 3)]
gradient_times = [values[i] for i in range(2, len(values), 3)]
loss_values[c][epoch_range][b][t][r][s] = losses
overall_time_values[c][epoch_range][b][t][r][s] = overall_times
gradient_time_values[c][epoch_range][b][t][r][s] = gradient_times
else:
with open('objs3.pickle') as f:
loss_values, overall_time_values, gradient_time_values = pickle.load(f)
with open('objs3.pickle', "w") as f:
pickle.dump([loss_values, overall_time_values, gradient_time_values], f)
for b in batch_size_range:
for t in thread_range:
for r in rank_range:
title = "Epoch_Loss_batch=%d_thread=%d_rank=%d" % (b, t, r)
plt.figure()
plt.title(title, fontsize=12)
plt.xlabel("Epoch")
plt.ylabel("Loss")
for s in sync_range:
for c in commands:
losses = loss_values[c][epoch_range][b][t][r][s]
epochs = list(range(1, epoch_range+1))
plt.plot(epochs, losses, label=c+" sync="+str(s), marker='o')
plt.yscale('log')
plt.xscale('log')
plt.legend(loc="upper right", fontsize=8)
plt.savefig(title + ".png")
plt.clf()
def draw_time_loss_graph(should_load_from_file, epoch_range, batch_size_range, thread_range, rank_range, sync_range, commands):
total_iter = len(batch_size_range) * len(thread_range) * len(rank_range) * len(sync_range) * len(commands)
cur_iter = 0
loss_values = tree()
overall_time_values = tree()
gradient_time_values = tree()
if not should_load_from_file:
for b in batch_size_range:
for t in thread_range:
for r in rank_range:
for s in sync_range:
for c in commands:
print("Iteration %d of %d" % (cur_iter, total_iter))
cur_iter += 1
output = run_command_with_params_and_get_output(c, ["N_EPOCHS="+str(epoch_range), "BATCH_SIZE="+str(b), "NTHREAD="+str(t), "RLENGTH="+str(r), "SHOULD_SYNC="+\
str(s), "SHOULD_PRINT_LOSS_TIME_EVERY_EPOCH=1"])
values = [float(x) for x in output]
losses = [values[i] for i in range(0, len(values), 3)]
overall_times = [values[i] for i in range(1, len(values), 3)]
gradient_times = [values[i] for i in range(2, len(values), 3)]
loss_values[c][epoch_range][b][t][r][s] = losses
overall_time_values[c][epoch_range][b][t][r][s] = overall_times
gradient_time_values[c][epoch_range][b][t][r][s] = gradient_times
else:
with open('objs2.pickle') as f:
loss_values, overall_time_values, gradient_time_values = pickle.load(f)
with open('objs2.pickle', "w") as f:
pickle.dump([loss_values, overall_time_values, gradient_time_values], f)
for b in batch_size_range:
for t in thread_range:
for r in rank_range:
title = "Overall_Time_Loss_batch=%d_thread=%d_rank=%d" % (b, t, r)
plt.figure()
plt.title(title, fontsize=12)
plt.xlabel("Time")
plt.ylabel("Loss")
for s in sync_range:
for c in commands:
times = overall_time_values[c][epoch_range][b][t][r][s]
losses = loss_values[c][epoch_range][b][t][r][s]
if 'hog' in c:
if s:
plt.plot(times, losses, label=c)
else:
plt.plot(times, losses, label=c+" sync="+str(s))
plt.yscale('log')
#plt.xscale('log')
plt.legend(loc="upper right", fontsize=8)
plt.savefig(title + ".png")
plt.clf()
for b in batch_size_range:
for t in thread_range:
for r in rank_range:
title = "Gradient_Time_Loss_batch=%d_thread=%d_rank=%d" % (b, t, r)
plt.figure()
plt.title(title, fontsize=12)
plt.xlabel("Time")
plt.ylabel("Loss")
for s in sync_range:
for c in commands:
times = gradient_time_values[c][epoch_range][b][t][r][s]
losses = loss_values[c][epoch_range][b][t][r][s]
if 'hog' in c:
if s:
plt.plot(times, losses, label=c)
else:
plt.plot(times, losses, label=c+" sync="+str(s))
plt.yscale('log')
#plt.xscale('log')
plt.legend(loc="upper right", fontsize=8)
plt.savefig(title + ".png")
plt.clf()
hog_command = [x for x in commands if 'hog' in x]
if len(hog_command) != 0:
hog_command = hog_command[0]
else:
return
for b in batch_size_range:
for t in thread_range:
for r in rank_range:
title = "Gradient_Time_Loss_Ratios_batch=%d_thread=%d_rank=%d" % (b, t, r)
plt.figure()
plt.title(title, fontsize=12)
plt.xlabel("Time")
plt.ylabel("Loss(hog)/Loss(cyc)")
for s in sync_range:
for c in commands:
if c == hog_command:
continue
hog_times = gradient_time_values[hog_command][epoch_range][b][t][r][s]
cyc_times = gradient_time_values[c][epoch_range][b][t][r][s]
hog_losses = loss_values[hog_command][epoch_range][b][t][r][s]
cyc_losses = loss_values[c][epoch_range][b][t][r][s]
# Compute cyc losses -- the best loss cyc achieved by hog's time
cyc_losses_aligned = []
for i1, t1 in enumerate(hog_times):
best_loss = 1000000000
for i2, t2 in enumerate(cyc_times):
if t2 > t1:
break
best_loss = min(best_loss, cyc_losses[i2])
cyc_losses_aligned.append(best_loss)
loss_ratio = [hog_losses[i] / cyc_losses_aligned[i] for i in range(len(hog_losses))]
plt.plot(hog_times, loss_ratio, label=c+" sync="+str(s))
plt.legend(loc="upper right", fontsize=8)
plt.savefig(title + ".png")
plt.clf()
for b in batch_size_range:
for t in thread_range:
for r in rank_range:
title = "Overall_Time_Loss_Ratios_batch=%d_thread=%d_rank=%d" % (b, t, r)
plt.figure()
plt.title(title, fontsize=12)
plt.xlabel("Time")
plt.ylabel("Loss(hog)/Loss(cyc)")
for s in sync_range:
for c in commands:
if hog_command == c:
continue
hog_times = overall_time_values[hog_command][epoch_range][b][t][r][s]
cyc_times = overall_time_values[c][epoch_range][b][t][r][s]
hog_losses = loss_values[hog_command][epoch_range][b][t][r][s]
cyc_losses = loss_values[c][epoch_range][b][t][r][s]
# Compute cyc losses -- the best loss cyc achieved by hog's time
cyc_losses_aligned = []
for i1, t1 in enumerate(hog_times):
best_loss = 1000000000
for i2, t2 in enumerate(cyc_times):
if t2 > t1:
break
best_loss = min(best_loss, cyc_losses[i2])
cyc_losses_aligned.append(best_loss)
loss_ratio = [hog_losses[i] / cyc_losses_aligned[i] for i in range(len(hog_losses))]
plt.plot(hog_times, loss_ratio, label=c+" sync="+str(s))
plt.legend(loc="upper right", fontsize=8)
plt.savefig(title + ".png")
plt.clf()
def draw_all_graphs(load_previous, epoch_range, batch_size_range, thread_range, rank_range,
sync_range, commands, average_over_n_rep):
total_iter = len(epoch_range) * len(batch_size_range) * len(thread_range) * len(rank_range) * len(sync_range) * len(commands) * average_over_n_rep
average_losses = tree()
average_gradient_times = tree()
average_total_times = tree()
if not load_previous:
cur_iter = 0
# Collect all relevant data
for epoch in epoch_range:
for batch_size in batch_size_range:
for thread in thread_range:
for rank in rank_range:
for sync in sync_range:
for command in commands:
avg_overall_time, avg_gradient_time, avg_loss = 0, 0, 0
for i in range(average_over_n_rep):
print("Iteration %d of %d" % (cur_iter, total_iter))
cur_iter += 1
# Run command with all params
output = run_command_with_params_and_get_output(command, ["N_EPOCHS="+str(epoch), "BATCH_SIZE="+str(batch_size), "NTHREAD="+str(thread), "RLENGTH="+str(rank), "SHOULD_SYNC="+str(sync)])
# overall elapsed, gradient time, loss
overall_time = float(output[0])
gradient_time = float(output[1])
loss = float(output[2])
avg_overall_time += overall_time
avg_gradient_time += gradient_time
avg_loss += loss
avg_overall_time /= average_over_n_rep
avg_gradient_time /= average_over_n_rep
avg_loss /= average_over_n_rep
average_losses[command][epoch][batch_size][thread][rank][sync] = avg_loss
average_gradient_times[command][epoch][batch_size][thread][rank][sync] = avg_gradient_time
average_total_times[command][epoch][batch_size][thread][rank][sync] = avg_overall_time
else:
with open('objs.pickle') as f:
average_losses, average_gradient_times, average_total_times = pickle.load(f)
with open('objs.pickle', 'w') as f:
pickle.dump([average_losses, average_gradient_times, average_total_times], f)
"""# Reminder: arrays of form [command][epoch][batch_size][thread][rank][sync]
plt.clf()"""
for (time_data, label) in [(average_gradient_times, "Gradient Time"), (average_total_times, "Overall Time")]:
for r in rank_range:
for b in batch_size_range:
f, plots = plt.subplots(1, len(thread_range), sharex=True, sharey=True)
title = "Epoch_%s_Plot_Batch=%d_Rank=%d" % (label, b, r)
f.suptitle(title, fontsize=12)
for index, t in enumerate(thread_range):
plots[index].set_title("%d threads" % t)
for s in sync_range:
for c in commands:
plots[index].set_xlabel("Epoch")
plots[index].tick_params(axis='both', which='major', labelsize=5)
plots[index].tick_params(axis='both', which='minor', labelsize=5)
plots[index].set_ylabel(label)
times = get_values(time_data, [c], epoch_range, [b], [t], [r], [s])
epochs = epoch_range
low = min(times)
high = max(times)
if 'hog' in c:
if s == 0:
plots[index].plot(epochs, times, label=c)
else:
plots[index].plot(epochs, times, label=c+" sync="+str(s))
#plots[index].set_ylim([math.ceil(low-0.5*(high-low)), math.ceil(high+0.5*(high-low))])
plots[index].legend(loc="upper left", fontsize=5)
#f.subplots_adjust(hspace=0)
f.tight_layout()
f.subplots_adjust(top=.85)
f.savefig(title+".png")
f.clf()
for (time_data, label) in [(average_gradient_times, "Gradient Time"), (average_total_times, "Overall Time")]:
for r in rank_range:
for b in batch_size_range:
f, plots = plt.subplots(1, len(epoch_range), sharex=True, sharey=True)
title = "Thread_%s_Plot_Batch=%d_Rank=%d" % (label, b, r)
f.suptitle(title, fontsize=12)
for index, e in enumerate(epoch_range):
plots[index].set_title("%d epoch" % e)
for s in sync_range:
for c in commands:
plots[index].tick_params(axis='both', which='major', labelsize=8)
plots[index].tick_params(axis='both', which='minor', labelsize=8)
plots[index].set_xlabel("Thread")
plots[index].set_ylabel(label)
times = get_values(time_data, [c], [e], [b], thread_range, [r], [s])
threads = thread_range
low = min(times)
high = max(times)
if 'hog' in c:
if s == 0:
plots[index].plot(threads, times, label=c)
else:
plots[index].plot(threads, times, label=c+" sync="+str(s))
#plots[index].set_ylim([math.ceil(low-0.5*(high-low)), math.ceil(high+0.5*(high-low))])
plots[index].legend(loc="upper left", fontsize=5)
#f.subplots_adjust(hspace=0)
f.tight_layout()
f.subplots_adjust(top=.85)
f.savefig(title+".png")
f.clf()
########################################
# TIME RATIOS OVER 1 THREAD
########################################
if 1 in thread_range:
for r in rank_range:
for b in batch_size_range:
for e in epoch_range:
for s in sync_range:
for c in commands:
if 'hog' in c and not s:
continue
title = ""
if 'hog' in c:
title = "Overall_Speedup_Over_Serial_%s_Batch=%d_Epoch=%d_Rank=%d" % (c, b, e, r)
else:
title = "Overall_Speedup_Over_Serial_%s_Sync=%d_Batch=%d_Epoch=%d_Rank=%d" % (c, s, b, e, r)
plt.figure()
plt.title(title, fontsize=12)
plt.ylabel("Serial_Time/Time_With_N_Threads")
plt.xlabel("N")
base_time = average_total_times[c][e][b][1][r][s]
time_values = get_values(average_total_times, [c], [e], [b], thread_range, [r], [s])
time_ratios = [float(base_time)/x for x in time_values]
plt.plot(thread_range, time_ratios)
plt.legend(loc="upper left", fontsize=5)
plt.savefig(title+".png")
plt.clf()
for r in rank_range:
for b in batch_size_range:
for e in epoch_range:
for s in sync_range:
for c in commands:
if 'hog' in c and not s:
continue
title = ""
if 'hog' in c:
title = "Gradient_Speedup_Over_Serial_%s_Batch=%d_Epoch=%d_Rank=%d" % (c, b, e, r)
else:
title = "Gradient_Speedup_Over_Serial_%s_Sync=%d_Batch=%d_Epoch=%d_Rank=%d" % (c, s, b, e, r)
plt.figure()
plt.title(title, fontsize=12)
plt.ylabel("Serial_Time/Time_With_N_Threads")
plt.xlabel("N")
base_time = average_gradient_times[c][e][b][1][r][s]
                            time_values = get_values(average_gradient_times, [c], [e], [b], thread_range, [r], [s])
time_ratios = [float(base_time)/x for x in time_values]
plt.plot(thread_range, time_ratios)
plt.legend(loc="upper left", fontsize=5)
plt.savefig(title+".png")
plt.clf()
########################################
# TIME RATIOS OVER HOG PER EPOCH
########################################
hog_command = [x for x in commands if 'hog' in x][0]
for t in thread_range:
for r in rank_range:
for b in batch_size_range:
title = "Overall_Time_Ratios_Over_Hog_Per_Epoch_Batch=%d_Thread=%d_Rank=%d" % (b, t, r)
plt.figure()
plt.title(title, fontsize=12)
plt.ylabel("Hog Time / Cyclades Time")
plt.xlabel("Epoch")
for s in sync_range:
for c in commands:
if 'hog' not in c:
baseline_hog_times = get_values(average_total_times, [hog_command], epoch_range, [b], [t], [r], [s])
times = get_values(average_total_times, [c], epoch_range, [b], [t], [r], [s])
ratio_times = [float(baseline_hog_times[i]) / float(times[i]) for i in range(len(times))]
plt.plot(epoch_range, ratio_times, label=c+"_sync="+str(s))
plt.legend(loc="upper left", fontsize=5)
plt.savefig(title+".png")
plt.clf()
hog_command = [x for x in commands if 'hog' in x][0]
for t in thread_range:
for r in rank_range:
for b in batch_size_range:
title = "Gradient_Time_Ratios_Over_Hog_Per_Epoch_Batch=%d_Thread=%d_Rank=%d" % (b, t, r)
plt.figure()
plt.title(title, fontsize=12)
plt.ylabel("Hog Time / Cyclades Time")
plt.xlabel("Epoch")
for s in sync_range:
for c in commands:
if 'hog' not in c:
                            baseline_hog_times = get_values(average_gradient_times, [hog_command], epoch_range, [b], [t], [r], [s])
times = get_values(average_gradient_times, [c], epoch_range, [b], [t], [r], [s])
ratio_times = [float(baseline_hog_times[i]) / float(times[i]) for i in range(len(times))]
plt.plot(epoch_range, ratio_times, label=c+"_sync="+str(s))
plt.legend(loc="upper left", fontsize=5)
plt.savefig(title+".png")
plt.clf()
#####################################
# TIME RATIOS OVER HOG PER THREAD
#####################################
for e in epoch_range:
for r in rank_range:
for b in batch_size_range:
title = "Overall_Time_Ratios_Over_Hog_Per_Thread_Batch=%d_Epoch=%d_Rank=%d" % (b, e, r)
plt.figure()
plt.title(title, fontsize=12)
plt.ylabel("Hog Time / Cyclades Time")
plt.xlabel("Thread")
for s in sync_range:
for c in commands:
if 'hog' not in c:
baseline_hog_times = get_values(average_total_times, [hog_command], [e], [b], thread_range, [r], [s])
times = get_values(average_total_times, [c], [e], [b], thread_range, [r], [s])
ratio_times = [float(baseline_hog_times[i]) / float(times[i]) for i in range(len(times))]
plt.plot(thread_range, ratio_times, label=c+"_sync="+str(s))
plt.legend(loc="upper left", fontsize=5)
plt.savefig(title+".png")
plt.clf()
for e in epoch_range:
for r in rank_range:
for b in batch_size_range:
title = "Gradient_Time_Ratios_Over_Hog_Per_Thread_Batch=%d_Epoch=%d_Rank=%d" % (b, e, r)
plt.figure()
plt.title(title, fontsize=12)
plt.ylabel("Hog Time / Cyclades Time")
plt.xlabel("Thread")
for s in sync_range:
for c in commands:
if 'hog' not in c:
                            baseline_hog_times = get_values(average_gradient_times, [hog_command], [e], [b], thread_range, [r], [s])
times = get_values(average_gradient_times, [c], [e], [b], thread_range, [r], [s])
ratio_times = [float(baseline_hog_times[i]) / float(times[i]) for i in range(len(times))]
plt.plot(thread_range, ratio_times, label=c+"_sync="+str(s))
plt.legend(loc="upper left", fontsize=5)
plt.savefig(title+".png")
plt.clf()
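# Note on the data containers used above: `tree` and `get_values` are defined
# earlier in this script and are not shown here. Judging from the access pattern
# average_losses[command][epoch][batch_size][thread][rank][sync], `tree` is
# presumably an autovivifying nested dictionary; a minimal sketch of that idea
# (an assumption, not the actual definition used by this file):
#
#   from collections import defaultdict
#   def tree():
#       return defaultdict(tree)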
#draw_time_loss_graph(1, 200, [500], [1, 8, 16], [30], [0, 1], ["cyc_word_embeddings_cyc", "cyc_word_embeddings_hog"])
draw_time_loss_graph(0, 100, [8000], [8], [15], [1], ["cyc_graph_cuts_cyc", "cyc_graph_cuts_hog"])
#draw_epoch_loss_graph(0, 100, [300], [8], [2], [1], ["cyc_word_embeddings_cyc"], [.9])
| pxinghao/dimmwitted | graph_cuts/run_full_benchmark.py | Python | apache-2.0 | 25,455 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2016, Andrei Korostelev <andrei at korostelev dot net>
#
# Before using this product in any way please read the license agreement.
# If you do not agree to the terms in this agreement you are not allowed
# to use this product or parts of it. You can read this license in the
# file named LICENSE.
#
"""
Unit tests for CcPy config parser
"""
import unittest
import sys
sys.path.append("..")
import ccpy.ccpyconfparser as ccpyconfparser
import ccpy.svntask as svntask
import ccpy.gittask as gittask
import ccpy.maketask as maketask
import ccpy.exectask as exectask
import ccpy.util as util
import ccpy.common as common
class CcPyConfParserTestCase(unittest.TestCase):
_logger = util.initLogger(
common.LoggerName,
'CcPyConfParserTest.log',
common.ProductName +
' v.' +
common.ProductVersion,
"DEBUG")
def testNonExistentConfig(self):
self.assertRaises(ccpyconfparser.ParseError, ccpyconfparser.parse, "ccpy.conf.nonexistent")
def testGoodConfig1Order(self):
myProjects = ccpyconfparser.parse("ccpy.conf.good.1")
self.assertEqual(len(myProjects), 5)
prjName, prjVal = next(myProjects)
self.assertEqual(prjName, 'Product2')
prjName, prjVal = next(myProjects)
self.assertEqual(prjName, 'Product3')
prjName, prjVal = next(myProjects)
self.assertEqual(prjName, 'Product4')
prjName, prjVal = next(myProjects)
self.assertEqual(prjName, 'Product5')
prjName, prjVal = next(myProjects)
self.assertEqual(prjName, 'Product6')
self.assertRaises(StopIteration, myProjects.__next__)
def testGoodConfig1Contents(self):
try:
myProjects = ccpyconfparser.parse("ccpy.conf.good.1")
self.assertEqual(len(myProjects), 5)
# Product2 project
myProjName = 'Product2'
myTasks = myProjects[myProjName]['tasks']
self.assertEqual(len(myTasks), 6)
self.assertEqual(
len([task for task in myTasks if isinstance(task, svntask.SvnTask)]), 1)
self.assertEqual(
len([task for task in myTasks if isinstance(task, gittask.GitTask)]), 1)
self.assertEqual(
len([task for task in myTasks if isinstance(task, maketask.MakeTask)]), 2)
self.assertEqual(
len([task for task in myTasks if isinstance(task, exectask.ExecTask)]), 2)
self.assertEqual(myProjects[myProjName]['emailFrom'], 'product2.builds@company.com')
self.assertEqual(
myProjects[myProjName]['emailTo'], [
'product2.developer@company.com', 'product2.buildmaster@company.com'])
self.assertEqual(myProjects[myProjName]['emailFormat'], util.EmailFormat.attachment)
self.assertEqual(myProjects[myProjName]['emailServerHost'], 'localhost')
self.assertEqual(myProjects[myProjName]['emailServerPort'], 25)
self.assertEqual(myProjects[myProjName]['emailServerUsername'], None)
self.assertEqual(myProjects[myProjName]['emailServerPassword'], None)
self.assertEqual(myProjects[myProjName]['failOnError'], True)
myTask = myTasks[0]
self.assertTrue(isinstance(myTask, svntask.SvnTask))
self.assertEqual(myTask.url, "https://company.com/repos/product2/mk")
self.assertEqual(myTask.workingDir, "/ProductBuilds/mk")
self.assertTrue(myTask.preCleanWorkingDir)
myTask = myTasks[1]
self.assertTrue(isinstance(myTask, maketask.MakeTask))
self.assertEqual(myTask.workingDir, "/ProductBuilds/SysInfra/Projects/common")
self.assertEqual(myTask.args, "clean release")
self.assertEqual(myTask.timeout, 120)
myTask = myTasks[2]
self.assertTrue(isinstance(myTask, maketask.MakeTask))
self.assertEqual(myTask.workingDir, "/ProductBuilds/SysInfra/Projects/logging")
self.assertEqual(myTask.args, "")
self.assertEqual(myTask.timeout, 600)
myTask = myTasks[3]
self.assertTrue(isinstance(myTask, exectask.ExecTask))
self.assertEqual(myTask.executable, "commontests")
self.assertEqual(myTask.args, "--xmlout")
self.assertEqual(myTask.workingDir, "/ProductBuilds/SysInfra/TestProjects/commontests")
self.assertEqual(myTask.timeout, 30)
self.assertEqual(myTask.warningExitCode, 2)
myTask = myTasks[4]
self.assertTrue(isinstance(myTask, exectask.ExecTask))
self.assertEqual(myTask.executable, "loggingTests")
self.assertEqual(myTask.args, "")
self.assertEqual(myTask.workingDir, "")
self.assertEqual(myTask.timeout, 600)
myTask = myTasks[5]
self.assertTrue(isinstance(myTask, gittask.GitTask))
self.assertEqual(myTask.url, "https://company.com/repos/product2/Common")
self.assertEqual(myTask.workingDir, "/ProductBuilds/Common")
self.assertFalse(myTask.preCleanWorkingDir)
# Product3 project
myProjName = "Product3"
myTasks = myProjects[myProjName]['tasks']
self.assertEqual(len(myTasks), 1)
self.assertEqual(
len([task for task in myTasks if isinstance(task, svntask.SvnTask)]), 1)
self.assertEqual(myProjects[myProjName]['emailFrom'], 'product3.builds@company.com')
self.assertEqual(myProjects[myProjName]['emailTo'], ['product3.developer@company.com'])
self.assertEqual(myProjects[myProjName]['emailFormat'], util.EmailFormat.attachment)
self.assertEqual(myProjects[myProjName]['emailAttachments'], [])
self.assertEqual(myProjects[myProjName]['failOnError'], False)
myTask = myTasks[0]
self.assertTrue(isinstance(myTask, svntask.SvnTask))
self.assertEqual(myTask.url, "https://company.com/repos/product3/server")
self.assertEqual(myTask.workingDir, "/ProductBuilds/server")
self.assertFalse(myTask.preCleanWorkingDir)
# Product4 project
myProjName = "Product4"
myTasks = myProjects[myProjName]['tasks']
self.assertEqual(len(myTasks), 1)
self.assertEqual(
len([task for task in myTasks if isinstance(task, maketask.MakeTask)]), 1)
self.assertEqual(myProjects[myProjName]['failOnError'], True)
self.assertEqual(myProjects[myProjName]['emailFrom'], '')
self.assertEqual(myProjects[myProjName]['emailTo'], [])
myTask = myTasks[0]
self.assertTrue(isinstance(myTask, maketask.MakeTask))
self.assertEqual(myTask.workingDir, "/ProductBuilds/SysInfra/Projects/common")
self.assertEqual(myTask.args, "")
self.assertEqual(myTask.timeout, 600)
# Product5 project
myProjName = "Product5"
self.assertEqual(
myProjects[myProjName]['emailFrom'],
'product5.buildserver@company.com')
self.assertEqual(
myProjects[myProjName]['emailTo'], [
'product5.developer@company.com', 'product5.buildmaster@company.com'])
self.assertEqual(myProjects[myProjName]['emailFormat'], util.EmailFormat.plain)
self.assertEqual(myProjects[myProjName]['emailServerHost'], 'localhost')
self.assertEqual(myProjects[myProjName]['emailServerPort'], 25)
self.assertEqual(myProjects[myProjName]['emailServerUsername'], None)
self.assertEqual(myProjects[myProjName]['emailServerPassword'], None)
self.assertEqual(myProjects[myProjName]['emailAttachments'], [])
# Product6 project
myProjName = "Product6"
self.assertEqual(
myProjects[myProjName]['emailFrom'],
'product6.buildserver@company.com')
self.assertEqual(myProjects[myProjName]['emailTo'], ['product6.developer@company.com'])
self.assertEqual(myProjects[myProjName]['emailFormat'], util.EmailFormat.html)
self.assertEqual(myProjects[myProjName]['emailServerHost'], 'smtp.mymail.com')
self.assertEqual(myProjects[myProjName]['emailServerPort'], 2626)
self.assertEqual(myProjects[myProjName]['emailServerUsername'], 'jos')
self.assertEqual(myProjects[myProjName]['emailServerPassword'], 'topsecret')
self.assertEqual(myProjects[myProjName]['emailAttachments'], ['/var/log/messages', '/var/log/messages.1'])
except BaseException as e:
print(("Error. %s. %s. %s" % (type(e), str(e), util.formatTb())))
self.assertTrue(False)
def testBadConfig1(self):
self.assertRaises(ccpyconfparser.ParseError, ccpyconfparser.parse, "ccpy.conf.bad.1")
if __name__ == '__main__':
unittest.main()
| kindkaktus/CcPy | tst/ccpyconfparsertest.py | Python | bsd-3-clause | 9,126 |
import unittest
import os
from sqltxt.table import Table
from sqltxt.column import Column, ColumnName, AmbiguousColumnNameError
from sqltxt.expression import Expression
class TableTest(unittest.TestCase):
def setUp(self):
self.data_path = os.path.join(os.path.dirname(__file__), '../data')
table_header = ["col_a", "col_b"]
table_contents = """1,1
2,3
3,2"""
self.table_a = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
table_header = ["col_a", "col_b"]
table_contents = """1,w
2,x
2,y
5,z"""
self.table_b = Table.from_cmd(
name = 'table_b',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
def test_subset_rows(self):
conditions = [
[Expression('col_b', '==', '1'), 'or', Expression('col_a', '==', '2')]
]
self.table_a.subset_rows(conditions)
cmds_actual = self.table_a.cmds
cmds_expected = [
'echo -e "1,1\n2,3\n3,2"',
"awk -F',' 'OFS=\",\" { if (($2 == 1 || $1 == 2)) { print $1,$2 } }'"]
self.assertEqual(cmds_actual, cmds_expected)
def test_order_columns(self):
col_name_order = [ColumnName('col_b'), ColumnName('col_a')]
self.table_a.order_columns(col_name_order)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "awk -F',' 'OFS=\",\" { print $2,$1 }'"]
self.assertEqual(cmds_actual, cmds_expected)
def test_sort(self):
sort_by_col_names = [ColumnName('col_a'), ColumnName('col_b')]
self.table_a.sort(sort_by_col_names)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "sort -t, -k 1,1 -k 2,2"]
self.assertEqual(cmds_actual, cmds_expected)
sort_by_cols = [self.table_a.get_column_for_name(cn) for cn in sort_by_col_names]
self.assertEqual(self.table_a.sorted_by, sort_by_cols)
def test_is_sorted_by(self):
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['col_a', 'col_b'])
table_from_cmd.sorted_by = [Column('table_a.col_a'), Column('table_a.col_b')]
self.assertTrue(table_from_cmd.is_sorted_by([0]))
self.assertFalse(table_from_cmd.is_sorted_by([1]))
self.assertTrue(table_from_cmd.is_sorted_by([0,1]))
def test_get_column_for_name_raises_on_ambiguity(self):
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['col_a', 'col_a'])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['ta.col_a', 'tb.col_a'])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
first_column = Column('ta.col_a')
first_column.add_name('col_alpha')
second_column = Column('tb.col_a')
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = [first_column, second_column])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
def test_sample_rows(self):
self.table_a.sample_rows(1)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"',
"""awk -v seed=$RANDOM -v n={0} '
BEGIN {{ srand(seed) }}
NR <= n {{ reservoir[NR] = $0 }}
NR > n {{ M = int(rand() * NR) + 1; if (M <= n) {{ reservoir[M] = $0 }}}}
END {{ for (key in reservoir) {{ print reservoir[key] }}}}'""".format(1)
]
self.assertEqual(cmds_actual, cmds_expected)
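    # The awk one-liner asserted above is reservoir sampling (Algorithm R): the
    # first n rows fill the reservoir, and every later row NR overwrites a random
    # slot with probability n/NR, so each input row is kept with equal
    # probability. With n=1, as in this test, exactly one uniformly chosen row of
    # the input survives.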
def test_get_cmd_str(self):
table_from_file = Table.from_file_path(os.path.join(self.data_path, 'table_a.txt'))
# output from a file-backed Table to STDOUT
cmd_actual = table_from_file.get_cmd_str()
cmd_expected = 'tail -n+2 {}/table_a.txt'.format(self.data_path)
self.assertEqual(cmd_actual, cmd_expected)
table_from_cmd = Table.from_cmd(
'table_a',
cmd = 'echo -e "1,2,3,4"',
columns = ['col_a', 'col_b', 'col_c', 'col_d'])
# output from a command-backed Table to STDOUT
cmd_actual = table_from_cmd.get_cmd_str()
cmd_expected = 'echo -e "1,2,3,4"'
self.assertEqual(cmd_actual, cmd_expected)
# add a command, then output
table_from_cmd.cmds += ['sort']
# to STDOUT
cmd_actual = table_from_cmd.get_cmd_str()
cmd_expected = 'echo -e "1,2,3,4" | sort'
self.assertEqual(cmd_actual, cmd_expected)
| shahin/sqltxt | tests/unit/table_test.py | Python | mit | 5,179 |
#!/usr/bin/env python3
import argparse
import datetime
import json
import re
from copy import deepcopy
from gocddash.analysis import data_access, go_client, domain
from gocddash.util import app_config
from gocddash.console_parsers.junit_report_parser import JunitConsoleParser
from gocddash.console_parsers.determine_parser import get_log_parser
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--app-cfg', help='application config')
parser.add_argument('-f', '--file-source', help='go client file source')
return parser.parse_args()
def setup_go_client(pargs):
application_cfg_path = pargs.app_cfg
app_config.create_app_config(application_cfg_path)
file_source = pargs.file_source
if file_source:
app_config.get_app_config().cfg['GO_SERVER_URL'] = file_source
go_client.go_client(
app_config.get_app_config().cfg['GO_SERVER_URL'],
(app_config.get_app_config().cfg['GO_SERVER_USER'], app_config.get_app_config().cfg['GO_SERVER_PASSWD'])
)
def log(string):
print(str(datetime.datetime.now()) + " " + string)
class SyncController:
def __init__(self, db, go, chunk_size=10):
self.db = db
self.go = go
self.chunk_size = chunk_size
self.to_notify = []
self.max_to_sync = 500
def sync(self):
self.sync_agents()
self.sync_pipeline_list()
self.update_sync_rules()
self.sync_pipelines()
self.notify_breakers()
def sync_agents(self):
"""
Update mapping from uuid to go-agent name in database.
"""
json_text = self.go.get_agents()
for agent in json.loads(json_text)["_embedded"]["agents"]:
self.db.save_agent(agent['uuid'], agent['hostname'])
def sync_pipeline_list(self):
json_text = self.go.get_pipeline_groups()
for group in json.loads(json_text):
for pipeline in group['pipelines']:
self.db.save_pipeline(pipeline['name'], group['name'])
def update_sync_rules(self):
json_text = self.go.get_pipeline_groups()
group_for_pipeline = {}
for group in json.loads(json_text):
for pipeline in group['pipelines']:
group_copy = deepcopy(group)
group_copy['pipelines'] = [pipeline]
group_for_pipeline[pipeline['name']] = group_copy
for new_pipeline in self.db.list_new_pipelines():
self.determine_sync_attributes(
new_pipeline['pipeline_name'],
group_for_pipeline[new_pipeline['pipeline_name']]
)
def determine_sync_attributes(self, pipeline_name, pipeline_group_structure):
"""
Update new pipelines, i.e. pipelines where the sync field is NULL in
the database, if they match some rule. All rules are applied in the
order given by db.list_pipeline_sync_rules(), so the last rule wins.
"""
for rule in self.db.list_pipeline_sync_rules():
assert rule['kind'] == 're'
nodes = JsonNodes(pipeline_group_structure).nodes
for key, value in nodes:
if key == rule['pipeline_groups_field']:
if re.search(rule['pattern'], value):
kwargs = {}
for param in [
'sync',
'log_parser',
'email_notifications'
]:
if rule[param] is not None:
kwargs[param] = rule[param]
self.db.update_pipeline(pipeline_name, **kwargs)
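    # Illustrative example (hypothetical rule rows) of the "last rule wins"
    # behaviour documented above: given the rules, in this order,
    #   {'kind': 're', 'pipeline_groups_field': 'pipelines.name', 'pattern': '.*',
    #    'sync': True, 'log_parser': None, 'email_notifications': None}
    #   {'kind': 're', 'pipeline_groups_field': 'pipelines.name', 'pattern': '^legacy-',
    #    'sync': False, 'log_parser': None, 'email_notifications': None}
    # a pipeline named 'legacy-build' matches both, and the second rule's
    # sync=False is what ends up stored via update_pipeline.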
def sync_pipelines(self):
for pipeline_name in self.db.get_pipelines_to_sync():
self.sync_pipeline(pipeline_name)
def sync_pipeline(self, pipeline_name):
max_ins = self.max_instance_for_pipeline(pipeline_name)
wanted_pipeline_instances = self.get_wanted_instances(pipeline_name, max_ins)
fetched_pipelines_history = self.get_pipeline_history(pipeline_name, wanted_pipeline_instances)
for pipeline_instance in fetched_pipelines_history:
self.store_synced_pipeline(pipeline_name, pipeline_instance)
done = self.sync_stages(pipeline_name, pipeline_instance)
self.db.store_pipeline_instance_done(pipeline_instance["id"], done)
def store_synced_pipeline(self, pipeline_name, pipeline_instance):
pipeline_counter = pipeline_instance["counter"]
print('Store synced pipeline', pipeline_name, pipeline_counter)
pipeline_id = pipeline_instance["id"]
instance = domain.PipelineInstance(
pipeline_name,
pipeline_counter,
pipeline_instance["build_cause"]["trigger_message"],
pipeline_id
)
if not self.db.pipeline_instance_exists(pipeline_name, pipeline_counter):
self.db.insert_pipeline_instance(instance)
def sync_stages(self, pipeline_name, pipeline_instance):
"""
Find all stages for a pipeline instance, and sync them.
Return whether all were done.
"""
pipeline_counter = pipeline_instance["counter"]
pipeline_id = pipeline_instance["id"]
done = True
for stage in pipeline_instance['stages']:
done &= self.sync_stage(pipeline_name, pipeline_counter, pipeline_id, stage)
return done
def sync_stage(self, pipeline_name, pipeline_counter, pipeline_id, stage):
"""
Find any new runs for a stage, and sync them.
Return whether all were done.
"""
if not stage['scheduled']:
return False
stage_name = stage['name']
current_stage_counter = int(stage['counter'])
previous_stage_counter = self.db.get_latest_synced_stage(pipeline_id, stage_name)
stage_counters = range(previous_stage_counter + 1, current_stage_counter + 1)
done = True
for stage_counter in stage_counters:
done &= self.sync_stage_occurrence(
pipeline_name,
pipeline_counter,
pipeline_id,
stage_name,
stage_counter
)
return done
def sync_stage_occurrence(self, pipeline_name, pipeline_counter, pipeline_id,
stage_name, stage_counter):
"""
Store information about stage run from go-server and sync its jobs.
Return whether we were done with the stage.
"""
stage_occurrence_json = self.go.get_stage_instance(pipeline_name, pipeline_counter,
stage_counter, stage_name)
stage_occurrence = json.loads(stage_occurrence_json)
stage_result = stage_occurrence["result"]
if stage_result == 'Unknown':
print(" Skipping stage: {} / {} - still in progress".format(
stage_name, stage_counter))
return False
print(" Fetching stage: {} / {}".format(stage_name, stage_counter))
stage_id = stage_occurrence["id"]
# Leave for now but a Stage doesn't have a scheduled_date in the API
timestamp = self.ms_timestamp_to_date(stage_occurrence["jobs"][0]["scheduled_date"])
stage = domain.Stage(stage_name, stage_occurrence["approved_by"], stage_result,
stage_counter, stage_id, timestamp)
self.db.insert_stage(pipeline_id, stage)
all_done = True
for job in stage_occurrence['jobs']:
if job.get("state") == "Completed":
self.sync_job(pipeline_name,
pipeline_counter,
stage_id,
stage_name,
stage_counter,
job)
else:
all_done = False
return all_done
def sync_job(self, pipeline_name, pipeline_counter, stage_id, stage_name, stage_counter, job):
"""
Store information about job and tests from go-server.
Remember what we should notify breakers about.
Sync failure info if failure.
"""
print('sync_job')
job_name = job['name']
agent_uuid = job['agent_uuid']
scheduled_date = self.ms_timestamp_to_date(job['scheduled_date'])
job_id = job['id']
job_result = job['result']
try:
parser = JunitConsoleParser(pipeline_name, pipeline_counter, stage_counter, stage_name, job_name)
tests_run, tests_failed, tests_skipped = parser.parse_bar_chart_info()
except LookupError as error:
print('Failed parsing test results for {}/{}/{}/{}/{}: {}'.format(
pipeline_name, pipeline_counter, stage_counter, stage_name, job_name, error
))
tests_run, tests_failed, tests_skipped = 0, 0, 0
job = domain.Job(job_id, stage_id, job_name, agent_uuid, scheduled_date,
job_result, tests_run, tests_failed, tests_skipped)
self.db.insert_job(stage_id, job)
print('job result', job_result)
if job_result != 'Passed' and self.should_notify(pipeline_name):
stage_failure_info = domain.get_pipeline_head(pipeline_name)
failure_streak = domain.get_latest_failure_streak(pipeline_name)
self.to_notify.append((stage_failure_info, failure_streak))
if job_result == 'Failed' and not self.db.is_failure_downloaded(stage_id):
self.sync_failure_info(pipeline_counter, pipeline_name,
stage_id, stage_name, stage_counter, job_name)
def should_notify(self, pipeline_name):
"""
Are email notifications enabled for this pipeline?
"""
pipeline = self.db.get_pipeline(pipeline_name)
return pipeline and pipeline['email_notifications']
def sync_failure_info(self, pipeline_counter, pipeline_name,
stage_id, stage_name, stage_counter, job_name):
"""
Store failure information from go-server for a given job,
as extracted from its log parser.
"""
try:
log_parser_class = get_log_parser(pipeline_name)
log_parser = log_parser_class(pipeline_name, pipeline_counter, stage_counter, stage_name, job_name)
failure_stage = log_parser.get_failure_stage()
self.db.insert_failure_information(stage_id, failure_stage)
log_parser.insert_info(stage_id)
except LookupError as error:
print("Failed to sync failure info for {}/{}/{}/{}/{}: {}".format(
pipeline_counter, pipeline_name, stage_name, stage_counter, job_name, error)
)
@staticmethod
def ms_timestamp_to_date(ms):
"""
Datetime object with truncated fractions of seconds from POSIX timestamp.
"""
return datetime.datetime.fromtimestamp(ms // 1000)
def max_instance_for_pipeline(self, pipeline_name):
"""
Return the highest pipeline counter in Go for the given pipeline.
"""
try:
history_json = self.go.request_pipeline_history(pipeline_name)
return json.loads(history_json)['pipelines'][0]['counter']
except LookupError:
return 0
def get_wanted_instances(self, pipeline_name, counter):
"""
Get a list of pipeline_counter indicating what to fetch for a pipeline.
Start at `counter` and go back (but not past 1).
Don't include instances we already have, don't fetch more than
self.chunk_size at a time, and never go back more than `self.max_to_sync`
from the initial value of `counter`.
"""
oldest_we_want = max(1, counter - self.max_to_sync + 1)
counters = []
while len(counters) < self.chunk_size:
if counter < oldest_we_want:
break
if not self.db.pipeline_instance_done(pipeline_name, counter):
counters.append(counter)
counter -= 1
return counters
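    # Worked example (illustrative numbers): with chunk_size=10, max_to_sync=500,
    # counter=42 and nothing marked done yet, this returns
    # [42, 41, 40, 39, 38, 37, 36, 35, 34, 33] -- the newest chunk_size counters,
    # walking backwards and skipping any counter the database already has as done.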
def get_pipeline_history(self, pipeline_name, pipeline_counters):
"""
Get the history for given pipeline_name, and list of pipeline_counter.
Since we get the historical information in chunks, we store all historical
        information we get from the go-server in pipeline_cache. If we find the
        pipeline counter we're looking for in the pipeline_cache, we get it from
        there; otherwise, we get more history from the go-server.
"""
def add_to(some_pipeline_cache, offset=[0]):
"""
Fetch pipeline history and store in a dictionary.
Increase offset by page_size for each call.
Return whether we managed to add something or not.
"""
try:
history_json = self.go.request_pipeline_history(
pipeline_name, offset[0])
except LookupError:
return False
history = json.loads(history_json)
instances = history.get('pipelines', [])
for instance in instances:
some_pipeline_cache[instance['counter']] = instance
offset[0] += history["pagination"]["page_size"]
return len(instances) > 0
pipeline_history = []
pipeline_cache = {}
remaining_sorted_counters = sorted(pipeline_counters)
while remaining_sorted_counters:
ctr = remaining_sorted_counters[-1]
if ctr in pipeline_cache:
pipeline_history.append(pipeline_cache[ctr])
remaining_sorted_counters.remove(ctr)
elif pipeline_cache and min(pipeline_cache.keys()) < ctr:
# If the go-server had this instance, we would have
# found it by now. It's missing!
remaining_sorted_counters.remove(ctr)
else:
if not add_to(pipeline_cache):
break
return pipeline_history
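    # Sketch of the caching behaviour described above, with hypothetical numbers:
    # asked for counters [40, 42] while the go-server pages 10 instances at a
    # time, the first page (say 47..38) already contains both, so both are served
    # from pipeline_cache after a single request; a counter older than everything
    # in the cache triggers another request at a larger offset, and a counter
    # inside the cached range that is still absent is treated as missing.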
def check_notification_needs(self, pipeline_instance):
pass
def notify_breakers(self):
pass
class JsonNodes:
"""
Parse a Python data structure coming from json, and build a
list of (key, value) pairs. The keys show the hierarchy using
    dot notation. E.g. {'a': {'b': 6, 'o': 0}} should put
[('a.b', 6), ('a.o', 0)] in its .nodes attribute.
"""
def __init__(self, json_structure, prefix=None):
"""
Delegate lists and dicts, and solve the trivial case
"""
if isinstance(json_structure, list):
self.nodes = self.json_nodes_list(json_structure, prefix)
elif isinstance(json_structure, dict):
self.nodes = self.json_nodes_dict(json_structure, prefix)
else:
# If this was neither a list nor a dict, it's a final value,
# and the path to it is already in the prefix.
# Return a list like the cases above would.
self.nodes = [(prefix, json_structure)]
@classmethod
def json_nodes_list(cls, json_structure, prefix=None):
result = []
for elm in json_structure:
result.extend(cls(elm, prefix).nodes)
return result
@classmethod
def json_nodes_dict(cls, json_structure, prefix=None):
result = []
for key, value in json_structure.items():
if not prefix:
new_prefix = key
else:
new_prefix = prefix + '.' + key
result.extend(cls(value, new_prefix).nodes)
return result
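# Minimal usage sketch for JsonNodes (values are illustrative only):
#
#   JsonNodes({'a': {'b': 6, 'o': 0}, 'xs': [{'k': 1}]}).nodes
#   # -> [('a.b', 6), ('a.o', 0), ('xs.k', 1)]   (dict ordering may vary)
#
# Lists add no path segment of their own; every element is flattened under the
# parent prefix, which is why a sync rule can match a field like
# 'pipelines.name' inside a pipeline-group structure.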
if __name__ == '__main__':
setup_go_client(parse_args())
go = go_client.go_client()
db = data_access.get_connection(app_config.get_app_config().cfg['DB_PATH'])
controller = SyncController(db, go)
log("Starting synchronization.")
controller.sync()
log("Synchronization finished.")
log('Done!')
| magnus-lycka/gocddash | gocddash/gocddash_sync.py | Python | mit | 16,101 |
import unittest
from datetime import datetime, timedelta
from automover.test.helpers import *
from automover.remover import handle_remove
class TestRemover(unittest.TestCase):
def setUp(self):
self.client = client = DummyClient()
self.torrents = [
DummyTorrent(client, '1', datetime.now(), 0, '/matchpath/', False, True, ['http://matchtracker.com']),
DummyTorrent(client, '2', datetime.now(), 2, '/matchpath/', True, True, ['http://matchtracker.com']),
DummyTorrent(client, '3', datetime.now()-timedelta(hours=20), 0.5, '/matchpath/', True, True, ['http://matchtracker.com']),
DummyTorrent(client, '4', datetime.now()-timedelta(hours=50), 50, '/matchpath/', True, True, ['http://matchtracker.com']),
DummyTorrent(client, '5', datetime.now()-timedelta(hours=50), 50, '/matchpath/', True, True, ['http://matchtracker.com']),
DummyTorrent(client, '6', datetime.now(), 50, '/matchpath/', False, True, ['http://matchtracker.com']),
DummyTorrent(client, '7', datetime.now(), 50, '/matchpath/', True, True, ['http://matchNOTtracker.com']),
DummyTorrent(client, '8', datetime.now(), 50, '/matchNOTpath/', True, True, ['http://matchtracker.com']),
]
self.client.torrents = self.torrents
def test_timed_remove(self):
handle_remove(self.client, {'fakesite1': ('time', 'matchtracker', '3')}, ['/matchpath'])
self.assertEqual([torrent.torrent_id for torrent in self.torrents], ['1', '2', '6', '7', '8'], 'Did not remove correct torrents')
def test_ratio_remove(self):
handle_remove(self.client, {'fakesite1': ('ratio', 'matchtracker', 1.5)}, ['/matchpath'])
self.assertEqual([torrent.torrent_id for torrent in self.torrents], ['1', '3', '6', '7', '8'], 'Did not remove correct torrents')
def test_combined_remove(self):
handle_remove(self.client, {'fakesite1': ('ratio', 'matchtracker', 1.5), 'fakesite2': ('time', 'matchtracker', '3')}, ['/matchpath'])
        self.assertEqual([torrent.torrent_id for torrent in self.torrents], ['1', '6', '7', '8'], 'Did not remove correct torrents')
| JohnDoee/rtorrent-automover | automover/test/test_remover.py | Python | bsd-3-clause | 2175
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
dirname = os.path.dirname(__file__)
with open(os.path.join(dirname, 'requirements.txt')) as requirements_file:
requirements = requirements_file.readlines()
test_requirements = [
'coverage',
'flake8',
'pep8-naming',
'mock',
]
setup(
name='openupgradelib',
version='0.1.2',
description="A library with support functions to be called from Odoo "
"migration scripts.",
long_description=readme + '\n\n' + history,
author="Odoo Community Association",
author_email='support@odoo-community.org',
url='https://github.com/OCA/openupgradelib',
packages=['openupgradelib'],
package_dir={'openupgradelib': 'openupgradelib'},
include_package_data=True,
install_requires=requirements,
license="AGPL-3",
zip_safe=False,
keywords='openupgradelib',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
| pedrobaeza/openupgradelib | setup.py | Python | agpl-3.0 | 1,718 |
from random import randrange as rand
import pygame, sys
cell_size = 18
cols = 10
rows = 22
maxfps = 30
colors = [
(0, 0, 0 ),
(255, 85, 85),
(100, 200, 115),
(120, 108, 245),
(255, 140, 50 ),
(50, 120, 52 ),
(146, 202, 73 ),
(150, 161, 218 ),
(35, 35, 35)
]
tetris_shapes = [
[[1, 1, 1],
[0, 1, 0]],
[[0, 2, 2],
[2, 2, 0]],
[[3, 3, 0],
[0, 3, 3]],
[[4, 0, 0],
[4, 4, 4]],
[[0, 0, 5],
[5, 5, 5]],
[[6, 6, 6, 6]],
[[7, 7],
[7, 7]]
]
def rotate_clockwise(shape):
return [ [ shape[y][x]
for y in range(len(shape)) ]
for x in range(len(shape[0]) - 1, -1, -1) ]
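# Example of the matrix transformation performed by rotate_clockwise, using the
# L-shaped piece from tetris_shapes above: each output row is a column of the
# input, taken from the rightmost column to the leftmost, read top to bottom.
#
#   rotate_clockwise([[4, 0, 0],
#                     [4, 4, 4]])
#   # -> [[0, 4],
#   #     [0, 4],
#   #     [4, 4]]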
def check_collision(board, shape, offset):
off_x, off_y = offset
for cy, row in enumerate(shape):
for cx, cell in enumerate(row):
try:
if cell and board[ cy + off_y ][ cx + off_x ]:
return True
except IndexError:
return True
return False
def remove_row(board, row):
del board[row]
return [[0 for i in range(cols)]] + board
def join_matrixes(mat1, mat2, mat2_off):
off_x, off_y = mat2_off
for cy, row in enumerate(mat2):
for cx, val in enumerate(row):
mat1[cy+off_y-1 ][cx+off_x] += val
return mat1
def new_board():
board = [ [ 0 for x in range(cols) ]
for y in range(rows) ]
board += [[ 1 for x in range(cols)]]
return board
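# The extra row of 1s appended by new_board() sits just below the visible
# playfield (the window is only cell_size*rows tall) and acts as a solid floor,
# so check_collision() stops falling stones at the bottom without any
# special-casing; the line-clearing loop in drop() skips it because it iterates
# over self.board[:-1].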
class TetrisApp(object):
def __init__(self):
pygame.init()
pygame.key.set_repeat(250,25)
self.width = cell_size*(cols+6)
self.height = cell_size*rows
self.rlim = cell_size*cols
self.bground_grid = [[ 8 if x%2==y%2 else 0 for x in range(cols)] for y in range(rows)]
self.default_font = pygame.font.Font(
pygame.font.get_default_font(), 12)
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.event.set_blocked(pygame.MOUSEMOTION)
self.next_stone = tetris_shapes[rand(len(tetris_shapes))]
self.init_game()
def new_stone(self):
self.stone = self.next_stone[:]
self.next_stone = tetris_shapes[rand(len(tetris_shapes))]
self.stone_x = int(cols / 2 - len(self.stone[0])/2)
self.stone_y = 0
if check_collision(self.board,
self.stone,
(self.stone_x, self.stone_y)):
self.gameover = True
def init_game(self):
self.board = new_board()
self.new_stone()
self.level = 1
self.score = 0
self.lines = 0
pygame.time.set_timer(pygame.USEREVENT+1, 1000)
def disp_msg(self, msg, topleft):
x,y = topleft
for line in msg.splitlines():
self.screen.blit(
self.default_font.render(
line,
False,
(255,255,255),
(0,0,0)),
(x,y))
y+=14
def center_msg(self, msg):
for i, line in enumerate(msg.splitlines()):
msg_image = self.default_font.render(line, False,
(255,255,255), (0,0,0))
msgim_center_x, msgim_center_y = msg_image.get_size()
msgim_center_x //= 2
msgim_center_y //= 2
self.screen.blit(msg_image, (
self.width // 2-msgim_center_x,
self.height // 2-msgim_center_y+i*22))
def draw_matrix(self, matrix, offset):
off_x, off_y = offset
for y, row in enumerate(matrix):
for x, val in enumerate(row):
if val:
pygame.draw.rect(
self.screen,
colors[val],
pygame.Rect(
(off_x+x) *
cell_size,
(off_y+y) *
cell_size,
cell_size,
cell_size),0)
def add_cl_lines(self, n):
linescores = [0, 40, 100, 300, 1200]
self.lines += n
self.score += linescores[n] * self.level
if self.lines >= self.level*6:
self.level += 1
newdelay = 1000-50*(self.level-1)
newdelay = 100 if newdelay < 100 else newdelay
pygame.time.set_timer(pygame.USEREVENT+1, newdelay)
def move(self, delta_x):
if not self.gameover and not self.paused:
new_x = self.stone_x + delta_x
if new_x < 0:
new_x = 0
if new_x > cols - len(self.stone[0]):
new_x = cols - len(self.stone[0])
if not check_collision(self.board,
self.stone,
(new_x, self.stone_y)):
self.stone_x = new_x
def quit(self):
self.center_msg("Exiting...")
pygame.display.update()
sys.exit()
def drop(self, manual):
if not self.gameover and not self.paused:
self.score += 1 if manual else 0
self.stone_y += 1
if check_collision(self.board,
self.stone,
(self.stone_x, self.stone_y)):
self.board = join_matrixes(
self.board,
self.stone,
(self.stone_x, self.stone_y))
self.new_stone()
cleared_rows = 0
while True:
for i, row in enumerate(self.board[:-1]):
if 0 not in row:
self.board = remove_row(
self.board, i)
cleared_rows += 1
break
else:
break
self.add_cl_lines(cleared_rows)
return True
return False
def insta_drop(self):
if not self.gameover and not self.paused:
while(not self.drop(True)):
pass
def rotate_stone(self):
if not self.gameover and not self.paused:
new_stone = rotate_clockwise(self.stone)
if not check_collision(self.board,
new_stone,
(self.stone_x, self.stone_y)):
self.stone = new_stone
def toggle_pause(self):
self.paused = not self.paused
def start_game(self):
if self.gameover:
self.init_game()
self.gameover = False
def run(self):
key_actions = {
'ESCAPE': self.quit,
'LEFT': lambda:self.move(-1),
'RIGHT': lambda:self.move(+1),
'DOWN': lambda:self.drop(True),
'UP': self.rotate_stone,
'p': self.toggle_pause,
'SPACE': self.start_game,
'RETURN': self.insta_drop
}
self.gameover = False
self.paused = False
dont_burn_my_cpu = pygame.time.Clock()
while 1:
self.screen.fill((0,0,0))
if self.gameover:
self.center_msg("""Game Over!\nYour score: %d
Press space to continue""" % self.score)
else:
if self.paused:
self.center_msg("Paused")
else:
pygame.draw.line(self.screen,
(255,255,255),
(self.rlim+1, 0),
(self.rlim+1, self.height-1))
self.disp_msg("Next:", (
self.rlim+cell_size,
2))
self.disp_msg("Score: %d\n\nLevel: %d\
\nLines: %d" % (self.score, self.level, self.lines),
(self.rlim+cell_size, cell_size*5))
self.draw_matrix(self.bground_grid, (0,0))
self.draw_matrix(self.board, (0,0))
self.draw_matrix(self.stone,
(self.stone_x, self.stone_y))
self.draw_matrix(self.next_stone,
(cols+1,2))
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.USEREVENT+1:
self.drop(False)
elif event.type == pygame.QUIT:
self.quit()
elif event.type == pygame.KEYDOWN:
for key in key_actions:
if event.key == eval("pygame.K_"
+key):
key_actions[key]()
dont_burn_my_cpu.tick(maxfps)
if __name__ == '__main__':
App = TetrisApp()
	App.run()
| saintdragon2/python-3-lecture-2015 | sinsojae_mid_final/nobless/tetris.py | Python | mit | 6787
#------------------------------------------------------------------------------
# Copyright (c) 2007, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD license.
# However, when used with the GPL version of PyQt the additional terms described in the PyQt GPL exception also apply
#
# Author: Riverbank Computing Limited
#------------------------------------------------------------------------------
""" Defines the concrete implementations of the traits Toolkit interface for
the PyQt user interface toolkit.
"""
# import enthought.qt before anything else is done so the sipapi
# can be set correctly if needed
import enthought.qt
#-------------------------------------------------------------------------------
# Define the reference to the exported GUIToolkit object:
#-------------------------------------------------------------------------------
import toolkit
# Reference to the GUIToolkit object for PyQt.
toolkit = toolkit.GUIToolkit()
| enthought/etsproxy | enthought/traits/ui/qt4/__init__.py | Python | bsd-3-clause | 1,028 |
''' Unit tests for folder summary_json '''
import os
from time import time
import pytest
from pytest_mock import mocker
import shutil
import jsonpickle
import folder
from folder._os import Stat
from ..base_test import setup_tmp_dir, touch
class TestFolderSummary(object):
def setup_method(self, method):
self.tmp_dir, self.source_dir, self.destination_dir = setup_tmp_dir()
touch(os.path.join(self.source_dir, 'file_1.pdf'))
touch(os.path.join(self.source_dir, 'file_2.pdf'))
touch(os.path.join(self.source_dir, 'file_3.pdf'))
os.makedirs(os.path.join(self.source_dir, 'another_folder'))
touch(os.path.join(self.source_dir, 'another_folder', 'another_file.mp4'))
self.summary = {
'path': self.source_dir,
'file_names': ['file_1.pdf', 'file_2.pdf', 'file_3.pdf'],
'folder_names': ['another_folder'],
'files': {
'file_1.pdf': self.file_summary_dict('file_1.pdf'),
'file_2.pdf': self.file_summary_dict('file_2.pdf'),
'file_3.pdf': self.file_summary_dict('file_3.pdf')
},
'folders': {
'another_folder': self.file_summary_dict('another_folder')
}
}
def teardown_method(self, method):
shutil.rmtree(self.tmp_dir)
def test_successful_folder_summary_generation(self, mocker):
mocker.patch.object(Stat, 'timestamps', [1, 2, 3])
mocker.patch.object(Stat, 'size', 1024)
[json_string, children] = folder.summary_json(self.source_dir)
assert json_string == jsonpickle.encode(self.summary)
assert children == [os.path.join(self.source_dir, 'another_folder')]
def test_ignore_init_file_during_folder_summary_generation(self, mocker):
mocker.patch.object(Stat, 'timestamps', [1, 2, 3])
mocker.patch.object(Stat, 'size', 1024)
touch(os.path.join(self.source_dir, '.sync_folders.init'))
[json_string, children] = folder.summary_json(self.source_dir)
assert json_string.find('.sync_folders.init') == -1
def test_empty_folder_summary_for_empty_folder(self):
shutil.rmtree(self.source_dir)
os.makedirs(self.source_dir)
[json_string, children] = folder.summary_json(self.source_dir)
assert json_string == jsonpickle.encode({
'path': self.source_dir,
'file_names': [],
'folder_names': [],
'files': {},
'folders': {}
})
assert children == []
def test_get_summary_for_folder_if_summary_present(self):
open(os.path.join(self.source_dir, '.sync_folders.init'), 'w').write(
jsonpickle.encode(self.summary)
)
assert folder.get_summary(self.source_dir) == self.summary
def test_get_summary_generates_folder_summary_if_summary_not_present(self, mocker):
mocker.patch.object(Stat, 'timestamps', [1, 2, 3])
mocker.patch.object(Stat, 'size', 1024)
assert folder.get_summary(self.source_dir) == self.summary
def file_summary_dict(self, name):
''' File summary representation '''
return {
'name': name,
'updated_at': 2,
'created_at': 1,
'size': 1024,
'last_access_time': 3
}
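    # Note on the mocked values used throughout these tests: Stat.timestamps is
    # patched to [1, 2, 3] and Stat.size to 1024, and the generated summary is
    # expected to report created_at=1, updated_at=2, last_access_time=3 and
    # size=1024, matching the dict built by file_summary_dict above.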
| praveenram/sync_folders | tests/folder/test_folder_summary.py | Python | mit | 3,345 |
#!/usr/bin/python
# Copyright: (c) 2015, Google Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubernetes
version_added: "2.1"
deprecated:
removed_in: "2.9"
  why: This module used the oc command line tool, whereas M(k8s_raw) goes over the REST API.
alternative: Use M(k8s_raw) instead.
short_description: Manage Kubernetes resources
description:
- This module can manage Kubernetes resources on an existing cluster using
the Kubernetes server API. Users can specify in-line API data, or
specify an existing Kubernetes YAML file.
- Currently, this module
(1) Only supports HTTP Basic Auth
(2) Only supports 'strategic merge' for update, http://goo.gl/fCPYxT
SSL certs are not working, use C(validate_certs=off) to disable.
options:
api_endpoint:
description:
- The IPv4 API endpoint of the Kubernetes cluster.
required: true
aliases: [ endpoint ]
inline_data:
description:
- The Kubernetes YAML data to send to the API I(endpoint). This option is
mutually exclusive with C('file_reference').
required: true
file_reference:
description:
      - Specify full path to a Kubernetes YAML file to send to API I(endpoint).
This option is mutually exclusive with C('inline_data').
patch_operation:
description:
- Specify patch operation for Kubernetes resource update.
- For details, see the description of PATCH operations at
U(https://github.com/kubernetes/kubernetes/blob/release-1.5/docs/devel/api-conventions.md#patch-operations).
default: Strategic Merge Patch
choices: [ JSON Patch, Merge Patch, Strategic Merge Patch ]
aliases: [ patch_strategy ]
version_added: 2.4
certificate_authority_data:
description:
- Certificate Authority data for Kubernetes server. Should be in either
standard PEM format or base64 encoded PEM data. Note that certificate
verification is broken until ansible supports a version of
'match_hostname' that can match the IP address against the CA data.
state:
description:
- The desired action to take on the Kubernetes data.
required: true
choices: [ absent, present, replace, update ]
default: present
url_password:
description:
- The HTTP Basic Auth password for the API I(endpoint). This should be set
unless using the C('insecure') option.
aliases: [ password ]
url_username:
description:
- The HTTP Basic Auth username for the API I(endpoint). This should be set
unless using the C('insecure') option.
default: admin
aliases: [ username ]
insecure:
description:
- Reverts the connection to using HTTP instead of HTTPS. This option should
        only be used when executing the M('kubernetes') module local to the Kubernetes
        cluster using the insecure local port (localhost:8080 by default).
validate_certs:
description:
- Enable/disable certificate validation. Note that this is set to
C(false) until Ansible can support IP address based certificate
hostname matching (exists in >= python3.5.0).
type: bool
default: 'no'
author:
- Eric Johnson (@erjohnso) <erjohnso@google.com>
'''
EXAMPLES = '''
# Create a new namespace with in-line YAML.
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
url_username: admin
url_password: redacted
inline_data:
kind: Namespace
apiVersion: v1
metadata:
name: ansible-test
labels:
label_env: production
label_ver: latest
annotations:
a1: value1
a2: value2
state: present
# Create a new namespace from a YAML file.
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
url_username: admin
url_password: redacted
file_reference: /path/to/create_namespace.yaml
state: present
# Do the same thing, but using the insecure localhost port
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
insecure: true
file_reference: /path/to/create_namespace.yaml
state: present
'''
RETURN = '''
# Example response from creating a Kubernetes Namespace.
api_response:
description: Raw response from Kubernetes API, content varies with API.
returned: success
type: complex
contains:
apiVersion: "v1"
kind: "Namespace"
metadata:
creationTimestamp: "2016-01-04T21:16:32Z"
name: "test-namespace"
resourceVersion: "509635"
selfLink: "/api/v1/namespaces/test-namespace"
uid: "6dbd394e-b328-11e5-9a02-42010af0013a"
spec:
finalizers:
- kubernetes
status:
phase: "Active"
'''
import base64
import json
try:
import yaml
HAS_LIB_YAML = True
except ImportError:
HAS_LIB_YAML = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
############################################################################
############################################################################
# For API coverage, this Ansible module provides capability to operate on
# all Kubernetes objects that support a "create" call (except for 'Events').
# In order to obtain a valid list of Kubernetes objects, the v1 spec file
# was referenced and the below python script was used to parse the JSON
# spec file, extract only the objects with a description starting with
# 'create a'. The script then iterates over all of these base objects
# to get the endpoint URL and was used to generate the KIND_URL map.
#
# import json
# from urllib2 import urlopen
#
# r = urlopen("https://raw.githubusercontent.com/kubernetes"
# "/kubernetes/master/api/swagger-spec/v1.json")
# v1 = json.load(r)
#
# apis = {}
# for a in v1['apis']:
# p = a['path']
# for o in a['operations']:
# if o["summary"].startswith("create a") and o["type"] != "v1.Event":
# apis[o["type"]] = p
#
# def print_kind_url_map():
# results = []
# for a in apis.keys():
# results.append('"%s": "%s"' % (a[3:].lower(), apis[a]))
# results.sort()
# print("KIND_URL = {")
# print(",\n".join(results))
# print("}")
#
# if __name__ == '__main__':
# print_kind_url_map()
############################################################################
############################################################################
KIND_URL = {
"binding": "/api/v1/namespaces/{namespace}/bindings",
"configmap": "/api/v1/namespaces/{namespace}/configmaps",
"endpoints": "/api/v1/namespaces/{namespace}/endpoints",
"limitrange": "/api/v1/namespaces/{namespace}/limitranges",
"namespace": "/api/v1/namespaces",
"node": "/api/v1/nodes",
"persistentvolume": "/api/v1/persistentvolumes",
"persistentvolumeclaim": "/api/v1/namespaces/{namespace}/persistentvolumeclaims", # NOQA
"pod": "/api/v1/namespaces/{namespace}/pods",
"podtemplate": "/api/v1/namespaces/{namespace}/podtemplates",
"replicationcontroller": "/api/v1/namespaces/{namespace}/replicationcontrollers", # NOQA
"resourcequota": "/api/v1/namespaces/{namespace}/resourcequotas",
"secret": "/api/v1/namespaces/{namespace}/secrets",
"service": "/api/v1/namespaces/{namespace}/services",
"serviceaccount": "/api/v1/namespaces/{namespace}/serviceaccounts",
"daemonset": "/apis/extensions/v1beta1/namespaces/{namespace}/daemonsets",
"deployment": "/apis/extensions/v1beta1/namespaces/{namespace}/deployments",
"horizontalpodautoscaler": "/apis/extensions/v1beta1/namespaces/{namespace}/horizontalpodautoscalers", # NOQA
"ingress": "/apis/extensions/v1beta1/namespaces/{namespace}/ingresses",
"job": "/apis/extensions/v1beta1/namespaces/{namespace}/jobs",
}
USER_AGENT = "ansible-k8s-module/0.0.1"
# TODO(erjohnso): SSL Certificate validation is currently unsupported.
# It can be made to work when the following are true:
# - Ansible consistently uses a "match_hostname" that supports IP Address
# matching. This is now true in >= python3.5.0. Currently, this feature
# is not yet available in backports.ssl_match_hostname (still 3.4).
# - Ansible allows passing in the self-signed CA cert that is created with
# a kubernetes master. The lib/ansible/module_utils/urls.py method,
# SSLValidationHandler.get_ca_certs() needs a way for the Kubernetes
# CA cert to be passed in and included in the generated bundle file.
# When this is fixed, the following changes can be made to this module,
# - Remove the 'return' statement in line 254 below
# - Set 'required=true' for certificate_authority_data and ensure that
# ansible's SSLValidationHandler.get_ca_certs() can pick up this CA cert
# - Set 'required=true' for the validate_certs param.
def decode_cert_data(module):
return
# pylint: disable=unreachable
d = module.params.get("certificate_authority_data")
if d and not d.startswith("-----BEGIN"):
module.params["certificate_authority_data"] = base64.b64decode(d)
def api_request(module, url, method="GET", headers=None, data=None):
body = None
if data:
data = json.dumps(data)
response, info = fetch_url(module, url, method=method, headers=headers, data=data)
if int(info['status']) == -1:
module.fail_json(msg="Failed to execute the API request: %s" % info['msg'], url=url, method=method, headers=headers)
if response is not None:
body = json.loads(response.read())
return info, body
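# Typical call pattern for api_request(), as used by the helpers below
# (illustrative only):
#
#   info, body = api_request(module, url, method="POST", data=resource_dict,
#                            headers={"Content-Type": "application/json"})
#   if info['status'] >= 400:
#       module.fail_json(msg=info['msg'], url=url)
#
# `info` is the metadata dict returned by ansible's fetch_url() and `body` is
# the decoded JSON response, or None when there was no response body to read.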
def k8s_create_resource(module, url, data):
info, body = api_request(module, url, method="POST", data=data, headers={"Content-Type": "application/json"})
if info['status'] == 409:
name = data["metadata"].get("name", None)
info, body = api_request(module, url + "/" + name)
return False, body
elif info['status'] >= 400:
module.fail_json(msg="failed to create the resource: %s" % info['msg'], url=url)
return True, body
def k8s_delete_resource(module, url, data):
name = data.get('metadata', {}).get('name')
if name is None:
module.fail_json(msg="Missing a named resource in object metadata when trying to remove a resource")
url = url + '/' + name
info, body = api_request(module, url, method="DELETE")
if info['status'] == 404:
return False, "Resource name '%s' already absent" % name
elif info['status'] >= 400:
module.fail_json(msg="failed to delete the resource '%s': %s" % (name, info['msg']), url=url)
return True, "Successfully deleted resource name '%s'" % name
def k8s_replace_resource(module, url, data):
name = data.get('metadata', {}).get('name')
if name is None:
module.fail_json(msg="Missing a named resource in object metadata when trying to replace a resource")
headers = {"Content-Type": "application/json"}
url = url + '/' + name
info, body = api_request(module, url, method="PUT", data=data, headers=headers)
if info['status'] == 409:
name = data["metadata"].get("name", None)
info, body = api_request(module, url + "/" + name)
return False, body
elif info['status'] >= 400:
module.fail_json(msg="failed to replace the resource '%s': %s" % (name, info['msg']), url=url)
return True, body
def k8s_update_resource(module, url, data, patch_operation):
# PATCH operations are explained in details at:
# https://github.com/kubernetes/kubernetes/blob/release-1.5/docs/devel/api-conventions.md#patch-operations
PATCH_OPERATIONS_MAP = {
'JSON Patch': 'application/json-patch+json',
'Merge Patch': 'application/merge-patch+json',
'Strategic Merge Patch': 'application/strategic-merge-patch+json',
}
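    # Illustrative patch bodies (hypothetical examples, not from the module docs):
    #   JSON Patch:                  [{"op": "replace", "path": "/spec/replicas", "value": 3}]
    #   Merge/Strategic Merge Patch: {"spec": {"replicas": 3}}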
name = data.get('metadata', {}).get('name')
if name is None:
module.fail_json(msg="Missing a named resource in object metadata when trying to update a resource")
headers = {"Content-Type": PATCH_OPERATIONS_MAP[patch_operation]}
url = url + '/' + name
info, body = api_request(module, url, method="PATCH", data=data, headers=headers)
if info['status'] == 409:
name = data["metadata"].get("name", None)
info, body = api_request(module, url + "/" + name)
return False, body
elif info['status'] >= 400:
module.fail_json(msg="failed to update the resource '%s': %s" % (name, info['msg']), url=url)
return True, body
def main():
module = AnsibleModule(
argument_spec=dict(
http_agent=dict(type='str', default=USER_AGENT),
url_username=dict(type='str', default='admin', aliases=['username']),
url_password=dict(type='str', default='', no_log=True, aliases=['password']),
force_basic_auth=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=False),
certificate_authority_data=dict(type='str'),
insecure=dict(type='bool', default=False),
api_endpoint=dict(type='str', required=True),
patch_operation=dict(type='str', default='Strategic Merge Patch', aliases=['patch_strategy'],
choices=['JSON Patch', 'Merge Patch', 'Strategic Merge Patch']),
file_reference=dict(type='str'),
inline_data=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present', 'replace', 'update'])
),
mutually_exclusive=(('file_reference', 'inline_data'),
('url_username', 'insecure'),
('url_password', 'insecure')),
        required_one_of=(('file_reference', 'inline_data'),),
)
if not HAS_LIB_YAML:
module.fail_json(msg="missing python library: yaml")
decode_cert_data(module)
api_endpoint = module.params.get('api_endpoint')
state = module.params.get('state')
insecure = module.params.get('insecure')
inline_data = module.params.get('inline_data')
file_reference = module.params.get('file_reference')
patch_operation = module.params.get('patch_operation')
if inline_data:
if not isinstance(inline_data, dict) and not isinstance(inline_data, list):
data = yaml.safe_load(inline_data)
else:
data = inline_data
else:
try:
f = open(file_reference, "r")
data = [x for x in yaml.safe_load_all(f)]
f.close()
if not data:
module.fail_json(msg="No valid data could be found.")
except:
module.fail_json(msg="The file '%s' was not found or contained invalid YAML/JSON data" % file_reference)
# set the transport type and build the target endpoint url
transport = 'https'
if insecure:
transport = 'http'
target_endpoint = "%s://%s" % (transport, api_endpoint)
body = []
changed = False
# make sure the data is a list
if not isinstance(data, list):
data = [data]
for item in data:
namespace = "default"
if item and 'metadata' in item:
namespace = item.get('metadata', {}).get('namespace', "default")
kind = item.get('kind', '').lower()
try:
url = target_endpoint + KIND_URL[kind]
except KeyError:
module.fail_json(msg="invalid resource kind specified in the data: '%s'" % kind)
url = url.replace("{namespace}", namespace)
else:
url = target_endpoint
if state == 'present':
item_changed, item_body = k8s_create_resource(module, url, item)
elif state == 'absent':
item_changed, item_body = k8s_delete_resource(module, url, item)
elif state == 'replace':
item_changed, item_body = k8s_replace_resource(module, url, item)
elif state == 'update':
item_changed, item_body = k8s_update_resource(module, url, item, patch_operation)
changed |= item_changed
body.append(item_body)
module.exit_json(changed=changed, api_response=body)
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/clustering/k8s/_kubernetes.py | Python | gpl-3.0 | 16,555 |
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.catalogue.backends.generic import CatalogueBackend \
as GenericCatalogueBackend
class CatalogueBackend(GenericCatalogueBackend):
"""GeoNetwork CSW Backend"""
def __init__(self, *args, **kwargs):
super(CatalogueBackend, self).__init__(*args, **kwargs)
self.catalogue.formats = ['Dublin Core', 'ISO']
| Phil-LiDAR2-Geonode/pl2-geonode | geonode/catalogue/backends/geonetwork.py | Python | gpl-3.0 | 1,161 |
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
from PyQt5.QtWidgets import (
# QSplitter,
QMenu,
QTabWidget
)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
from ninja_ide import translations
from ninja_ide.gui.ide import IDE
from ninja_ide.gui import dynamic_splitter
from ninja_ide.tools.logger import NinjaLogger
logger = NinjaLogger('ninja_ide.gui.explorer.explorer_container')
# TODO: Each tab should handle close and reopen and notify the explorer
class ExplorerContainer(dynamic_splitter.DynamicSplitter):
# ExplorerContainer SIGNALS
"""
goToDefinition(int)
projectOpened(QString)
projectClosed(QString)
"""
__TABS = collections.OrderedDict()
__created = False
def __init__(self, orientation=Qt.Vertical):
super(ExplorerContainer, self).__init__(orientation)
self.setProperty("lightcolored", True)
self.create_tab_widget()
IDE.register_service('explorer_container', self)
connections = (
{'target': 'central_container',
'signal_name': "splitterBaseRotated()",
'slot': self.rotate_tab_position},
{'target': 'central_container',
'signal_name': 'splitterBaseRotated()',
'slot': self.rotate_tab_position},
)
self._point = None
self._widget_index = 0
self.__splitted_tabs = {}
self.menu = QMenu()
self.actionSplit = self.menu.addAction(translations.TR_SPLIT_TAB)
self.actionSplit.triggered.connect(self._split_widget)
self.actionUndock = self.menu.addAction(translations.TR_UNDOCK)
self.actionUndock.triggered.connect(self._undock_widget)
self.actionCloseSplit = self.menu.addAction(
translations.TR_CLOSE_SPLIT)
self.actionCloseSplit.triggered.connect(self._close_split)
self.menuMoveToSplit = self.menu.addMenu(translations.TR_MOVE_TO_SPLIT)
IDE.register_signals('explorer_container', connections)
self.__created = True
@classmethod
def register_tab(cls, tab_name, obj, icon=None):
""" Register a tab providing the service name and the instance """
cls.__TABS[obj] = (tab_name, icon)
if cls.__created:
explorer.add_tab(tab_name, obj, icon)
def install(self):
ide = IDE.get_service('ide')
ide.place_me_on("explorer_container", self, "lateral")
ide.goingDown.connect(self.save_configuration)
for obj in ExplorerContainer.__TABS:
tabname, icon = ExplorerContainer.__TABS[obj]
self.add_tab(tabname, obj, icon)
obj.dockWidget['PyQt_PyObject'].connect(self._dock_widget)
obj.undockWidget.connect(self._undock_widget)
logger.debug("Falta conectar change_tab_title")
# obj.changeTitle.connect(self._change_tab_title)
# obj.changeTitle['PyQt_PyObject',
# 'QString'].connect(self._change_tab_title)
if self.count() == 0:
self.hide()
def _dock_widget(self, widget):
tab_widget = self.widget(0)
if tab_widget.count() == 0:
central = IDE.get_service('central_container')
central.change_lateral_visibility()
tabname, icon = ExplorerContainer.__TABS[widget]
self.add_tab(tabname, widget, icon)
def _change_tab_title(self, widget, title):
for i in range(self.count()):
tab_widget = self.widget(i)
index = tab_widget.indexOf(widget)
if index != -1:
data = ExplorerContainer.__TABS[widget]
data = tuple([title] + list(data[1:]))
ExplorerContainer.__TABS[widget] = data
tab_widget.setTabText(index, title)
break
def _undock_widget(self):
tab_widget = self.widget(self._widget_index)
bar = tab_widget.tabBar()
index = bar.tabAt(self._point)
widget = tab_widget.widget(index)
widget.setParent(None)
widget.resize(500, 500)
widget.show()
if tab_widget.count() == 0:
central = IDE.get_service('central_container')
central.change_lateral_visibility()
def _split_widget(self):
current_tab_widget = self.widget(self._widget_index)
if current_tab_widget.count() == 1:
return
tab_widget = self.create_tab_widget()
index_widget = self.indexOf(tab_widget)
tab_widget = self.widget(self._widget_index)
bar = tab_widget.tabBar()
index = bar.tabAt(self._point)
widget = tab_widget.widget(index)
tabname, icon = ExplorerContainer.__TABS[widget]
self.add_tab(tabname, widget, icon, index_widget)
self._reset_size()
def _close_split(self):
self._move_to_split(0)
def _move_to_split(self, index_widget=-1):
obj = self.sender()
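        # Entries in the "move to split" menu are labelled "1".."n" (see
        # show_tab_context_menu), so the action text maps to a 0-based index.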
if index_widget == -1:
index_widget = int(obj.text()) - 1
tab_widget = self.widget(self._widget_index)
bar = tab_widget.tabBar()
index = bar.tabAt(self._point)
widget = tab_widget.widget(index)
tabname, icon = ExplorerContainer.__TABS[widget]
self.add_tab(tabname, widget, icon, index_widget)
if tab_widget.count() == 0:
tab_widget.deleteLater()
self._reset_size()
def _reset_size(self):
sizes = [self.height() / self.count()] * self.count()
self.setSizes(sizes)
def create_tab_widget(self):
tab_widget = QTabWidget()
tab_widget.setStyleSheet("QTabWidget::pane {border: 0;}")
tab_widget.setTabPosition(QTabWidget.East)
tab_widget.setMovable(True)
tabBar = tab_widget.tabBar()
tabBar.hide()
tabBar.setContextMenuPolicy(Qt.CustomContextMenu)
self.addWidget(tab_widget)
index = self.indexOf(tab_widget)
tabBar.customContextMenuRequested['const QPoint&'].connect(
lambda point: self.show_tab_context_menu(index, point))
return tab_widget
def add_tab(self, tabname, obj, icon=None, widget_index=0):
obj.setWindowTitle(tabname)
if icon is not None:
qicon = QIcon(icon)
self.widget(widget_index).addTab(obj, qicon, tabname)
obj.setWindowIcon(qicon)
else:
self.widget(widget_index).addTab(obj, tabname)
func = getattr(obj, 'install_tab', None)
if isinstance(func, collections.Callable):
func()
def rotate_tab_position(self):
for i in range(self.count()):
widget = self.widget(i)
if widget.tabPosition() == QTabWidget.East:
widget.setTabPosition(QTabWidget.West)
else:
widget.setTabPosition(QTabWidget.East)
def shortcut_index(self, index):
self.setCurrentIndex(index)
def show_tab_context_menu(self, widget_index, point):
bar = self.widget(widget_index).tabBar()
self._point = point
self._widget_index = widget_index
if widget_index != 0:
self.actionUndock.setVisible(False)
self.actionCloseSplit.setVisible(True)
else:
self.actionUndock.setVisible(True)
self.actionCloseSplit.setVisible(False)
self.menuMoveToSplit.clear()
if self.count() > 1:
for i in range(1, self.count() + 1):
action = self.menuMoveToSplit.addAction("%d" % i)
action.triggered.connect(self._move_to_split)
self.menu.exec_(bar.mapToGlobal(point))
def enterEvent(self, event):
super(ExplorerContainer, self).enterEvent(event)
for index in range(self.count()):
bar = self.widget(index).tabBar()
bar.show()
def leaveEvent(self, event):
super(ExplorerContainer, self).leaveEvent(event)
for index in range(self.count()):
bar = self.widget(index).tabBar()
bar.hide()
def save_configuration(self):
# ninja_settings = IDE.ninja_settings()
# ninja_settings.setValue("explorer/tabs", self.__splitted_tabs)
pass
explorer = ExplorerContainer()
| centaurialpha/ninja-ide | ninja_ide/gui/explorer/explorer_container.py | Python | gpl-3.0 | 8,986 |
"""
fullEval.py
DO NOT MODIFY ANY CODES IN THIS FILE
OTHERWISE YOUR RESULTS MAY BE INCORRECTLY EVALUATED!
@author: John See, 2017
For questions or bug reporting, please send an email to johnsee@mmu.edu.my
"""
import os
import cv2
import numpy as np
import pickle
import sys, getopt
import matplotlib.pyplot as plt
from computeDistances import computeDistances
# Defaults
dbSize = 1000 # number of images in food database
nPerCat = 100 # number of images in food database for each category
nC = 10 # number of categories
nRetrieved = 100 # number of images to retrieve
loadFV = True # flag to indicate if feature vector will be loaded
# Read command line args
myopts, args = getopt.getopt(sys.argv[1:],"r:th")
# parsing command line args
for o, a in myopts:
if o == '-r':
nRetrieved = int(a)
if (nRetrieved > dbSize):
print("Error: Number of retrieved images exceeds size of database!")
sys.exit()
elif o == '-t': # extract features before evaluating
cont = input('Caution! Do you wish to continue with feature extraction? (y/n): ')
if (cont == 'y'):
exec(open("featureExtraction.py").read())
loadFV = False
print('Done extracting')
else:
print("\nCommand aborted. Start over again.")
sys.exit()
elif o == '-h':
print("\nUsage: %s -r numRetrieved # to specify number of retrieved images" % sys.argv[0])
print("\n %s -t # to enable feature extraction before evaluation" % sys.argv[0])
print(" ")
sys.exit()
else:
print(' ')
if loadFV:
# load pickled features
fv = pickle.load(open("feat.pkl", "rb") )
print('Features loaded')
# EDIT THIS TO YOUR OWN PATH IF DIFFERENT
dbpath = os.getcwd() + '/../fooddb'
# these labels are the abbreviations of the actual food names
labels = ('AK','BL','CD','CL','DR','MG','NL','PG','RC','ST')
featvect = [] # empty list for holding features
FEtime = np.zeros(dbSize)
# find all pairwise distances
D = computeDistances(fv)
# *** Evaluation ----------------------------------------------------------
avg_prec = np.zeros(dbSize)
# iterate through all images from each category as query image
for c in range(nC):
for i in range(nPerCat):
idx = (c*nPerCat) + i;
# access distances of all images from query image, sort them asc
nearest_idx = np.argsort(D[idx, :]);
# quick way of finding category label for top K retrieved images
retrievedCats = np.uint8(np.floor((nearest_idx[1:nRetrieved+1])/nPerCat));
# find matches
hits = (retrievedCats == np.floor(idx/nPerCat))
# calculate average precision of the ranked matches
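        # average precision = mean of precision@k over the ranks k at which a
        # relevant (same-category) image was retrieved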
if np.sum(hits) != 0:
avg_prec[idx] = np.sum(hits*np.cumsum(hits)/(np.arange(nRetrieved)+1)) / np.sum(hits)
else:
avg_prec[idx] = 0.0
mean_avg_prec = np.mean(avg_prec)
mean_avg_prec_perCat = np.mean(avg_prec.reshape(nPerCat, nC), axis=0)
recall = np.sum(hits) / nPerCat
# *** Results & Visualization-----------------------------------------------
print('Mean Average Precision, MAP@%d: %.4f'%(nRetrieved,mean_avg_prec))
print('Recall Rate@%d: %.4f'%(nRetrieved,recall))
x = np.arange(nC)+0.5
plt.xticks(x, list(labels) )
plt.xlim([0,10]), plt.ylim([0,1])
markerline, stemlines, baseline = plt.stem(x, mean_avg_prec_perCat, '-.')
plt.grid(True)
plt.xlabel('Food categories'), plt.ylabel('MAP per category')
#fig, axs = plt.subplots(2, 5, figsize=(12, 6), facecolor='w', edgecolor='w')
#fig.subplots_adjust(hspace = .5, wspace=.001)
#axs = axs.ravel()
#for i in range(nC):
# imgfile = os.path.join(dbpath, str(nearest_idx[i+1]) + '.jpg')
# matched_img = cv2.cvtColor(cv2.imread(imgfile), cv2.COLOR_BGR2RGB)
# axs[i].imshow(matched_img)
# axs[i].set_title(str(i+1) + '. ' + labels[retrievedCats[i]])
# axs[i].set_xticks([])
# axs[i].set_yticks([])
plt.show()
| jackwong95/MMURandomStuff | TDS3651 - Visual Information Processing/Assignment/Part 2/SIFT/fullEval.py | Python | apache-2.0 | 4,031 |
from io_processing.result_interpreter.abst_result_interpreter import AbstractInterpreter, \
InterpreterOptions
from io_processing.surveillance_handler import CanBusHandler
import csv
from math import floor
from tools.ecu_logging import ECULogger
from config import can_registration
import logging
class CanBusInterpreter(AbstractInterpreter):
def __init__(self, export_options=False, file_path=False):
AbstractInterpreter.__init__(self, export_options, file_path)
self.file_path = file_path
self._init_csv(file_path)
self.known = []
self._bus_ids = []
self.bytes_trans = {}
self.last_time = 0
# counting msgs
self.no_simple_msgs = {} # depending on stream
self.no_auth_msgs = {} # per ECU
self.no_str_msgs = {} # per Stream
        # counting segments
self.no_simple_msgs_seg = {} # depending on stream
self.no_auth_msgs_seg = {} # per ECU
self.no_str_msgs_seg = {} # per Stream
# counting bytes
self.no_simple_msgs_bytes = {} # depending on stream
self.no_auth_msgs_bytes = {} # per ECU
self.no_str_msgs_bytes = {} # per Stream
def get_handler(self):
return [CanBusHandler]
def interprete_data(self, data):
# Initialize
cur_time, mon_inputs, bus_info = data[0], data[1], []
for mon_input in mon_inputs:
try:
# CSV Export
if InterpreterOptions.CSV_MSG_FILE in self.export_options:
self._export_csv_1(mon_input)
# add information
bus_info.append([mon_input[0], mon_input[1], mon_input[3], mon_input[9], mon_input[8], mon_input[2]])
self.bytes_trans[mon_input[1]] += mon_input[6]
                # count appearances (messages, bytes, segments) per bus and component
self._append_apearances(mon_input)
except KeyError:
self.bytes_trans[mon_input[1]] = mon_input[6]
self._append_apearances(mon_input)
# calculate bus load/ avg. datarate
try:
if cur_time >= self.last_time:
info = self._calc_datarate_export(cur_time, self.last_time, self.bytes_trans)
else:
info = []
except:
pass
# forward to connected device
if InterpreterOptions.CONNECTION in self.export_options:
self._export_connection([info, bus_info])
# reset
self._reset_and_print(cur_time)
def on_finish(self, cur_time=False):
''' Export the number of ECU Advertisements/ Stream Authorization/ Simple Messages'''
if InterpreterOptions.TXT_FILES in self.export_options:
self._export_txt_file(cur_time)
def _export_txt_file(self, cur_time):
txt_lines = []
if cur_time: txt_lines.append("-------------- Results after simulation time: %s -------------- " % cur_time)
txt_lines.append(self._pretty_can_str("Number of sent Stream Authorization Messages", self.no_str_msgs, "Messages"))
txt_lines.append(self._pretty_bus_comp("Number of sent ECU Authentication Messages", self.no_auth_msgs, "Messages"))
txt_lines.append(self._pretty_can_str("Number of sent Simple Messages", self.no_simple_msgs, "Messages"))
txt_lines.append(self._pretty_can_str("Number of sent Stream Authorization Segments", self.no_str_msgs_seg, "Segments"))
txt_lines.append(self._pretty_bus_comp("Number of sent ECU Authentication Segments", self.no_auth_msgs_seg, "Segments"))
txt_lines.append(self._pretty_can_str("Number of sent Simple Segments", self.no_simple_msgs_seg, "Segments"))
txt_lines.append(self._pretty_can_str("Number of sent Stream Authorization Bytes", self.no_str_msgs_bytes, "Bytes"))
txt_lines.append(self._pretty_bus_comp("Number of sent ECU Authentication Bytes", self.no_auth_msgs_bytes, "Bytes"))
txt_lines.append(self._pretty_can_str("Number of sent Simple Bytes", self.no_simple_msgs_bytes, "Bytes"))
try:
out_txt = "\n\n".join(txt_lines)
idx = self.file_path[::-1].find('.')
file_path = self.file_path[:(-idx - 1)] + "_timings_ecus.txt"
with open(file_path, "w") as text_file:
text_file.write(out_txt)
except:
pass
def _append_apearances(self, mon_input):
        '''increase the counters for the messages that appeared on the bus'''
if mon_input[3] == 'MonitorTags.CB_PROCESSING_MESSAGE':
if mon_input[4] in can_registration.ECU_AUTH_MESSAGES:
self._inc_set(self.no_auth_msgs_seg, mon_input[1], mon_input[2], 1)
if mon_input[9].count('0') == len(mon_input[9]):
self._inc_set(self.no_auth_msgs_bytes, mon_input[1], mon_input[2], len(mon_input[9]))
else:
self._inc_set(self.no_auth_msgs_bytes, mon_input[1], mon_input[2], mon_input[6])
self._inc_set(self.no_auth_msgs, mon_input[1], mon_input[2], 1)
elif mon_input[4] in can_registration.STREAM_AUTH_MESSAGES:
self._inc_set(self.no_str_msgs_seg, mon_input[1], mon_input[7], 1)
if mon_input[9].count('0') == len(mon_input[9]):
self._inc_set(self.no_str_msgs_bytes, mon_input[1], mon_input[7], len(mon_input[9]))
else:
self._inc_set(self.no_str_msgs, mon_input[1], mon_input[7], 1)
self._inc_set(self.no_str_msgs_bytes, mon_input[1], mon_input[7], mon_input[6])
else:
self._inc_set(self.no_simple_msgs_seg, mon_input[1], mon_input[4], 1)
if mon_input[9].count('0') == len(mon_input[9]):
self._inc_set(self.no_simple_msgs_bytes, mon_input[1], mon_input[4], len(mon_input[9]))
else:
self._inc_set(self.no_simple_msgs, mon_input[1], mon_input[4], 1)
self._inc_set(self.no_simple_msgs_bytes, mon_input[1], mon_input[4], mon_input[6])
def _calc_datarate_export(self, cur_time, last_time, bytes_trans):
''' calculates the datarate and writes it to the file'''
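        # datarate per bus = bytes transferred / elapsed interval; the exported
        # value is divided by 1000 (kB per time unit).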
try:
datarate = {}
info = {}
for ky in bytes_trans:
datarate[ky] = float(bytes_trans[ky]) / (cur_time - last_time)
info[ky] = [cur_time, ky, datarate[ky] / 1000.0]
if InterpreterOptions.CSV_DR_FILE in self.export_options:
try:
self.csv_writer.writerow(["BUS DATARATE", info[ky][0], info[ky][1], info[ky][2]])
except:
ECULogger().log_traceback()
return info
except:
pass
def _export_csv_1(self, mon_input):
self.csv_writer.writerow(["BUS MESSAGES", mon_input[0], mon_input[1], mon_input[3], mon_input[9]])
def _inc_set(self, dict_inc, ky, ky2, stp):
''' increases the value of the dictionary at
[ky][ky2] '''
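        # e.g. _inc_set(d, "CAN_0", "ECU_1", 1) turns {} into {"CAN_0": {"ECU_1": 1}}
        # (the keys here are only illustrative); existing counters are incremented.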
try:
dict_inc[ky]
except:
dict_inc[ky] = {}
try:
dict_inc[ky][ky2] += stp
except:
dict_inc[ky][ky2] = stp
def _extend_ids(self, bus_id):
        ''' register this bus and initialize its byte counter '''
if bus_id not in self._bus_ids:
self._bus_ids.append(bus_id)
self.bytes_trans[bus_id] = 0
def _init_csv(self, filepath):
try:
idx = filepath[::-1].find('.')
filepath = filepath[:(-idx - 1)] + filepath[(-idx - 1):]
self.csv_writer = csv.writer(open(filepath, 'w'), delimiter=',')
# Headline
self.csv_writer.writerow(["Information Type", "Time", "Bus ID", "Monitor Tag/ Datarate", "Unique Message ID"])
except:
pass # logging.error("CAN Bus Interpreter - CSV: Could not initialize filepath: %s" % filepath)
def _pretty_can_str(self, intro_txt, comp_cat_dict, units=""):
try:
hash_line = "##########################################################################################"
newline = "\n"
tab = "\t"
template = "\n\n\tBus Id: \t%s\n\tStream: \t%s\n\tValue: \t\t%s %s"
res_str = hash_line + newline + tab + tab + intro_txt + newline + hash_line
for comp in comp_cat_dict.keys():
try:
for cat in comp_cat_dict[comp].keys():
try:
res_str += newline
res_str += (template % (comp, cat, comp_cat_dict[comp][cat], units))
except:
pass
except:
pass
return res_str
except:
""
def _pretty_bus_comp(self, intro_txt, comp_cat_dict, units=""):
try:
hash_line = "##########################################################################################"
newline = "\n"
tab = "\t"
template = "\n\n\tBus Id: \t%s\n\tSender Id: \t%s\n\tValue: \t\t%s %s"
res_str = hash_line + newline + tab + tab + intro_txt + newline + hash_line
for comp in comp_cat_dict.keys():
try:
for cat in comp_cat_dict[comp].keys():
try:
res_str += newline
res_str += (template % (comp, cat, comp_cat_dict[comp][cat], units))
except:
pass
except:
pass
return res_str
except:
""
def _reset_and_print(self, cur_time):
self.bytes_trans = {}
self._bus_ids = []
self.last_time = cur_time
# print current results
self.on_finish(cur_time)
| PhilippMundhenk/IVNS | ECUSimulation/io_processing/result_interpreter/can_bus_interpreter.py | Python | mit | 10,769 |
#!/usr/bin/env python3
import unittest
import amulet
class TestDeploy(unittest.TestCase):
"""
Trivial deployment test for Apache Hadoop NameNode.
This charm cannot do anything useful by itself, so integration testing
is done in the bundle.
"""
def test_deploy(self):
self.d = amulet.Deployment(series='trusty')
self.d.add('namenode', 'apache-hadoop-namenode')
self.d.setup(timeout=900)
self.d.sentry.wait(timeout=1800)
self.unit = self.d.sentry['namenode'][0]
if __name__ == '__main__':
unittest.main()
| c0s/juju-apache-bigtop-namenode | tests/01-basic-deployment.py | Python | apache-2.0 | 578 |
#! /usr/bin/env python3
import re
__all__ = ["AnsiFormatter"]
class AnsiFormatter:
# RegExp detecting blank-only and single-char blocks
blankBlock = re.compile( "^([^\t\S]+|[^\t])$" )
# Messages
msg = {
'wiked-diff-empty': '(No difference)',
'wiked-diff-same': '=',
'wiked-diff-ins': '+',
'wiked-diff-del': '-',
'wiked-diff-block-left': '◀',
'wiked-diff-block-right': '▶',
}
# Characters used for highlighting
# newline = "\n"
# tab = "\t"
# space = " "
newline = "¶\n"
tab = "→"
space = "·"
omittedChars = "…"
separator_symbol = "~" * 64
# Colors
color_insert = 10
color_delete = 9
color_same = None
color_separator = 5
# Default color for moved blocks
color_moved = 3
# Block colors
colors_fg = [226, 136, 214, 105, 165, 128, 14, 63, 133]
colors_bg = colors_fg
def __init__(self):
# Stack of color codes
self.color_stack = []
##
## Main formatter method which formats diff fragments using ANSI colors.
##
## @param array fragments Fragments array, abstraction layer for diff code
## @param bool showBlockMoves
## Enable block move layout with highlighted blocks and marks at the original positions (True)
## @param bool coloredBlocks
## Display blocks in differing colors (rainbow color scheme) (False)
## @return string ANSI formatted code of diff
##
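    # Illustrative call (assumes a fragments list produced elsewhere by the
    # WikEdDiff engine):
    #   formatter = AnsiFormatter()
    #   print(formatter.format(fragments, showBlockMoves=True, coloredBlocks=True))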
def format( self,
fragments,
showBlockMoves=True,
coloredBlocks=False ):
# No change, only one unchanged block in containers
if len(fragments) == 5 and fragments[2].type == '=':
return self.containerStart + \
self.noChangeStart + \
self.ansiEscape( self.msg['wiked-diff-empty'] ) + \
self.noChangeEnd + \
self.containerEnd
# Cycle through fragments
markupFragments = []
for fragment in fragments:
text = fragment.text
type = fragment.type
color = fragment.color
markup = ""
# Test if text is blanks-only or a single character
blank = False
if text != '':
blank = self.blankBlock.search( text ) is not None
# Add container start markup
if type == '{':
markup = self.containerStart
# Add container end markup
elif type == '}':
markup = self.containerEnd
# Add fragment start markup
elif type == '[':
markup = self.fragmentStart
# Add fragment end markup
elif type == ']':
markup = self.fragmentEnd
# Add fragment separator markup
elif type == ',':
markup = self.separator
# Add omission markup
elif type == '~':
markup = self.omittedChars
# Add omission markup
elif type == ' ~':
markup = ' ' + self.omittedChars
# Add omission markup
elif type == '~ ':
markup = self.omittedChars + ' '
# Add colored left-pointing block start markup
elif type == '(<':
if coloredBlocks is True:
markup = self.blockColoredStart(color)
else:
markup = self.blockStart
# Add colored right-pointing block start markup
elif type == '(>':
if coloredBlocks is True:
markup = self.blockColoredStart(color)
else:
markup = self.blockStart
# Add colored block end markup
elif type == ' )':
markup = self.blockEnd
# Add '=' (unchanged) text and moved block
elif type == '=':
text = self.ansiEscape( text )
if color != 0:
markup = self.markupBlanks( text, True )
else:
markup = self.markupBlanks( text )
# Add '-' text
elif type == '-':
if blank is True:
markup = self.deleteStartBlank
else:
markup = self.deleteStart
                # handle text only after self.deleteStartBlank or self.deleteStart has set the color
text = self.ansiEscape( text )
text = self.markupBlanks( text, True )
markup += text + self.deleteEnd
# Add '+' text
elif type == '+':
if blank is True:
markup = self.insertStartBlank
else:
markup = self.insertStart
# handle text after self.insertStartBlank or self.insertStart sets the color!
text = self.ansiEscape( text )
text = self.markupBlanks( text, True )
markup += text + self.insertEnd
# Add '<' and '>' code
elif type == '<' or type == '>':
# Display as deletion at original position
if showBlockMoves is False:
if blank is True:
markup = self.deleteStartBlank
else:
markup = self.deleteStart
                    # handle text only after self.deleteStartBlank or self.deleteStart has set the color
text = self.ansiEscape( text )
text = self.markupBlanks( text, True )
markup += text + self.deleteEnd
# Display as mark
else:
if type == '<':
if coloredBlocks is True:
markup = self.markLeftColored(color)
else:
markup = self.markLeft
else:
if coloredBlocks is True:
markup = self.markRightColored(color)
else:
markup = self.markRight
markupFragments.append( markup )
# Join fragments
markup = "".join(markupFragments)
        # The color stack must be balanced (empty) at this point
assert(len(self.color_stack) == 0)
# self.color_stack.clear()
return markup
##
## Markup tabs, newlines, and spaces in diff fragment text.
##
    ## @param string text Text to be marked up
    ## @param bool highlight Highlight newlines and spaces in addition to tabs
## @return string Marked-up text
##
def markupBlanks( self, text, highlight=False ):
if highlight is True:
text = text.replace(" ", self.space)
# some terminals or pagers don't interpret colors across several lines
reset = "\033[0m"
color = self.color_stack[-1] if self.color_stack else reset
text = text.replace("\n", self.newline.replace("\n", reset + "\n" + color))
text = text.replace("\t", self.tab)
return text
##
## Replace ANSI escape codes with their plain-text representation.
##
## @param string text Text to be escaped
## @return string Escaped code
##
def ansiEscape( self, text ):
return text.replace("\033[", "\\033[")
# Assemble ANSI escape code for given colors, add it to the stack and return it.
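    # e.g. pushColor(fg=10, bg=0) builds "\033[00;38;5;10;48;5;0m", a 256-color
    # SGR sequence selecting foreground color 10 on background color 0.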
def pushColor(self, fg=None, bg=None):
code = "\033[00"
if fg is not None:
code += ";38;5;" + str(fg)
if bg is not None:
code += ";48;5;" + str(bg)
code += "m"
self.color_stack.append(code)
return code
# Pop current color from the stack and return the next one on the stack.
def popColor(self):
try:
self.color_stack.pop()
return self.color_stack[-1]
except IndexError:
# fall back to reset if the stack is empty
return "\033[0m"
@property
def noChangeStart(self):
return self.pushColor(self.color_same)
@property
def noChangeEnd(self):
return self.popColor()
@property
def containerStart(self):
return self.pushColor()
@property
def containerEnd(self):
return self.popColor()
@property
def fragmentStart(self):
return ""
@property
def fragmentEnd(self):
return ""
@property
def separator(self):
return self.pushColor(self.color_separator) + \
"\n" + self.separator_symbol + "\n" + \
self.popColor()
@property
def insertStart(self):
return self.pushColor(self.color_insert)
@property
def insertStartBlank(self):
return self.pushColor(fg=0, bg=self.color_insert)
@property
def insertEnd(self):
return self.popColor()
@property
def deleteStart(self):
return self.pushColor(self.color_delete)
@property
def deleteStartBlank(self):
return self.pushColor(fg=0, bg=self.color_delete)
@property
def deleteEnd(self):
return self.popColor()
@property
def blockStart(self):
return self.pushColor(fg=self.color_moved)
def blockColoredStart(self, num):
color = self.colors_fg[ num % len(self.colors_fg) ]
return self.pushColor(fg=color)
@property
def blockEnd(self):
return self.popColor()
@property
def markLeft(self):
fg = 0
bg = self.color_moved
return self.pushColor(fg=fg, bg=bg) + self.msg["wiked-diff-block-left"] + self.popColor()
def markLeftColored(self, num):
fg = 0
bg = self.colors_bg[ num % len(self.colors_bg) ]
return self.pushColor(fg=fg, bg=bg) + self.msg["wiked-diff-block-left"] + self.popColor()
@property
def markRight(self):
fg = 0
bg = self.color_moved
return self.pushColor(fg=fg, bg=bg) + self.msg["wiked-diff-block-right"] + self.popColor()
def markRightColored(self, num):
fg = 0
bg = self.colors_bg[ num % len(self.colors_bg) ]
return self.pushColor(fg=fg, bg=bg) + self.msg["wiked-diff-block-right"] + self.popColor()
| lahwaacz/python-wikeddiff | WikEdDiff/AnsiFormatter.py | Python | gpl-3.0 | 10,367 |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""
import collections
import os
import time
import decorator
from oslo.config import cfg
from oslo.utils import excutils
from oslo.utils import strutils
from oslo.utils import units
from oslo.vmware import exceptions as vexc
from oslo_concurrency import lockutils
from nova.api.metadata import base as instance_metadata
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt import hardware
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
CONF = cfg.CONF
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('remove_unused_base_images', 'nova.virt.imagecache')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
'suspended': power_state.SUSPENDED}
RESIZE_TOTAL_STEPS = 4
DcInfo = collections.namedtuple('DcInfo',
['ref', 'name', 'vmFolder'])
class VirtualMachineInstanceConfigInfo(object):
"""Parameters needed to create and configure a new instance."""
def __init__(self, instance, instance_name, image_info,
datastore, dc_info, image_cache):
# Some methods called during spawn take the instance parameter purely
# for logging purposes.
# TODO(vui) Clean them up, so we no longer need to keep this variable
self.instance = instance
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when the spawn of a rescue instance takes place.
self.instance_name = instance_name or instance.uuid
self.ii = image_info
self.root_gb = instance.root_gb
self.datastore = datastore
self.dc_info = dc_info
self._image_cache = image_cache
@property
def cache_image_folder(self):
if self.ii.image_id is None:
return
return self._image_cache.get_image_cache_folder(
self.datastore, self.ii.image_id)
@property
def cache_image_path(self):
if self.ii.image_id is None:
return
cached_image_file_name = "%s.%s" % (self.ii.image_id,
self.ii.file_type)
return self.cache_image_folder.join(cached_image_file_name)
# Note(vui): See https://bugs.launchpad.net/nova/+bug/1363349
# for cases where mocking time.sleep() can have unintended effects on code
# not under test. For now, unblock the affected test cases by providing
# a wrapper function to work around needing to mock time.sleep()
def _time_sleep_wrapper(delay):
time.sleep(delay)
@decorator.decorator
def retry_if_task_in_progress(f, *args, **kwargs):
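    """Retry the wrapped call while vCenter reports the task as in progress.
    Up to max(CONF.vmware.api_retry_count, 1) attempts are made, sleeping
    between attempts with a delay that doubles each time, capped at 60 seconds.
    """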
retries = max(CONF.vmware.api_retry_count, 1)
delay = 1
for attempt in range(1, retries + 1):
if attempt != 1:
_time_sleep_wrapper(delay)
delay = min(2 * delay, 60)
try:
f(*args, **kwargs)
return
except vexc.TaskInProgress:
pass
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session, virtapi, volumeops, cluster=None,
datastore_regex=None):
"""Initializer."""
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops
self._cluster = cluster
self._root_resource_pool = vm_util.get_res_pool_ref(self._session,
self._cluster)
self._datastore_regex = datastore_regex
# Ensure that the base folder is unique per compute node
if CONF.remove_unused_base_images:
self._base_folder = '%s%s' % (CONF.my_ip,
CONF.image_cache_subdirectory_name)
else:
# Aging disable ensures backward compatibility
self._base_folder = CONF.image_cache_subdirectory_name
self._tmp_folder = 'vmware_temp'
self._rescue_suffix = '-rescue'
self._migrate_suffix = '-orig'
self._datastore_dc_mapping = {}
self._datastore_browser_mapping = {}
self._imagecache = imagecache.ImageCacheManager(self._session,
self._base_folder)
def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
service_content = self._session.vim.service_content
LOG.debug("Extending root virtual disk to %s", requested_size)
vmdk_extend_task = self._session._call_method(
self._session.vim,
"ExtendVirtualDisk_Task",
service_content.virtualDiskManager,
name=name,
datacenter=dc_ref,
newCapacityKb=requested_size,
eagerZero=False)
try:
self._session._wait_for_task(vmdk_extend_task)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Extending virtual disk failed with error: %s'),
e, instance=instance)
# Clean up files created during the extend operation
files = [name.replace(".vmdk", "-flat.vmdk"), name]
for file in files:
ds_path = ds_util.DatastorePath.parse(file)
self._delete_datastore_file(ds_path, dc_ref)
LOG.debug("Extended root virtual disk")
def _delete_datastore_file(self, datastore_path, dc_ref):
try:
ds_util.file_delete(self._session, datastore_path, dc_ref)
except (vexc.CannotDeleteFileException,
vexc.FileFaultException,
vexc.FileLockedException,
vexc.FileNotFoundException):
LOG.debug("Unable to delete %(ds)s. There may be more than "
"one process or thread trying to delete the file",
{'ds': datastore_path},
exc_info=True)
def _extend_if_required(self, dc_info, image_info, instance,
root_vmdk_path):
"""Increase the size of the root vmdk if necessary."""
if instance.root_gb * units.Gi > image_info.file_size:
size_in_kb = instance.root_gb * units.Mi
self._extend_virtual_disk(instance, size_in_kb,
root_vmdk_path, dc_info.ref)
def _configure_config_drive(self, instance, vm_ref, dc_info, datastore,
injected_files, admin_password):
session_vim = self._session.vim
cookies = session_vim.client.options.transport.cookiejar
uploaded_iso_path = self._create_config_drive(instance,
injected_files,
admin_password,
datastore.name,
dc_info.name,
instance.uuid,
cookies)
uploaded_iso_path = datastore.build_path(uploaded_iso_path)
self._attach_cdrom_to_vm(
vm_ref, instance,
datastore.ref,
str(uploaded_iso_path))
def build_virtual_machine(self, instance, instance_name, image_info,
dc_info, datastore, network_info, extra_specs):
vif_infos = vmwarevif.get_vif_info(self._session,
self._cluster,
utils.is_neutron(),
image_info.vif_model,
network_info)
if extra_specs.storage_policy:
profile_spec = vm_util.get_storage_profile_spec(
self._session, extra_specs.storage_policy)
else:
profile_spec = None
# Get the create vm config spec
client_factory = self._session.vim.client.factory
config_spec = vm_util.get_vm_create_spec(client_factory,
instance,
instance_name,
datastore.name,
vif_infos,
extra_specs,
image_info.os_type,
profile_spec=profile_spec)
# Create the VM
vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder,
config_spec, self._root_resource_pool)
return vm_ref
def _get_extra_specs(self, flavor):
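        # Flavor extra_specs carry the VMware-specific tuning: "quota:cpu_*"
        # keys populate the CPU limits, "vmware:hw_version" pins the virtual
        # hardware version, and "vmware:storage_policy" selects the PBM storage
        # policy when PBM is enabled.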
extra_specs = vm_util.ExtraSpecs()
for (key, type) in (('cpu_limit', int),
('cpu_reservation', int),
('cpu_shares_level', str),
('cpu_shares_share', int)):
value = flavor.extra_specs.get('quota:' + key)
if value:
setattr(extra_specs.cpu_limits, key, type(value))
hw_version = flavor.extra_specs.get('vmware:hw_version')
extra_specs.hw_version = hw_version
if CONF.vmware.pbm_enabled:
storage_policy = flavor.extra_specs.get('vmware:storage_policy',
CONF.vmware.pbm_default_policy)
extra_specs.storage_policy = storage_policy
return extra_specs
def _fetch_image_as_file(self, context, vi, image_ds_loc):
"""Download image as an individual file to host via HTTP PUT."""
session = self._session
session_vim = session.vim
cookies = session_vim.client.options.transport.cookiejar
LOG.debug("Downloading image file data %(image_id)s to "
"%(file_path)s on the data store "
"%(datastore_name)s",
{'image_id': vi.ii.image_id,
'file_path': image_ds_loc,
'datastore_name': vi.datastore.name},
instance=vi.instance)
images.fetch_image(
context,
vi.instance,
session._host,
session._port,
vi.dc_info.name,
vi.datastore.name,
image_ds_loc.rel_path,
cookies=cookies)
def _fetch_image_as_vapp(self, context, vi, image_ds_loc):
"""Download stream optimized image to host as a vApp."""
# The directory of the imported disk is the unique name
        # of the VM used to import it with.
vm_name = image_ds_loc.parent.basename
LOG.debug("Downloading stream optimized image %(image_id)s to "
"%(file_path)s on the data store "
"%(datastore_name)s as vApp",
{'image_id': vi.ii.image_id,
'file_path': image_ds_loc,
'datastore_name': vi.datastore.name},
instance=vi.instance)
images.fetch_image_stream_optimized(
context,
vi.instance,
self._session,
vm_name,
vi.datastore.name,
vi.dc_info.vmFolder,
self._root_resource_pool)
def _prepare_sparse_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, "tmp-sparse.vmdk")
return tmp_dir_loc, tmp_image_ds_loc
def _prepare_flat_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
ds_util.mkdir(self._session, tmp_image_ds_loc.parent, vi.dc_info.ref)
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(tmp_image_ds_loc),
vi.ii.file_size_in_kb)
flat_vmdk_name = vi.cache_image_path.basename.replace('.vmdk',
'-flat.vmdk')
flat_vmdk_ds_loc = tmp_dir_loc.join(vi.ii.image_id, flat_vmdk_name)
self._delete_datastore_file(str(flat_vmdk_ds_loc), vi.dc_info.ref)
return tmp_dir_loc, flat_vmdk_ds_loc
def _prepare_stream_optimized_image(self, vi):
vm_name = "%s_%s" % (constants.IMAGE_VM_PREFIX,
uuidutils.generate_uuid())
tmp_dir_loc = vi.datastore.build_path(vm_name)
tmp_image_ds_loc = tmp_dir_loc.join("%s.vmdk" % tmp_dir_loc.basename)
return tmp_dir_loc, tmp_image_ds_loc
def _prepare_iso_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
return tmp_dir_loc, tmp_image_ds_loc
def _move_to_cache(self, dc_ref, src_folder_ds_path, dst_folder_ds_path):
try:
ds_util.file_move(self._session, dc_ref,
src_folder_ds_path, dst_folder_ds_path)
except vexc.FileAlreadyExistsException:
# Folder move has failed. This may be due to the fact that a
# process or thread has already completed the operation.
# Since image caching is synchronized, this can only happen
# due to action external to the process.
# In the event of a FileAlreadyExists we continue,
# all other exceptions will be raised.
LOG.warning(_LW("Destination %s already exists! Concurrent moves "
"can lead to unexpected results."),
dst_folder_ds_path)
def _cache_sparse_image(self, vi, tmp_image_ds_loc):
tmp_dir_loc = tmp_image_ds_loc.parent.parent
converted_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
# converts fetched image to preallocated disk
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(tmp_image_ds_loc),
str(converted_image_ds_loc))
self._delete_datastore_file(str(tmp_image_ds_loc), vi.dc_info.ref)
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_flat_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_stream_optimized_image(self, vi, tmp_image_ds_loc):
dst_path = vi.cache_image_folder.join("%s.vmdk" % vi.ii.image_id)
ds_util.mkdir(self._session, vi.cache_image_folder, vi.dc_info.ref)
try:
ds_util.disk_move(self._session, vi.dc_info.ref,
tmp_image_ds_loc, dst_path)
except vexc.FileAlreadyExistsException:
pass
def _cache_iso_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _get_vm_config_info(self, instance, image_info, instance_name=None,
storage_policy=None):
"""Captures all relevant information from the spawn parameters."""
if (instance.root_gb != 0 and
image_info.file_size > instance.root_gb * units.Gi):
reason = _("Image disk size greater than requested disk size")
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
allowed_ds_types = ds_util.get_allowed_datastore_types(
image_info.disk_type)
datastore = ds_util.get_datastore(self._session,
self._cluster,
self._datastore_regex,
storage_policy,
allowed_ds_types)
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
return VirtualMachineInstanceConfigInfo(instance,
instance_name,
image_info,
datastore,
dc_info,
self._imagecache)
def _get_image_callbacks(self, vi):
disk_type = vi.ii.disk_type
if disk_type == constants.DISK_TYPE_STREAM_OPTIMIZED:
image_fetch = self._fetch_image_as_vapp
else:
image_fetch = self._fetch_image_as_file
if vi.ii.is_iso:
image_prepare = self._prepare_iso_image
image_cache = self._cache_iso_image
elif disk_type == constants.DISK_TYPE_SPARSE:
image_prepare = self._prepare_sparse_image
image_cache = self._cache_sparse_image
elif disk_type == constants.DISK_TYPE_STREAM_OPTIMIZED:
image_prepare = self._prepare_stream_optimized_image
image_cache = self._cache_stream_optimized_image
elif disk_type in constants.SUPPORTED_FLAT_VARIANTS:
image_prepare = self._prepare_flat_image
image_cache = self._cache_flat_image
else:
reason = _("disk type '%s' not supported") % disk_type
raise exception.InvalidDiskInfo(reason=reason)
return image_prepare, image_fetch, image_cache
def _fetch_image_if_missing(self, context, vi):
image_prepare, image_fetch, image_cache = self._get_image_callbacks(vi)
LOG.debug("Processing image %s", vi.ii.image_id)
with lockutils.lock(str(vi.cache_image_path),
lock_file_prefix='nova-vmware-fetch_image'):
self.check_cache_folder(vi.datastore.name, vi.datastore.ref)
ds_browser = self._get_ds_browser(vi.datastore.ref)
if not ds_util.file_exists(self._session, ds_browser,
vi.cache_image_folder,
vi.cache_image_path.basename):
LOG.debug("Preparing fetch location")
tmp_dir_loc, tmp_image_ds_loc = image_prepare(vi)
LOG.debug("Fetch image to %s", tmp_image_ds_loc)
image_fetch(context, vi, tmp_image_ds_loc)
LOG.debug("Caching image")
image_cache(vi, tmp_image_ds_loc)
LOG.debug("Cleaning up location %s", str(tmp_dir_loc))
self._delete_datastore_file(str(tmp_dir_loc), vi.dc_info.ref)
def _create_and_attach_ephemeral_disk(self, instance, vm_ref, vi, size,
adapter_type, filename):
path = str(ds_util.DatastorePath(vi.datastore.name, instance.uuid,
filename))
disk_type = constants.DISK_TYPE_THIN
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
adapter_type,
disk_type,
path,
size)
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
adapter_type, disk_type,
path, size, False)
def _create_ephemeral(self, bdi, instance, vm_ref, vi):
ephemerals = None
if bdi is not None:
ephemerals = driver.block_device_info_get_ephemerals(bdi)
for idx, eph in enumerate(ephemerals):
size = eph['size'] * units.Mi
adapter_type = eph.get('disk_bus', vi.ii.adapter_type)
filename = vm_util.get_ephemeral_name(idx)
self._create_and_attach_ephemeral_disk(instance, vm_ref, vi,
size, adapter_type,
filename)
# There may be block devices defined but no ephemerals. In this case
# we need to allocate a ephemeral disk if required
if not ephemerals and instance.ephemeral_gb:
size = instance.ephemeral_gb * units.Mi
filename = vm_util.get_ephemeral_name(0)
self._create_and_attach_ephemeral_disk(instance, vm_ref, vi,
size, vi.ii.adapter_type,
filename)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None,
instance_name=None, power_on=True,
flavor=None):
client_factory = self._session.vim.client.factory
image_info = images.VMwareImage.from_image(instance.image_ref,
image_meta)
# Read flavors for extra_specs
if flavor is None:
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance.instance_type_id)
extra_specs = self._get_extra_specs(flavor)
vi = self._get_vm_config_info(instance, image_info, instance_name,
extra_specs.storage_policy)
# Creates the virtual machine. The virtual machine reference returned
# is unique within Virtual Center.
vm_ref = self.build_virtual_machine(instance,
vi.instance_name,
image_info,
vi.dc_info,
vi.datastore,
network_info,
extra_specs)
# Cache the vm_ref. This saves a remote call to the VC. This uses the
# instance_name. This covers all use cases including rescue and resize.
vm_util.vm_ref_cache_update(vi.instance_name, vm_ref)
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info,
vm_ref=vm_ref)
# Set the vnc configuration of the instance, vnc port starts from 5900
if CONF.vnc_enabled:
self._get_and_set_vnc_config(client_factory, instance, vm_ref)
block_device_mapping = []
if block_device_info is not None:
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
# NOTE(mdbooth): the logic here is that we ignore the image if there
# are block device mappings. This behaviour is incorrect, and a bug in
# the driver. We should be able to accept an image and block device
# mappings.
if len(block_device_mapping) > 0:
msg = "Block device information present: %s" % block_device_info
# NOTE(mriedem): block_device_info can contain an auth_password
# so we have to scrub the message before logging it.
LOG.debug(strutils.mask_password(msg), instance=instance)
for root_disk in block_device_mapping:
connection_info = root_disk['connection_info']
# TODO(hartsocks): instance is unnecessary, remove it
# we still use instance in many locations for no other purpose
# than logging, can we simplify this?
self._volumeops.attach_root_volume(connection_info, instance,
vi.datastore.ref)
else:
self._imagecache.enlist_image(
image_info.image_id, vi.datastore, vi.dc_info.ref)
self._fetch_image_if_missing(context, vi)
if image_info.is_iso:
self._use_iso_image(vm_ref, vi)
elif image_info.linked_clone:
self._use_disk_image_as_linked_clone(vm_ref, vi)
else:
self._use_disk_image_as_full_clone(vm_ref, vi)
# Create ephemeral disks
self._create_ephemeral(block_device_info, instance, vm_ref, vi)
if configdrive.required_by(instance):
self._configure_config_drive(
instance, vm_ref, vi.dc_info, vi.datastore,
injected_files, admin_password)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def _create_config_drive(self, instance, injected_files, admin_password,
data_store_name, dc_name, upload_folder, cookies):
if CONF.config_drive_format != 'iso9660':
reason = (_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
raise exception.InstancePowerOnFailure(reason=reason)
LOG.info(_LI('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
try:
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive.iso')
cdb.make_drive(tmp_file)
upload_iso_path = "%s/configdrive.iso" % (
upload_folder)
images.upload_iso_to_datastore(
tmp_file, instance,
host=self._session._host,
port=self._session._port,
data_center_name=dc_name,
datastore_name=data_store_name,
cookies=cookies,
file_path=upload_iso_path)
return upload_iso_path
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed with error: %s'),
e, instance=instance)
def _attach_cdrom_to_vm(self, vm_ref, instance,
datastore, file_path):
"""Attach cdrom to VM by reconfiguration."""
client_factory = self._session.vim.client.factory
devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
client_factory,
devices,
'ide')
cdrom_attach_config_spec = vm_util.get_cdrom_attach_config_spec(
client_factory, datastore, file_path,
controller_key, unit_number)
if controller_spec:
cdrom_attach_config_spec.deviceChange.append(controller_spec)
LOG.debug("Reconfiguring VM instance to attach cdrom %s",
file_path, instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec)
LOG.debug("Reconfigured VM instance to attach cdrom %s",
file_path, instance=instance)
def _create_vm_snapshot(self, instance, vm_ref):
LOG.debug("Creating Snapshot of the VM instance", instance=instance)
snapshot_task = self._session._call_method(
self._session.vim,
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance.uuid,
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
self._session._wait_for_task(snapshot_task)
LOG.debug("Created Snapshot of the VM instance", instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
snapshot_task, "Task", "info")
snapshot = task_info.result
return snapshot
@retry_if_task_in_progress
def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
LOG.debug("Deleting Snapshot of the VM instance", instance=instance)
delete_snapshot_task = self._session._call_method(
self._session.vim,
"RemoveSnapshot_Task", snapshot,
removeChildren=False, consolidate=True)
self._session._wait_for_task(delete_snapshot_task)
LOG.debug("Deleted Snapshot of the VM instance", instance=instance)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
Steps followed are:
1. Get the name of the vmdk file which the VM points to right now.
Can be a chain of snapshots, so we need to know the last in the
chain.
2. Create the snapshot. A new vmdk is created which the VM points to
now. The earlier vmdk becomes read-only.
3. Call CopyVirtualDisk which coalesces the disk chain to form a single
           vmdk, or rather a .vmdk metadata file and a -flat.vmdk disk data file.
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
service_content = self._session.vim.service_content
def _get_vm_and_vmdk_attribs():
# Get the vmdk info that the VM is pointing to
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
instance.uuid)
if not vmdk.path:
LOG.debug("No root disk defined. Unable to snapshot.")
raise error_util.NoRootDiskDefined()
datastore_name = ds_util.DatastorePath.parse(vmdk.path).datastore
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
return (vmdk.path, vmdk.adapter_type,
vmdk.disk_type, datastore_name, os_type)
(vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type) = _get_vm_and_vmdk_attribs()
snapshot = self._create_vm_snapshot(instance, vm_ref)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
def _check_if_tmp_folder_exists():
# Copy the contents of the VM that were there just before the
# snapshot was taken
ds_ref_ret = self._session._call_method(
vim_util, "get_dynamic_property", vm_ref, "VirtualMachine",
"datastore")
if ds_ref_ret is None:
raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
self.check_temp_folder(datastore_name, ds_ref)
return ds_ref
ds_ref = _check_if_tmp_folder_exists()
        # Generate a random vmdk file name to which the coalesced vmdk content
        # will be copied. A random name is chosen so that we don't have
        # name clashes.
random_name = uuidutils.generate_uuid()
dest_vmdk_file_path = ds_util.DatastorePath(
datastore_name, self._tmp_folder, "%s.vmdk" % random_name)
dest_vmdk_data_file_path = ds_util.DatastorePath(
datastore_name, self._tmp_folder, "%s-flat.vmdk" % random_name)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
def _copy_vmdk_content():
# Consolidate the snapshotted disk to a temporary vmdk.
LOG.debug('Copying snapshotted disk %s.',
vmdk_file_path_before_snapshot,
instance=instance)
copy_disk_task = self._session._call_method(
self._session.vim,
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_info.ref,
destName=str(dest_vmdk_file_path),
destDatacenter=dc_info.ref,
force=False)
self._session._wait_for_task(copy_disk_task)
LOG.debug('Copied snapshotted disk %s.',
vmdk_file_path_before_snapshot,
instance=instance)
_copy_vmdk_content()
self._delete_vm_snapshot(instance, vm_ref, snapshot)
cookies = self._session.vim.client.options.transport.cookiejar
def _upload_vmdk_to_image_repository():
# Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug("Uploading image %s", image_id,
instance=instance)
images.upload_image(
context,
image_id,
instance,
os_type=os_type,
disk_type=constants.DEFAULT_DISK_TYPE,
adapter_type=adapter_type,
image_version=1,
host=self._session._host,
port=self._session._port,
data_center_name=dc_info.name,
datastore_name=datastore_name,
cookies=cookies,
file_path="%s/%s-flat.vmdk" % (self._tmp_folder, random_name))
LOG.debug("Uploaded image %s", image_id,
instance=instance)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
_upload_vmdk_to_image_repository()
def _clean_temp_data():
"""Delete temporary vmdk files generated in image handling
operations.
"""
# The data file is the one occupying space, and likelier to see
# deletion problems, so prioritize its deletion first. In the
# unlikely event that its deletion fails, the small descriptor file
# is retained too by design since it makes little sense to remove
# it when the data disk it refers to still lingers.
for f in dest_vmdk_data_file_path, dest_vmdk_file_path:
self._delete_datastore_file(f, dc_info.ref)
_clean_temp_data()
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session, props)
pwr_state = query['runtime.powerState']
tools_status = query['summary.guest.toolsStatus']
tools_running_status = query['summary.guest.toolsRunningStatus']
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
reason = _("instance is not powered on")
raise exception.InstanceRebootFailure(reason=reason)
        # If the latest VMware tools are installed in the VM and the tools
        # are running, do a guest reboot only. Otherwise do a hard reset.
if (tools_status == "toolsOk" and
tools_running_status == "guestToolsRunning"):
LOG.debug("Rebooting guest OS of VM", instance=instance)
self._session._call_method(self._session.vim, "RebootGuest",
vm_ref)
LOG.debug("Rebooted guest OS of VM", instance=instance)
else:
LOG.debug("Doing hard reboot of VM", instance=instance)
reset_task = self._session._call_method(self._session.vim,
"ResetVM_Task", vm_ref)
self._session._wait_for_task(reset_task)
LOG.debug("Did hard reboot of VM", instance=instance)
def _destroy_instance(self, instance, destroy_disks=True,
instance_name=None):
# Destroy a VM instance
        # Get the instance name. In some cases this may differ from the
        # 'uuid', for example when spawning a rescue instance.
if instance_name is None:
instance_name = instance.uuid
try:
vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
if vm_ref is None:
LOG.warning(_LW('Instance does not exist on backend'),
instance=instance)
return
lst_properties = ["config.files.vmPathName", "runtime.powerState",
"datastore"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, props)
pwr_state = query['runtime.powerState']
vm_config_pathname = query.get('config.files.vmPathName')
vm_ds_path = None
if vm_config_pathname is not None:
vm_ds_path = ds_util.DatastorePath.parse(
vm_config_pathname)
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
vm_util.power_off_instance(self._session, instance, vm_ref)
# Un-register the VM
try:
LOG.debug("Unregistering the VM", instance=instance)
self._session._call_method(self._session.vim,
"UnregisterVM", vm_ref)
LOG.debug("Unregistered the VM", instance=instance)
except Exception as excep:
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, got "
"this exception while un-registering the VM: "
"%s"), excep)
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks and vm_ds_path:
try:
dir_ds_compliant_path = vm_ds_path.parent
LOG.debug("Deleting contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
ds_ref_ret = query['datastore']
ds_ref = ds_ref_ret.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
ds_util.file_delete(self._session,
dir_ds_compliant_path,
dc_info.ref)
LOG.debug("Deleted contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
except Exception:
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, "
"exception while deleting the VM contents "
"from the disk"), exc_info=True)
except Exception as exc:
LOG.exception(exc, instance=instance)
finally:
vm_util.vm_ref_cache_delete(instance_name)
def destroy(self, instance, destroy_disks=True):
"""Destroy a VM instance.
Steps followed for each VM are:
1. Power off, if it is in poweredOn state.
2. Un-register.
3. Delete the contents of the folder holding the VM related data.
"""
# If there is a rescue VM then we need to destroy that one too.
LOG.debug("Destroying instance", instance=instance)
if instance['vm_state'] == vm_states.RESCUED:
LOG.debug("Rescue VM configured", instance=instance)
try:
self.unrescue(instance, power_on=False)
LOG.debug("Rescue VM destroyed", instance=instance)
except Exception:
rescue_name = instance.uuid + self._rescue_suffix
self._destroy_instance(instance,
destroy_disks=destroy_disks,
instance_name=rescue_name)
        # NOTE(arnaud): Destroy the uuid-orig and uuid VMs only if the
        # destroy is not triggered by the revert resize api call. This
        # prevents the uuid-orig VM from being deleted, so that it can be
        # re-associated later.
if instance.task_state != task_states.RESIZE_REVERTING:
# When a VM deletion is triggered in the middle of VM resize and
# before the state is set to RESIZED, the uuid-orig VM needs
# to be deleted. This will avoid VM leaks.
# The method _destroy_instance will check that the vmref
# exists before attempting the deletion.
resize_orig_vmname = instance.uuid + self._migrate_suffix
vm_orig_ref = vm_util.get_vm_ref_from_name(self._session,
resize_orig_vmname)
if vm_orig_ref:
self._destroy_instance(instance,
destroy_disks=destroy_disks,
instance_name=resize_orig_vmname)
self._destroy_instance(instance, destroy_disks=destroy_disks)
LOG.debug("Instance destroyed", instance=instance)
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
def unpause(self, instance):
msg = _("unpause not supported for vmwareapi")
raise NotImplementedError(msg)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be suspended.
if pwr_state == "poweredOn":
LOG.debug("Suspending the VM", instance=instance)
suspend_task = self._session._call_method(self._session.vim,
"SuspendVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Suspended the VM", instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
else:
LOG.debug("VM was already in suspended state. So returning "
"without doing anything", instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug("Resuming the VM", instance=instance)
suspend_task = self._session._call_method(
self._session.vim,
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Resumed the VM", instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
def rescue(self, context, instance, network_info, image_meta):
"""Rescue the specified instance.
- shutdown the instance VM.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
vm_util.power_off_instance(self._session, instance, vm_ref)
instance_name = instance.uuid + self._rescue_suffix
self.spawn(context, instance, image_meta,
None, None, network_info,
instance_name=instance_name,
power_on=False)
# Attach vmdk to the rescue VM
vmdk = vm_util.get_vmdk_info(self._session, vm_ref, instance.uuid)
rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
self._volumeops.attach_disk_to_vm(rescue_vm_ref,
instance,
vmdk.adapter_type,
vmdk.disk_type,
vmdk.path)
vm_util.power_on_instance(self._session, instance,
vm_ref=rescue_vm_ref)
def unrescue(self, instance, power_on=True):
"""Unrescue the specified instance."""
# Get the original vmdk_path
vm_ref = vm_util.get_vm_ref(self._session, instance)
vmdk = vm_util.get_vmdk_info(self._session, vm_ref, instance.uuid)
instance_name = instance.uuid + self._rescue_suffix
# detach the original instance disk from the rescue disk
vm_rescue_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_rescue_ref,
"VirtualMachine", "config.hardware.device")
device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk.path)
vm_util.power_off_instance(self._session, instance, vm_rescue_ref)
self._volumeops.detach_disk_from_vm(vm_rescue_ref, instance, device)
self._destroy_instance(instance, instance_name=instance_name)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def power_off(self, instance):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
"""
vm_util.power_off_instance(self._session, instance)
def power_on(self, instance):
vm_util.power_on_instance(self._session, instance)
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the clone disk step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
instance_uuid = instance.uuid
LOG.debug("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d",
{'instance_uuid': instance_uuid, 'progress': progress},
instance=instance)
instance.progress = progress
instance.save()
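    # Worked example (illustrative, not part of the original code): assuming
    # RESIZE_TOTAL_STEPS is 4, finishing step 2 reports
    # round(float(2) / 4 * 100) == 50, i.e. the instance's progress field is
    # bumped to 50%. The actual total lives in the module-level constant
    # RESIZE_TOTAL_STEPS used by the resize methods below.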
def migrate_disk_and_power_off(self, context, instance, dest,
flavor):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
# Checks if the migration needs a disk resize down.
if flavor['root_gb'] < instance['root_gb']:
reason = _("Unable to shrink disk.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Read the host_ref for the destination. If this is None then the
# VC will decide on placement
host_ref = self._get_host_ref_from_name(dest)
# 1. Power off the instance
vm_util.power_off_instance(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Disassociate the linked vsphere VM from the instance
vm_util.disassociate_vmref_from_instance(self._session, instance,
vm_ref,
suffix=self._migrate_suffix)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
ds_ref = ds_util.get_datastore(
self._session, self._cluster,
datastore_regex=self._datastore_regex).ref
dc_info = self.get_datacenter_ref_and_name(ds_ref)
# 3. Clone the VM for instance
vm_util.clone_vmref_for_instance(self._session, instance, vm_ref,
host_ref, ds_ref, dc_info.vmFolder)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
        # Destroy the original VM. The vm_ref needs to be searched for using
        # instance.uuid + self._migrate_suffix as the identifier: the VM will
        # not be found by its instanceUuid, but rather by the uuid buried in
        # its extraConfig.
vm_ref = vm_util.search_vm_ref_by_identifier(self._session,
instance.uuid + self._migrate_suffix)
if vm_ref is None:
LOG.debug("instance not present", instance=instance)
return
try:
LOG.debug("Destroying the VM", instance=instance)
destroy_task = self._session._call_method(
self._session.vim,
"Destroy_Task", vm_ref)
self._session._wait_for_task(destroy_task)
LOG.debug("Destroyed the VM", instance=instance)
except Exception as excep:
LOG.warning(_LW("In vmwareapi:vmops:confirm_migration, got this "
"exception while destroying the VM: %s"), excep)
def finish_revert_migration(self, context, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
vm_util.associate_vmref_for_instance(self._session, instance,
suffix=self._migrate_suffix)
if power_on:
vm_util.power_on_instance(self._session, instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
if resize_instance:
client_factory = self._session.vim.client.factory
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
instance)
vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
# Resize the disk (if larger)
old_root_gb = instance.system_metadata['old_instance_type_root_gb']
if instance['root_gb'] > int(old_root_gb):
root_disk_in_kb = instance['root_gb'] * units.Mi
vmdk_info = vm_util.get_vmdk_info(self._session, vm_ref,
instance.uuid)
vmdk_path = vmdk_info.path
data_store_ref = ds_util.get_datastore(self._session,
self._cluster, datastore_regex=self._datastore_regex).ref
dc_info = self.get_datacenter_ref_and_name(data_store_ref)
self._extend_virtual_disk(instance, root_disk_in_kb, vmdk_path,
dc_info.ref)
# TODO(ericwb): add extend for ephemeral disk
# 4. Start VM
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False):
"""Spawning live_migration operation for distributing high-load."""
vm_ref = vm_util.get_vm_ref(self._session, instance_ref)
host_ref = self._get_host_ref_from_name(dest)
if host_ref is None:
raise exception.HostNotFound(host=dest)
LOG.debug("Migrating VM to host %s", dest, instance=instance_ref)
try:
vm_migrate_task = self._session._call_method(
self._session.vim,
"MigrateVM_Task", vm_ref,
host=host_ref,
priority="defaultPriority")
self._session._wait_for_task(vm_migrate_task)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance_ref, dest, block_migration)
post_method(context, instance_ref, dest, block_migration)
LOG.debug("Migrated VM to host %s", dest, instance=instance_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_LI("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds"), instances_info)
for instance in instances:
LOG.info(_LI("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance):
"""Return data about the VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
"runtime.powerState"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, vm_props)
max_mem = int(query.get('summary.config.memorySizeMB', 0)) * 1024
num_cpu = int(query.get('summary.config.numCpu', 0))
return hardware.InstanceInfo(
state=VMWARE_POWER_STATES[query['runtime.powerState']],
max_mem_kb=max_mem,
mem_kb=max_mem,
num_cpu=num_cpu)
def _get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config",
"summary.quickStats",
"summary.runtime"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session,
vm_props)
data = {}
        # All of the values received are objects. Convert them to dictionaries
for value in query.values():
prop_dict = vim_util.object_to_dict(value, list_depth=1)
data.update(prop_dict)
return data
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
        # Add a namespace to all of the diagnostics
return {'vmware:' + k: v for k, v in data.items()}
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
state = data.get('powerState')
if state:
state = power_state.STATE_MAP[VMWARE_POWER_STATES[state]]
uptime = data.get('uptimeSeconds', 0)
config_drive = configdrive.required_by(instance)
diags = diagnostics.Diagnostics(state=state,
driver='vmwareapi',
config_drive=config_drive,
hypervisor_os='esxi',
uptime=uptime)
diags.memory_details.maximum = data.get('memorySizeMB', 0)
diags.memory_details.used = data.get('guestMemoryUsage', 0)
# TODO(garyk): add in cpu, nic and disk stats
return diags
def _get_vnc_console_connection(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
opt_value = self._session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
vm_util.VNC_CONFIG_KEY)
if opt_value:
port = int(opt_value.value)
else:
raise exception.ConsoleTypeUnavailable(console_type='vnc')
return {'port': port,
'internal_access_path': None}
@staticmethod
def _get_machine_id_str(network_info):
machine_id_str = ''
for vif in network_info:
# TODO(vish): add support for dns2
# TODO(sateesh): add support for injection of ipv6 configuration
network = vif['network']
ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
if len(subnets_v4) > 0:
if len(subnets_v4[0]['ips']) > 0:
ip_v4 = subnets_v4[0]['ips'][0]
if len(subnets_v4[0]['dns']) > 0:
dns = subnets_v4[0]['dns'][0]['address']
netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
gateway_v4 = subnets_v4[0]['gateway']['address']
broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
interface_str = ";".join([vif['address'],
ip_v4 and ip_v4['address'] or '',
netmask_v4 or '',
gateway_v4 or '',
broadcast_v4 or '',
dns or ''])
machine_id_str = machine_id_str + interface_str + '#'
return machine_id_str
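    # Illustrative example (not part of the original code): for a single VIF
    # with MAC "02:16:3e:aa:bb:cc", fixed IP 10.0.0.5, netmask 255.255.255.0,
    # gateway 10.0.0.1, broadcast 10.0.0.255 and DNS 8.8.8.8 (all hypothetical
    # values), _get_machine_id_str returns
    #   "02:16:3e:aa:bb:cc;10.0.0.5;255.255.255.0;10.0.0.1;10.0.0.255;8.8.8.8#"
    # i.e. one ';'-separated record per interface, each terminated by '#',
    # which the guest tools parse to reconfigure the NICs.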
def _set_machine_id(self, client_factory, instance, network_info,
vm_ref=None):
"""Set the machine id of the VM for guest tools to pick up
and reconfigure the network interfaces.
"""
if vm_ref is None:
vm_ref = vm_util.get_vm_ref(self._session, instance)
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory,
self._get_machine_id_str(network_info))
LOG.debug("Reconfiguring VM instance to set the machine id",
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, machine_id_change_spec)
LOG.debug("Reconfigured VM instance to set the machine id",
instance=instance)
@utils.synchronized('vmware.get_and_set_vnc_port')
def _get_and_set_vnc_config(self, client_factory, instance, vm_ref):
"""Set the vnc configuration of the VM."""
port = vm_util.get_vnc_port(self._session)
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port)
LOG.debug("Reconfiguring VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, vnc_config_spec)
LOG.debug("Reconfigured VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
def _get_ds_browser(self, ds_ref):
ds_browser = self._datastore_browser_mapping.get(ds_ref.value)
if not ds_browser:
ds_browser = self._session._call_method(
vim_util, "get_dynamic_property", ds_ref, "Datastore",
"browser")
self._datastore_browser_mapping[ds_ref.value] = ds_browser
return ds_browser
def _get_host_ref_from_name(self, host_name):
"""Get reference to the host with the name specified."""
host_objs = self._session._call_method(vim_util, "get_objects",
"HostSystem", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, host_objs)
for host in host_objs:
if hasattr(host, 'propSet'):
if host.propSet[0].val == host_name:
return host.obj
return None
def _create_folder_if_missing(self, ds_name, ds_ref, folder):
"""Create a folder if it does not exist.
        Currently there are two folders that are required on the datastore
- base folder - the folder to store cached images
- temp folder - the folder used for snapshot management and
image uploading
        This method is used to manage those folders and to ensure that
        they are created if they are missing.
        The ds_util method mkdir is used to check whether the folder
        exists. If it throws an exception 'FileAlreadyExistsException',
        then the folder already exists on the datastore.
"""
path = ds_util.DatastorePath(ds_name, folder)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
LOG.debug("Folder %s created.", path)
except vexc.FileAlreadyExistsException:
# NOTE(hartsocks): if the folder already exists, that
# just means the folder was prepped by another process.
pass
def check_cache_folder(self, ds_name, ds_ref):
"""Check that the cache folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._base_folder)
def check_temp_folder(self, ds_name, ds_ref):
"""Check that the temp folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._tmp_folder)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
client_factory = self._session.vim.client.factory
self._set_machine_id(client_factory, instance, network_info)
def manage_image_cache(self, context, instances):
if not CONF.remove_unused_base_images:
LOG.debug("Image aging disabled. Aging will not be done.")
return
datastores = ds_util.get_available_datastores(self._session,
self._cluster,
self._datastore_regex)
datastores_info = []
for ds in datastores:
dc_info = self.get_datacenter_ref_and_name(ds.ref)
datastores_info.append((ds, dc_info))
self._imagecache.update(context, instances, datastores_info)
def _get_valid_vms_from_retrieve_result(self, retrieve_result):
"""Returns list of valid vms from RetrieveResult object."""
lst_vm_names = []
while retrieve_result:
token = vm_util._get_token(retrieve_result)
for vm in retrieve_result.objects:
vm_name = None
conn_state = None
for prop in vm.propSet:
if prop.name == "name":
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
# Ignoring the orphaned or inaccessible VMs
if conn_state not in ["orphaned", "inaccessible"]:
lst_vm_names.append(vm_name)
if token:
retrieve_result = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return lst_vm_names
def instance_exists(self, instance):
try:
vm_util.get_vm_ref(self._session, instance)
return True
except exception.InstanceNotFound:
return False
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
vif_model = image_meta.get("hw_vif_model",
constants.DEFAULT_VIF_MODEL)
vif_model = vm_util.convert_vif_model(vif_model)
vif_info = vmwarevif.get_vif_dict(self._session, self._cluster,
vif_model, utils.is_neutron(), vif)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_attach_port_index(self._session, vm_ref)
client_factory = self._session.vim.client.factory
attach_config_spec = vm_util.get_network_attach_config_spec(
client_factory, vif_info, port_index)
LOG.debug("Reconfiguring VM to attach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
attach_config_spec)
except Exception as e:
LOG.error(_LE('Attaching network adapter failed. Exception: '
' %s'),
e, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
LOG.debug("Reconfigured VM to attach interface", instance=instance)
def detach_interface(self, instance, vif):
"""Detach an interface from the instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_vm_detach_port_index(self._session,
vm_ref,
vif['id'])
if port_index is None:
msg = _("No device with interface-id %s exists on "
"VM") % vif['id']
raise exception.NotFound(msg)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
device = vmwarevif.get_network_device(hardware_devices,
vif['address'])
if device is None:
msg = _("No device with MAC address %s exists on the "
"VM") % vif['address']
raise exception.NotFound(msg)
client_factory = self._session.vim.client.factory
detach_config_spec = vm_util.get_network_detach_config_spec(
client_factory, device, port_index)
LOG.debug("Reconfiguring VM to detach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
detach_config_spec)
except Exception as e:
LOG.error(_LE('Detaching network adapter failed. Exception: '
'%s'),
e, instance=instance)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
LOG.debug("Reconfigured VM to detach interface", instance=instance)
def _use_disk_image_as_full_clone(self, vm_ref, vi):
"""Uses cached image disk by copying it into the VM directory."""
instance_folder = vi.instance_name
root_disk_name = "%s.vmdk" % vi.instance_name
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(root_disk_ds_loc))
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(root_disk_ds_loc))
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, False)
def _sized_image_exists(self, sized_disk_ds_loc, ds_ref):
ds_browser = self._get_ds_browser(ds_ref)
return ds_util.file_exists(
self._session, ds_browser, sized_disk_ds_loc.parent,
sized_disk_ds_loc.basename)
def _use_disk_image_as_linked_clone(self, vm_ref, vi):
"""Uses cached image as parent of a COW child in the VM directory."""
sized_image_disk_name = "%s.vmdk" % vi.ii.image_id
if vi.root_gb > 0:
sized_image_disk_name = "%s.%s.vmdk" % (vi.ii.image_id, vi.root_gb)
sized_disk_ds_loc = vi.cache_image_folder.join(sized_image_disk_name)
# Ensure only a single thread extends the image at once.
# We do this by taking a lock on the name of the extended
# image. This allows multiple threads to create resized
# copies simultaneously, as long as they are different
# sizes. Threads attempting to create the same resized copy
# will be serialized, with only the first actually creating
# the copy.
#
# Note that the object is in a per-nova cache directory,
# so inter-nova locking is not a concern. Consequently we
# can safely use simple thread locks.
with lockutils.lock(str(sized_disk_ds_loc),
lock_file_prefix='nova-vmware-image'):
if not self._sized_image_exists(sized_disk_ds_loc,
vi.datastore.ref):
LOG.debug("Copying root disk of size %sGb", vi.root_gb)
try:
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(sized_disk_ds_loc))
except Exception as e:
LOG.warning(_LW("Root disk file creation "
"failed - %s"), e)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to copy cached '
'image %(source)s to '
'%(dest)s for resize: '
'%(error)s'),
{'source': vi.cache_image_path,
'dest': sized_disk_ds_loc,
'error': e.message})
try:
ds_util.file_delete(self._session,
sized_disk_ds_loc,
vi.dc_info.ref)
except vexc.FileNotFoundException:
# File was never created: cleanup not
# required
pass
            # Resize the copy to the appropriate size. No need
            # for cleanup here, as _extend_virtual_disk
            # already does it
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(sized_disk_ds_loc))
# Associate the sized image disk to the VM by attaching to the VM a
# COW child of said disk.
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(sized_disk_ds_loc),
vi.root_gb * units.Mi, vi.ii.linked_clone)
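    # Illustrative sketch of the lock naming used above (not part of the
    # original code; the datastore paths below are hypothetical examples of
    # str(sized_disk_ds_loc)): two spawns of the same image resized to 10Gb
    # both take
    #   lockutils.lock("[ds1] vmware_base/<image-id>/<image-id>.10.vmdk",
    #                  lock_file_prefix='nova-vmware-image')
    # and are serialized, while a 20Gb copy of the same image locks a
    # different name and proceeds in parallel.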
def _use_iso_image(self, vm_ref, vi):
"""Uses cached image as a bootable virtual cdrom."""
self._attach_cdrom_to_vm(
vm_ref, vi.instance, vi.datastore.ref,
str(vi.cache_image_path))
# Optionally create and attach blank disk
if vi.root_gb > 0:
instance_folder = vi.instance_name
root_disk_name = "%s.vmdk" % vi.instance_name
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
# It is pointless to COW a blank disk
linked_clone = False
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi)
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, linked_clone)
def _update_datacenter_cache_from_objects(self, dcs):
"""Updates the datastore/datacenter cache."""
while dcs:
token = vm_util._get_token(dcs)
for dco in dcs.objects:
dc_ref = dco.obj
ds_refs = []
prop_dict = vm_util.propset_dict(dco.propSet)
name = prop_dict.get('name')
vmFolder = prop_dict.get('vmFolder')
datastore_refs = prop_dict.get('datastore')
if datastore_refs:
datastore_refs = datastore_refs.ManagedObjectReference
for ds in datastore_refs:
ds_refs.append(ds.value)
else:
LOG.debug("Datacenter %s doesn't have any datastore "
"associated with it, ignoring it", name)
for ds_ref in ds_refs:
self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref,
name=name, vmFolder=vmFolder)
if token:
dcs = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
if not dc_info:
dcs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name", "datastore", "vmFolder"])
self._update_datacenter_cache_from_objects(dcs)
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
return dc_info
def list_instances(self):
"""Lists the VM instances that are registered with vCenter cluster."""
properties = ['name', 'runtime.connectionState']
LOG.debug("Getting list of instances from cluster %s",
self._cluster)
vms = []
if self._root_resource_pool:
vms = self._session._call_method(
vim_util, 'get_inner_objects', self._root_resource_pool, 'vm',
'VirtualMachine', properties)
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug("Got total of %s instances", str(len(lst_vm_names)))
return lst_vm_names
def get_vnc_console(self, instance):
"""Return connection info for a vnc console using vCenter logic."""
# vCenter does not run virtual machines and does not run
# a VNC proxy. Instead, you need to tell OpenStack to talk
# directly to the ESX host running the VM you are attempting
# to connect to via VNC.
vnc_console = self._get_vnc_console_connection(instance)
host_name = vm_util.get_host_name_for_vm(
self._session,
instance)
vnc_console['host'] = host_name
# NOTE: VM can move hosts in some situations. Debug for admins.
LOG.debug("VM %(uuid)s is currently on host %(host_name)s",
{'uuid': instance.name, 'host_name': host_name},
instance=instance)
return ctype.ConsoleVNC(**vnc_console)
| Metaswitch/calico-nova | nova/virt/vmwareapi/vmops.py | Python | apache-2.0 | 81,484 |
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Abstraction layer for networking functionalities.
This module defines internal APIs for duplicated features between OpenStack
Compute and OpenStack Networking. The networking abstraction layer expects
methods defined in this module.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class FloatingIpManager(object):
"""Abstract class to implement Floating IP methods
The FloatingIP object returned from methods in this class
    must contain the following attributes:
* id: ID of Floating IP
* ip: Floating IP address
* pool: ID of Floating IP pool from which the address is allocated
* fixed_ip: Fixed IP address of a VIF associated with the address
* port_id: ID of a VIF associated with the address
(instance_id when Nova floating IP is used)
    * instance_id: Instance ID of the instance associated with the Floating IP
"""
@abc.abstractmethod
def list_pools(self):
"""Fetches a list of all floating IP pools.
A list of FloatingIpPool objects is returned.
FloatingIpPool object is an APIResourceWrapper/APIDictWrapper
where 'id' and 'name' attributes are defined.
"""
pass
@abc.abstractmethod
def list(self):
"""Fetches a list all floating IPs.
A returned value is a list of FloatingIp object.
"""
pass
@abc.abstractmethod
def get(self, floating_ip_id):
"""Fetches the floating IP.
It returns a FloatingIp object corresponding to floating_ip_id.
"""
pass
@abc.abstractmethod
def allocate(self, pool=None):
"""Allocates a floating IP to the tenant.
        You must provide a pool name or id for which you would like to
        allocate a floating IP.
"""
pass
@abc.abstractmethod
def release(self, floating_ip_id):
"""Releases a floating IP specified."""
pass
@abc.abstractmethod
def associate(self, floating_ip_id, port_id):
"""Associates the floating IP to the port.
port_id is a fixed IP of an instance (Nova) or
a port_id attached to a VNIC of an instance.
"""
pass
@abc.abstractmethod
def disassociate(self, floating_ip_id):
"""Disassociates the floating IP specified."""
pass
@abc.abstractmethod
def list_targets(self):
"""Returns a list of association targets of instance VIFs.
Each association target is represented as FloatingIpTarget object.
FloatingIpTarget is a APIResourceWrapper/APIDictWrapper and
'id' and 'name' attributes must be defined in each object.
FloatingIpTarget.id can be passed as port_id in associate().
FloatingIpTarget.name is displayed in Floating Ip Association Form.
"""
pass
@abc.abstractmethod
def get_target_id_by_instance(self, instance_id, target_list=None):
"""Returns a target ID of floating IP association.
Based on a backend implementation.
:param instance_id: ID of target VM instance
:param target_list: (optional) a list returned by list_targets().
If specified, looking up is done against the specified list
to save extra API calls to a back-end. Otherwise a target
information is retrieved from a back-end inside the method.
"""
pass
@abc.abstractmethod
def list_target_id_by_instance(self, instance_id, target_list=None):
"""Returns a list of instance's target IDs of floating IP association.
Based on the backend implementation
:param instance_id: ID of target VM instance
:param target_list: (optional) a list returned by list_targets().
If specified, looking up is done against the specified list
to save extra API calls to a back-end. Otherwise target list
is retrieved from a back-end inside the method.
"""
pass
@abc.abstractmethod
def is_simple_associate_supported(self):
"""Returns True if the default floating IP pool is enabled."""
pass
@abc.abstractmethod
def is_supported(self):
"""Returns True if floating IP feature is supported."""
pass
@six.add_metaclass(abc.ABCMeta)
class SecurityGroupManager(object):
"""Abstract class to implement Security Group methods
SecurityGroup object returned from methods in this class
    must contain the following attributes:
* id: ID of Security Group (int for Nova, uuid for Neutron)
* name
* description
* tenant_id
* rules: A list of SecurityGroupRule objects
SecurityGroupRule object should have the following attributes
(The attribute names and their formats are borrowed from nova
security group implementation):
* id
* direction
* ethertype
* parent_group_id: security group the rule belongs to
* ip_protocol
* from_port: lower limit of allowed port range (inclusive)
* to_port: upper limit of allowed port range (inclusive)
    * ip_range: remote IP CIDR (source for ingress, dest for egress).
      The value should be of the form "{'cidr': <cidr>}"
    * group: remote security group. The value should be of the form
      "{'name': <secgroup_name>}"
"""
@abc.abstractmethod
def list(self):
"""Fetches a list all security groups.
A returned value is a list of SecurityGroup object.
"""
pass
@abc.abstractmethod
def get(self, sg_id):
"""Fetches the security group.
It returns a SecurityGroup object corresponding to sg_id.
"""
pass
@abc.abstractmethod
def create(self, name, desc):
"""Create a new security group.
        It returns the created SecurityGroup object.
"""
pass
@abc.abstractmethod
def delete(self, sg_id):
"""Delete the specified security group."""
pass
@abc.abstractmethod
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
cidr=None, group_id=None):
"""Create a new security group rule.
        :param parent_group_id: ID of the security group the rule is added to
:param direction: ingress or egress
:param ethertype: ipv4, ipv6, ...
:param ip_protocol: tcp, udp, icmp
:param from_port: L4 port range min
:param to_port: L4 port range max
:param cidr: Source IP CIDR
:param group_id: ID of Source Security Group
"""
pass
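    # Hypothetical usage sketch (not part of the original module): opening
    # TCP port 22 from anywhere on security group 42 through a concrete
    # implementation named "manager" would look roughly like:
    #
    #   manager.rule_create(parent_group_id=42,
    #                       direction='ingress', ethertype='IPv4',
    #                       ip_protocol='tcp', from_port=22, to_port=22,
    #                       cidr='0.0.0.0/0')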
@abc.abstractmethod
def rule_delete(self, sgr_id):
"""Delete the specified security group rule."""
pass
@abc.abstractmethod
def list_by_instance(self, instance_id):
"""Get security groups of an instance."""
pass
@abc.abstractmethod
def update_instance_security_group(self, instance_id,
new_security_group_ids):
"""Update security groups of a specified instance."""
pass
| FNST-OpenStack/horizon | openstack_dashboard/api/network_base.py | Python | apache-2.0 | 7,722 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django
class Migration(migrations.Migration):
dependencies = [
("tests", "0024_tableblockstreampage"),
]
operations = [
migrations.CreateModel(
name="AdvertWithCustomPrimaryKey",
fields=[
(
"advert_id",
models.CharField(max_length=255, primary_key=True, serialize=False),
),
("url", models.URLField(blank=True, null=True)),
("text", models.CharField(max_length=255)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="SnippetChooserModelWithCustomPrimaryKey",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"advertwithcustomprimarykey",
models.ForeignKey(
help_text="help text",
on_delete=django.db.models.deletion.CASCADE,
to="tests.AdvertWithCustomPrimaryKey",
),
),
],
),
]
| wagtail/wagtail | wagtail/tests/testapp/migrations/0025_advertwithcustomprimarykey.py | Python | bsd-3-clause | 1,505 |
#! /usr/bin/python
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_overlay_widgets.py:
# Code generation for overlay widgets. Should be run when the widgets declaration file,
# overlay_widgets.json, is changed.
# NOTE: don't run this script directly. Run scripts/run_code_generation.py.
from datetime import date
import json
import sys
out_file = 'Overlay_autogen.cpp'
in_file = 'overlay_widgets.json'
template_out_file = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Autogenerated overlay widget declarations.
#include "libANGLE/renderer/driver_utils.h"
#include "libANGLE/Overlay.h"
#include "libANGLE/OverlayWidgets.h"
#include "libANGLE/Overlay_font_autogen.h"
namespace gl
{{
using namespace overlay;
namespace
{{
int GetFontSize(int fontSize, bool largeFont)
{{
if (largeFont && fontSize > 0)
{{
return fontSize - 1;
}}
return fontSize;
}}
}} // anonymous namespace
void Overlay::initOverlayWidgets()
{{
const bool kLargeFont = rx::IsAndroid();
{init_widgets}
}}
}} // namespace gl
"""
template_init_widget = u"""{{
const int32_t fontSize = GetFontSize({font_size}, kLargeFont);
const int32_t offsetX = {offset_x};
const int32_t offsetY = {offset_y};
const int32_t width = {width};
const int32_t height = {height};
widget->{subwidget}type = WidgetType::{type};
widget->{subwidget}fontSize = fontSize;
widget->{subwidget}coords[0] = {coord0};
widget->{subwidget}coords[1] = {coord1};
widget->{subwidget}coords[2] = {coord2};
widget->{subwidget}coords[3] = {coord3};
widget->{subwidget}color[0] = {color_r};
widget->{subwidget}color[1] = {color_g};
widget->{subwidget}color[2] = {color_b};
widget->{subwidget}color[3] = {color_a};
}}
"""
def extract_type_and_constructor(properties):
constructor = properties['type']
args_separated = constructor.split('(', 1)
if len(args_separated) == 1:
return constructor, constructor
type_no_constructor = args_separated[0]
return type_no_constructor, constructor
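# Worked example (illustrative; the widget types shown are only examples of
# what overlay_widgets.json may contain): a properties entry with
# 'type': 'RunningGraph(60)' yields ('RunningGraph', 'RunningGraph(60)'),
# while a plain 'type': 'Text' yields ('Text', 'Text').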
def get_font_size_constant(properties):
return 'kFontLayer' + properties['font'].capitalize()
def is_graph_type(type):
return type == 'RunningGraph' or type == 'RunningHistogram'
def is_text_type(type):
return not is_graph_type(type)
class OverlayWidget:
def __init__(self, properties, is_graph_description=False):
if not is_graph_description:
self.name = properties['name']
self.type, self.constructor = extract_type_and_constructor(properties)
self.extract_common(properties)
if is_graph_type(self.type):
description_properties = properties['description']
description_properties['type'] = 'Text'
self.description = OverlayWidget(description_properties, True)
def extract_common(self, properties):
self.color = properties['color']
self.coords = properties['coords']
if is_graph_type(self.type):
self.bar_width = properties['bar_width']
self.height = properties['height']
else:
self.font = get_font_size_constant(properties)
self.length = properties['length']
self.negative_alignment = [False, False]
def is_negative_coord(coords, axis, widgets_so_far):
if isinstance(coords[axis], unicode):
coord_split = coords[axis].split('.')
# The coordinate is in the form other_widget.edge.mode
# We simply need to know if other_widget's coordinate is negative or not.
return widgets_so_far[coord_split[0]].negative_alignment[axis]
return coords[axis] < 0
def set_alignment_flags(overlay_widget, widgets_so_far):
overlay_widget.negative_alignment[0] = is_negative_coord(overlay_widget.coords, 0,
widgets_so_far)
overlay_widget.negative_alignment[1] = is_negative_coord(overlay_widget.coords, 1,
widgets_so_far)
if is_graph_type(overlay_widget.type):
set_alignment_flags(overlay_widget.description, widgets_so_far)
def get_offset_helper(widget, axis, smaller_coord_side):
# Assume axis is X. This function returns two values:
# - An offset where the bounding box is placed at,
# - Whether this offset is for the left or right edge.
#
# The input coordinate (widget.coord[axis]) is either:
#
# - a number: in this case, the offset is that number, and its sign determines whether this refers to the left or right edge of the bounding box.
# - other_widget.edge.mode: this has multiple possibilities:
# * edge=left, mode=align: the offset is other_widget.left, the edge is left.
# * edge=left, mode=adjacent: the offset is other_widget.left, the edge is right.
# * edge=right, mode=align: the offset is other_widget.right, the edge is right.
# * edge=right, mode=adjacent: the offset is other_widget.right, the edge is left.
#
# The case for the Y axis is similar, with the edge values being top or bottom.
coord = widget.coords[axis]
if not isinstance(coord, unicode):
is_left = coord >= 0
return coord, is_left
coord_split = coord.split('.')
is_left = coord_split[1] == smaller_coord_side
is_align = coord_split[2] == 'align'
other_widget_coords = 'mState.mOverlayWidgets[WidgetId::' + coord_split[0] + ']->coords'
other_widget_coord_index = axis + (0 if is_left else 2)
offset = other_widget_coords + '[' + str(other_widget_coord_index) + ']'
return offset, is_left == is_align
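# Worked example (illustrative; "FPS" is just an example widget name): a plain
# numeric coordinate of -5 on the X axis returns (-5, False), i.e. the offset
# names the right edge of the bounding box. A coordinate of
# "FPS.right.adjacent" returns the C++ expression
# "mState.mOverlayWidgets[WidgetId::FPS]->coords[2]" together with True,
# because a widget placed adjacent to FPS's right edge starts its own left
# edge there.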
def get_offset_x(widget):
return get_offset_helper(widget, 0, 'left')
def get_offset_y(widget):
return get_offset_helper(widget, 1, 'top')
def get_bounding_box_coords(offset, width, offset_is_left, is_left_aligned):
# See comment in generate_widget_init_helper. This function is implementing the following:
#
# - offset_is_left && is_left_aligned: [offset, offset + width]
# - offset_is_left && !is_left_aligned: [offset, std::min(offset + width, -1)]
# - !offset_is_left && is_left_aligned: [std::max(1, offset - width), offset]
# - !offset_is_left && !is_left_aligned: [offset - width, offset]
coord_left = offset if offset_is_left else (offset + ' - ' + width)
coord_right = (offset + ' + ' + width) if offset_is_left else offset
if offset_is_left and not is_left_aligned:
coord_right = 'std::min(' + coord_right + ', -1)'
if not offset_is_left and is_left_aligned:
coord_left = 'std::max(' + coord_left + ', 1)'
return coord_left, coord_right
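# Worked example (illustrative): get_bounding_box_coords('offsetX', 'width',
# offset_is_left=False, is_left_aligned=False), i.e. a widget anchored to the
# right edge of the screen, yields the C++ expressions
# ('offsetX - width', 'offsetX'). With offset_is_left=True and
# is_left_aligned=False it yields ('offsetX', 'std::min(offsetX + width, -1)'),
# clamping the right coordinate so the box stays on the right-aligned side.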
def generate_widget_init_helper(widget, is_graph_description=False):
font_size = '0'
# Common attributes
color = [channel / 255.0 for channel in widget.color]
offset_x, offset_x_is_left = get_offset_x(widget)
offset_y, offset_y_is_top = get_offset_y(widget)
if is_text_type(widget.type):
        # Attributes derived from text properties
font_size = widget.font
width = str(widget.length) + ' * kFontGlyphWidths[fontSize]'
height = 'kFontGlyphHeights[fontSize]'
else:
        # Attributes derived from graph properties
width = str(widget.bar_width) + ' * static_cast<uint32_t>(widget->runningValues.size())'
height = widget.height
is_left_aligned = not widget.negative_alignment[0]
is_top_aligned = not widget.negative_alignment[1]
# We have offset_x, offset_y, width and height which together determine the bounding box. If
# offset_x_is_left, the bounding box X would be in [offset_x, offset_x + width], otherwise it
# would be in [offset_x - width, offset_x]. Similarly for y. Since we use negative values to
# mean aligned to the right side of the screen, we need to make sure that:
#
# - if left aligned: offset_x - width is at minimum 1
# - if right aligned: offset_x + width is at maximum -1
#
# We therefore have the following combinations for the X axis:
#
# - offset_x_is_left && is_left_aligned: [offset_x, offset_x + width]
# - offset_x_is_left && !is_left_aligned: [offset_x, std::min(offset_x + width, -1)]
# - !offset_x_is_left && is_left_aligned: [std::max(1, offset_x - width), offset_x]
# - !offset_x_is_left && !is_left_aligned: [offset_x - width, offset_x]
#
# Similarly for y.
coord0, coord2 = get_bounding_box_coords('offsetX', 'width', offset_x_is_left, is_left_aligned)
coord1, coord3 = get_bounding_box_coords('offsetY', 'height', offset_y_is_top, is_top_aligned)
return template_init_widget.format(
subwidget='description.' if is_graph_description else '',
offset_x=offset_x,
offset_y=offset_y,
width=width,
height=height,
type=widget.type,
font_size=font_size,
coord0=coord0,
coord1=coord1,
coord2=coord2,
coord3=coord3,
color_r=color[0],
color_g=color[1],
color_b=color[2],
color_a=color[3])
def generate_widget_init(widget):
widget_init = '{\n' + widget.type + ' *widget = new ' + widget.constructor + ';\n'
widget_init += generate_widget_init_helper(widget)
widget_init += 'mState.mOverlayWidgets[WidgetId::' + widget.name + '].reset(widget);\n'
if is_graph_type(widget.type):
widget_init += generate_widget_init_helper(widget.description, True)
widget_init += '}\n'
return widget_init
def main():
if len(sys.argv) == 2 and sys.argv[1] == 'inputs':
print(in_file)
return
if len(sys.argv) == 2 and sys.argv[1] == 'outputs':
print(out_file)
return
with open(in_file) as fin:
layout = json.loads(fin.read())
    # Read the layouts from the json file and determine alignment of widgets
    # (as they can refer to other widgets).
overlay_widgets = {}
for widget_properties in layout['widgets']:
widget = OverlayWidget(widget_properties)
overlay_widgets[widget.name] = widget
set_alignment_flags(widget, overlay_widgets)
# Go over the widgets again and generate initialization code. Note that we need to iterate over
# the widgets in order, so we can't use the overlay_widgets dictionary for iteration.
init_widgets = []
for widget_properties in layout['widgets']:
init_widgets.append(generate_widget_init(overlay_widgets[widget_properties['name']]))
with open(out_file, 'w') as outfile:
outfile.write(
template_out_file.format(
script_name=__file__,
copyright_year=date.today().year,
input_file_name=in_file,
out_file_name=out_file,
init_widgets='\n'.join(init_widgets)))
outfile.close()
if __name__ == '__main__':
sys.exit(main())
| endlessm/chromium-browser | third_party/angle/src/libANGLE/gen_overlay_widgets.py | Python | bsd-3-clause | 11,216 |
import collections
import math
from typing import List
class Solution:
def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:
m, n = len(matrix), len(matrix[0])
dist = [[math.inf] * n for _ in range(m)]
Q = collections.deque()
for i in range(m):
for j in range(n):
if matrix[i][j] == 0:
dist[i][j] = 0
Q.append((i, j))
while Q:
r, c = Q.popleft()
for nr, nc in (r-1, c), (r+1, c), (r, c-1), (r, c+1):
if 0 <= nr < m and 0 <= nc < n and dist[nr][nc] > dist[r][c] + 1:
dist[nr][nc] = dist[r][c] + 1
Q.append((nr, nc))
return dist
class Solution2:
def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:
m, n = len(matrix), len(matrix[0])
dist = [[math.inf] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if matrix[i][j] == 0:
dist[i][j] = 0
else:
if i > 0:
dist[i][j] = min(dist[i][j], dist[i - 1][j] + 1)
if j > 0:
dist[i][j] = min(dist[i][j], dist[i][j - 1] + 1)
for i in range(m - 1, -1, -1):
for j in range(n - 1, -1, -1):
if matrix[i][j] == 1:
if i < m - 1:
dist[i][j] = min(dist[i][j], dist[i + 1][j] + 1)
if j < n - 1:
dist[i][j] = min(dist[i][j], dist[i][j + 1] + 1)
return dist
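# Quick self-check (illustrative, not part of the original solutions): both
# the BFS version and the two-pass DP version should agree on a small grid.
if __name__ == "__main__":
    grid = [[0, 0, 0],
            [0, 1, 0],
            [1, 1, 1]]
    expected = [[0, 0, 0],
                [0, 1, 0],
                [1, 2, 1]]
    assert Solution().updateMatrix(grid) == expected
    assert Solution2().updateMatrix(grid) == expected
    print("both solutions agree:", expected)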
| jiadaizhao/LeetCode | 0501-0600/0542-01 Matrix/0542-01 Matrix.py | Python | mit | 1,632 |
# -*- coding: utf-8 -*-
"""
solace.views.themes
~~~~~~~~~~~~~~~~~~~
Implements support for the themes.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import mimetypes
from werkzeug import Response, wrap_file
from werkzeug.exceptions import NotFound
from solace.templating import get_theme
from solace import settings
def get_resource(request, theme, file):
"""Returns a file from the theme."""
theme = get_theme(theme)
if theme is None:
raise NotFound()
f = theme.open_resource(file)
if f is None:
raise NotFound()
resp = Response(wrap_file(request.environ, f),
mimetype=mimetypes.guess_type(file)[0] or 'text/plain',
direct_passthrough=True)
resp.add_etag()
return resp.make_conditional(request)
| mitsuhiko/solace | solace/views/themes.py | Python | bsd-3-clause | 892 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import gzip
import bz2
import zipfile
from tempfile import NamedTemporaryFile
from petl.compat import PY2
from petl.test.helpers import ieq, eq_
import petl as etl
from petl.io.sources import MemorySource, PopenSource, ZipSource, \
StdoutSource, GzipSource, BZ2Source
def test_memorysource():
tbl1 = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
# test writing to a string buffer
ss = MemorySource()
etl.tocsv(tbl1, ss)
expect = "foo,bar\r\na,1\r\nb,2\r\nc,2\r\n"
if not PY2:
expect = expect.encode('ascii')
actual = ss.getvalue()
eq_(expect, actual)
# test reading from a string buffer
tbl2 = etl.fromcsv(MemorySource(actual))
ieq(tbl1, tbl2)
ieq(tbl1, tbl2)
# test appending
etl.appendcsv(tbl1, ss)
actual = ss.getvalue()
expect = "foo,bar\r\na,1\r\nb,2\r\nc,2\r\na,1\r\nb,2\r\nc,2\r\n"
if not PY2:
expect = expect.encode('ascii')
eq_(expect, actual)
def test_memorysource_2():
data = 'foo,bar\r\na,1\r\nb,2\r\nc,2\r\n'
if not PY2:
data = data.encode('ascii')
actual = etl.fromcsv(MemorySource(data))
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
ieq(expect, actual)
ieq(expect, actual)
def test_popensource():
expect = (('foo', 'bar'),)
delimiter = ' '
actual = etl.fromcsv(PopenSource(r'echo foo bar',
shell=True),
delimiter=delimiter)
ieq(expect, actual)
def test_zipsource():
# setup
tbl = [('foo', 'bar'), ('a', '1'), ('b', '2')]
fn_tsv = NamedTemporaryFile().name
etl.totsv(tbl, fn_tsv)
fn_zip = NamedTemporaryFile().name
z = zipfile.ZipFile(fn_zip, mode='w')
z.write(fn_tsv, 'data.tsv')
z.close()
# test
actual = etl.fromtsv(ZipSource(fn_zip, 'data.tsv'))
ieq(tbl, actual)
def test_stdoutsource():
tbl = [('foo', 'bar'), ('a', 1), ('b', 2)]
etl.tocsv(tbl, StdoutSource(), encoding='ascii')
etl.tohtml(tbl, StdoutSource(), encoding='ascii')
etl.topickle(tbl, StdoutSource())
def test_stdoutsource_unicode():
tbl = [('foo', 'bar'),
(u'Արամ Խաչատրյան', 1),
(u'Johann Strauß', 2)]
etl.tocsv(tbl, StdoutSource(), encoding='utf-8')
etl.tohtml(tbl, StdoutSource(), encoding='utf-8')
etl.topickle(tbl, StdoutSource())
def test_gzipsource():
# setup
tbl = [('foo', 'bar'), ('a', '1'), ('b', '2')]
fn = NamedTemporaryFile().name + '.gz'
expect = b"foo,bar\na,1\nb,2\n"
# write explicit
etl.tocsv(tbl, GzipSource(fn), lineterminator='\n')
actual = gzip.open(fn).read()
eq_(expect, actual)
# write implicit
etl.tocsv(tbl, fn, lineterminator='\n')
actual = gzip.open(fn).read()
eq_(expect, actual)
# read explicit
tbl2 = etl.fromcsv(GzipSource(fn))
ieq(tbl, tbl2)
# read implicit
tbl2 = etl.fromcsv(fn)
ieq(tbl, tbl2)
def test_bzip2source():
# setup
tbl = [('foo', 'bar'), ('a', '1'), ('b', '2')]
fn = NamedTemporaryFile().name + '.bz2'
expect = b"foo,bar\na,1\nb,2\n"
# write explicit
etl.tocsv(tbl, BZ2Source(fn), lineterminator='\n')
actual = bz2.BZ2File(fn).read()
eq_(expect, actual)
# write implicit
etl.tocsv(tbl, fn, lineterminator='\n')
actual = bz2.BZ2File(fn).read()
eq_(expect, actual)
# read explicit
tbl2 = etl.fromcsv(BZ2Source(fn))
ieq(tbl, tbl2)
# read implicit
tbl2 = etl.fromcsv(fn)
ieq(tbl, tbl2)
| psnj/petl | petl/test/io/test_sources.py | Python | mit | 3,696 |
from server_group import ServerGroup # NOQA
| cloudpassage-community/halo_api_examples | app/halo_api_examples/server_group/__init__.py | Python | bsd-2-clause | 44 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.snapshots \
import forms as vol_snapshot_forms
from openstack_dashboard.dashboards.admin.snapshots \
import tables as vol_snapshot_tables
from openstack_dashboard.dashboards.admin.snapshots \
import tabs as vol_snapshot_tabs
from openstack_dashboard.dashboards.project.snapshots \
import views
class SnapshotsView(tables.PagedTableMixin, tables.DataTableView):
table_class = vol_snapshot_tables.VolumeSnapshotsTable
page_title = _("Volume Snapshots")
def get_data(self):
needs_gs = False
if cinder.is_volume_service_enabled(self.request):
try:
marker, sort_dir = self._get_marker()
snapshots, self._has_more_data, self._has_prev_data = \
cinder.volume_snapshot_list_paged(
self.request, paginate=True, marker=marker,
sort_dir=sort_dir, search_opts={'all_tenants': True})
volumes = cinder.volume_list(
self.request,
search_opts={'all_tenants': True})
volumes = dict((v.id, v) for v in volumes)
except Exception:
snapshots = []
volumes = {}
exceptions.handle(self.request, _("Unable to retrieve "
"volume snapshots."))
needs_gs = any(getattr(snapshot, 'group_snapshot_id', None)
for snapshot in snapshots)
if needs_gs:
try:
group_snapshots = cinder.group_snapshot_list(
self.request, search_opts={'all_tenants': True})
group_snapshots = dict((gs.id, gs) for gs
in group_snapshots)
except Exception:
group_snapshots = {}
exceptions.handle(self.request,
_("Unable to retrieve group snapshots."))
# Gather our tenants to correlate against volume IDs
try:
tenants, has_more = keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _('Unable to retrieve project '
'information of volume snapshots.')
exceptions.handle(self.request, msg)
tenant_dict = dict((t.id, t) for t in tenants)
for snapshot in snapshots:
volume = volumes.get(snapshot.volume_id)
if needs_gs:
group_snapshot = group_snapshots.get(
snapshot.group_snapshot_id)
snapshot.group_snapshot = group_snapshot
else:
snapshot.group_snapshot = None
tenant_id = snapshot.project_id
tenant = tenant_dict.get(tenant_id, None)
snapshot._volume = volume
snapshot.tenant_name = getattr(tenant, "name", None)
snapshot.host_name = getattr(
volume, 'os-vol-host-attr:host', None)
else:
snapshots = []
return snapshots
class UpdateStatusView(forms.ModalFormView):
form_class = vol_snapshot_forms.UpdateStatus
modal_id = "update_volume_snapshot_status"
template_name = 'admin/snapshots/update_status.html'
submit_label = _("Update Status")
submit_url = "horizon:admin:snapshots:update_status"
success_url = reverse_lazy("horizon:admin:snapshots:index")
page_title = _("Update Volume Snapshot Status")
@memoized.memoized_method
def get_object(self):
snap_id = self.kwargs['snapshot_id']
try:
self._object = cinder.volume_snapshot_get(self.request,
snap_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume snapshot.'),
redirect=self.success_url)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context['snapshot_id'] = self.kwargs["snapshot_id"]
args = (self.kwargs['snapshot_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
snapshot = self.get_object()
return {'snapshot_id': self.kwargs["snapshot_id"],
'status': snapshot.status}
class DetailView(views.DetailView):
tab_group_class = vol_snapshot_tabs.SnapshotDetailsTabs
volume_url = 'horizon:admin:volumes:detail'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
snapshot = self.get_data()
snapshot.volume_url = reverse(self.volume_url,
args=(snapshot.volume_id,))
table = vol_snapshot_tables.VolumeSnapshotsTable(self.request)
context["snapshot"] = snapshot
context["actions"] = table.render_row_actions(snapshot)
return context
@staticmethod
def get_redirect_url():
return reverse('horizon:admin:snapshots:index')
| NeCTAR-RC/horizon | openstack_dashboard/dashboards/admin/snapshots/views.py | Python | apache-2.0 | 6,201 |
# -*- coding: utf-8 -*-
# MDclt.primary.raw.py
#
# Copyright (C) 2012-2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Classes for transfer of data from raw text files to h5
.. todo:
- Look into improving speed (np.loadtxt or np.genfromtxt may actually be
preferable)
    - Allow scaleoffset to be specified
"""
################################### MODULES ####################################
from __future__ import division, print_function
import os, sys
import numpy as np
from MDclt import Block, Block_Acceptor, primary
################################## FUNCTIONS ###################################
def add_parser(tool_subparsers, **kwargs):
"""
Adds subparser for this analysis to a nascent argument parser
**Arguments:**
:*tool_subparsers*: argparse subparsers object to add subparser
:*args*: Passed to tool_subparsers.add_parser(...)
:*kwargs*: Passed to tool_subparsers.add_parser(...)
"""
from MDclt import overridable_defaults
subparser = primary.add_parser(tool_subparsers,
name = "raw",
help = "Load raw text files")
arg_groups = {ag.title: ag for ag in subparser._action_groups}
arg_groups["input"].add_argument(
"-frames_per_file",
type = int,
required = False,
help = "Number of frames in each file; used to check if new data " +
"is present")
arg_groups["input"].add_argument(
"-dimensions",
type = int,
required = False,
nargs = "*",
help = "Additional dimensions in dataset; if multidimensional " +
"(optional)")
arg_groups["output"].add_argument(
"-output",
type = str,
required = True,
nargs = "+",
action = overridable_defaults(nargs = 2, defaults = {1: "/dataset"}),
help = "H5 file and optionally address in which to output data " +
"(default address: /dataset)")
subparser.set_defaults(analysis = command_line)
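# Command-line sketch (hypothetical executable name; the positional infile
# arguments come from the shared primary parser, not from this module):
#
#   <mdclt> raw run*.dat -frames_per_file 1000 -output data.h5 /dataset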
def command_line(n_cores = 1, **kwargs):
"""
Provides command line functionality for this analysis
**Arguments:**
:*n_cores*: Number of cores to use
.. todo:
- Figure out syntax to get this into MDclt.primary
"""
from multiprocessing import Pool
from MDclt import pool_director
block_generator = Raw_Block_Generator(**kwargs)
block_acceptor = Block_Acceptor(outputs = block_generator.outputs,
**kwargs)
if n_cores == 1: # Serial
for block in block_generator:
block()
block_acceptor.send(block)
else: # Parallel (processes)
pool = Pool(n_cores)
for block in pool.imap_unordered(pool_director, block_generator):
block_acceptor.send(block)
pool.close()
pool.join()
block_acceptor.close()
################################### CLASSES ####################################
class Raw_Block_Generator(primary.Primary_Block_Generator):
"""
Generator class that prepares blocks of analysis
"""
def __init__(self, infiles, dimensions, output, frames_per_file = None,
**kwargs):
"""
Initializes generator
**Arguments:**
:*output*: List including path to h5 file and
address within h5 file
:*infiles*: List of infiles
:*frames_per_file*: Number of frames in each infile
:*dimensions*: Additional dimensions in dataset; if
multidimensional (optional)
.. todo:
- Intelligently break lists of infiles into blocks larger
than 1
"""
# Input
self.infiles = infiles
self.frames_per_file = frames_per_file
self.infiles_per_block = 5
if dimensions is None:
self.dimensions = []
else:
self.dimensions = dimensions
# Output
self.outputs = [(output[0], os.path.normpath(output[1]))]
# Action
self.dtype = np.float32
super(Raw_Block_Generator, self).__init__(**kwargs)
# Output
self.outputs = [(output[0], os.path.normpath(output[1]),
tuple([self.final_slice.stop - self.final_slice.start]
+ self.dimensions))]
def next(self):
"""
Prepares and returns next Block of analysis
"""
if len(self.infiles) == 0:
raise StopIteration()
else:
block_infiles = self.infiles[:self.infiles_per_block]
block_slice = slice(self.start_index,
self.start_index + len(block_infiles) * self.frames_per_file, 1)
self.infiles = self.infiles[self.infiles_per_block:]
self.start_index += len(block_infiles) * self.frames_per_file
return Raw_Block(infiles = block_infiles,
output = self.outputs[0],
slc = block_slice,
dimensions = self.dimensions,
dtype = self.dtype)
class Raw_Block(Block):
"""
Independent block of analysis
"""
def __init__(self, infiles, output, dtype, slc, dimensions = [],
attrs = {}, **kwargs):
"""
Initializes block of analysis
**Arguments:**
:*infiles*: List of infiles
:*output*: For each dataset, path to h5 file, address
within h5 file, and if appropriate final
shape of dataset; list of tuples
:*dtype*: Data type of nascent dataset
:*slc*: Slice within dataset at which this block
will be stored
:*dimensions*: Additional dimensions in dataset; if
multidimensional (optional)
:*attrs*: Attributes to add to dataset
"""
super(Raw_Block, self).__init__(**kwargs)
self.infiles = infiles
self.dimensions = dimensions
self.output = output
self.datasets = {self.output: dict(slc = slc, attrs = attrs,
kwargs = dict(maxshape = [None] + dimensions,
scaleoffset = 4))}
def __call__(self, **kwargs):
"""
Runs this block of analysis
"""
from subprocess import Popen, PIPE
# Load raw data into numpy using shell commands; there may be a faster
# way to do this; but this seems faster than np.loadtxt()
        # followed by np.concatenate() for multiple files
command = "cat {0} | sed ':a;N;$!ba;s/\\n//g'".format(
" ".join(self.infiles))
process = Popen(command, stdout = PIPE, shell = True)
input_bytes = bytearray(process.stdout.read())
dataset = np.array(np.frombuffer(input_bytes, dtype = "S8",
count = int((len(input_bytes) -1) / 8)), np.float32)
# np.loadtxt alternative; keep here for future testing
# dataset = []
# for infile in self.infiles:
# dataset += [np.loadtxt(infile)]
# dataset = np.concatenate(dataset)
# Reshape if necessary
if len(self.dimensions) != 0:
dataset = dataset.reshape(
                [dataset.size // np.product(self.dimensions)] + self.dimensions)
# Store in instance variable
self.datasets[self.output]["data"] = dataset
| KarlTDebiec/MDclt | primary/raw.py | Python | bsd-3-clause | 7,794 |
try:
from PyMca import Plugin1DBase
except ImportError:
from . import Plugin1DBase
try:
from PyMca import SortPlotsWindow
except ImportError:
print("SortPlotsWindow importing from somewhere else")
import SortPlotsWindow
from platform import node as gethostname
DEBUG = True
class SortPlots(Plugin1DBase.Plugin1DBase):
def __init__(self, plotWindow, **kw):
Plugin1DBase.Plugin1DBase.__init__(self, plotWindow, **kw)
self.methodDict = {}
text = 'Sort plots for motor value.'
function = self.showSortPlotsWindow
icon = None
info = text
self.methodDict["Sort plots"] =[function, info, icon]
self.widget = None
def getMethods(self, plottype=None):
names = list(self.methodDict.keys())
names.sort()
return names
def getMethodToolTip(self, name):
return self.methodDict[name][1]
def getMethodPixmap(self, name):
return self.methodDict[name][2]
def applyMethod(self, name):
self.methodDict[name][0]()
return
def showSortPlotsWindow(self):
if self.widget is None:
self._createWidget()
else:
self.widget.updatePlots()
self.widget.show()
self.widget.raise_()
def _createWidget(self):
guess = gethostname().lower()
if guess.startswith('dragon'):
beamline = 'ID08'
else:
beamline = '#default#'
if DEBUG:
print '_createWidget -- beamline = "%s"'%beamline
parent = None
self.widget = SortPlotsWindow.SortPlotsWidget(parent,
self._plotWindow,
beamline,
nSelectors = 2)
MENU_TEXT = "Sort Plots"
def getPlugin1DInstance(plotWindow, **kw):
ob = SortPlots(plotWindow)
return ob
if __name__ == "__main__":
from PyMca import ScanWindow
from PyMca import PyMcaQt as qt
import numpy
app = qt.QApplication([])
sw = ScanWindow.ScanWindow()
x = numpy.arange(1000.)
    y0 = 10 * x + 10000. * numpy.exp(-0.5*(x-500)*(x-500)/400) + 1500 * numpy.random.random(1000)
    y1 = 10 * x + 10000. * numpy.exp(-0.5*(x-600)*(x-600)/400) + 1500 * numpy.random.random(1000)
    y2 = 10 * x + 10000. * numpy.exp(-0.5*(x-400)*(x-400)/400) + 1500 * numpy.random.random(1000)
y2[320:322] = 50000.
info0 = {'FileHeader':['#F /data/id08/inhouse/somerandomname'],'xlabel': 'foo', 'ylabel': 'arb', 'MotorNames': 'oxPS Motor11 Motor10 Motor8 Motor9 Motor4 Motor5 Motor6 Motor7 Motor0 Motor1 Motor2 Motor3', 'MotorValues': '1 8.69271399699 21.9836418539 0.198068826612 0.484475455792 0.350252217264 0.663925270933 0.813033264421 0.221149410218 0.593188258866 0.678010392881 0.267389247833 0.677890617858'}
info1 = {'MotorNames': 'PhaseD oxPS Motor16 Motor15 Motor14 Motor13 Motor12 Motor11 Motor10 Motor8 Motor9 Motor4 Motor5 Motor6 Motor7 Motor0 Motor1 Motor2 Motor3', 'MotorValues': '0.470746882688 0.695816070299 0.825780811755 0.25876374531 0.739264467436 0.090842892619 2 0.213445659833 0.823400550314 0.020278096857 0.568744021322 0.85378115537 0.696730386891 0.269196313956 0.793293334395 0.769216567757 0.959092709527 0.0109264683697 0.538264972553'}
info2 = {'MotorNames': 'PhaseD oxPS Motor10 Motor8 Motor9 Motor4 Motor5 Motor6 Motor7 Motor0 Motor1 Motor2 Motor3', 'MotorValues': '2 0.44400576644 0.613870067852 0.901968648111 0.319768771085 0.571432278628 0.278675836163 0.154436774878 0.416231999332 0.294201017231 0.813913587748 0.577572903105 0.869045182568'}
sw.addCurve(x, y0, legend="Curve0", info=info0, replot=False, replace=False)
sw.addCurve(x, y1, legend="Curve1", info=info1, replot=False, replace=False)
sw.addCurve(x, y2, legend="Curve2", info=info2, replot=False, replace=False)
plugin = getPlugin1DInstance(sw)
plugin.applyMethod(plugin.getMethods()[0])
app.exec_()
| tonnrueter/pymca_devel | PyMca/PyMcaPlugins/SortPlotsPlugin.py | Python | gpl-2.0 | 4,051 |
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT, included in this distribution as LICENSE
"""Base class for file URLs, URLs on a local file system. These are URLs that can be opened and read"""
from rowgenerators.appurl.url import Url
from rowgenerators.appurl.util import ensure_dir
from os.path import exists, isdir, dirname, basename, join
import pathlib
from urllib.parse import unquote
class AbstractFile(object):
def exists(self):
raise NotImplementedError()
def isdir(self):
raise NotImplementedError()
def dirname(self):
raise NotImplementedError()
def basename(self):
raise NotImplementedError()
def ensure_dir(self):
raise NotImplementedError()
def join(self, s):
raise NotImplementedError()
@property
def fspath(self):
raise NotImplementedError()
@property
def path_is_absolute(self):
raise NotImplementedError()
class InnerFile(AbstractFile):
def exists(self):
return self.inner.exists()
def isdir(self):
return self.inner.isdir()
def dirname(self):
return self.inner.dirname()
def basename(self):
return self.inner.basename()
def ensure_dir(self):
return self.inner.ensure_dir()
def join(self, s):
return self.inner.join(s)
@property
def fspath(self):
return self.inner.fspath
@property
def path_is_absolute(self):
return self.inner.path_is_absolute
class FileUrl(AbstractFile,Url):
"""FileUrl is the baseclass for URLs that reference a general file, assumed to be
local to the file system.
This documentation only describes the differences in implementation from the super class.
See the documentation for the superclass, :py:class:`appurl.Url` for the default implementations.
"""
def __init__(self, url=None, downloader=None,**kwargs):
"""
"""
super().__init__(url, downloader=downloader, **kwargs)
# For resolving relative paths
self.working_dir = self._kwargs.get('working_dir')
match_priority = 90
def exists(self):
return exists(self.fspath)
def isdir(self):
return isdir(self.fspath)
def dirname(self):
return dirname(self.fspath)
def basename(self):
return basename(self.fspath)
def ensure_dir(self):
ensure_dir(self.fspath)
def join(self, s):
return Url.join(self, s)
@property
def fspath(self):
import pathlib
import re
p = unquote(self._path)
if self.netloc: # Windows UNC name
return pathlib.PureWindowsPath("//{}{}".format(self.netloc,p))
elif re.match('[a-zA-Z]:', p): # Windows absolute path
return pathlib.PureWindowsPath(unquote(p))
else:
return pathlib.Path(pathlib.PurePosixPath(p))
@property
def path_is_absolute(self):
return self.fspath.is_absolute()
def absolute(self):
return self.clone(path=str(self.fspath.resolve()))
def list(self):
"""List the contents of a directory
"""
if self.isdir():
from os import listdir
return [u for e in listdir(self.fspath) for u in self.join(e).list()]
else:
return [self]
def get_resource(self):
"""Return a url to the resource, which for FileUrls is always ``self``."""
return self
def get_target(self):
"""Return the url of the target file in the local file system.
"""
from os.path import isabs, join, normpath
t = self.clear_fragment()
if self.encoding:
t.encoding = self.encoding
if not isabs(t.fspath) and self.working_dir:
t.path = normpath(join(self.working_dir, t.fspath))
return t
def read(self, mode='rb'):
"""Return contents of the target file"""
path = self.get_resource().get_target().fspath
with open(path, mode=mode) as f:
return f.read()
def join_target(self, tf):
"""For normal files, joining a target assumes the target is a child of the current target's
directory, so this just passes through the :py:meth:`Url.join_dir`"""
try:
tf = str(tf.path)
except:
pass
return self.clone().join_dir(tf)
def rename(self, new_path):
from os import rename
rename(self.fspath, new_path)
self.path = new_path
def base_rename(self, new_name):
""""Rename only the last path element"""
new_path = join(dirname(self.fspath), new_name)
return self.rename(new_path)
def dataframe(self, *args, **kwargs):
return self.generator.dataframe(*args, **kwargs)
def __str__(self):
return super().__str__()
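# Usage sketch (not part of the original module; the path is illustrative and
# a real Downloader instance may be required by the surrounding framework):
#
#   u = FileUrl('file:///tmp/data.csv', downloader=None)
#   u.exists()                            # True if /tmp/data.csv exists
#   target = u.get_resource().get_target()
#   text = target.read(mode='r')          # contents of the target file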
| CivicKnowledge/rowgenerators | rowgenerators/appurl/file/file.py | Python | mit | 4,910 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from ureport.sql import InstallSQL
class Migration(migrations.Migration):
dependencies = [
('polls', '0036_auto_20160331_1527'),
]
operations = [
InstallSQL('polls_0037')
]
| xkmato/ureport | ureport/polls/migrations/0037_install_poll_results_count_triggers.py | Python | agpl-3.0 | 315 |
"""Open an arbitrary URL.
See the following document for more info on URLs:
"Names and Addresses, URIs, URLs, URNs, URCs", at
http://www.w3.org/pub/WWW/Addressing/Overview.html
See also the HTTP spec (from which the error codes are derived):
"HTTP - Hypertext Transfer Protocol", at
http://www.w3.org/pub/WWW/Protocols/
Related standards and specs:
- RFC1808: the "relative URL" spec. (authoritative status)
- RFC1738 - the "URL standard". (authoritative status)
- RFC1630 - the "URI spec". (informational status)
The object returned by URLopener().open(file) will differ per
protocol. All you know is that it has methods read(), readline(),
readlines(), fileno(), close() and info(). The read*(), fileno()
and close() methods work like those of open files.
The info() method returns a mimetools.Message object which can be
used to query various info about the object, if available.
(mimetools.Message objects are queried with the getheader() method.)
"""
import string
import socket
import os
import time
import sys
import base64
import re
from urlparse import urljoin as basejoin
__all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve",
"urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus",
"urlencode", "url2pathname", "pathname2url", "splittag",
"localhost", "thishost", "ftperrors", "basejoin", "unwrap",
"splittype", "splithost", "splituser", "splitpasswd", "splitport",
"splitnport", "splitquery", "splitattr", "splitvalue",
"getproxies"]
__version__ = '1.17' # XXX This version is not always updated :-(
MAXFTPCACHE = 10 # Trim the ftp cache beyond this size
# Helper for non-unix systems
if os.name == 'nt':
from nturl2path import url2pathname, pathname2url
elif os.name == 'riscos':
from rourl2path import url2pathname, pathname2url
else:
def url2pathname(pathname):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
return unquote(pathname)
def pathname2url(pathname):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
return quote(pathname)
# This really consists of two pieces:
# (1) a class which handles opening of all sorts of URLs
# (plus assorted utilities etc.)
# (2) a set of functions for parsing URLs
# XXX Should these be separated out into different modules?
# Shortcut for basic usage
_urlopener = None
def urlopen(url, data=None, proxies=None, context=None):
"""Create a file-like object for the specified URL to read from."""
from warnings import warnpy3k
warnpy3k("urllib.urlopen() has been removed in Python 3.0 in "
"favor of urllib2.urlopen()", stacklevel=2)
global _urlopener
if proxies is not None or context is not None:
opener = FancyURLopener(proxies=proxies, context=context)
elif not _urlopener:
opener = FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def urlretrieve(url, filename=None, reporthook=None, data=None, context=None):
global _urlopener
if context is not None:
opener = FancyURLopener(context=context)
elif not _urlopener:
_urlopener = opener = FancyURLopener()
else:
opener = _urlopener
return opener.retrieve(url, filename, reporthook, data)
def urlcleanup():
if _urlopener:
_urlopener.cleanup()
_safe_quoters.clear()
ftpcache.clear()
# check for SSL
try:
import ssl
except:
_have_ssl = False
else:
_have_ssl = True
# exception raised when downloaded size does not match content-length
class ContentTooShortError(IOError):
def __init__(self, message, content):
IOError.__init__(self, message)
self.content = content
ftpcache = {}
class URLopener:
"""Class to open URLs.
This is a class rather than just a subroutine because we may need
more than one set of global protocol-specific options.
Note -- this is a base class for those who don't want the
automatic handling of errors type 302 (relocated) and 401
(authorization needed)."""
__tempfiles = None
version = "Python-urllib/%s" % __version__
# Constructor
def __init__(self, proxies=None, context=None, **x509):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
self.key_file = x509.get('key_file')
self.cert_file = x509.get('cert_file')
self.context = context
self.addheaders = [('User-Agent', self.version), ('Accept', '*/*')]
self.__tempfiles = []
self.__unlink = os.unlink # See cleanup()
self.tempcache = None
# Undocumented feature: if you assign {} to tempcache,
# it is used to cache files retrieved with
# self.retrieve(). This is not enabled by default
# since it does not work for changing documents (and I
# haven't got the logic to check expiration headers
# yet).
self.ftpcache = ftpcache
# Undocumented feature: you can use a different
# ftp cache by assigning to the .ftpcache member;
# in case you want logically independent URL openers
# XXX This is not threadsafe. Bah.
def __del__(self):
self.close()
def close(self):
self.cleanup()
def cleanup(self):
# This code sometimes runs when the rest of this module
# has already been deleted, so it can't use any globals
# or import anything.
if self.__tempfiles:
for file in self.__tempfiles:
try:
self.__unlink(file)
except OSError:
pass
del self.__tempfiles[:]
if self.tempcache:
self.tempcache.clear()
def addheader(self, *args):
"""Add a header to be used by the HTTP interface only
e.g. u.addheader('Accept', 'sound/basic')"""
self.addheaders.append(args)
# External interface
def open(self, fullurl, data=None):
"""Use URLopener().open(file) instead of open(file, 'r')."""
fullurl = unwrap(toBytes(fullurl))
# percent encode url, fixing lame server errors for e.g, like space
# within url paths.
fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|")
if self.tempcache and fullurl in self.tempcache:
filename, headers = self.tempcache[fullurl]
fp = open(filename, 'rb')
return addinfourl(fp, headers, fullurl)
urltype, url = splittype(fullurl)
if not urltype:
urltype = 'file'
if urltype in self.proxies:
proxy = self.proxies[urltype]
urltype, proxyhost = splittype(proxy)
host, selector = splithost(proxyhost)
url = (host, fullurl) # Signal special case to open_*()
else:
proxy = None
name = 'open_' + urltype
self.type = urltype
name = name.replace('-', '_')
if not hasattr(self, name):
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
else:
return self.open_unknown(fullurl, data)
try:
if data is None:
return getattr(self, name)(url)
else:
return getattr(self, name)(url, data)
except socket.error, msg:
raise IOError, ('socket error', msg), sys.exc_info()[2]
def open_unknown(self, fullurl, data=None):
"""Overridable interface to open unknown URL type."""
type, url = splittype(fullurl)
raise IOError, ('url error', 'unknown url type', type)
def open_unknown_proxy(self, proxy, fullurl, data=None):
"""Overridable interface to open unknown URL type."""
type, url = splittype(fullurl)
raise IOError, ('url error', 'invalid proxy for %s' % type, proxy)
# External interface
def retrieve(self, url, filename=None, reporthook=None, data=None):
"""retrieve(url) returns (filename, headers) for a local object
or (tempfilename, headers) for a remote object."""
url = unwrap(toBytes(url))
if self.tempcache and url in self.tempcache:
return self.tempcache[url]
type, url1 = splittype(url)
if filename is None and (not type or type == 'file'):
try:
fp = self.open_local_file(url1)
hdrs = fp.info()
fp.close()
return url2pathname(splithost(url1)[1]), hdrs
except IOError:
pass
fp = self.open(url, data)
try:
headers = fp.info()
if filename:
tfp = open(filename, 'wb')
else:
import tempfile
garbage, path = splittype(url)
garbage, path = splithost(path or "")
path, garbage = splitquery(path or "")
path, garbage = splitattr(path or "")
suffix = os.path.splitext(path)[1]
(fd, filename) = tempfile.mkstemp(suffix)
self.__tempfiles.append(filename)
tfp = os.fdopen(fd, 'wb')
try:
result = filename, headers
if self.tempcache is not None:
self.tempcache[url] = result
bs = 1024*8
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, bs, size)
while 1:
block = fp.read(bs)
if block == "":
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
finally:
tfp.close()
finally:
fp.close()
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError("retrieval incomplete: got only %i out "
"of %i bytes" % (read, size), result)
return result
# Each method named open_<type> knows how to open that type of URL
def open_http(self, url, data=None):
"""Use HTTP protocol."""
import httplib
user_passwd = None
proxy_passwd= None
if isinstance(url, str):
host, selector = splithost(url)
if host:
user_passwd, host = splituser(host)
host = unquote(host)
realhost = host
else:
host, selector = url
# check whether the proxy contains authorization information
proxy_passwd, host = splituser(host)
# now we proceed with the url we want to obtain
urltype, rest = splittype(selector)
url = rest
user_passwd = None
if urltype.lower() != 'http':
realhost = None
else:
realhost, rest = splithost(rest)
if realhost:
user_passwd, realhost = splituser(realhost)
if user_passwd:
selector = "%s://%s%s" % (urltype, realhost, rest)
if proxy_bypass(realhost):
host = realhost
#print "proxy via http:", host, selector
if not host: raise IOError, ('http error', 'no host given')
if proxy_passwd:
proxy_passwd = unquote(proxy_passwd)
proxy_auth = base64.b64encode(proxy_passwd).strip()
else:
proxy_auth = None
if user_passwd:
user_passwd = unquote(user_passwd)
auth = base64.b64encode(user_passwd).strip()
else:
auth = None
h = httplib.HTTP(host)
if data is not None:
h.putrequest('POST', selector)
h.putheader('Content-Type', 'application/x-www-form-urlencoded')
h.putheader('Content-Length', '%d' % len(data))
else:
h.putrequest('GET', selector)
if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
if auth: h.putheader('Authorization', 'Basic %s' % auth)
if realhost: h.putheader('Host', realhost)
for args in self.addheaders: h.putheader(*args)
h.endheaders(data)
errcode, errmsg, headers = h.getreply()
fp = h.getfile()
if errcode == -1:
if fp: fp.close()
# something went wrong with the HTTP status line
raise IOError, ('http protocol error', 0,
'got a bad status line', None)
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if (200 <= errcode < 300):
return addinfourl(fp, headers, "http:" + url, errcode)
else:
if data is None:
return self.http_error(url, fp, errcode, errmsg, headers)
else:
return self.http_error(url, fp, errcode, errmsg, headers, data)
def http_error(self, url, fp, errcode, errmsg, headers, data=None):
"""Handle http errors.
Derived class can override this, or provide specific handlers
named http_error_DDD where DDD is the 3-digit error code."""
# First check if there's a specific handler for this error
name = 'http_error_%d' % errcode
if hasattr(self, name):
method = getattr(self, name)
if data is None:
result = method(url, fp, errcode, errmsg, headers)
else:
result = method(url, fp, errcode, errmsg, headers, data)
if result: return result
return self.http_error_default(url, fp, errcode, errmsg, headers)
def http_error_default(self, url, fp, errcode, errmsg, headers):
"""Default error handler: close the connection and raise IOError."""
fp.close()
raise IOError, ('http error', errcode, errmsg, headers)
if _have_ssl:
def open_https(self, url, data=None):
"""Use HTTPS protocol."""
import httplib
user_passwd = None
proxy_passwd = None
if isinstance(url, str):
host, selector = splithost(url)
if host:
user_passwd, host = splituser(host)
host = unquote(host)
realhost = host
else:
host, selector = url
# here, we determine, whether the proxy contains authorization information
proxy_passwd, host = splituser(host)
urltype, rest = splittype(selector)
url = rest
user_passwd = None
if urltype.lower() != 'https':
realhost = None
else:
realhost, rest = splithost(rest)
if realhost:
user_passwd, realhost = splituser(realhost)
if user_passwd:
selector = "%s://%s%s" % (urltype, realhost, rest)
#print "proxy via https:", host, selector
if not host: raise IOError, ('https error', 'no host given')
if proxy_passwd:
proxy_passwd = unquote(proxy_passwd)
proxy_auth = base64.b64encode(proxy_passwd).strip()
else:
proxy_auth = None
if user_passwd:
user_passwd = unquote(user_passwd)
auth = base64.b64encode(user_passwd).strip()
else:
auth = None
h = httplib.HTTPS(host, 0,
key_file=self.key_file,
cert_file=self.cert_file,
context=self.context)
if data is not None:
h.putrequest('POST', selector)
h.putheader('Content-Type',
'application/x-www-form-urlencoded')
h.putheader('Content-Length', '%d' % len(data))
else:
h.putrequest('GET', selector)
if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
if auth: h.putheader('Authorization', 'Basic %s' % auth)
if realhost: h.putheader('Host', realhost)
for args in self.addheaders: h.putheader(*args)
h.endheaders(data)
errcode, errmsg, headers = h.getreply()
fp = h.getfile()
if errcode == -1:
if fp: fp.close()
# something went wrong with the HTTP status line
raise IOError, ('http protocol error', 0,
'got a bad status line', None)
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if (200 <= errcode < 300):
return addinfourl(fp, headers, "https:" + url, errcode)
else:
if data is None:
return self.http_error(url, fp, errcode, errmsg, headers)
else:
return self.http_error(url, fp, errcode, errmsg, headers,
data)
def open_file(self, url):
"""Use local file or FTP depending on form of URL."""
if not isinstance(url, str):
raise IOError, ('file error', 'proxy support for file protocol currently not implemented')
if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/':
return self.open_ftp(url)
else:
return self.open_local_file(url)
def open_local_file(self, url):
"""Use local file."""
import mimetypes, mimetools, email.utils
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
host, file = splithost(url)
localname = url2pathname(file)
try:
stats = os.stat(localname)
except OSError, e:
raise IOError(e.errno, e.strerror, e.filename)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(url)[0]
headers = mimetools.Message(StringIO(
'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified)))
if not host:
urlfile = file
if file[:1] == '/':
urlfile = 'file://' + file
elif file[:2] == './':
raise ValueError("local file url may start with / or file:. Unknown url of type: %s" % url)
return addinfourl(open(localname, 'rb'),
headers, urlfile)
host, port = splitport(host)
if not port \
and socket.gethostbyname(host) in (localhost(), thishost()):
urlfile = file
if file[:1] == '/':
urlfile = 'file://' + file
return addinfourl(open(localname, 'rb'),
headers, urlfile)
raise IOError, ('local file error', 'not on local host')
def open_ftp(self, url):
"""Use FTP protocol."""
if not isinstance(url, str):
raise IOError, ('ftp error', 'proxy support for ftp protocol currently not implemented')
import mimetypes, mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
host, path = splithost(url)
if not host: raise IOError, ('ftp error', 'no host given')
host, port = splitport(host)
user, host = splituser(host)
if user: user, passwd = splitpasswd(user)
else: passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
host = socket.gethostbyname(host)
if not port:
import ftplib
port = ftplib.FTP_PORT
else:
port = int(port)
path, attrs = splitattr(path)
path = unquote(path)
dirs = path.split('/')
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]: dirs = dirs[1:]
if dirs and not dirs[0]: dirs[0] = '/'
key = user, host, port, '/'.join(dirs)
# XXX thread unsafe!
if len(self.ftpcache) > MAXFTPCACHE:
# Prune the cache, rather arbitrarily
for k in self.ftpcache.keys():
if k != key:
v = self.ftpcache[k]
del self.ftpcache[k]
v.close()
try:
if not key in self.ftpcache:
self.ftpcache[key] = \
ftpwrapper(user, passwd, host, port, dirs)
if not file: type = 'D'
else: type = 'I'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
(fp, retrlen) = self.ftpcache[key].retrfile(file, type)
mtype = mimetypes.guess_type("ftp:" + url)[0]
headers = ""
if mtype:
headers += "Content-Type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-Length: %d\n" % retrlen
headers = mimetools.Message(StringIO(headers))
return addinfourl(fp, headers, "ftp:" + url)
except ftperrors(), msg:
raise IOError, ('ftp error', msg), sys.exc_info()[2]
def open_data(self, url, data=None):
"""Use "data" URL."""
if not isinstance(url, str):
raise IOError, ('data error', 'proxy support for data protocol currently not implemented')
# ignore POSTed data
#
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
[type, data] = url.split(',', 1)
except ValueError:
raise IOError, ('data error', 'bad data URL')
if not type:
type = 'text/plain;charset=US-ASCII'
semi = type.rfind(';')
if semi >= 0 and '=' not in type[semi:]:
encoding = type[semi+1:]
type = type[:semi]
else:
encoding = ''
msg = []
msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime(time.time())))
msg.append('Content-type: %s' % type)
if encoding == 'base64':
data = base64.decodestring(data)
else:
data = unquote(data)
msg.append('Content-Length: %d' % len(data))
msg.append('')
msg.append(data)
msg = '\n'.join(msg)
f = StringIO(msg)
headers = mimetools.Message(f, 0)
#f.fileno = None # needed for addinfourl
return addinfourl(f, headers, url)
class FancyURLopener(URLopener):
"""Derived class with handlers for errors we can handle (perhaps)."""
def __init__(self, *args, **kwargs):
URLopener.__init__(self, *args, **kwargs)
self.auth_cache = {}
self.tries = 0
self.maxtries = 10
def http_error_default(self, url, fp, errcode, errmsg, headers):
"""Default error handling -- don't raise an exception."""
return addinfourl(fp, headers, "http:" + url, errcode)
def http_error_302(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 302 -- relocated (temporarily)."""
self.tries += 1
try:
if self.maxtries and self.tries >= self.maxtries:
if hasattr(self, "http_error_500"):
meth = self.http_error_500
else:
meth = self.http_error_default
return meth(url, fp, 500,
"Internal Server Error: Redirect Recursion",
headers)
result = self.redirect_internal(url, fp, errcode, errmsg,
headers, data)
return result
finally:
self.tries = 0
def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
if 'location' in headers:
newurl = headers['location']
elif 'uri' in headers:
newurl = headers['uri']
else:
return
fp.close()
# In case the server sent a relative URL, join with original:
newurl = basejoin(self.type + ":" + url, newurl)
# For security reasons we do not allow redirects to protocols
# other than HTTP, HTTPS or FTP.
newurl_lower = newurl.lower()
if not (newurl_lower.startswith('http://') or
newurl_lower.startswith('https://') or
newurl_lower.startswith('ftp://')):
raise IOError('redirect error', errcode,
errmsg + " - Redirection to url '%s' is not allowed" %
newurl,
headers)
return self.open(newurl)
def http_error_301(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 301 -- also relocated (permanently)."""
return self.http_error_302(url, fp, errcode, errmsg, headers, data)
def http_error_303(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 303 -- also relocated (essentially identical to 302)."""
return self.http_error_302(url, fp, errcode, errmsg, headers, data)
def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 307 -- relocated, but turn POST into error."""
if data is None:
return self.http_error_302(url, fp, errcode, errmsg, headers, data)
else:
return self.http_error_default(url, fp, errcode, errmsg, headers)
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 401 -- authentication required.
This function supports Basic authentication only."""
if not 'www-authenticate' in headers:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
stuff = headers['www-authenticate']
import re
match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
if not match:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
scheme, realm = match.groups()
if scheme.lower() != 'basic':
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
name = 'retry_' + self.type + '_basic_auth'
if data is None:
return getattr(self,name)(url, realm)
else:
return getattr(self,name)(url, realm, data)
def http_error_407(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 407 -- proxy authentication required.
This function supports Basic authentication only."""
if not 'proxy-authenticate' in headers:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
stuff = headers['proxy-authenticate']
import re
match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
if not match:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
scheme, realm = match.groups()
if scheme.lower() != 'basic':
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
name = 'retry_proxy_' + self.type + '_basic_auth'
if data is None:
return getattr(self,name)(url, realm)
else:
return getattr(self,name)(url, realm, data)
def retry_proxy_http_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
newurl = 'http://' + host + selector
proxy = self.proxies['http']
urltype, proxyhost = splittype(proxy)
proxyhost, proxyselector = splithost(proxyhost)
i = proxyhost.find('@') + 1
proxyhost = proxyhost[i:]
user, passwd = self.get_user_passwd(proxyhost, realm, i)
if not (user or passwd): return None
proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
self.proxies['http'] = 'http://' + proxyhost + proxyselector
if data is None:
return self.open(newurl)
else:
return self.open(newurl, data)
def retry_proxy_https_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
newurl = 'https://' + host + selector
proxy = self.proxies['https']
urltype, proxyhost = splittype(proxy)
proxyhost, proxyselector = splithost(proxyhost)
i = proxyhost.find('@') + 1
proxyhost = proxyhost[i:]
user, passwd = self.get_user_passwd(proxyhost, realm, i)
if not (user or passwd): return None
proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
self.proxies['https'] = 'https://' + proxyhost + proxyselector
if data is None:
return self.open(newurl)
else:
return self.open(newurl, data)
def retry_http_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
i = host.find('@') + 1
host = host[i:]
user, passwd = self.get_user_passwd(host, realm, i)
if not (user or passwd): return None
host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
newurl = 'http://' + host + selector
if data is None:
return self.open(newurl)
else:
return self.open(newurl, data)
def retry_https_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
i = host.find('@') + 1
host = host[i:]
user, passwd = self.get_user_passwd(host, realm, i)
if not (user or passwd): return None
host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
newurl = 'https://' + host + selector
if data is None:
return self.open(newurl)
else:
return self.open(newurl, data)
def get_user_passwd(self, host, realm, clear_cache=0):
key = realm + '@' + host.lower()
if key in self.auth_cache:
if clear_cache:
del self.auth_cache[key]
else:
return self.auth_cache[key]
user, passwd = self.prompt_user_passwd(host, realm)
if user or passwd: self.auth_cache[key] = (user, passwd)
return user, passwd
def prompt_user_passwd(self, host, realm):
"""Override this in a GUI environment!"""
import getpass
try:
user = raw_input("Enter username for %s at %s: " % (realm,
host))
passwd = getpass.getpass("Enter password for %s in %s at %s: " %
(user, realm, host))
return user, passwd
except KeyboardInterrupt:
print
return None, None
# Utility functions
_localhost = None
def localhost():
"""Return the IP address of the magic hostname 'localhost'."""
global _localhost
if _localhost is None:
_localhost = socket.gethostbyname('localhost')
return _localhost
_thishost = None
def thishost():
"""Return the IP address of the current host."""
global _thishost
if _thishost is None:
try:
_thishost = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
_thishost = socket.gethostbyname('localhost')
return _thishost
_ftperrors = None
def ftperrors():
"""Return the set of errors raised by the FTP class."""
global _ftperrors
if _ftperrors is None:
import ftplib
_ftperrors = ftplib.all_errors
return _ftperrors
_noheaders = None
def noheaders():
"""Return an empty mimetools.Message object."""
global _noheaders
if _noheaders is None:
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
_noheaders = mimetools.Message(StringIO(), 0)
_noheaders.fp.close() # Recycle file descriptor
return _noheaders
# Utility classes
class ftpwrapper:
"""Class used by open_ftp() for cache of open FTP connections."""
def __init__(self, user, passwd, host, port, dirs,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
persistent=True):
self.user = user
self.passwd = passwd
self.host = host
self.port = port
self.dirs = dirs
self.timeout = timeout
self.refcount = 0
self.keepalive = persistent
try:
self.init()
except:
self.close()
raise
def init(self):
import ftplib
self.busy = 0
self.ftp = ftplib.FTP()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
_target = '/'.join(self.dirs)
self.ftp.cwd(_target)
def retrfile(self, file, type):
import ftplib
self.endtransfer()
if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
else: cmd = 'TYPE ' + type; isdir = 0
try:
self.ftp.voidcmd(cmd)
except ftplib.all_errors:
self.init()
self.ftp.voidcmd(cmd)
conn = None
if file and not isdir:
# Try to retrieve as a file
try:
cmd = 'RETR ' + file
conn, retrlen = self.ftp.ntransfercmd(cmd)
except ftplib.error_perm, reason:
if str(reason)[:3] != '550':
raise IOError, ('ftp error', reason), sys.exc_info()[2]
if not conn:
# Set transfer mode to ASCII!
self.ftp.voidcmd('TYPE A')
# Try a directory listing. Verify that directory exists.
if file:
pwd = self.ftp.pwd()
try:
try:
self.ftp.cwd(file)
except ftplib.error_perm, reason:
raise IOError, ('ftp error', reason), sys.exc_info()[2]
finally:
self.ftp.cwd(pwd)
cmd = 'LIST ' + file
else:
cmd = 'LIST'
conn, retrlen = self.ftp.ntransfercmd(cmd)
self.busy = 1
ftpobj = addclosehook(conn.makefile('rb'), self.file_close)
self.refcount += 1
conn.close()
# Pass back both a suitably decorated object and a retrieval length
return (ftpobj, retrlen)
def endtransfer(self):
self.busy = 0
def close(self):
self.keepalive = False
if self.refcount <= 0:
self.real_close()
def file_close(self):
self.endtransfer()
self.refcount -= 1
if self.refcount <= 0 and not self.keepalive:
self.real_close()
def real_close(self):
self.endtransfer()
try:
self.ftp.close()
except ftperrors():
pass
class addbase:
"""Base class for addinfo and addclosehook."""
def __init__(self, fp):
self.fp = fp
self.read = self.fp.read
self.readline = self.fp.readline
if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines
if hasattr(self.fp, "fileno"):
self.fileno = self.fp.fileno
else:
self.fileno = lambda: None
if hasattr(self.fp, "__iter__"):
self.__iter__ = self.fp.__iter__
if hasattr(self.fp, "next"):
self.next = self.fp.next
def __repr__(self):
return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
id(self), self.fp)
def close(self):
self.read = None
self.readline = None
self.readlines = None
self.fileno = None
if self.fp: self.fp.close()
self.fp = None
class addclosehook(addbase):
"""Class to add a close hook to an open file."""
def __init__(self, fp, closehook, *hookargs):
addbase.__init__(self, fp)
self.closehook = closehook
self.hookargs = hookargs
def close(self):
try:
closehook = self.closehook
hookargs = self.hookargs
if closehook:
self.closehook = None
self.hookargs = None
closehook(*hookargs)
finally:
addbase.close(self)
class addinfo(addbase):
"""class to add an info() method to an open file."""
def __init__(self, fp, headers):
addbase.__init__(self, fp)
self.headers = headers
def info(self):
return self.headers
class addinfourl(addbase):
"""class to add info() and geturl() methods to an open file."""
def __init__(self, fp, headers, url, code=None):
addbase.__init__(self, fp)
self.headers = headers
self.url = url
self.code = code
def info(self):
return self.headers
def getcode(self):
return self.code
def geturl(self):
return self.url
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
try:
unicode
except NameError:
def _is_unicode(x):
return 0
else:
def _is_unicode(x):
return isinstance(x, unicode)
def toBytes(url):
"""toBytes(u"URL") --> 'URL'."""
# Most URL schemes require ASCII. If that changes, the conversion
# can be relaxed
if _is_unicode(url):
try:
url = url.encode("ASCII")
except UnicodeError:
raise UnicodeError("URL " + repr(url) +
" contains non-ASCII characters")
return url
def unwrap(url):
"""unwrap('<URL:type://host/path>') --> 'type://host/path'."""
url = url.strip()
if url[:1] == '<' and url[-1:] == '>':
url = url[1:-1].strip()
if url[:4] == 'URL:': url = url[4:].strip()
return url
_typeprog = None
def splittype(url):
"""splittype('type:opaquestring') --> 'type', 'opaquestring'."""
global _typeprog
if _typeprog is None:
import re
_typeprog = re.compile('^([^/:]+):')
match = _typeprog.match(url)
if match:
scheme = match.group(1)
return scheme.lower(), url[len(scheme) + 1:]
return None, url
_hostprog = None
def splithost(url):
"""splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
global _hostprog
if _hostprog is None:
_hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL)
match = _hostprog.match(url)
if match:
host_port = match.group(1)
path = match.group(2)
if path and not path.startswith('/'):
path = '/' + path
return host_port, path
return None, url
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
_passwdprog = None
def splitpasswd(user):
"""splitpasswd('user:passwd') -> 'user', 'passwd'."""
global _passwdprog
if _passwdprog is None:
import re
_passwdprog = re.compile('^([^:]*):(.*)$',re.S)
match = _passwdprog.match(user)
if match: return match.group(1, 2)
return user, None
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
"""splitport('host:port') --> 'host', 'port'."""
global _portprog
if _portprog is None:
import re
_portprog = re.compile('^(.*):([0-9]*)$')
match = _portprog.match(host)
if match:
host, port = match.groups()
if port:
return host, port
return host, None
_nportprog = None
def splitnport(host, defport=-1):
"""Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number is found after ':'.
Return None if ':' but not a valid number."""
global _nportprog
if _nportprog is None:
import re
_nportprog = re.compile('^(.*):(.*)$')
match = _nportprog.match(host)
if match:
host, port = match.group(1, 2)
if port:
try:
nport = int(port)
except ValueError:
nport = None
return host, nport
return host, defport
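# Examples of the three cases described in the docstring above:
#   splitnport('example.com:8080')  -> ('example.com', 8080)
#   splitnport('example.com')       -> ('example.com', -1)
#   splitnport('example.com:port')  -> ('example.com', None)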
_queryprog = None
def splitquery(url):
"""splitquery('/path?query') --> '/path', 'query'."""
global _queryprog
if _queryprog is None:
import re
_queryprog = re.compile('^(.*)\?([^?]*)$')
match = _queryprog.match(url)
if match: return match.group(1, 2)
return url, None
_tagprog = None
def splittag(url):
"""splittag('/path#tag') --> '/path', 'tag'."""
global _tagprog
if _tagprog is None:
import re
_tagprog = re.compile('^(.*)#([^#]*)$')
match = _tagprog.match(url)
if match: return match.group(1, 2)
return url, None
def splitattr(url):
"""splitattr('/path;attr1=value1;attr2=value2;...') ->
'/path', ['attr1=value1', 'attr2=value2', ...]."""
words = url.split(';')
return words[0], words[1:]
_valueprog = None
def splitvalue(attr):
"""splitvalue('attr=value') --> 'attr', 'value'."""
global _valueprog
if _valueprog is None:
import re
_valueprog = re.compile('^([^=]*)=(.*)$')
match = _valueprog.match(attr)
if match: return match.group(1, 2)
return attr, None
# urlparse contains a duplicate of this method to avoid a circular import. If
# you update this method, also update the copy in urlparse. This code
# duplication does not exist in Python3.
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
for a in _hexdig for b in _hexdig)
_asciire = re.compile('([\x00-\x7f]+)')
def unquote(s):
"""unquote('abc%20def') -> 'abc def'."""
if _is_unicode(s):
if '%' not in s:
return s
bits = _asciire.split(s)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(unquote(str(bits[i])).decode('latin1'))
append(bits[i + 1])
return ''.join(res)
bits = s.split('%')
# fastpath
if len(bits) == 1:
return s
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(_hextochr[item[:2]])
append(item[2:])
except KeyError:
append('%')
append(item)
return ''.join(res)
def unquote_plus(s):
"""unquote('%7e/abc+def') -> '~/abc def'"""
s = s.replace('+', ' ')
return unquote(s)
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '_.-')
_safe_map = {}
for i, c in zip(xrange(256), str(bytearray(xrange(256)))):
_safe_map[c] = c if (i < 128 and c in always_safe) else '%{:02X}'.format(i)
_safe_quoters = {}
def quote(s, safe='/'):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted.
RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
the following reserved characters.
reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
"$" | ","
Each of these characters is reserved in some component of a URL,
but not necessarily in all of them.
By default, the quote function is intended for quoting the path
section of a URL. Thus, it will not encode '/'. This character
is reserved, but in typical usage the quote function is being
called on a path where the existing slash characters are used as
reserved characters.
"""
# fastpath
if not s:
if s is None:
raise TypeError('None object cannot be quoted')
return s
cachekey = (safe, always_safe)
try:
(quoter, safe) = _safe_quoters[cachekey]
except KeyError:
safe_map = _safe_map.copy()
safe_map.update([(c, c) for c in safe])
quoter = safe_map.__getitem__
safe = always_safe + safe
_safe_quoters[cachekey] = (quoter, safe)
if not s.rstrip(safe):
return s
return ''.join(map(quoter, s))
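# Examples of the `safe` parameter described above:
#   quote('/a b/c')           -> '/a%20b/c'    ('/' is safe by default)
#   quote('/a b/c', safe='')  -> '%2Fa%20b%2Fc'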
def quote_plus(s, safe=''):
"""Quote the query fragment of a URL; replacing ' ' with '+'"""
if ' ' in s:
s = quote(s, safe + ' ')
return s.replace(' ', '+')
return quote(s, safe)
def urlencode(query, doseq=0):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
"""
if hasattr(query,"items"):
# mapping objects
query = query.items()
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty,va,tb = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", tb
l = []
if not doseq:
# preserve old behavior
for k, v in query:
k = quote_plus(str(k))
v = quote_plus(str(v))
l.append(k + '=' + v)
else:
for k, v in query:
k = quote_plus(str(k))
if isinstance(v, str):
v = quote_plus(v)
l.append(k + '=' + v)
elif _is_unicode(v):
# is there a reasonable way to convert to ASCII?
# encode generates a string, but "replace" or "ignore"
# lose information and "strict" can raise UnicodeError
v = quote_plus(v.encode("ASCII","replace"))
l.append(k + '=' + v)
else:
try:
# is this a sufficient test for sequence-ness?
len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v))
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
l.append(k + '=' + quote_plus(str(elt)))
return '&'.join(l)
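# Editor's note: a minimal demonstration (editor's addition, not part of the
# original module) of the doseq flag described in the docstring above: with
# doseq true, every element of a sequence value becomes its own key=value pair.
def _example_urlencode_usage():
    flat = urlencode([('q', 'fog machine'), ('page', 2)])
    # flat   -> 'q=fog+machine&page=2'
    spread = urlencode({'tag': ['a', 'b']}, doseq=1)
    # spread -> 'tag=a&tag=b'
    return flat, spread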
# Proxy handling
def getproxies_environment():
"""Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
    this seems to be the standard convention. To prefer lowercase variables,
    the environment is scanned in two passes: the first pass matches names of
    any case, the second matches only lowercase ones.
If you need a different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor.
"""
# Get all variables
proxies = {}
for name, value in os.environ.items():
name = name.lower()
if value and name[-6:] == '_proxy':
proxies[name[:-6]] = value
# CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
# (non-all-lowercase) as it may be set from the web server by a "Proxy:"
# header from the client
# If "proxy" is lowercase, it will still be used thanks to the next block
if 'REQUEST_METHOD' in os.environ:
proxies.pop('http', None)
# Get lowercase variables
for name, value in os.environ.items():
if name[-6:] == '_proxy':
name = name.lower()
if value:
proxies[name[:-6]] = value
else:
proxies.pop(name[:-6], None)
return proxies
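# Editor's note: illustrative sketch (not in the original module) of the
# <scheme>_proxy convention described above; the proxy URL is a hypothetical
# placeholder.
def _example_getproxies_environment():
    os.environ['http_proxy'] = 'http://proxy.example.com:3128'   # hypothetical proxy
    proxies = getproxies_environment()
    assert proxies.get('http') == 'http://proxy.example.com:3128'
    del os.environ['http_proxy']
    return proxies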
def proxy_bypass_environment(host, proxies=None):
"""Test if proxies should not be used for a particular host.
Checks the proxies dict for the value of no_proxy, which should be a
list of comma separated DNS suffixes, or '*' for all hosts.
"""
if proxies is None:
proxies = getproxies_environment()
# don't bypass, if no_proxy isn't specified
try:
no_proxy = proxies['no']
except KeyError:
return 0
# '*' is special case for always bypass
if no_proxy == '*':
return 1
# strip port off host
hostonly, port = splitport(host)
# check if the host ends with any of the DNS suffixes
no_proxy_list = [proxy.strip() for proxy in no_proxy.split(',')]
for name in no_proxy_list:
if name:
name = name.lstrip('.') # ignore leading dots
name = re.escape(name)
pattern = r'(.+\.)?%s$' % name
if (re.match(pattern, hostonly, re.I)
or re.match(pattern, host, re.I)):
return 1
# otherwise, don't bypass
return 0
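# Editor's note: hypothetical sketch (not part of the original module) of the
# no_proxy handling documented above: hosts matching a listed DNS suffix bypass
# the proxy, and '*' bypasses every host.
def _example_proxy_bypass_environment():
    proxies = {'no': 'localhost,.internal.example.com'}          # assumed no_proxy value
    assert proxy_bypass_environment('localhost:8000', proxies) == 1
    assert proxy_bypass_environment('db.internal.example.com', proxies) == 1
    assert proxy_bypass_environment('www.python.org', proxies) == 0
    return proxy_bypass_environment('anything', {'no': '*'})     # -> 1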
if sys.platform == 'darwin':
from _scproxy import _get_proxy_settings, _get_proxies
def proxy_bypass_macosx_sysconf(host):
"""
Return True iff this host shouldn't be accessed using a proxy
This function uses the MacOSX framework SystemConfiguration
to fetch the proxy information.
"""
import re
import socket
from fnmatch import fnmatch
hostonly, port = splitport(host)
def ip2num(ipAddr):
parts = ipAddr.split('.')
parts = map(int, parts)
if len(parts) != 4:
parts = (parts + [0, 0, 0, 0])[:4]
return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]
proxy_settings = _get_proxy_settings()
# Check for simple host names:
if '.' not in host:
if proxy_settings['exclude_simple']:
return True
hostIP = None
for value in proxy_settings.get('exceptions', ()):
# Items in the list are strings like these: *.local, 169.254/16
if not value: continue
m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value)
if m is not None:
if hostIP is None:
try:
hostIP = socket.gethostbyname(hostonly)
hostIP = ip2num(hostIP)
except socket.error:
continue
base = ip2num(m.group(1))
mask = m.group(2)
if mask is None:
mask = 8 * (m.group(1).count('.') + 1)
else:
mask = int(mask[1:])
mask = 32 - mask
if (hostIP >> mask) == (base >> mask):
return True
elif fnmatch(host, value):
return True
return False
def getproxies_macosx_sysconf():
"""Return a dictionary of scheme -> proxy server URL mappings.
This function uses the MacOSX framework SystemConfiguration
to fetch the proxy information.
"""
return _get_proxies()
def proxy_bypass(host):
"""Return True, if a host should be bypassed.
Checks proxy settings gathered from the environment, if specified, or
from the MacOSX framework SystemConfiguration.
"""
proxies = getproxies_environment()
if proxies:
return proxy_bypass_environment(host, proxies)
else:
return proxy_bypass_macosx_sysconf(host)
def getproxies():
return getproxies_environment() or getproxies_macosx_sysconf()
elif os.name == 'nt':
def getproxies_registry():
"""Return a dictionary of scheme -> proxy server URL mappings.
Win32 uses the registry to store proxies.
"""
proxies = {}
try:
import _winreg
except ImportError:
# Std module, so should be around - but you never know!
return proxies
try:
internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
proxyEnable = _winreg.QueryValueEx(internetSettings,
'ProxyEnable')[0]
if proxyEnable:
# Returned as Unicode but problems if not converted to ASCII
proxyServer = str(_winreg.QueryValueEx(internetSettings,
'ProxyServer')[0])
if '=' in proxyServer:
# Per-protocol settings
for p in proxyServer.split(';'):
protocol, address = p.split('=', 1)
# See if address has a type:// prefix
import re
if not re.match('^([^/:]+)://', address):
address = '%s://%s' % (protocol, address)
proxies[protocol] = address
else:
# Use one setting for all protocols
if proxyServer[:5] == 'http:':
proxies['http'] = proxyServer
else:
proxies['http'] = 'http://%s' % proxyServer
proxies['https'] = 'https://%s' % proxyServer
proxies['ftp'] = 'ftp://%s' % proxyServer
internetSettings.Close()
except (WindowsError, ValueError, TypeError):
# Either registry key not found etc, or the value in an
# unexpected format.
# proxies already set up to be empty so nothing to do
pass
return proxies
def getproxies():
"""Return a dictionary of scheme -> proxy server URL mappings.
Returns settings gathered from the environment, if specified,
or the registry.
"""
return getproxies_environment() or getproxies_registry()
def proxy_bypass_registry(host):
try:
import _winreg
import re
except ImportError:
# Std modules, so should be around - but you never know!
return 0
try:
internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
proxyEnable = _winreg.QueryValueEx(internetSettings,
'ProxyEnable')[0]
proxyOverride = str(_winreg.QueryValueEx(internetSettings,
'ProxyOverride')[0])
# ^^^^ Returned as Unicode but problems if not converted to ASCII
except WindowsError:
return 0
if not proxyEnable or not proxyOverride:
return 0
# try to make a host list from name and IP address.
rawHost, port = splitport(host)
host = [rawHost]
try:
addr = socket.gethostbyname(rawHost)
if addr != rawHost:
host.append(addr)
except socket.error:
pass
try:
fqdn = socket.getfqdn(rawHost)
if fqdn != rawHost:
host.append(fqdn)
except socket.error:
pass
# make a check value list from the registry entry: replace the
# '<local>' string by the localhost entry and the corresponding
# canonical entry.
proxyOverride = proxyOverride.split(';')
# now check if we match one of the registry values.
for test in proxyOverride:
if test == '<local>':
if '.' not in rawHost:
return 1
test = test.replace(".", r"\.") # mask dots
test = test.replace("*", r".*") # change glob sequence
test = test.replace("?", r".") # change glob char
for val in host:
# print "%s <--> %s" %( test, val )
if re.match(test, val, re.I):
return 1
return 0
def proxy_bypass(host):
"""Return True, if the host should be bypassed.
Checks proxy settings gathered from the environment, if specified,
or the registry.
"""
proxies = getproxies_environment()
if proxies:
return proxy_bypass_environment(host, proxies)
else:
return proxy_bypass_registry(host)
else:
# By default use environment variables
getproxies = getproxies_environment
proxy_bypass = proxy_bypass_environment
# Test and time quote() and unquote()
def test1():
s = ''
for i in range(256): s = s + chr(i)
s = s*4
t0 = time.time()
qs = quote(s)
uqs = unquote(qs)
t1 = time.time()
if uqs != s:
print 'Wrong!'
print repr(s)
print repr(qs)
print repr(uqs)
print round(t1 - t0, 3), 'sec'
def reporthook(blocknum, blocksize, totalsize):
# Report during remote transfers
print "Block number: %d, Block size: %d, Total size: %d" % (
blocknum, blocksize, totalsize)
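# Editor's note: an illustrative sketch, not part of the original module, showing
# how a callback with the reporthook() signature above is passed to urlretrieve();
# the URL and filename are hypothetical placeholders.
def _example_reporthook_usage():
    def hook(blocknum, blocksize, totalsize):
        if totalsize > 0:
            done = min(blocknum * blocksize, totalsize)
            print "Downloaded %d of %d bytes" % (done, totalsize)
    # urlretrieve() invokes hook(blocknum, blocksize, totalsize) after each block.
    return urlretrieve('http://www.example.com/file.zip', 'file.zip', hook)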
| HiSPARC/station-software | user/python/Lib/urllib.py | Python | gpl-3.0 | 59,990 |
#! /usr/bin/python3
# -*- coding: utf8 -*-
# The logger module for nv-middlebox.
#
# Every file must use the logger module to log its information.
__author__ = "Sugesh Chandran"
__copyright__ = "Copyright (C) The neoview team."
__license__ = "GNU Lesser General Public License"
__version__ = "1.0"
import logging
from src.settings import NV_DEFAULT_LOG_LEVEL, NV_LOG_FILE, NV_LOG_FORMAT, \
NVDB_LOG_FORMAT, NVDB_DEFAULT_LOG_LEVEL, NVDB_LOG_FILE, \
NV_CONSOLE_LOG
class nv_logger():
'''
    Wrapper class for the neoview middle-box logging. Use this wrapper class
    rather than calling the logging package directly.
'''
nv_log_obj = None
def __init__(self, class_name = ""):
self.nv_log_obj = logging.getLogger(class_name)
self.nv_log_obj.setLevel(NV_DEFAULT_LOG_LEVEL)
log_format = logging.Formatter(NV_LOG_FORMAT)
log_fh = logging.FileHandler(NV_LOG_FILE)
log_fh.setLevel(NV_DEFAULT_LOG_LEVEL)
log_fh.setFormatter(log_format)
self.nv_log_obj.addHandler(log_fh)
        # Propagate the log to the upper layer, i.e. stdout
self.nv_log_obj.propagate = NV_CONSOLE_LOG
def get_logger(self):
return self.nv_log_obj
class nvdb_logger():
nvdb_log_obj = None
def __init__(self):
logging.basicConfig()
self.nvdb_log_obj = logging.getLogger('sqlalchemy')
nvdb_log_format = logging.Formatter(NVDB_LOG_FORMAT)
nvdb_log_fh = logging.FileHandler(NVDB_LOG_FILE)
nvdb_log_fh.setLevel(NVDB_DEFAULT_LOG_LEVEL)
nvdb_log_fh.setFormatter(nvdb_log_format)
self.nvdb_log_obj.addHandler(nvdb_log_fh)
def get_logger(self):
return self.nvdb_log_obj
nvdb_log_handler = nvdb_logger().get_logger()
default_nv_log_handler = nv_logger(__name__).get_logger()
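# Editor's note: a brief hypothetical usage sketch (editor's addition). Modules are
# expected to obtain a named logger through the wrapper instead of calling the
# logging package directly; the class below is a placeholder.
class _ExampleService(object):
    def __init__(self):
        self.nv_log_handler = nv_logger(self.__class__.__name__).get_logger()
    def do_work(self):
        self.nv_log_handler.info("starting work")    # written to NV_LOG_FILE
        self.nv_log_handler.debug("shown only if NV_DEFAULT_LOG_LEVEL permits")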
| sugchand/neoview-middle-box | src/nv_logger.py | Python | lgpl-2.1 | 1,831 |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 02:10:23 2011
@author: IxxI
@version: v1.0
"""
import sys
import logging
from optparse import OptionParser
from commands import Commands, CLIENT, SERVER
from mcp import getchangedsrc_side
def main():
parser = OptionParser(version='MCP %s' % Commands.fullversion())
parser.add_option('--client', dest='only_client', action='store_true', help='only process client', default=False)
parser.add_option('--server', dest='only_server', action='store_true', help='only process server', default=False)
parser.add_option('-c', '--config', dest='config', help='additional configuration file')
options, _ = parser.parse_args()
getchangedsrc(options.config, options.only_client, options.only_server)
def getchangedsrc(conffile, only_client, only_server):
try:
commands = Commands(conffile)
# client or server
process_client = True
process_server = True
if only_client and not only_server:
process_server = False
if only_server and not only_client:
process_client = False
if process_client:
getchangedsrc_side(commands, CLIENT)
if process_server:
getchangedsrc_side(commands, SERVER)
except Exception: # pylint: disable-msg=W0703
logging.exception('FATAL ERROR')
sys.exit(1)
if __name__ == '__main__':
main()
| mviitanen/marsmod | mcp/runtime/getchangedsrc.py | Python | gpl-2.0 | 1,420 |
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Johann Rohwer (jrohwer@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Johann M. Rohwer
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from pysces.version import __version__
__doc__ = """
PyscesParScan
-------------
PySCeS class distributed multi-dimensional parameter scans with IPython
"""
import numpy as np
from pysces.PyscesUtils import TimerBox
from pysces.PyscesScan import Scanner
import multiprocessing
import sys, os, pickle
flush = sys.stdout.flush
from time import sleep, time
import subprocess
# this is a cooler way of doing this
# actually don't need the _multicorescan import
# import _multicorescan
# del _multicorescan
# print "__file__ is", __file__
MULTISCANFILE = __file__.replace(
'PyscesParScan', '_multicorescan'
) # .replace('.pyc','.py')
# print 'MULTISCANFILE is', MULTISCANFILE
__psyco_active__ = 0
class ParScanner(Scanner):
"""
Arbitrary dimension generic distributed scanner.
Subclassed from pysces.PyscesScan.Scanner.
This class is initiated with a loaded PySCeS model and then allows
the user to define scan parameters, see self.addScanParameter()
and user output, see self.addUserOutput().
Steady-state results are always stored in self.SteadyStateResults while
user output can be found in self.UserOutputResults.
    Distributed (parallel) execution is achieved either with Python's
    multiprocessing module (engine='multiproc', the default) or with the
    clustering capability of IPython (engine='ipcluster', see "ipcluster --help").
"""
# --johann 20101206
genOn = True
_MODE_ = 'state'
HAS_USER_OUTPUT = False
nan_on_bad_state = True
MSG_PRINT_INTERVAL = 500
__AnalysisModes__ = ('state', 'elasticity', 'mca', 'stability')
invalid_state_list = None
invalid_state_list_idx = None
scans_per_run = 100
def __init__(self, mod, engine='multiproc'):
"""
Instantiate the parallel scanner class with a PySCeS model instance
and an optional 'engine' argument specifying the parallel engine:
'multiproc' -- multiprocessing (default)
'ipcluster' -- IPython cluster
"""
self.engine = engine
if engine == 'multiproc':
print('parallel engine: multiproc')
elif engine == 'ipcluster':
print('parallel engine: ipcluster')
try:
from ipyparallel import Client
except ImportError as ex:
print('\n', ex)
raise ImportError(
'PARSCANNER: Requires IPython and ipyparallel version >=4.0 (http://ipython.org) and 0MQ (http://zero.mq).'
)
try:
rc = Client()
self.rc = rc
except OSError as ex:
raise OSError(
str(ex)
+ '\nPARSCANNER: Requires a running IPython cluster. See "ipcluster --help".\n'
)
dv = rc[:] # direct view
lv = rc.load_balanced_view()
self.dv = dv
self.lv = lv
dv.execute('from pysces.PyscesParScan import Analyze, setModValue')
else:
raise UserWarning(engine + " is not a valid parallel engine!")
self.GenDict = {}
self.GenOrder = []
self.ScanSpace = []
self.mod = mod
self.SteadyStateResults = []
self.UserOutputList = []
self.UserOutputResults = []
self.scanT = TimerBox()
def genScanSpace(self):
"""
Generates the parameter scan space, partitioned according to self.scans_per_run
"""
spr = self.scans_per_run
Tsteps = 1
for gen in self.GenOrder:
if self.GenDict[gen][4] == False: # don't increase Tsteps for followers
Tsteps *= self.GenDict[gen][2]
for step in range(Tsteps):
pars = self.__nextValue__()
# if pars not in self.ScanSpace:
self.ScanSpace.append(pars)
self.ScanSpace = np.array(self.ScanSpace)
self.SeqArray = np.arange(1, Tsteps + 1)
if Tsteps % spr == 0:
numparts = Tsteps // spr
else:
numparts = Tsteps // spr + 1
self.ScanPartition = [
self.ScanSpace[n * spr : (n + 1) * spr] for n in range(numparts)
]
self.SeqPartition = [
self.SeqArray[n * spr : (n + 1) * spr] for n in range(numparts)
]
self.Tsteps = Tsteps
def Prepare(self, ReRun=False):
"""
Internal method to prepare the parameters and generate ScanSpace.
"""
print("\nPREPARATION\n-----------")
self.scanT.normal_timer('PREP')
self._MODE_ = self._MODE_.lower()
assert self._MODE_ in self.__AnalysisModes__, (
'\nSCANNER: \"%s\" is not a valid analysis mode!' % self._MODE_
)
if ReRun:
self.ScanSpace = []
self.UserOutputResults = []
self.SteadyStateResults = []
self.invalid_state_list = []
self.invalid_state_list_idx = []
# if self.nan_on_bad_state:
# self.mod.__settings__["mode_state_nan_on_fail"] = True
# self.dv.execute('mod.__settings__["mode_state_nan_on_fail"] = True')
# generate the scan space
self.genScanSpace()
print("Generated ScanSpace:", next(self.scanT.PREP))
print('PARSCANNER: Tsteps', self.Tsteps)
flush()
def Run(self, ReRun=False):
"""
Run the parameter scan with a load balancing task client.
"""
self.Prepare(ReRun)
# this is where the parallel magic fun starts....
arl = [] # asynchronous results list
if self.engine == 'multiproc':
fN = str(time()).split('.')[0]
F = open(fN, 'wb')
pickle.dump(
(
self.mod,
self.ScanPartition,
self.SeqPartition,
self.GenOrder,
self.UserOutputList,
),
F,
protocol=-1,
)
F.close()
fN = os.path.abspath(fN)
print("Preparation completed:", next(self.scanT.PREP))
self.scanT.normal_timer('RUN')
subprocess.call([sys.executable, MULTISCANFILE, self._MODE_, fN])
F = open(fN, 'rb')
res_list = pickle.load(F)
F.close()
os.remove(fN)
for result in res_list:
self.StoreData(result)
elif self.engine == 'ipcluster':
for i in range(len(self.ScanPartition)):
arl.append(
self.lv.apply(
Analyze,
self.ScanPartition[i],
self.SeqPartition[i],
self.GenOrder,
self._MODE_,
self.UserOutputList,
self.mod,
)
)
print("Submitted tasks:", len(arl))
print("Preparation completed:", next(self.scanT.PREP))
print("\nPARALLEL COMPUTATION\n--------------------")
flush()
self.scanT.normal_timer('RUN')
while self.lv.queue_status()['unassigned'] > 0:
sleep(5)
print('Tasks to go... ', self.lv.queue_status()['unassigned'])
# wait until all tasks are completed
self.lv.wait()
flush()
print("\nGATHER RESULT\n-------------")
flush()
for ar in arl:
result = ar.get()
# tuple: 0 - state_species
# 1 - state_flux
# 2 - user_output_results
# 3 - invalid_state_list
# 4 - invalid_state_list_idx
self.StoreData(result)
print("Parallel calculation completed:", next(self.scanT.RUN))
self.GatherScanResult()
def RunScatter(self, ReRun=False):
"""
Run the parameter scan by using scatter and gather for the ScanSpace.
Not load balanced, equal number of scan runs per node.
"""
if self.engine != 'ipcluster':
print("RunScatter() only supported with ipcluster!")
return
self.Prepare(ReRun)
# this is where the parallel magic fun starts....
# push details into client namespace
self.dv.push(
{
'GenOrder': self.GenOrder,
'mode': self._MODE_,
'mod': self.mod,
'UserOutputList': self.UserOutputList,
}
)
# scatter ScanSpace and SeqArray
self.dv.scatter('partition', self.ScanSpace)
self.dv.scatter('seqarray', self.SeqArray)
print("Scattered ScanSpace on number of engines:", len(self.dv))
print("Preparation completed:", next(self.scanT.PREP))
print("\nPARALLEL COMPUTATION\n--------------------")
flush()
self.scanT.normal_timer('RUN')
# executes scan on partitioned ScanSpace on every node
self.dv.execute(
'y=Analyze(partition,seqarray,GenOrder,mode,UserOutputList,mod)', block=True
)
print("Parallel calculation completed:", next(self.scanT.RUN))
flush()
## this is analysis stuff
print("\nGATHER RESULT\n-------------")
flush()
ar = self.dv.gather('y')
results = ar.get()
results = [results[i : i + 5] for i in range(0, len(results), 5)]
for result in results:
# tuple: 0 - state_species
# 1 - state_flux
# 2 - user_output_results
# 3 - invalid_state_list
# 4 - invalid_state_list_idx
self.StoreData(result)
print("Parallel calculation completed:", next(self.scanT.RUN))
self.GatherScanResult()
def StoreData(self, result):
"""
Internal function which concatenates and stores single result generated by Analyze.
- *result* IPython client result object
"""
self.SteadyStateResults.append(
np.hstack((np.array(result[0]), np.array(result[1])))
)
if self.HAS_USER_OUTPUT:
self.UserOutputResults.append(np.array(result[2]))
self.invalid_state_list += result[3]
self.invalid_state_list_idx += result[4]
def GatherScanResult(self):
"""
Concatenates and combines output result fragments from the parallel scan.
"""
# from here on we have the complete results
self.SteadyStateResults = np.vstack([i for i in self.SteadyStateResults])
if self.HAS_USER_OUTPUT:
self.UserOutputResults = np.vstack([i for i in self.UserOutputResults])
self.resetInputParameters()
# print "Gather completed:", self.scanT.GATHER.next()
print("\nPARSCANNER: %s states analysed" % len(self.SteadyStateResults))
print("Total time taken: ", next(self.scanT.PREP))
self.scanT.PREP.close() # close timer
self.scanT.RUN.close()
if len(self.invalid_state_list) > 0:
print('\nBad steady states encountered at:\n')
print("Sequence: ", self.invalid_state_list_idx)
print("Parameters: ", self.invalid_state_list)
# Utility methods for scatter/gather
# was in engineCode.py but refactored into main script
# --johann 20120813
# set model attribute values
def setModValue(mod, name, value):
assert hasattr(mod, name), 'Model does not have an attribute: %s ' % name
setattr(mod, name, float(value))
# analyze steady states
def Analyze(partition, seqarray, GenOrder, mode, UserOutputList, mod):
state_species = []
state_flux = []
user_output_results = []
invalid_state_list = []
invalid_state_list_idx = []
for i in range(len(partition)):
pars = partition[i]
for par in range(len(GenOrder)):
setModValue(mod, GenOrder[par], pars[par])
if mode == 'state':
mod.doState()
elif mode == 'elasticity':
mod.doElas()
elif mode == 'mca':
mod.doMca()
elif mode == 'stability':
mod.doEigenMca()
mod.User_Function()
if not mod.__StateOK__:
invalid_state_list.append(pars)
invalid_state_list_idx.append(seqarray[i])
state_species.append(mod.state_species)
state_flux.append(mod.state_flux)
user_output_results.append([getattr(mod, res) for res in UserOutputList])
return (
state_species,
state_flux,
user_output_results,
invalid_state_list,
invalid_state_list_idx,
)
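# Editor's note: a minimal usage sketch, not part of the original module. It assumes
# a loaded PySCeS model instance `mod` and relies on addScanParameter()/addUserOutput()
# inherited from pysces.PyscesScan.Scanner; the parameter name, range and output name
# below, and the exact addScanParameter() argument list, are illustrative assumptions.
def _example_parscan(mod):
    scanner = ParScanner(mod, engine='multiproc')
    scanner.scans_per_run = 50                         # scan points per worker task
    scanner.addScanParameter('Vmax1', 0.1, 10.0, 20)   # assumed Scanner signature
    scanner.addUserOutput('J_R1')                      # hypothetical user output
    scanner.Run()                                      # RunScatter() needs engine='ipcluster'
    return scanner.SteadyStateResults, scanner.UserOutputResults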
| bgoli/pysces | pysces/PyscesParScan.py | Python | bsd-3-clause | 13,323 |