repo_name | path | copies | size | content | license
---|---|---|---|---|---|
pandegroup/osprey | osprey/tests/test_strategies.py | 2 | 3998 | from __future__ import print_function, absolute_import, division
import sys
from six import iteritems
import numpy as np
from numpy.testing.decorators import skipif
from osprey.search_space import SearchSpace
from osprey.search_space import IntVariable, EnumVariable, FloatVariable
from osprey.strategies import RandomSearch, HyperoptTPE, Bayes, GridSearch
try:
from hyperopt import hp, fmin, tpe, Trials
except:
pass
def test_random():
searchspace = SearchSpace()
searchspace.add_float('x', -10, 10)
random = np.random.RandomState(0)
RandomSearch(seed=random).suggest([], searchspace)
def test_grid():
searchspace = SearchSpace()
searchspace.add_enum('x', [1, 2])
searchspace.add_jump('y', min=3, max=4, num=2)
grid_search = GridSearch()
suggestions = [grid_search.suggest([], searchspace) for _ in range(4)]
suggestions = [(s['x'], s['y']) for s in suggestions]
assert suggestions == [(1, 3), (1, 4), (2, 3), (2, 4)], "Didn't examine whole space correctly"
def test_check_repeated_params():
searchspace = SearchSpace()
searchspace.add_enum('x', [1, 2])
searchspace.add_jump('y', min=3, max=4, num=2)
history = []
grid_search1 = GridSearch()
for _ in range(4):
params = grid_search1.suggest(history, searchspace)
history.append((params, 0.0, 'SUCCEEDED'))
grid_search2 = GridSearch()
for _ in range(4):
params = grid_search2.suggest(history, searchspace)
assert grid_search2.is_repeated_suggestion(params, history)
history = []
grid_search3 = GridSearch()
for _ in range(4):
params = grid_search3.suggest(history, searchspace)
history.append((params, 0.0, 'FAILED'))
grid_search4 = GridSearch()
for _ in range(4):
params = grid_search4.suggest(history, searchspace)
assert not grid_search4.is_repeated_suggestion(params, history)
def hyperopt_x2_iterates(n_iters=100):
iterates = []
trials = Trials()
random = np.random.RandomState(0)
def fn(params):
iterates.append(params['x'])
return params['x']**2
for i in range(n_iters):
fmin(fn=fn, algo=tpe.suggest, max_evals=i+1, trials=trials,
space={'x': hp.uniform('x', -10, 10)},
**HyperoptTPE._hyperopt_fmin_random_kwarg(random))
return np.array(iterates)
def our_x2_iterates(n_iters=100):
history = []
searchspace = SearchSpace()
searchspace.add_float('x', -10, 10)
random = np.random.RandomState(0)
# note the switch of sign, because _our_ function hyperopt_tpe is
# a maximizer, not a minimizer
def fn(params):
return -params['x']**2
for i in range(n_iters):
params = HyperoptTPE(seed=random).suggest(history, searchspace)
history.append((params, fn(params), 'SUCCEEDED'))
return np.array([h[0]['x'] for h in history])
@skipif('hyperopt.fmin' not in sys.modules, 'this test requires hyperopt')
def test_1():
ours = our_x2_iterates(25)
ref = hyperopt_x2_iterates(25)
np.testing.assert_array_equal(ref, ours)
@skipif('GPy' not in sys.modules, 'this test requires GPy')
def test_gp():
searchspace = SearchSpace()
searchspace.add_float('x', -10, 10)
searchspace.add_float('y', 1, 10, warp='log')
searchspace.add_int('z', -10, 10)
searchspace.add_enum('w', ['opt1', 'opt2'])
history = [(searchspace.rvs(), np.random.random(), 'SUCCEEDED')
for _ in range(4)]
params = Bayes().suggest(history, searchspace)
for k, v in iteritems(params):
assert k in searchspace.variables
if isinstance(searchspace[k], EnumVariable):
assert v in searchspace[k].choices
elif isinstance(searchspace[k], FloatVariable):
assert searchspace[k].min <= v <= searchspace[k].max
elif isinstance(searchspace[k], IntVariable):
assert searchspace[k].min <= v <= searchspace[k].max
else:
assert False
| apache-2.0 |
r-o-b-b-i-e/pootle | tests/commands/pootle_runner.py | 3 | 1506 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from subprocess import call
import pytest
pytestmark = pytest.mark.skipif(call(['which', 'pootle']) != 0,
reason='not installed via setup.py')
@pytest.mark.cmd
def test_pootle_noargs(capfd):
"""Pootle no args should give help"""
call(['pootle'])
out, err = capfd.readouterr()
# Expected:
# Type 'pootle help <subcommand>'
# but 'pootle' is 'pootle-script.py' on Windows
assert "Type 'pootle" in out
assert " help <subcommand>'" in out
@pytest.mark.cmd
def test_pootle_version(capfd):
"""Display Pootle version info"""
call(['pootle', '--version'])
out, err = capfd.readouterr()
assert 'Pootle' in err
assert 'Django' in err
assert 'Translate Toolkit' in err
@pytest.mark.cmd
def test_pootle_init(capfd):
"""pootle init --help"""
call(['pootle', 'init', '--help'])
out, err = capfd.readouterr()
assert "--db" in out
@pytest.mark.cmd
def test_pootle_init_db_sqlite(capfd, tmpdir):
"""pootle init --help"""
test_conf_file = tmpdir.join("pootle.conf")
call(['pootle', 'init', '--db=sqlite', '--config=%s' % test_conf_file])
out, err = capfd.readouterr()
assert "Configuration file created" in out
| gpl-3.0 |
Yaribz/SPADS | plugins/templates/commented/myconfigurableplugin.py | 1 | 1698 | # Import the perl module so we can call the SPADS Plugin API
import perl
# perl.MyConfigurablePlugin is the Perl representation of the MyConfigurablePlugin plugin module
# We will use this object to call the plugin API
spads=perl.MyConfigurablePlugin
# This is the first version of the plugin
pluginVersion='0.1'
# This plugin requires a SPADS version which supports Python plugins
# (only SPADS versions >= 0.12.29 support Python plugins)
requiredSpadsVersion='0.12.29'
# We define one global setting "MyGlobalSetting" and one preset setting "MyPresetSetting".
# Both are of type "notNull", which means any non-null value is allowed
# (check %paramTypes hash in SpadsConf.pm for a complete list of allowed setting types)
globalPluginParams = { 'MyGlobalSetting': ['notNull'] }
presetPluginParams = { 'MyPresetSetting': ['notNull'] }
# This is how SPADS gets our version number (mandatory callback)
def getVersion(pluginObject):
return pluginVersion
# This is how SPADS determines if the plugin is compatible (mandatory callback)
def getRequiredSpadsVersion(pluginName):
return requiredSpadsVersion
# This is how SPADS finds what settings we need in our configuration file (mandatory callback for configurable plugins)
def getParams(pluginName):
return [ globalPluginParams , presetPluginParams ]
# This is the class implementing the plugin
class MyConfigurablePlugin:
# This is our constructor, called when the plugin is loaded by SPADS (mandatory callback)
def __init__(self,context):
# We call the API function "slog" to log a notice message (level 3) when the plugin is loaded
spads.slog("Plugin loaded (version %s)" % pluginVersion,3)
| gpl-3.0 |
natetrue/ReplicatorG | skein_engines/skeinforge-31/fabmetheus_utilities/svg_writer.py | 2 | 10274 | """
Svg_writer is a class and collection of utilities to read from and write to an svg file.
Svg_writer uses the layer_template.svg file in the templates folder in the same folder as svg_writer, to output an svg file.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities.xml_simple_reader import XMLSimpleReader
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
import cStringIO
import math
import os
__author__ = 'Enrique Perez ([email protected])'
__date__ = "$Date: 2008/02/05 $"
__license__ = 'GPL 3.0'
def getCarving(fileName):
"Get a carving for the file using an import plugin."
pluginModule = fabmetheus_interpret.getInterpretPlugin(fileName)
if pluginModule == None:
return None
return pluginModule.getCarving(fileName)
def getSliceDictionary(xmlElement):
"Get the metadata slice attribute dictionary."
for metadataElement in xmlElement.getChildrenWithClassName('metadata'):
for child in metadataElement.children:
if child.className.lower() == 'slice:layers':
return child.attributeDictionary
return {}
def getSVGByLoopLayers(addLayerTemplateToSVG, rotatedLoopLayers, svgCarving):
"Get the svg text."
if len(rotatedLoopLayers) < 1:
return ''
decimalPlacesCarried = max(0, 2 - int(math.floor(math.log10(svgCarving.layerThickness))))
svgWriter = SVGWriter(addLayerTemplateToSVG, svgCarving, decimalPlacesCarried)
return svgWriter.getReplacedSVGTemplate(svgCarving.fileName, 'basic', rotatedLoopLayers, svgCarving.getFabmetheusXML())
def getTruncatedRotatedBoundaryLayers( repository, rotatedBoundaryLayers ):
"Get the truncated rotated boundary layers."
return rotatedBoundaryLayers[ repository.layersFrom.value : repository.layersTo.value ]
def setSVGCarvingCorners(rotatedLoopLayers, svgCarving):
"Parse SVG text and store the layers."
for rotatedBoundaryLayer in rotatedLoopLayers:
for loop in rotatedBoundaryLayer.loops:
for point in loop:
pointVector3 = Vector3(point.real, point.imag, rotatedBoundaryLayer.z)
svgCarving.cornerMaximum = euclidean.getPointMaximum(svgCarving.cornerMaximum, pointVector3)
svgCarving.cornerMinimum = euclidean.getPointMinimum(svgCarving.cornerMinimum, pointVector3)
class SVGWriter:
"A base class to get an svg skein from a carving."
def __init__(self, addLayerTemplateToSVG, carving, decimalPlacesCarried, perimeterWidth = None):
self.addLayerTemplateToSVG = addLayerTemplateToSVG
self.carving = carving
self.decimalPlacesCarried = decimalPlacesCarried
self.margin = 20
self.perimeterWidth = perimeterWidth
self.textHeight = 22.5
self.unitScale = 3.7
def addLayerBegin(self, layerIndex, rotatedBoundaryLayer):
"Add the start lines for the layer."
zRounded = self.getRounded( rotatedBoundaryLayer.z )
self.graphicsCopy = self.graphicsXMLElement.getCopy(zRounded, self.graphicsXMLElement.parent)
if self.addLayerTemplateToSVG:
marginRounded = self.getRounded(self.margin)
layerTranslateY = layerIndex * self.textHeight + (layerIndex + 1) * (self.extent.y * self.unitScale + self.margin)
translateYRounded = self.getRounded(layerTranslateY)
self.graphicsCopy.attributeDictionary['transform'] = 'translate(%s, %s)' % (marginRounded, translateYRounded)
self.graphicsCopy.getFirstChildWithClassName('text').text = 'Layer %s, z:%s' % (layerIndex, zRounded)
self.pathXMLElement = self.graphicsCopy.getFirstChildWithClassName('path')
self.pathDictionary = self.pathXMLElement.attributeDictionary
def addRotatedLoopLayerToOutput( self, layerIndex, rotatedBoundaryLayer ):
"Add rotated boundary layer to the output."
self.addLayerBegin( layerIndex, rotatedBoundaryLayer )
if rotatedBoundaryLayer.rotation != None:
self.graphicsCopy.attributeDictionary['bridgeRotation'] = str( rotatedBoundaryLayer.rotation )
if self.addLayerTemplateToSVG:
self.pathDictionary['transform'] = self.getTransformString()
else:
del self.pathDictionary['transform']
self.pathDictionary['d'] = self.getSVGStringForLoops(rotatedBoundaryLayer.loops)
def addRotatedLoopLayersToOutput( self, rotatedBoundaryLayers ):
"Add rotated boundary layers to the output."
for rotatedBoundaryLayerIndex, rotatedBoundaryLayer in enumerate( rotatedBoundaryLayers ):
self.addRotatedLoopLayerToOutput( rotatedBoundaryLayerIndex, rotatedBoundaryLayer )
def getReplacedSVGTemplate(self, fileName, procedureName, rotatedBoundaryLayers, xmlElement):
"Get the lines of text from the layer_template.svg file."
# ( layers.length + 1 ) * (margin + sliceDimY * unitScale + txtHeight) + margin + txtHeight + margin + 110
cornerMaximum = self.carving.getCarveCornerMaximum()
cornerMinimum = self.carving.getCarveCornerMinimum()
self.extent = cornerMaximum - cornerMinimum
svgTemplateText = gcodec.getFileTextInFileDirectory( __file__, os.path.join('templates', 'layer_template.svg') )
self.xmlParser = XMLSimpleReader( fileName, None, svgTemplateText )
self.svgElement = self.xmlParser.getRoot()
if not self.addLayerTemplateToSVG:
self.svgElement.getXMLElementByID('layerTextTemplate').removeFromIDNameParent()
del self.svgElement.getXMLElementByID('sliceElementTemplate').attributeDictionary['transform']
svgElementDictionary = self.svgElement.attributeDictionary
self.graphicsXMLElement = self.svgElement.getXMLElementByID('sliceElementTemplate')
self.graphicsXMLElement.attributeDictionary['id'] = 'z:'
self.addRotatedLoopLayersToOutput( rotatedBoundaryLayers )
self.sliceDictionary = getSliceDictionary( self.svgElement )
self.setMetadataNoscriptElement('layerThickness', self.carving.getCarveLayerThickness() )
self.setMetadataNoscriptElement('maxX', cornerMaximum.x )
self.setMetadataNoscriptElement('minX', cornerMinimum.x )
self.setMetadataNoscriptElement('maxY', cornerMaximum.y )
self.setMetadataNoscriptElement('minY', cornerMinimum.y )
self.setMetadataNoscriptElement('maxZ', cornerMaximum.z )
self.setMetadataNoscriptElement('minZ', cornerMinimum.z )
self.margin = float( self.sliceDictionary['margin'] )
self.textHeight = float( self.sliceDictionary['textHeight'] )
javascriptControlBoxWidth = float( self.sliceDictionary['javascriptControlBoxWidth'] )
noJavascriptControlBoxHeight = float( self.sliceDictionary['noJavascriptControlBoxHeight'] )
controlTop = len( rotatedBoundaryLayers ) * ( self.margin + self.extent.y * self.unitScale + self.textHeight ) + 2.0 * self.margin + self.textHeight
self.svgElement.getFirstChildWithClassName('title').text = os.path.basename(fileName) + ' - Slice Layers'
svgElementDictionary['height'] = '%spx' % self.getRounded( controlTop + noJavascriptControlBoxHeight + self.margin )
# width = margin + (sliceDimX * unitScale) + margin;
width = 2.0 * self.margin + max( self.extent.x * self.unitScale, javascriptControlBoxWidth )
svgElementDictionary['width'] = '%spx' % self.getRounded( width )
self.sliceDictionary['decimalPlacesCarried'] = str( self.decimalPlacesCarried )
if self.perimeterWidth != None:
self.sliceDictionary['perimeterWidth'] = self.getRounded( self.perimeterWidth )
self.sliceDictionary['yAxisPointingUpward'] = 'true'
self.sliceDictionary['procedureDone'] = procedureName
noJavascriptDictionary = self.svgElement.getXMLElementByID('noJavascriptControls').attributeDictionary
noJavascriptDictionary['transform'] = 'translate(%s, %s)' % ( self.getRounded(self.margin), self.getRounded( controlTop ) )
self.svgElement.getXMLElementByID('dimXNoJavascript').text = self.getRounded( self.extent.x )
self.svgElement.getXMLElementByID('dimYNoJavascript').text = self.getRounded( self.extent.y )
self.svgElement.getXMLElementByID('dimZNoJavascript').text = self.getRounded( self.extent.z )
if not self.addLayerTemplateToSVG:
self.svgElement.getFirstChildWithClassName('script').removeFromIDNameParent()
self.svgElement.getXMLElementByID('beginningOfControlSection').removeFromIDNameParent()
self.svgElement.getXMLElementByID('noJavascriptControls').removeFromIDNameParent()
self.graphicsXMLElement.removeFromIDNameParent()
if xmlElement != None:
xmlElement.setParentAddToChildren(self.svgElement)
output = cStringIO.StringIO()
output.write( self.xmlParser.beforeRoot )
self.svgElement.addXML( 0, output )
return output.getvalue()
def getRounded(self, number):
"Get number rounded to the number of carried decimal places as a string."
return euclidean.getRoundedToDecimalPlacesString(self.decimalPlacesCarried, number)
def getRoundedComplexString(self, point):
"Get the rounded complex string."
return self.getRounded( point.real ) + ' ' + self.getRounded( point.imag )
def getSVGStringForLoop( self, loop ):
"Get the svg loop string."
if len(loop) < 1:
return ''
return self.getSVGStringForPath(loop) + ' z'
def getSVGStringForLoops( self, loops ):
"Get the svg loops string."
loopString = ''
if len(loops) > 0:
loopString += self.getSVGStringForLoop( loops[0] )
for loop in loops[1 :]:
loopString += ' ' + self.getSVGStringForLoop(loop)
return loopString
def getSVGStringForPath( self, path ):
"Get the svg path string."
svgLoopString = ''
for point in path:
stringBeginning = 'M '
if len( svgLoopString ) > 0:
stringBeginning = ' L '
svgLoopString += stringBeginning + self.getRoundedComplexString(point)
return svgLoopString
def getTransformString(self):
"Get the svg transform string."
cornerMinimumXString = self.getRounded( - self.carving.getCarveCornerMinimum().x )
cornerMinimumYString = self.getRounded( - self.carving.getCarveCornerMinimum().y )
return 'scale(%s, %s) translate(%s, %s)' % ( self.unitScale, - self.unitScale, cornerMinimumXString, cornerMinimumYString )
def setMetadataNoscriptElement( self, prefix, value ):
"Set the metadata value and the NoJavascript text."
valueString = self.getRounded(value)
self.sliceDictionary[ prefix ] = valueString
self.svgElement.getXMLElementByID( prefix + 'NoJavascript').text = valueString
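# Hedged usage sketch (not part of the original module). getCarving() and
# getSVGByLoopLayers() are defined above; how the rotated loop layers are
# obtained from the carving object is an assumption about the import plugins:
#
#     carving = getCarving('model.stl')
#     if carving is not None:
#         svgText = getSVGByLoopLayers(True, carving.rotatedLoopLayers, carving)
#         open('model.svg', 'w').write(svgText)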
| gpl-2.0 |
thomasvincent/utilities | NagiosPlugins/check_procs/pexpect/examples/bd_serv.py | 16 | 10268 | #!/usr/bin/env python
"""Back door shell server
This exposes a shell terminal on a socket.
--hostname : sets the remote host name to open an ssh connection to.
--username : sets the user name to login with
--password : (optional) sets the password to login with
--port : set the local port for the server to listen on
--watch : show the virtual screen after each client request
"""
# Having the password on the command line is not a good idea, but
# then this entire project is probably not the most security conscious thing
# I've ever built. This should be considered an experimental tool -- at best.
import pxssh, pexpect, ANSI
import time, sys, os, re, getopt, getpass, traceback, threading, socket  # re is used by parse_host_connect_string()
def exit_with_usage(exit_code=1):
print globals()['__doc__']
os._exit(exit_code)
class roller (threading.Thread):
"""This runs a function in a loop in a thread."""
def __init__(self, interval, function, args=[], kwargs={}):
"""The interval parameter defines time between each call to the function.
"""
threading.Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = threading.Event()
def cancel(self):
"""Stop the roller."""
self.finished.set()
def run(self):
while not self.finished.isSet():
# self.finished.wait(self.interval)
self.function(*self.args, **self.kwargs)
def endless_poll (child, prompt, screen, refresh_timeout=0.1):
"""This keeps the screen updated with the output of the child. This runs in
a separate thread. See roller(). """
#child.logfile_read = screen
try:
s = child.read_nonblocking(4000, 0.1)
screen.write(s)
except:
pass
#while True:
# #child.prompt (timeout=refresh_timeout)
# try:
# #child.read_nonblocking(1,timeout=refresh_timeout)
# child.read_nonblocking(4000, 0.1)
# except:
# pass
def daemonize (stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
'''This forks the current process into a daemon. Almost none of this is
necessary (or advisable) if your daemon is being started by inetd. In that
case, stdin, stdout and stderr are all set up for you to refer to the
network connection, and the fork()s and session manipulation should not be
done (to avoid confusing inetd). Only the chdir() and umask() steps remain
as useful.
References:
UNIX Programming FAQ
1.7 How do I get my program to act like a daemon?
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
Advanced Programming in the Unix Environment
W. Richard Stevens, 1992, Addison-Wesley, ISBN 0-201-56317-7.
The stdin, stdout, and stderr arguments are file names that will be opened
and be used to replace the standard file descriptors in sys.stdin,
sys.stdout, and sys.stderr. These arguments are optional and default to
/dev/null. Note that stderr is opened unbuffered, so if it shares a file
with stdout then interleaved output may not appear in the order that you
expect. '''
# Do first fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit first parent.
except OSError, e:
sys.stderr.write ("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror) )
sys.exit(1)
# Decouple from parent environment.
os.chdir("/")
os.umask(0)
os.setsid()
# Do second fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit second parent.
except OSError, e:
sys.stderr.write ("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror) )
sys.exit(1)
# Now I am a daemon!
# Redirect standard file descriptors.
si = open(stdin, 'r')
so = open(stdout, 'a+')
se = open(stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# I now return as the daemon
return 0
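# Usage note: main() below calls daemonize() with the defaults when -d is given;
# to keep a log instead of discarding output, a call such as
# daemonize('/dev/null', '/tmp/daemon.log', '/tmp/daemon.log') could be used
# (that exact variant appears commented out in main()).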
def add_cursor_blink (response, row, col):
i = (row-1) * 80 + col
return response[:i]+'<img src="http://www.noah.org/cursor.gif">'+response[i:]
def main ():
try:
optlist, args = getopt.getopt(sys.argv[1:], 'h?d', ['help','h','?', 'hostname=', 'username=', 'password=', 'port=', 'watch'])
except Exception, e:
print str(e)
exit_with_usage()
command_line_options = dict(optlist)
options = dict(optlist)
# There are a million ways to cry for help. These are but a few of them.
if [elem for elem in command_line_options if elem in ['-h','--h','-?','--?','--help']]:
exit_with_usage(0)
hostname = "127.0.0.1"
port = 1664
username = os.getenv('USER')
password = ""
daemon_mode = False
if '-d' in options:
daemon_mode = True
if '--watch' in options:
watch_mode = True
else:
watch_mode = False
if '--hostname' in options:
hostname = options['--hostname']
if '--port' in options:
port = int(options['--port'])
if '--username' in options:
username = options['--username']
print "Login for %s@%s:%s" % (username, hostname, port)
if '--password' in options:
password = options['--password']
else:
password = getpass.getpass('password: ')
if daemon_mode:
print "daemonizing server"
daemonize()
#daemonize('/dev/null','/tmp/daemon.log','/tmp/daemon.log')
sys.stdout.write ('server started with pid %d\n' % os.getpid() )
virtual_screen = ANSI.ANSI (24,80)
child = pxssh.pxssh()
child.login (hostname, username, password)
print 'created shell. command line prompt is', child.PROMPT
#child.sendline ('stty -echo')
#child.setecho(False)
virtual_screen.write (child.before)
virtual_screen.write (child.after)
if os.path.exists("/tmp/mysock"): os.remove("/tmp/mysock")
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
localhost = '127.0.0.1'
s.bind('/tmp/mysock')
os.chmod('/tmp/mysock',0777)
print 'Listen'
s.listen(1)
print 'Accept'
#s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#localhost = '127.0.0.1'
#s.bind((localhost, port))
#print 'Listen'
#s.listen(1)
r = roller (0.01, endless_poll, (child, child.PROMPT, virtual_screen))
r.start()
print "screen poll updater started in background thread"
sys.stdout.flush()
try:
while True:
conn, addr = s.accept()
print 'Connected by', addr
data = conn.recv(1024)
if data[0]!=':':
cmd = ':sendline'
arg = data.strip()
else:
request = data.split(' ', 1)
if len(request)>1:
cmd = request[0].strip()
arg = request[1].strip()
else:
cmd = request[0].strip()
if cmd == ':exit':
r.cancel()
break
elif cmd == ':sendline':
child.sendline (arg)
#child.prompt(timeout=2)
time.sleep(0.2)
shell_window = str(virtual_screen)
elif cmd == ':send' or cmd==':xsend':
if cmd==':xsend':
arg = arg.decode("hex")
child.send (arg)
time.sleep(0.2)
shell_window = str(virtual_screen)
elif cmd == ':cursor':
shell_window = '%x%x' % (virtual_screen.cur_r, virtual_screen.cur_c)
elif cmd == ':refresh':
shell_window = str(virtual_screen)
response = []
response.append (shell_window)
#response = add_cursor_blink (response, row, col)
sent = conn.send('\n'.join(response))
if watch_mode: print '\n'.join(response)
if sent < len (response):
print "Sent is too short. Some data was cut off."
conn.close()
finally:
r.cancel()
print "cleaning up socket"
s.close()
if os.path.exists("/tmp/mysock"): os.remove("/tmp/mysock")
print "done!"
def pretty_box (rows, cols, s):
"""This puts an ASCII text box around the given string, s.
"""
top_bot = '+' + '-'*cols + '+\n'
return top_bot + '\n'.join(['|'+line+'|' for line in s.split('\n')]) + '\n' + top_bot
def error_response (msg):
response = []
response.append ("""All commands start with :
:{REQUEST} {ARGUMENT}
{REQUEST} may be one of the following:
:sendline: Run the ARGUMENT followed by a line feed.
:send : send the characters in the ARGUMENT without a line feed.
:refresh : Use to catch up the screen with the shell if state gets out of sync.
Example:
:sendline ls -l
You may also leave off :command and it will be assumed.
Example:
ls -l
is equivalent to:
:sendline ls -l
""")
response.append (msg)
return '\n'.join(response)
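def example_client_sketch():
    """A hedged client sketch (not part of the original script) for the socket
    protocol documented in error_response() above. It assumes the server is
    already listening on /tmp/mysock."""
    c = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    c.connect('/tmp/mysock')
    # anything not starting with ':' is treated as an implicit :sendline
    c.send(':sendline ls -l')
    screen = c.recv(4096)  # the reply is the rendered virtual screen
    print screen
    c.close()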
def parse_host_connect_string (hcs):
"""This parses a host connection string in the form
username:password@hostname:port. All fields are optional except hostname. A
dictionary is returned with all four keys. Keys that were not included are
set to empty strings ''. Note that if your password has the '@' character
then you must backslash escape it. """
if '@' in hcs:
p = re.compile (r'(?P<username>[^@:]*)(:?)(?P<password>.*)(?!\\)@(?P<hostname>[^:]*):?(?P<port>[0-9]*)')
else:
p = re.compile (r'(?P<username>)(?P<password>)(?P<hostname>[^:]*):?(?P<port>[0-9]*)')
m = p.search (hcs)
d = m.groupdict()
d['password'] = d['password'].replace('\\@','@')
return d
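# Hedged example of the expected parse (derived from the docstring and regex
# above; not executed anywhere in this script):
#
#     parse_host_connect_string('user:[email protected]:22')
#     # -> {'username': 'user', 'password': 'secret',
#     #     'hostname': 'host.example.com', 'port': '22'}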
if __name__ == "__main__":
try:
start_time = time.time()
print time.asctime()
main()
print time.asctime()
print "TOTAL TIME IN MINUTES:",
print (time.time() - start_time) / 60.0
except Exception, e:
print str(e)
tb_dump = traceback.format_exc()
print str(tb_dump)
| apache-2.0 |
hpbader42/Klampt | Python/klampt/vis/glinterface.py | 1 | 2832 |
class GLPluginInterface:
"""Users can add their own hooks into the visualizer by overloading this
class's methods. Each method should return True if the user event was
processed. A return value of True stops cascading
events to a parent interface."""
def __init__(self):
self.window = None
self.view = None
self.actions = []
def initialize(self):
"""Called by backend after the GL context is created but before the event loop starts."""
return True
def displayfunc(self):
return False
def display(self):
return False
def display_screen(self):
return False
def reshapefunc(self,w,h):
return False
def keyboardfunc(self,c,x,y):
return False
def keyboardupfunc(self,c,x,y):
return False
def mousefunc(self,button,state,x,y):
return False
def motionfunc(self,x,y,dx,dy):
return False
def idle(self):
return True
def eventfunc(self,type,args=""):
"""Generic hook for other events, e.g., button presses, from the GUI"""
return False
def closefunc(self):
return False
def add_action(self,callback,short_name,key,description=None):
"""Defines a new generic GUI action. The action will be available in a menu in
Qt or as keyboard commands in GLUT."""
self.actions.append((callback,short_name,key,description))
#functions to request operations of the backend
def reshape(self,w,h):
"""Asks to resize the GL window"""
if self.window:
return self.window.reshape(w,h)
def idlesleep(self,seconds):
"""Asks to sleep the idle function for seconds seconds."""
if self.window:
self.window.idlesleep(seconds)
def modifiers(self):
"""Retrieves a list of currently pressed keyboard modifiers.
Values can be any combination of 'ctrl', 'shift', 'alt'.
"""
return self.window.modifiers()
def refresh(self):
"""Asks for a redraw"""
if self.window:
self.window.refresh()
def draw_text(self,point,text,size=12,color=None):
"""Draws text of the given size and color at the point (x,y) or (x,y,z). The
former method is usually called during display_screen."""
if self.window:
self.window.draw_text(point,text,size,color)
#3D viewport accessors -- not supported directly through the backend
def click_ray(self,x,y):
"""Returns the world-space ray associated with the camera click at x,y."""
return self.view.click_ray(x,y)
def viewport(self):
"""Returns the Viewport instance associated with the current GL view."""
if not self.view:
self.view = self.window.view()
return self.view
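# A minimal usage sketch (an assumption about typical use, not part of the
# original module): override only the hooks you need and return True when an
# event has been handled. How the plugin is attached to a window (e.g. through
# klampt.vis) is outside this file.
class _ExamplePlugin(GLPluginInterface):
    def initialize(self):
        # register a GUI action bound to the 'h' key
        self.add_action(self._hello, "Say hello", "h", "Print a greeting")
        return True
    def _hello(self):
        print("hello from the example plugin")
    def keyboardfunc(self, c, x, y):
        if c == 'r':
            self.refresh()   # ask the backend for a redraw
            return True      # handled; stop cascading to a parent interface
        return False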
| bsd-3-clause |
jasonseminara/OpenSourceFinal | myvenv/lib/python3.5/site-packages/pip/_vendor/packaging/_structures.py | 906 | 1809 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
class Infinity(object):
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity(object):
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
NegativeInfinity = NegativeInfinity()
| mit |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_1_0/ip_routed_neighbor_broker.py | 17 | 41203 | from ..broker import Broker
class IpRoutedNeighborBroker(Broker):
controller = "ip_routed_neighbors"
def show(self, **kwargs):
"""Shows the details for the specified ip routed neighbor.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IPRoutedNeighborID: The internal NetMRI identifier for this neighbor/route relationship.
:type IPRoutedNeighborID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return ip_routed_neighbor: The ip routed neighbor identified by the specified IPRoutedNeighborID.
:rtype ip_routed_neighbor: IpRoutedNeighbor
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available ip routed neighbors. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceRouteID: The internal NetMRI identifier for the route which this record references.
:type DeviceRouteID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceRouteID: The internal NetMRI identifier for the route which this record references.
:type DeviceRouteID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborID: The internal NetMRI identifier for this neighbor/route relationship.
:type IPRoutedNeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborID: The internal NetMRI identifier for this neighbor/route relationship.
:type IPRoutedNeighborID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborTimestamp: The date and time this record was collected or calculated.
:type IPRoutedNeighborTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborTimestamp: The date and time this record was collected or calculated.
:type IPRoutedNeighborTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier of the corresponding neighbor record for this relationship.
:type NeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier of the corresponding neighbor record for this relationship.
:type NeighborID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the ip routed neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IPRoutedNeighborID
:param sort: The data field(s) to use for sorting the output. Default is IPRoutedNeighborID. Valid values are IPRoutedNeighborID, DataSourceID, NeighborID, IPRoutedNeighborStartTime, IPRoutedNeighborEndTime, IPRoutedNeighborChangedCols, IPRoutedNeighborTimestamp, IPRoutedNeighborMapSource, DeviceRouteID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IpRoutedNeighbor. Valid values are IPRoutedNeighborID, DataSourceID, NeighborID, IPRoutedNeighborStartTime, IPRoutedNeighborEndTime, IPRoutedNeighborChangedCols, IPRoutedNeighborTimestamp, IPRoutedNeighborMapSource, DeviceRouteID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return ip_routed_neighbors: An array of the IpRoutedNeighbor objects that match the specified input criteria.
:rtype ip_routed_neighbors: Array of IpRoutedNeighbor
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available ip routed neighbors matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceRouteID: The internal NetMRI identifier for the route which this record references.
:type DeviceRouteID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceRouteID: The internal NetMRI identifier for the route which this record references.
:type DeviceRouteID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborChangedCols: The fields that changed between this revision of the record and the previous revision.
:type IPRoutedNeighborChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborChangedCols: The fields that changed between this revision of the record and the previous revision.
:type IPRoutedNeighborChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type IPRoutedNeighborEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type IPRoutedNeighborEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborID: The internal NetMRI identifier for this neighbor/route relationship.
:type IPRoutedNeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborID: The internal NetMRI identifier for this neighbor/route relationship.
:type IPRoutedNeighborID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborMapSource: Internal tracking information for NetMRI algorithms.
:type IPRoutedNeighborMapSource: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborMapSource: Internal tracking information for NetMRI algorithms.
:type IPRoutedNeighborMapSource: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborStartTime: The starting effective time of this revision of the record.
:type IPRoutedNeighborStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborStartTime: The starting effective time of this revision of the record.
:type IPRoutedNeighborStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborTimestamp: The date and time this record was collected or calculated.
:type IPRoutedNeighborTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedNeighborTimestamp: The date and time this record was collected or calculated.
:type IPRoutedNeighborTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier of the corresponding neighbor record for this relationship.
:type NeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier of the corresponding neighbor record for this relationship.
:type NeighborID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the ip routed neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IPRoutedNeighborID
:param sort: The data field(s) to use for sorting the output. Default is IPRoutedNeighborID. Valid values are IPRoutedNeighborID, DataSourceID, NeighborID, IPRoutedNeighborStartTime, IPRoutedNeighborEndTime, IPRoutedNeighborChangedCols, IPRoutedNeighborTimestamp, IPRoutedNeighborMapSource, DeviceRouteID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IpRoutedNeighbor. Valid values are IPRoutedNeighborID, DataSourceID, NeighborID, IPRoutedNeighborStartTime, IPRoutedNeighborEndTime, IPRoutedNeighborChangedCols, IPRoutedNeighborTimestamp, IPRoutedNeighborMapSource, DeviceRouteID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against ip routed neighbors, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceRouteID, IPRoutedNeighborChangedCols, IPRoutedNeighborEndTime, IPRoutedNeighborID, IPRoutedNeighborMapSource, IPRoutedNeighborStartTime, IPRoutedNeighborTimestamp, NeighborID.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not associated with database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return ip_routed_neighbors: An array of the IpRoutedNeighbor objects that match the specified input criteria.
:rtype ip_routed_neighbors: Array of IpRoutedNeighbor
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available ip routed neighbors matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceRouteID, IPRoutedNeighborChangedCols, IPRoutedNeighborEndTime, IPRoutedNeighborID, IPRoutedNeighborMapSource, IPRoutedNeighborStartTime, IPRoutedNeighborTimestamp, NeighborID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceRouteID: The operator to apply to the field DeviceRouteID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceRouteID: The internal NetMRI identifier for the route which this record references. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceRouteID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceRouteID: If op_DeviceRouteID is specified, the field named in this input will be compared to the value in DeviceRouteID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceRouteID must be specified if op_DeviceRouteID is specified.
:type val_f_DeviceRouteID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceRouteID: If op_DeviceRouteID is specified, this value will be compared to the value in DeviceRouteID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceRouteID must be specified if op_DeviceRouteID is specified.
:type val_c_DeviceRouteID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IPRoutedNeighborChangedCols: The operator to apply to the field IPRoutedNeighborChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IPRoutedNeighborChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IPRoutedNeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IPRoutedNeighborChangedCols: If op_IPRoutedNeighborChangedCols is specified, the field named in this input will be compared to the value in IPRoutedNeighborChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IPRoutedNeighborChangedCols must be specified if op_IPRoutedNeighborChangedCols is specified.
:type val_f_IPRoutedNeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IPRoutedNeighborChangedCols: If op_IPRoutedNeighborChangedCols is specified, this value will be compared to the value in IPRoutedNeighborChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IPRoutedNeighborChangedCols must be specified if op_IPRoutedNeighborChangedCols is specified.
:type val_c_IPRoutedNeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IPRoutedNeighborEndTime: The operator to apply to the field IPRoutedNeighborEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IPRoutedNeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IPRoutedNeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IPRoutedNeighborEndTime: If op_IPRoutedNeighborEndTime is specified, the field named in this input will be compared to the value in IPRoutedNeighborEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IPRoutedNeighborEndTime must be specified if op_IPRoutedNeighborEndTime is specified.
:type val_f_IPRoutedNeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IPRoutedNeighborEndTime: If op_IPRoutedNeighborEndTime is specified, this value will be compared to the value in IPRoutedNeighborEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IPRoutedNeighborEndTime must be specified if op_IPRoutedNeighborEndTime is specified.
:type val_c_IPRoutedNeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IPRoutedNeighborID: The operator to apply to the field IPRoutedNeighborID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IPRoutedNeighborID: The internal NetMRI identifier for this neighbor/route relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IPRoutedNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IPRoutedNeighborID: If op_IPRoutedNeighborID is specified, the field named in this input will be compared to the value in IPRoutedNeighborID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IPRoutedNeighborID must be specified if op_IPRoutedNeighborID is specified.
:type val_f_IPRoutedNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IPRoutedNeighborID: If op_IPRoutedNeighborID is specified, this value will be compared to the value in IPRoutedNeighborID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IPRoutedNeighborID must be specified if op_IPRoutedNeighborID is specified.
:type val_c_IPRoutedNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IPRoutedNeighborMapSource: The operator to apply to the field IPRoutedNeighborMapSource. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IPRoutedNeighborMapSource: Internal tracking information for NetMRI algorithms. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IPRoutedNeighborMapSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IPRoutedNeighborMapSource: If op_IPRoutedNeighborMapSource is specified, the field named in this input will be compared to the value in IPRoutedNeighborMapSource using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IPRoutedNeighborMapSource must be specified if op_IPRoutedNeighborMapSource is specified.
:type val_f_IPRoutedNeighborMapSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IPRoutedNeighborMapSource: If op_IPRoutedNeighborMapSource is specified, this value will be compared to the value in IPRoutedNeighborMapSource using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IPRoutedNeighborMapSource must be specified if op_IPRoutedNeighborMapSource is specified.
:type val_c_IPRoutedNeighborMapSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IPRoutedNeighborStartTime: The operator to apply to the field IPRoutedNeighborStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IPRoutedNeighborStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IPRoutedNeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IPRoutedNeighborStartTime: If op_IPRoutedNeighborStartTime is specified, the field named in this input will be compared to the value in IPRoutedNeighborStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IPRoutedNeighborStartTime must be specified if op_IPRoutedNeighborStartTime is specified.
:type val_f_IPRoutedNeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IPRoutedNeighborStartTime: If op_IPRoutedNeighborStartTime is specified, this value will be compared to the value in IPRoutedNeighborStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IPRoutedNeighborStartTime must be specified if op_IPRoutedNeighborStartTime is specified.
:type val_c_IPRoutedNeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IPRoutedNeighborTimestamp: The operator to apply to the field IPRoutedNeighborTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IPRoutedNeighborTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IPRoutedNeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IPRoutedNeighborTimestamp: If op_IPRoutedNeighborTimestamp is specified, the field named in this input will be compared to the value in IPRoutedNeighborTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IPRoutedNeighborTimestamp must be specified if op_IPRoutedNeighborTimestamp is specified.
:type val_f_IPRoutedNeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IPRoutedNeighborTimestamp: If op_IPRoutedNeighborTimestamp is specified, this value will be compared to the value in IPRoutedNeighborTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IPRoutedNeighborTimestamp must be specified if op_IPRoutedNeighborTimestamp is specified.
:type val_c_IPRoutedNeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborID: The operator to apply to the field NeighborID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborID: The internal NetMRI identifier of the corresponding neighbor record for this relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborID: If op_NeighborID is specified, the field named in this input will be compared to the value in NeighborID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborID must be specified if op_NeighborID is specified.
:type val_f_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborID: If op_NeighborID is specified, this value will be compared to the value in NeighborID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborID must be specified if op_NeighborID is specified.
:type val_c_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the ip routed neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IPRoutedNeighborID
:param sort: The data field(s) to use for sorting the output. Default is IPRoutedNeighborID. Valid values are IPRoutedNeighborID, DataSourceID, NeighborID, IPRoutedNeighborStartTime, IPRoutedNeighborEndTime, IPRoutedNeighborChangedCols, IPRoutedNeighborTimestamp, IPRoutedNeighborMapSource, DeviceRouteID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IpRoutedNeighbor. Valid values are IPRoutedNeighborID, DataSourceID, NeighborID, IPRoutedNeighborStartTime, IPRoutedNeighborEndTime, IPRoutedNeighborChangedCols, IPRoutedNeighborTimestamp, IPRoutedNeighborMapSource, DeviceRouteID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return ip_routed_neighbors: An array of the IpRoutedNeighbor objects that match the specified input criteria.
:rtype ip_routed_neighbors: Array of IpRoutedNeighbor
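**Example**

A minimal, hypothetical usage sketch. The client object and broker attribute name below are assumptions for illustration, not part of this API description; only the parameter names come from the documentation above.

::

    # hypothetical NetMRI client; adjust connection details for your deployment
    results = client.ip_routed_neighbors.find(
        op_NeighborID='=',
        val_c_NeighborID=1234,
        limit=100,
        sort=['IPRoutedNeighborTimestamp'],
        dir=['desc'],
    )
    # results is an Array of IpRoutedNeighbor matching the criteria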
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
| apache-2.0 |
domesticduck/MenuConciergeServer | vendor/bundle/ruby/2.0.0/gems/libv8-3.16.14.3/vendor/v8/test/sputnik/testcfg.py | 11 | 4083 | # Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from os.path import join, exists
import sys
import test
import time
def GetSuite(name, root):
# Not implemented.
return None
class SputnikTestCase(test.TestCase):
def __init__(self, case, path, context, mode):
super(SputnikTestCase, self).__init__(context, path, mode)
self.case = case
self.tmpfile = None
self.source = None
def IsNegative(self):
return '@negative' in self.GetSource()
def IsFailureOutput(self, output):
if output.exit_code != 0:
return True
out = output.stdout
return "SputnikError" in out
def BeforeRun(self):
self.tmpfile = sputnik.TempFile(suffix='.js', prefix='sputnik-', text=True)
self.tmpfile.Write(self.GetSource())
self.tmpfile.Close()
def AfterRun(self, result):
# Dispose the temporary file if everything looks okay.
if result is None or not result.HasPreciousOutput(): self.tmpfile.Dispose()
self.tmpfile = None
def GetCommand(self):
result = self.context.GetVmCommand(self, self.mode)
result.append(self.tmpfile.name)
return result
def GetLabel(self):
return "%s sputnik %s" % (self.mode, self.GetName())
def GetName(self):
return self.path[-1]
def GetSource(self):
if not self.source:
self.source = self.case.GetSource()
return self.source
class SputnikTestConfiguration(test.TestConfiguration):
def __init__(self, context, root):
super(SputnikTestConfiguration, self).__init__(context, root)
def ListTests(self, current_path, path, mode, variant_flags):
# Import the sputnik test runner script as a module
testroot = join(self.root, 'sputniktests')
modroot = join(testroot, 'tools')
sys.path.append(modroot)
import sputnik
globals()['sputnik'] = sputnik
# Do not run strict mode tests yet. TODO(mmaly)
test_suite = sputnik.TestSuite(testroot, False)
test_suite.Validate()
tests = test_suite.EnumerateTests([])
result = []
for test in tests:
full_path = current_path + [test.GetPath()[-1]]
if self.Contains(path, full_path):
case = SputnikTestCase(test, full_path, self.context, mode)
result.append(case)
return result
def GetBuildRequirements(self):
return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'sputnik.status')
if exists(status_file):
test.ReadConfigurationInto(status_file, sections, defs)
def GetConfiguration(context, root):
return SputnikTestConfiguration(context, root)
| apache-2.0 |
wasit7/labeller | lib/werkzeug/testsuite/contrib/sessions.py | 146 | 2325 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Added tests for the sessions.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import unittest
import shutil
from tempfile import mkdtemp, gettempdir
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib.sessions import FilesystemSessionStore
class SessionTestCase(WerkzeugTestCase):
def setup(self):
self.session_folder = mkdtemp()
def teardown(self):
shutil.rmtree(self.session_folder)
def test_default_tempdir(self):
store = FilesystemSessionStore()
assert store.path == gettempdir()
def test_basic_fs_sessions(self):
store = FilesystemSessionStore(self.session_folder)
x = store.new()
assert x.new
assert not x.modified
x['foo'] = [1, 2, 3]
assert x.modified
store.save(x)
x2 = store.get(x.sid)
assert not x2.new
assert not x2.modified
assert x2 is not x
assert x2 == x
x2['test'] = 3
assert x2.modified
assert not x2.new
store.save(x2)
x = store.get(x.sid)
store.delete(x)
x2 = store.get(x.sid)
# the session is not new when it was used previously.
assert not x2.new
def test_non_urandom(self):
urandom = os.urandom
del os.urandom
try:
store = FilesystemSessionStore(self.session_folder)
store.new()
finally:
os.urandom = urandom
def test_renewing_fs_session(self):
store = FilesystemSessionStore(self.session_folder, renew_missing=True)
x = store.new()
store.save(x)
store.delete(x)
x2 = store.get(x.sid)
assert x2.new
    def test_fs_session_listing(self):
store = FilesystemSessionStore(self.session_folder, renew_missing=True)
sessions = set()
for x in range(10):
sess = store.new()
store.save(sess)
sessions.add(sess.sid)
listed_sessions = set(store.list())
assert sessions == listed_sessions
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SessionTestCase))
return suite
| gpl-2.0 |
ismailsunni/healthsites | django_project/core/settings/prod.py | 1 | 2660 | # -*- coding: utf-8 -*-
from .project import * # noqa
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'healthsites_dev',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
# Set to empty string for default.
'PORT': '',
}
}
# Comment if you are not running behind proxy
USE_X_FORWARDED_HOST = True
# Set debug to false for production
DEBUG = TEMPLATE_DEBUG = False
INSTALLED_APPS += (
'raven.contrib.django.raven_compat', # enable Raven plugin
)
if 'raven.contrib.django.raven_compat' in INSTALLED_APPS:
print '*********** Setting up sentry logging ************'
SENTRY_DSN = (
'http://ea65e461089d4fcda9d63696cd70fa50:9ba52586ab79479eba68'
'[email protected]/12')
# MIDDLEWARE_CLASSES = (
# 'raven.contrib.django.middleware.SentryResponseErrorIdMiddleware',
# 'raven.contrib.django.middleware.SentryLogMiddleware',
# ) + MIDDLEWARE_CLASSES
#
# Sentry settings - logs exceptions to a database
LOGGING = {
# internal dictConfig version - DON'T CHANGE
'version': 1,
'disable_existing_loggers': True,
# default root logger - handle with sentry
'root': {
'level': 'ERROR',
'handlers': ['sentry'],
},
'handlers': {
# send email to mail_admins, if DEBUG=False
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
# sentry logger
'sentry': {
'level': 'WARNING',
'class': (
'raven.contrib.django.raven_compat.'
'handlers.SentryHandler'),
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['sentry'],
'propagate': False
},
'raven': {
'level': 'ERROR',
'handlers': ['mail_admins'],
'propagate': False
},
'sentry.errors': {
'level': 'ERROR',
'handlers': ['mail_admins'],
'propagate': False
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
}
}
}
| bsd-2-clause |
auduny/home-assistant | tests/components/mqtt_json/test_device_tracker.py | 6 | 6255 | """The tests for the JSON MQTT device tracker platform."""
import json
from asynctest import patch
import logging
import os
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components import device_tracker
from homeassistant.const import CONF_PLATFORM
from tests.common import async_mock_mqtt_component, async_fire_mqtt_message
_LOGGER = logging.getLogger(__name__)
LOCATION_MESSAGE = {
'longitude': 1.0,
'gps_accuracy': 60,
'latitude': 2.0,
'battery_level': 99.9}
LOCATION_MESSAGE_INCOMPLETE = {
'longitude': 2.0}
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
hass.loop.run_until_complete(async_mock_mqtt_component(hass))
yaml_devices = hass.config.path(device_tracker.YAML_DEVICES)
yield
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
async def test_ensure_device_tracker_platform_validation(hass):
"""Test if platform validation was done."""
async def mock_setup_scanner(hass, config, see, discovery_info=None):
"""Check that Qos was added by validation."""
assert 'qos' in config
with patch('homeassistant.components.mqtt_json.device_tracker.'
'async_setup_scanner', autospec=True,
side_effect=mock_setup_scanner) as mock_sp:
dev_id = 'paulus'
topic = 'location/paulus'
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt_json',
'devices': {dev_id: topic}
}
})
assert mock_sp.call_count == 1
async def test_json_message(hass):
"""Test json location message."""
dev_id = 'zanzito'
topic = 'location/zanzito'
location = json.dumps(LOCATION_MESSAGE)
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt_json',
'devices': {dev_id: topic}
}
})
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
state = hass.states.get('device_tracker.zanzito')
assert state.attributes.get('latitude') == 2.0
assert state.attributes.get('longitude') == 1.0
async def test_non_json_message(hass, caplog):
"""Test receiving a non JSON message."""
dev_id = 'zanzito'
topic = 'location/zanzito'
location = 'home'
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt_json',
'devices': {dev_id: topic}
}
})
caplog.set_level(logging.ERROR)
caplog.clear()
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert "Error parsing JSON payload: home" in \
caplog.text
async def test_incomplete_message(hass, caplog):
"""Test receiving an incomplete message."""
dev_id = 'zanzito'
topic = 'location/zanzito'
location = json.dumps(LOCATION_MESSAGE_INCOMPLETE)
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt_json',
'devices': {dev_id: topic}
}
})
caplog.set_level(logging.ERROR)
caplog.clear()
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert "Skipping update for following data because of missing " \
"or malformatted data: {\"longitude\": 2.0}" in \
caplog.text
async def test_single_level_wildcard_topic(hass):
"""Test single level wildcard topic."""
dev_id = 'zanzito'
subscription = 'location/+/zanzito'
topic = 'location/room/zanzito'
location = json.dumps(LOCATION_MESSAGE)
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt_json',
'devices': {dev_id: subscription}
}
})
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
state = hass.states.get('device_tracker.zanzito')
assert state.attributes.get('latitude') == 2.0
assert state.attributes.get('longitude') == 1.0
async def test_multi_level_wildcard_topic(hass):
"""Test multi level wildcard topic."""
dev_id = 'zanzito'
subscription = 'location/#'
topic = 'location/zanzito'
location = json.dumps(LOCATION_MESSAGE)
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt_json',
'devices': {dev_id: subscription}
}
})
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
state = hass.states.get('device_tracker.zanzito')
assert state.attributes.get('latitude') == 2.0
assert state.attributes.get('longitude') == 1.0
async def test_single_level_wildcard_topic_not_matching(hass):
"""Test not matching single level wildcard topic."""
dev_id = 'zanzito'
entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
subscription = 'location/+/zanzito'
topic = 'location/zanzito'
location = json.dumps(LOCATION_MESSAGE)
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt_json',
'devices': {dev_id: subscription}
}
})
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id) is None
async def test_multi_level_wildcard_topic_not_matching(hass):
"""Test not matching multi level wildcard topic."""
dev_id = 'zanzito'
entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
subscription = 'location/#'
topic = 'somewhere/zanzito'
location = json.dumps(LOCATION_MESSAGE)
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt_json',
'devices': {dev_id: subscription}
}
})
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id) is None
| apache-2.0 |
blighj/django | tests/forms_tests/field_tests/test_multiplechoicefield.py | 28 | 3478 | from django.forms import MultipleChoiceField, ValidationError
from django.test import SimpleTestCase
class MultipleChoiceFieldTest(SimpleTestCase):
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')])
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
with self.assertRaisesMessage(ValidationError, "'Enter a list of values.'"):
f.clean('hello')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean([])
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(())
msg = "'Select a valid choice. 3 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(['3'])
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual([], f.clean(''))
self.assertEqual([], f.clean(None))
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
with self.assertRaisesMessage(ValidationError, "'Enter a list of values.'"):
f.clean('hello')
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
msg = "'Select a valid choice. 3 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(['3'])
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(
choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')]
)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '5'], f.clean([1, 5]))
self.assertEqual(['1', '5'], f.clean([1, '5']))
self.assertEqual(['1', '5'], f.clean(['1', 5]))
self.assertEqual(['1', '5'], f.clean(['1', '5']))
msg = "'Select a valid choice. 6 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(['6'])
msg = "'Select a valid choice. 6 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(['1', '6'])
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two'), ('3', 'Three')])
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed([], None))
self.assertTrue(f.has_changed(None, ['1']))
self.assertFalse(f.has_changed([1, 2], ['1', '2']))
self.assertFalse(f.has_changed([2, 1], ['1', '2']))
self.assertTrue(f.has_changed([1, 2], ['1']))
self.assertTrue(f.has_changed([1, 2], ['1', '3']))
| bsd-3-clause |
wwj718/edx-platform | common/lib/xmodule/xmodule/modulestore/search.py | 52 | 5832 | ''' useful functions for finding content and its position '''
from logging import getLogger
from .exceptions import (ItemNotFoundError, NoPathToItem)
LOGGER = getLogger(__name__)
def path_to_location(modulestore, usage_key, full_path=False):
'''
Try to find a course_id/chapter/section[/position] path to location in
modulestore. The courseware insists that the first level in the course is
chapter, but any kind of module can be a "section".
Args:
modulestore: which store holds the relevant objects
usage_key: :class:`UsageKey` the id of the location to which to generate the path
full_path: :class:`Bool` if True, return the full path to location. Default is False.
Raises
ItemNotFoundError if the location doesn't exist.
NoPathToItem if the location exists, but isn't accessible via
a chapter/section path in the course(s) being searched.
Returns:
      a tuple (course_id, chapter, section, vertical, position, final_usage_key)
      suitable for the courseware index view.
    If the section is a sequential or vertical, position will be the 1-based index
      of the child to display under that sequence.
'''
def flatten(xs):
'''Convert lisp-style (a, (b, (c, ()))) list into a python list.
Not a general flatten function. '''
p = []
while xs != ():
p.append(xs[0])
xs = xs[1]
return p
def find_path_to_course():
'''Find a path up the location graph to a node with the
specified category.
If no path exists, return None.
If a path exists, return it as a tuple with root location first, and
the target location last.
'''
# Standard DFS
# To keep track of where we came from, the work queue has
# tuples (location, path-so-far). To avoid lots of
# copying, the path-so-far is stored as a lisp-style
# list--nested hd::tl tuples, and flattened at the end.
queue = [(usage_key, ())]
while len(queue) > 0:
(next_usage, path) = queue.pop() # Takes from the end
# get_parent_location raises ItemNotFoundError if location isn't found
parent = modulestore.get_parent_location(next_usage)
# print 'Processing loc={0}, path={1}'.format(next_usage, path)
if next_usage.block_type == "course":
# Found it!
path = (next_usage, path)
return flatten(path)
elif parent is None:
# Orphaned item.
return None
# otherwise, add parent locations at the end
newpath = (next_usage, path)
queue.append((parent, newpath))
with modulestore.bulk_operations(usage_key.course_key):
if not modulestore.has_item(usage_key):
raise ItemNotFoundError(usage_key)
path = find_path_to_course()
if path is None:
raise NoPathToItem(usage_key)
if full_path:
return path
n = len(path)
course_id = path[0].course_key
# pull out the location names
chapter = path[1].name if n > 1 else None
section = path[2].name if n > 2 else None
vertical = path[3].name if n > 3 else None
# Figure out the position
position = None
# This block of code will find the position of a module within a nested tree
# of modules. If a problem is on tab 2 of a sequence that's on tab 3 of a
# sequence, the resulting position is 3_2. However, no positional modules
# (e.g. sequential and videosequence) currently deal with this form of
# representing nested positions. This needs to happen before jumping to a
# module nested in more than one positional module will work.
if n > 3:
position_list = []
for path_index in range(2, n - 1):
category = path[path_index].block_type
if category == 'sequential' or category == 'videosequence':
section_desc = modulestore.get_item(path[path_index])
# this calls get_children rather than just children b/c old mongo includes private children
# in children but not in get_children
child_locs = [c.location for c in section_desc.get_children()]
# positions are 1-indexed, and should be strings to be consistent with
# url parsing.
position_list.append(str(child_locs.index(path[path_index + 1]) + 1))
position = "_".join(position_list)
return (course_id, chapter, section, vertical, position, path[-1])
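# Hedged usage sketch (not part of the original module). Assuming `store` is a
# modulestore instance and `usage_key` identifies a block inside a course:
#
#     course_id, chapter, section, vertical, position, final_key = \
#         path_to_location(store, usage_key)
#
# `position` is an underscore-separated string of 1-based child indices, e.g. "3",
# or "3_2" when sequentials are nested.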
def navigation_index(position):
"""
    Get the navigation index from the position argument (where the position argument was received from a call to
path_to_location)
Argument:
position - result of position returned from call to path_to_location. This is an underscore (_) separated string of
vertical 1-indexed positions. If the course is built in Studio then you'll never see verticals as children of
verticals, and so extremely often one will only see the first vertical as an integer position. This specific action
is to allow navigation / breadcrumbs to locate the topmost item because this is the location actually required by
the LMS code
Returns:
1-based integer of the position of the desired item within the vertical
"""
if position is None:
return None
try:
navigation_position = int(position.split('_', 1)[0])
except (ValueError, TypeError):
LOGGER.exception(u'Bad position %r passed to navigation_index, will assume first position', position)
navigation_position = 1
return navigation_position
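# Hedged examples (not part of the original module):
#     navigation_index("3_2")   # -> 3
#     navigation_index(None)    # -> None
#     navigation_index("oops")  # -> 1, with a warning logged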
| agpl-3.0 |
tecan/xchat-rt | plugins/scripts/Supybot-0.83.4.1-bitcoinotc-bot/src/shlex.py | 14 | 8159 | """A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
import os.path
import sys
__all__ = ["shlex"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None):
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
self.whitespace = ' \t\r\n'
self.quotes = '\'"'
self.state = ' '
self.pushback = []
self.lineno = 1
self.debug = 0
self.token = ''
self.backslash = False
self.filestack = []
self.source = None
if self.debug:
print 'shlex: reading from %s, line %d' \
% (self.instream, self.lineno)
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print "shlex: pushing token " + `tok`
self.pushback = [tok] + self.pushback
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
self.filestack.insert(0, (self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print 'shlex: pushing to file %s' % (self.infile,)
else:
print 'shlex: pushing to stream %s' % (self.instream,)
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack[0]
self.filestack = self.filestack[1:]
if self.debug:
print 'shlex: popping to %s, line %d' \
% (self.instream, self.lineno)
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback[0]
self.pushback = self.pushback[1:]
if self.debug >= 1:
print "shlex: popping token " + `tok`
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == "":
if len(self.filestack) == 0:
return ""
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw:
print "shlex: token=" + `raw`
else:
print "shlex: token=EOF"
return raw
def read_token(self):
"Read a token from the input stream (no pushback or inclusions)"
while 1:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print "shlex: in state", repr(self.state), \
"I see character:", repr(nextchar)
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in whitespace state"
if self.token:
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
self.token = nextchar
self.state = nextchar
else:
self.token = nextchar
if self.token:
break # emit current token
else:
continue
elif self.state in self.quotes:
self.token = self.token + nextchar
if nextchar == '\\':
if self.backslash:
self.backslash = False
else:
self.backslash = True
else:
if not self.backslash and nextchar == self.state:
self.state = ' '
break
elif self.backslash:
self.backslash = False
elif not nextchar: # end of file
if self.debug >= 2:
print "shlex: I see EOF in quotes state"
# XXX what error should be raised here?
raise ValueError, "No closing quotation"
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in word state"
self.state = ' '
if self.token:
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif nextchar in self.wordchars or nextchar in self.quotes:
self.token = self.token + nextchar
else:
self.pushback = [nextchar] + self.pushback
if self.debug >= 2:
print "shlex: I see punctuation in word state"
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.debug > 1:
if result:
print "shlex: raw token=" + `result`
else:
print "shlex: raw token=EOF"
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if type(self.infile) == type("") and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
if __name__ == '__main__':
if len(sys.argv) == 1:
lexer = shlex()
else:
file = sys.argv[1]
lexer = shlex(open(file), file)
while 1:
tt = lexer.get_token()
if tt:
print "Token: " + repr(tt)
else:
break
| gpl-2.0 |
ruffsl/ros_buildfarm | scripts/status/build_blocked_releases_page.py | 2 | 1506 | #!/usr/bin/env python3
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import add_argument_output_dir
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.status_page import build_blocked_releases_page
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Generate the blocked releases page')
add_argument_config_url(parser)
add_argument_rosdistro_name(parser)
add_argument_output_dir(parser)
parser.add_argument(
'--copy-resources',
action='store_true',
help='Copy the resources instead of using symlinks')
args = parser.parse_args(argv)
return build_blocked_releases_page(
args.config_url, args.rosdistro_name,
args.output_dir, copy_resources=args.copy_resources)
if __name__ == '__main__':
main()
| apache-2.0 |
armpc/repo | subcmds/manifest.py | 15 | 2306 | #
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from command import PagedCommand
class Manifest(PagedCommand):
common = False
helpSummary = "Manifest inspection utility"
helpUsage = """
%prog [-o {-|NAME.xml} [-r]]
"""
_helpDescription = """
With the -o option, exports the current manifest for inspection.
The manifest and (if present) local_manifest.xml are combined
together to produce a single manifest file. This file can be stored
in a Git repository for use during future 'repo init' invocations.
"""
@property
def helpDescription(self):
help = self._helpDescription + '\n'
r = os.path.dirname(__file__)
r = os.path.dirname(r)
fd = open(os.path.join(r, 'docs', 'manifest-format.txt'))
for line in fd:
help += line
fd.close()
return help
def _Options(self, p):
p.add_option('-r', '--revision-as-HEAD',
dest='peg_rev', action='store_true',
help='Save revisions as current HEAD')
p.add_option('-o', '--output-file',
dest='output_file',
help='File to save the manifest to',
metavar='-|NAME.xml')
def _Output(self, opt):
if opt.output_file == '-':
fd = sys.stdout
else:
fd = open(opt.output_file, 'w')
self.manifest.Save(fd,
peg_rev = opt.peg_rev)
fd.close()
if opt.output_file != '-':
print >>sys.stderr, 'Saved manifest to %s' % opt.output_file
def Execute(self, opt, args):
if args:
self.Usage()
if opt.output_file is not None:
self._Output(opt)
return
print >>sys.stderr, 'error: no operation to perform'
print >>sys.stderr, 'error: see repo help manifest'
sys.exit(1)
| apache-2.0 |
0-wiz-0/audacity | lib-src/lv2/suil/waflib/Tools/suncxx.py | 196 | 1487 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_sxx(conf):
v=conf.env
cc=None
if v['CXX']:cc=v['CXX']
elif'CXX'in conf.environ:cc=conf.environ['CXX']
if not cc:cc=conf.find_program('CC',var='CXX')
if not cc:cc=conf.find_program('c++',var='CXX')
if not cc:conf.fatal('Could not find a Sun C++ compiler')
cc=conf.cmd_to_list(cc)
try:
conf.cmd_and_log(cc+['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler'%cc)
v['CXX']=cc
v['CXX_NAME']='sun'
conf.get_suncc_version(cc)
@conf
def sxx_common_flags(conf):
v=conf.env
v['CXX_SRC_F']=[]
v['CXX_TGT_F']=['-c','-o']
if not v['LINK_CXX']:v['LINK_CXX']=v['CXX']
v['CXXLNK_SRC_F']=[]
v['CXXLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['SONAME_ST']='-Wl,-h,%s'
v['SHLIB_MARKER']='-Bdynamic'
v['STLIB_MARKER']='-Bstatic'
v['cxxprogram_PATTERN']='%s'
v['CXXFLAGS_cxxshlib']=['-Kpic','-DPIC']
v['LINKFLAGS_cxxshlib']=['-G']
v['cxxshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cxxstlib']=['-Bstatic']
v['cxxstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_sxx()
conf.find_ar()
conf.sxx_common_flags()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
| gpl-2.0 |
pwil3058/pysm_wsm | scm/scm_events.py | 1 | 1384 | ### -*- coding: utf-8 -*-
###
### Copyright (C) 2016 Peter Williams <[email protected]>
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; version 2 of the License only.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from ..bab import enotify
E_FILE_ADDED, E_FILE_DELETED, E_FILE_MODIFIED, E_FILE_CHANGES = enotify.new_event_flags_and_mask(3)
E_FILE_MOVED = E_FILE_ADDED|E_FILE_DELETED
E_INDEX_MOD, E_COMMIT, E_BACKOUT, E_BRANCH, E_TAG, E_PUSH, E_PULL, E_INIT, E_CLONE, E_STASH, E_FETCH, E_CS_CHANGES = enotify.new_event_flags_and_mask(11)
E_NEW_SCM = E_INIT|E_CLONE
E_CHECKOUT, E_BISECT, E_MERGE, E_UPDATE, E_WD_CHANGES = enotify.new_event_flags_and_mask(4)
E_PGND_RC_CHANGED, E_USER_RC_CHANGED, E_RC_CHANGED = enotify.new_event_flags_and_mask(2)
E_LOG = enotify.new_event_flag()
E_REMOTE = enotify.new_event_flag()
| gpl-2.0 |
ixdy/kubernetes-test-infra | gubernator/github/models.py | 16 | 7755 | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import datetime
import json
import google.appengine.ext.ndb as ndb
class GithubResource(ndb.Model):
# A key holder used to define an entitygroup for
# each Issue/PR, for easy ancestor queries.
@staticmethod
def make_key(repo, number):
return ndb.Key(GithubResource, '%s %s' % (repo, number))
def shrink(body):
"""Recursively remove Github API urls from an object to make it more human-readable."""
toremove = []
for key, value in body.iteritems():
if isinstance(value, basestring):
if key.endswith('url'):
if (value.startswith('https://api.github.com/') or
value.startswith('https://avatars.githubusercontent.com')):
toremove.append(key)
elif isinstance(value, dict):
shrink(value)
elif isinstance(value, list):
for el in value:
if isinstance(el, dict):
shrink(el)
for key in toremove:
body.pop(key)
return body
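# Hedged illustration of shrink() (not part of the original file):
#
#     body = {'url': 'https://api.github.com/repos/org/repo',
#             'title': 'fix flake',
#             'user': {'avatar_url': 'https://avatars.githubusercontent.com/u/1'}}
#     shrink(body)
#     # -> {'title': 'fix flake', 'user': {}}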
class GithubWebhookRaw(ndb.Model):
repo = ndb.StringProperty()
number = ndb.IntegerProperty(indexed=False)
event = ndb.StringProperty()
guid = ndb.StringProperty()
timestamp = ndb.DateTimeProperty(auto_now_add=True)
body = ndb.TextProperty(compressed=True)
def to_tuple(self):
return (self.event, shrink(json.loads(self.body)), float(self.timestamp.strftime('%s.%f')))
def from_iso8601(t):
if not t:
return t
if t.endswith('Z'):
return datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
elif t.endswith('+00:00'):
return datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%S+00:00')
else:
logging.warning('unparseable time value: %s', t)
return None
def make_kwargs(body, fields):
kwargs = {}
for field in fields:
if field.endswith('_at'):
kwargs[field] = from_iso8601(body[field])
else:
kwargs[field] = body[field]
return kwargs
class GHStatus(ndb.Model):
# Key: {repo}\t{sha}\t{context}
state = ndb.StringProperty(indexed=False)
target_url = ndb.StringProperty(indexed=False)
description = ndb.TextProperty()
created_at = ndb.DateTimeProperty(indexed=False)
updated_at = ndb.DateTimeProperty(indexed=False)
@staticmethod
def make_key(repo, sha, context):
return ndb.Key(GHStatus, '%s\t%s\t%s' % (repo, sha, context))
@staticmethod
def make(repo, sha, context, **kwargs):
return GHStatus(key=GHStatus.make_key(repo, sha, context), **kwargs)
@staticmethod
def query_for_sha(repo, sha):
before = GHStatus.make_key(repo, sha, '')
after = GHStatus.make_key(repo, sha, '\x7f')
return GHStatus.query(GHStatus.key > before, GHStatus.key < after)
@staticmethod
def from_json(body):
kwargs = make_kwargs(body,
'sha context state target_url description '
'created_at updated_at'.split())
kwargs['repo'] = body['name']
return GHStatus.make(**kwargs)
@property
def repo(self):
return self.key.id().split('\t', 1)[0]
@property
def sha(self):
return self.key.id().split('\t', 2)[1]
@property
def context(self):
return self.key.id().split('\t', 2)[2]
class GHIssueDigest(ndb.Model):
# Key: {repo} {number}
is_pr = ndb.BooleanProperty()
is_open = ndb.BooleanProperty()
involved = ndb.StringProperty(repeated=True)
xref = ndb.StringProperty(repeated=True)
payload = ndb.JsonProperty()
updated_at = ndb.DateTimeProperty()
head = ndb.StringProperty()
@staticmethod
def make_key(repo, number):
return ndb.Key(GHIssueDigest, '%s %s' % (repo, number))
@staticmethod
def make(repo, number, is_pr, is_open, involved, payload, updated_at):
return GHIssueDigest(key=GHIssueDigest.make_key(repo, number),
is_pr=is_pr, is_open=is_open, involved=involved, payload=payload,
updated_at=updated_at, head=payload.get('head'),
xref=payload.get('xrefs', []))
@staticmethod
def get(repo, number):
return GHIssueDigest.make_key(repo, number).get()
@property
def repo(self):
return self.key.id().split()[0]
@property
def number(self):
return int(self.key.id().split()[1])
@property
def url(self):
return 'https://github.com/%s/issues/%s' % tuple(self.key.id().split())
@property
def title(self):
return self.payload.get('title', '')
@staticmethod
def find_head(repo, head):
return GHIssueDigest.query(GHIssueDigest.key > GHIssueDigest.make_key(repo, ''),
GHIssueDigest.key < GHIssueDigest.make_key(repo, '~'),
GHIssueDigest.head == head)
@staticmethod
@ndb.tasklet
def find_xrefs_async(xref):
issues = yield GHIssueDigest.query(GHIssueDigest.xref == xref).fetch_async()
raise ndb.Return(list(issues))
@staticmethod
@ndb.tasklet
def find_xrefs_multi_async(xrefs):
"""
Given a list of xrefs to search for, return a dict of lists
of result values. Xrefs that have no corresponding issues are
not represented in the dictionary.
"""
# The IN operator does multiple sequential queries and ORs them
# together. This is slow here-- a range query is faster, since
# this is used to get xrefs for a set of contiguous builds.
if not xrefs: # nothing => nothing
raise ndb.Return({})
xrefs = set(xrefs)
issues = yield GHIssueDigest.query(
GHIssueDigest.xref >= min(xrefs),
GHIssueDigest.xref <= max(xrefs)).fetch_async(batch_size=500)
refs = {}
for issue in issues:
for xref in issue.xref:
if xref in xrefs:
refs.setdefault(xref, []).append(issue)
raise ndb.Return(refs)
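    # Hedged usage sketch (not part of the original file); the xref strings below
    # are hypothetical build-log paths used only for illustration:
    #
    #     refs = GHIssueDigest.find_xrefs_multi_async(
    #         ['gs://bucket/logs/123', 'gs://bucket/logs/124']).get_result()
    #     # refs maps each xref that has matching issues to a list of GHIssueDigest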
@staticmethod
def find_open_prs():
# pylint: disable=singleton-comparison
return GHIssueDigest.query(GHIssueDigest.is_pr == True,
GHIssueDigest.is_open == True)
@staticmethod
def find_open_prs_for_repo(repo):
return (GHIssueDigest.find_open_prs()
.filter(GHIssueDigest.key > GHIssueDigest.make_key(repo, ''),
GHIssueDigest.key < GHIssueDigest.make_key(repo, '~')))
class GHUserState(ndb.Model):
# Key: {github username}
acks = ndb.JsonProperty() # dict of issue keys => ack time (seconds since epoch)
@staticmethod
def make_key(user):
return ndb.Key(GHUserState, user)
@staticmethod
def make(user, acks=None):
return GHUserState(key=GHUserState.make_key(user), acks=acks or {})
@ndb.transactional
def save_if_newer(obj):
assert obj.updated_at is not None
old = obj.key.get()
if old is None:
obj.put()
return True
else:
if old.updated_at is None or obj.updated_at >= old.updated_at:
obj.put()
return True
return False
| apache-2.0 |
MridulS/sympy | sympy/matrices/expressions/matexpr.py | 14 | 12894 | from __future__ import print_function, division
from functools import wraps
from sympy.core import S, Symbol, sympify, Tuple, Integer, Basic, Expr
from sympy.core.decorators import call_highest_priority
from sympy.core.sympify import SympifyError, sympify
from sympy.functions import conjugate, adjoint
from sympy.matrices import ShapeError
from sympy.simplify import simplify
def _sympifyit(arg, retval=None):
# This version of _sympifyit sympifies MutableMatrix objects
def deco(func):
@wraps(func)
def __sympifyit_wrapper(a, b):
try:
b = sympify(b, strict=True)
return func(a, b)
except SympifyError:
return retval
return __sympifyit_wrapper
return deco
class MatrixExpr(Basic):
""" Superclass for Matrix Expressions
MatrixExprs represent abstract matrices, linear transformations represented
within a particular basis.
Examples
========
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol('A', 3, 3)
>>> y = MatrixSymbol('y', 3, 1)
>>> x = (A.T*A).I * A * y
See Also
========
MatrixSymbol
MatAdd
MatMul
Transpose
Inverse
"""
_op_priority = 11.0
is_Matrix = True
is_MatrixExpr = True
is_Identity = None
is_Inverse = False
is_Transpose = False
is_ZeroMatrix = False
is_MatAdd = False
is_MatMul = False
is_commutative = False
def __new__(cls, *args, **kwargs):
args = map(sympify, args)
return Basic.__new__(cls, *args, **kwargs)
# The following is adapted from the core Expr object
def __neg__(self):
return MatMul(S.NegativeOne, self).doit()
def __abs__(self):
raise NotImplementedError
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return MatAdd(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return MatAdd(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return MatAdd(self, -other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return MatAdd(other, -self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
if not self.is_square:
raise ShapeError("Power of non-square matrix %s" % self)
if other is S.NegativeOne:
return Inverse(self)
elif other is S.Zero:
return Identity(self.rows)
elif other is S.One:
return self
return MatPow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError("Matrix Power not defined")
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return self * other**S.NegativeOne
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
raise NotImplementedError()
#return MatMul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@property
def rows(self):
return self.shape[0]
@property
def cols(self):
return self.shape[1]
@property
def is_square(self):
return self.rows == self.cols
def _eval_conjugate(self):
from sympy.matrices.expressions.adjoint import Adjoint
from sympy.matrices.expressions.transpose import Transpose
return Adjoint(Transpose(self))
def _eval_inverse(self):
from sympy.matrices.expressions.inverse import Inverse
return Inverse(self)
def _eval_transpose(self):
return Transpose(self)
def _eval_power(self, exp):
return MatPow(self, exp)
def _eval_simplify(self, **kwargs):
if self.is_Atom:
return self
else:
return self.__class__(*[simplify(x, **kwargs) for x in self.args])
def _eval_adjoint(self):
from sympy.matrices.expressions.adjoint import Adjoint
return Adjoint(self)
def _entry(self, i, j):
raise NotImplementedError(
"Indexing not implemented for %s" % self.__class__.__name__)
def adjoint(self):
return adjoint(self)
def conjugate(self):
return conjugate(self)
def transpose(self):
from sympy.matrices.expressions.transpose import transpose
return transpose(self)
T = property(transpose, None, None, 'Matrix transposition.')
def inverse(self):
return self._eval_inverse()
@property
def I(self):
return self.inverse()
def valid_index(self, i, j):
def is_valid(idx):
return isinstance(idx, (int, Integer, Symbol, Expr))
return (is_valid(i) and is_valid(j) and
(0 <= i) != False and (i < self.rows) != False and
(0 <= j) != False and (j < self.cols) != False)
def __getitem__(self, key):
if not isinstance(key, tuple) and isinstance(key, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, key, (0, None, 1))
if isinstance(key, tuple) and len(key) == 2:
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, i, j)
i, j = sympify(i), sympify(j)
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid indices (%s, %s)" % (i, j))
elif isinstance(key, (int, Integer)):
# row-wise decomposition of matrix
rows, cols = self.shape
if not (isinstance(rows, Integer) and isinstance(cols, Integer)):
raise IndexError("Single index only supported for "
"non-symbolic matrix shapes.")
key = sympify(key)
i = key // cols
j = key % cols
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid index %s" % key)
elif isinstance(key, (Symbol, Expr)):
raise IndexError("Single index only supported for "
"non-symbolic indices.")
raise IndexError("Invalid index, wanted %s[i,j]" % self)
def as_explicit(self):
"""
Returns a dense Matrix with elements represented explicitly
Returns an object of type ImmutableMatrix.
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.as_explicit()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_mutable: returns mutable Matrix type
"""
from sympy.matrices.immutable import ImmutableMatrix
return ImmutableMatrix([[ self[i, j]
for j in range(self.cols)]
for i in range(self.rows)])
def as_mutable(self):
"""
Returns a dense, mutable matrix with elements represented explicitly
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.shape
(3, 3)
>>> I.as_mutable()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_explicit: returns ImmutableMatrix
"""
return self.as_explicit().as_mutable()
def __array__(self):
from numpy import empty
a = empty(self.shape, dtype=object)
for i in range(self.rows):
for j in range(self.cols):
a[i, j] = self[i, j]
return a
def equals(self, other):
"""
Test elementwise equality between matrices, potentially of different
types
>>> from sympy import Identity, eye
>>> Identity(3).equals(eye(3))
True
"""
return self.as_explicit().equals(other)
def canonicalize(self):
return self
def as_coeff_mmul(self):
return 1, MatMul(self)
class MatrixElement(Expr):
parent = property(lambda self: self.args[0])
i = property(lambda self: self.args[1])
j = property(lambda self: self.args[2])
_diff_wrt = True
class MatrixSymbol(MatrixExpr):
"""Symbolic representation of a Matrix object
Creates a SymPy Symbol to represent a Matrix. This matrix has a shape and
can be included in Matrix Expressions
>>> from sympy import MatrixSymbol, Identity
>>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix
>>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix
>>> A.shape
(3, 4)
>>> 2*A*B + Identity(3)
I + 2*A*B
"""
is_commutative = False
def __new__(cls, name, n, m):
n, m = sympify(n), sympify(m)
obj = Basic.__new__(cls, name, n, m)
return obj
def _hashable_content(self):
return(self.name, self.shape)
@property
def shape(self):
return self.args[1:3]
@property
def name(self):
return self.args[0]
def _eval_subs(self, old, new):
# only do substitutions in shape
shape = Tuple(*self.shape)._subs(old, new)
return MatrixSymbol(self.name, *shape)
def __call__(self, *args):
raise TypeError( "%s object is not callable" % self.__class__ )
def _entry(self, i, j):
return MatrixElement(self, i, j)
@property
def free_symbols(self):
return set((self,))
def doit(self, **hints):
if hints.get('deep', True):
return type(self)(self.name, self.args[1].doit(**hints),
self.args[2].doit(**hints))
else:
return self
def _eval_simplify(self, **kwargs):
return self
class Identity(MatrixExpr):
"""The Matrix Identity I - multiplicative identity
>>> from sympy.matrices import Identity, MatrixSymbol
>>> A = MatrixSymbol('A', 3, 5)
>>> I = Identity(3)
>>> I*A
A
"""
is_Identity = True
def __new__(cls, n):
return super(Identity, cls).__new__(cls, sympify(n))
@property
def rows(self):
return self.args[0]
@property
def cols(self):
return self.args[0]
@property
def shape(self):
return (self.args[0], self.args[0])
def _eval_transpose(self):
return self
def _eval_trace(self):
return self.rows
def _eval_inverse(self):
return self
def conjugate(self):
return self
def _entry(self, i, j):
if i == j:
return S.One
else:
return S.Zero
def _eval_determinant(self):
return S.One
class ZeroMatrix(MatrixExpr):
"""The Matrix Zero 0 - additive identity
>>> from sympy import MatrixSymbol, ZeroMatrix
>>> A = MatrixSymbol('A', 3, 5)
>>> Z = ZeroMatrix(3, 5)
>>> A+Z
A
>>> Z*A.T
0
"""
is_ZeroMatrix = True
def __new__(cls, m, n):
return super(ZeroMatrix, cls).__new__(cls, m, n)
@property
def shape(self):
return (self.args[0], self.args[1])
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
if other != 1 and not self.is_square:
raise ShapeError("Power of non-square matrix %s" % self)
if other == 0:
return Identity(self.rows)
return self
def _eval_transpose(self):
return ZeroMatrix(self.cols, self.rows)
def _eval_trace(self):
return S.Zero
def _eval_determinant(self):
return S.Zero
def conjugate(self):
return self
def _entry(self, i, j):
return S.Zero
def __nonzero__(self):
return False
__bool__ = __nonzero__
def matrix_symbols(expr):
return [sym for sym in expr.free_symbols if sym.is_Matrix]
from .matmul import MatMul
from .matadd import MatAdd
from .matpow import MatPow
from .transpose import Transpose
from .inverse import Inverse
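# A minimal usage sketch, assuming the full sympy and numpy packages are
# importable. It exercises MatrixSymbol indexing (which goes through _entry and
# yields a MatrixElement) and the __array__ hook defined above for numpy.
if __name__ == '__main__':
    import numpy
    from sympy import Identity, MatrixSymbol

    A = MatrixSymbol('A', 2, 2)
    elem = A[0, 1]                    # MatrixElement with parent A, i=0, j=1
    assert (elem.parent, elem.i, elem.j) == (A, 0, 1)

    eye = numpy.array(Identity(2))    # object-dtype array built by __array__
    assert eye.shape == (2, 2) and eye[0, 1] == 0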
| bsd-3-clause |
LaboratoireMecaniqueLille/crappy | crappy/tool/GUI_Arduino/frame_objects.py | 1 | 6752 | # coding: utf-8
import tkinter as tk
from tkinter import ttk
class FrameObjects(tk.Frame):
"""A very simple class that should be inherited by frames, to help create and
maintain in order the file.
How it works:
- After frames defined, create a dict (or an orderedDict, for convenience),
that will contain every widget.
- Use appropriate method, and specify the frame and its dictionary. If
variables are created, they will be a class attribute.
These are commune args to specify to every widget:
- widgets_dict: the dictionary in which the widget will be stored.
- frame: the frame to put the widget.
- name: the key to call the widget inside the dictionary.
- text (if applicable): the text to show.
"""
def __init__(self):
super().__init__()
def add_button(self, **kwargs):
"""To add a tkinter button.
Args:
text: the text to show inside the button.
bg: background color.
height, width: self-explanatory
command_type: by default, clicking the button executes the
submit_command(command) method, with command as its argument. For a
different behavior, specify "custom" or any string other than "to_serial".
command: the command to be executed, OR the string to pass to
submit_command.
"""
widgets_dict = kwargs.pop('widgets_dict', None)
frame = kwargs.pop('frame', None)
name = kwargs.pop('name', 'Button')
text = kwargs.pop('text', 'Button')
bg = kwargs.pop('bg', 'white')
height = kwargs.pop('height', 2)
width = kwargs.pop('width', 10)
command_type = kwargs.pop('command_type', 'to_serial')
command = kwargs.pop('command', None)
assert not kwargs, 'Error: unknown arg(s) in button definition:' + str(
kwargs)
if command_type != 'to_serial':
widgets_dict[name] = tk.Button(frame,
text=text,
bg=bg,
relief="raised",
height=height, width=width,
command=command,
font=("Courier bold", 11))
else:
widgets_dict[name] = tk.Button(frame,
text=text,
bg=bg,
relief="raised",
height=height, width=width,
command=lambda: self.submit_command(
command),
font=("Courier bold", 11))
@staticmethod
def add_label(**kwargs):
"""To add label.
Args:
font: to specify the text font, size, style.
relief: to add some relief to the label.
"""
widgets_dict = kwargs.pop('widgets_dict', None)
frame = kwargs.pop('frame', None)
text = kwargs.pop('text', 'label')
name = kwargs.pop('name', text)
relief = kwargs.pop('relief', 'flat')
font = kwargs.pop('font', ('Courier bold', 11))
widgets_dict[name] = (tk.Label(frame,
text=text,
relief=relief,
font=font))
def add_entry(self, **kwargs):
"""To add an entry box. The adding of the entry box will add an attribute,
if no variable is specified.
Args:
vartype: to specify which type the variable associated with the entry
will be. Useful to make sure the user does not enter a string when a
number is expected.
variable: the variable's name, which will be set as an attribute.
width: the width of the entry box.
"""
widgets_dict = kwargs.pop('widgets_dict', None)
frame = kwargs.pop('frame', None)
entry_name = kwargs.pop('name', 'name')
width = kwargs.pop('width', 10)
vartype = kwargs.pop('vartype', tk.DoubleVar())
variable = kwargs.pop("variable", None)
if not variable:
setattr(self, entry_name + '_var', vartype)
widgets_dict[entry_name] = tk.Entry(frame,
textvariable=getattr(self, entry_name +
'_var'),
width=width)
else:
widgets_dict[entry_name] = tk.Entry(frame,
textvariable=variable,
width=width)
def add_checkbutton(self, **kwargs):
"""To add a checkbutton. Will create automatically a boolean attribute,
which will represent the checkbutton state."""
widgets_dict = kwargs.pop('widgets_dict', None)
frame = kwargs.pop('frame', None)
text = kwargs.pop("text", None)
name = kwargs.pop('name', 'checkbutton')
variable_name = kwargs.pop('variable', name + '_var')
var = tk.BooleanVar()
setattr(self, variable_name, var)
widgets_dict[name] = tk.Checkbutton(frame,
text=text,
variable=var)
def add_combobox(self, **kwargs):
"""To add a combobox. Will automatically add an attribute.
Args:
entries: a list that contains every selectable option.
variable: the name of the variable, which will become an attribute.
default_index: to define which entry the combobox shows by default.
"""
widgets_dict = kwargs.pop("widgets_dict", None)
frame = kwargs.pop("frame", None)
entries = kwargs.pop("entries", None)
name = kwargs.pop("name", "combobox")
variable_name = kwargs.pop("variable", name + "_var")
default_index = kwargs.pop("default", 0)
var = tk.StringVar()
var.set(entries[default_index])
setattr(self, variable_name, var)
combo_box = ttk.Combobox(frame,
textvariable=var,
values=entries,
state='readonly')
widgets_dict[name] = combo_box
@staticmethod
def add_scale(**kwargs):
"""To add a scrollbar"""
widgets_dict = kwargs.pop('widgets_dict', None)
frame = kwargs.pop('frame', None)
name = kwargs.pop('name', 'Scale')
boundaries = kwargs.pop("boundaries", (0, 1))
widgets_dict[name] = tk.Scale(frame,
from_=boundaries[0],
to_=boundaries[1],
orient=tk.HORIZONTAL,
)
@staticmethod
def add_text(**kwargs):
widgets_dict = kwargs.pop('widgets_dict', None)
frame = kwargs.pop('frame', None)
text = kwargs.pop('text', 'label')
name = kwargs.pop('name', text)
widgets_dict[name] = tk.Text(frame)
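# A minimal, hypothetical usage sketch of FrameObjects, assuming a Tk display is
# available. The default command_type='to_serial' expects the subclass to define
# a submit_command() method, so this sketch passes a custom command instead.
if __name__ == '__main__':
    from collections import OrderedDict

    class DemoFrame(FrameObjects):
        def __init__(self, parent):
            tk.Frame.__init__(self, parent)
            self.widgets = OrderedDict()
            self.add_label(widgets_dict=self.widgets, frame=self,
                           name='title', text='Demo frame')
            self.add_entry(widgets_dict=self.widgets, frame=self,
                           name='speed', vartype=tk.IntVar())
            self.add_button(widgets_dict=self.widgets, frame=self,
                            name='show', text='Show', command_type='custom',
                            command=lambda: print(self.speed_var.get()))
            for row, widget in enumerate(self.widgets.values()):
                widget.grid(row=row, column=0, sticky='w')

    root = tk.Tk()
    DemoFrame(root).pack()
    root.mainloop()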
| gpl-2.0 |
odoousers2014/odoo | addons/mail/tests/test_mail_features.py | 3 | 60081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.mail_mail import mail_mail
from openerp.addons.mail.mail_thread import mail_thread
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger, email_split, html2plaintext
from openerp.tools.mail import html_sanitize
class test_mail(TestMail):
def test_000_alias_setup(self):
""" Test basic mail.alias setup works, before trying to use them for routing """
cr, uid = self.cr, self.uid
self.user_valentin_id = self.res_users.create(cr, uid,
{'name': 'Valentin Cognito', 'email': 'valentin.cognito@gmail.com', 'login': 'valentin.cognito', 'alias_name': 'valentin.cognito'})
self.user_valentin = self.res_users.browse(cr, uid, self.user_valentin_id)
self.assertEquals(self.user_valentin.alias_name, self.user_valentin.login, "Login should be used as alias")
self.user_pagan_id = self.res_users.create(cr, uid,
{'name': 'Pagan Le Marchant', 'email': 'plmarchant@gmail.com', 'login': 'plmarchant@gmail.com', 'alias_name': 'plmarchant@gmail.com'})
self.user_pagan = self.res_users.browse(cr, uid, self.user_pagan_id)
self.assertEquals(self.user_pagan.alias_name, 'plmarchant', "If login is an email, the alias should keep only the local part")
self.user_barty_id = self.res_users.create(cr, uid,
{'name': 'Bartholomew Ironside', 'email': '[email protected]', 'login': 'b4r+_#_R3wl$$', 'alias_name': 'b4r+_#_R3wl$$'})
self.user_barty = self.res_users.browse(cr, uid, self.user_barty_id)
self.assertEquals(self.user_barty.alias_name, 'b4r+_-_r3wl-', 'Disallowed chars should be replaced by hyphens')
def test_00_followers_function_field(self):
""" Tests designed for the many2many function field 'follower_ids'.
We will test to perform writes using the many2many commands 0, 3, 4,
5 and 6. """
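# For reference, the x2many write commands exercised below (as documented in
# openerp/osv/fields.py):
#   (0, 0, values)  create a new record from values and link it
#   (3, id)         cut the link to id (the record itself is kept)
#   (4, id)         link the existing record with the given id
#   (5, 0)          cut the links to every currently linked record
#   (6, 0, ids)     replace the list of linked records by ids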
cr, uid, user_admin, partner_bert_id, group_pigs = self.cr, self.uid, self.user_admin, self.partner_bert_id, self.group_pigs
# Data: create 'disturbing' values in mail.followers: same res_id, other res_model; same res_model, other res_id
group_dummy_id = self.mail_group.create(cr, uid,
{'name': 'Dummy group'}, {'mail_create_nolog': True})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.thread', 'res_id': self.group_pigs_id, 'partner_id': partner_bert_id})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.group', 'res_id': group_dummy_id, 'partner_id': partner_bert_id})
# Pigs just created: should be only Admin as follower
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Subscribe Bert through a '4' command
group_pigs.write({'message_follower_ids': [(4, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the only Pigs fans')
# Unsubscribe Bert through a '3' command
group_pigs.write({'message_follower_ids': [(3, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Set followers through a '6' command
group_pigs.write({'message_follower_ids': [(6, 0, [partner_bert_id])]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the only Pigs fan')
# Add a follower created on the fly through a '0' command
group_pigs.write({'message_follower_ids': [(0, 0, {'name': 'Patrick Fiori'})]})
partner_patrick_id = self.res_partner.search(cr, uid, [('name', '=', 'Patrick Fiori')])[0]
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, partner_patrick_id]), 'Bert and Patrick should be the only Pigs fans')
# Finally, unlink through a '5' command
group_pigs.write({'message_follower_ids': [(5, 0)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertFalse(follower_ids, 'Pigs group should not have fans anymore')
# Test dummy data has not been altered
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.thread'), ('res_id', '=', self.group_pigs_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the follower of dummy mail.thread data')
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', group_dummy_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the followers of dummy mail.group data')
def test_05_message_followers_and_subtypes(self):
""" Tests designed for the subscriber API as well as message subtypes """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
# Data: message subtypes
self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.group'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_other_def', 'default': True, 'res_model': 'crm.lead'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_def', 'default': True, 'res_model': False})
mt_mg_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_nodef', 'default': False, 'res_model': 'mail.group'})
mt_all_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_nodef', 'default': False, 'res_model': False})
default_group_subtypes = self.mail_message_subtype.search(cr, uid, [('default', '=', True), '|', ('res_model', '=', 'mail.group'), ('res_model', '=', False)])
# ----------------------------------------
# CASE1: test subscriptions with subtypes
# ----------------------------------------
# Do: subscribe Raoul, should have default subtypes
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set(default_group_subtypes),
'message_subscribe: Raoul subscription subtypes are incorrect, should be all default ones')
# Do: subscribe Raoul with specified new subtypes
group_pigs.message_subscribe_users([user_raoul.id], subtype_ids=[mt_mg_nodef])
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: 2 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
])
self.assertEqual(len(fol_ids), 2,
'message_subscribe: subscribing an already-existing follower should not create new entries in mail.followers')
# Test: Raoul follows only specified subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Subscribe Raoul without specified subtypes: should not erase existing subscription subtypes
group_pigs.message_subscribe_users([user_raoul.id, user_raoul.id])
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Unsubscribe Raoul twice through message_unsubscribe_users
group_pigs.message_unsubscribe_users([user_raoul.id, user_raoul.id])
group_pigs.refresh()
# Test: 1 follower (Admin)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(follower_ids, [user_admin.partner_id.id], 'Admin must be the only Pigs fan')
# Test: 1 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id)
])
self.assertEqual(len(fol_ids), 1,
'message_subscribe: group should have only 1 entry in mail.follower for 1 follower')
# Do: subscribe Admin with subtype_ids
group_pigs.message_subscribe_users([uid], [mt_mg_nodef, mt_all_nodef])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id), ('partner_id', '=', user_admin.partner_id.id)])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef, mt_all_nodef]), 'subscription subtypes are incorrect')
# ----------------------------------------
# CASE2: test mail_thread fields
# ----------------------------------------
subtype_data = group_pigs._get_subscription_data(None, None)[group_pigs.id]['message_subtype_data']
self.assertEqual(set(subtype_data.keys()), set(['Discussions', 'mt_mg_def', 'mt_all_def', 'mt_mg_nodef', 'mt_all_nodef']), 'mail.group available subtypes incorrect')
self.assertFalse(subtype_data['Discussions']['followed'], 'Admin should not follow Discussions in pigs')
self.assertTrue(subtype_data['mt_mg_nodef']['followed'], 'Admin should follow mt_mg_nodef in pigs')
self.assertTrue(subtype_data['mt_all_nodef']['followed'], 'Admin should follow mt_all_nodef in pigs')
def test_11_notification_url(self):
""" Tests designed to test the URL added in notification emails. """
cr, uid, group_pigs = self.cr, self.uid, self.group_pigs
# Test URL formatting
base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# Partner data
partner_raoul = self.res_partner.browse(cr, uid, self.partner_raoul_id)
partner_bert_id = self.res_partner.create(cr, uid, {'name': 'bert'})
partner_bert = self.res_partner.browse(cr, uid, partner_bert_id)
# Mail data
mail_mail_id = self.mail_mail.create(cr, uid, {'state': 'exception'})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
# Test: link for nobody -> None
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail)
self.assertEqual(url, None,
'notification email: mails not send to a specific partner should not have any URL')
# Test: link for partner -> None
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_bert)
self.assertEqual(url, None,
'notification email: mails send to a not-user partner should not have any URL')
# Test: link for user -> signin
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
# Test: link for user -> with model and res_id
mail_mail_id = self.mail_mail.create(cr, uid, {'model': 'mail.group', 'res_id': group_pigs.id})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
self.assertIn('model=mail.group', url,
'notification email: link should contain the model when having not notification email on a record')
self.assertIn('res_id=%s' % group_pigs.id, url,
'notification email: link should contain the res_id when having not notification email on a record')
# Test: link for user -> with model and res_id
mail_mail_id = self.mail_mail.create(cr, uid, {'notification': True, 'model': 'mail.group', 'res_id': group_pigs.id})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
self.assertIn('message_id=%s' % mail.mail_message_id.id, url,
'notification email: link based on message should contain the mail_message id')
self.assertNotIn('model=mail.group', url,
'notification email: link based on message should not contain model')
self.assertNotIn('res_id=%s' % group_pigs.id, url,
'notification email: link based on message should not contain res_id')
@mute_logger('openerp.addons.mail.mail_thread', 'openerp.models')
def test_12_inbox_redirection(self):
""" Tests designed to test the inbox redirection of emails notification URLs. """
cr, uid, user_admin, group_pigs = self.cr, self.uid, self.user_admin, self.group_pigs
model, act_id = self.ir_model_data.get_object_reference(cr, uid, 'mail', 'action_mail_inbox_feeds')
# Data: post a message on pigs
msg_id = self.group_pigs.message_post(body='My body', partner_ids=[self.partner_bert_id], type='comment', subtype='mail.mt_comment')
# No specific parameters -> should redirect to Inbox
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
# Raoul has read access to Pigs -> should redirect to form view of Pigs
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'message_id': msg_id}})
self.assertEqual(
action.get('type'), 'ir.actions.act_window',
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
self.assertEqual(
action.get('res_id'), group_pigs.id,
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
self.assertEqual(
action.get('type'), 'ir.actions.act_window',
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
self.assertEqual(
action.get('res_id'), group_pigs.id,
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
# Bert has no read access to Pigs -> should redirect to Inbox
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'message_id': msg_id}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
def test_20_message_post(self):
""" Tests designed for message_post. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 0 - Update existing users-partners
self.res_users.write(cr, uid, [uid], {'email': 'a@a', 'notify_email': 'always'})
self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
# 1 - Bert Tartopoils, with email, should receive emails for comments and emails
p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
# 2 - Carine Poilvache, with email, should receive emails for emails
p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'none'})
# 3 - Dédé Grosbedon, without email, to test email verification; should receive emails for every message
p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
# 4 - Attachments
attach1_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach1', 'datas_fname': 'Attach1',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
attach2_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach2', 'datas_fname': 'Attach2',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
attach3_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach3', 'datas_fname': 'Attach3',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
# 5 - Mail data
_subject = 'Pigs'
_mail_subject = 'Re: %s' % (group_pigs.name)
_body1 = '<p>Pigs rules</p>'
_body2 = '<html>Pigs rocks</html>'
_attachments = [
('List1', 'My first attachment'),
('List2', 'My second attachment')
]
# --------------------------------------------------
# CASE1: post comment + partners + attachments
# --------------------------------------------------
# Data: set alias_domain to see emails with alias
self.registry('ir.config_parameter').set_param(self.cr, self.uid, 'mail.catchall.domain', 'schlouby.fr')
# Data: change Pigs name to test reply_to
self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': '"Pigs" !ù $%-'})
# Do: subscribe Raoul
new_follower_ids = [self.partner_raoul_id]
group_pigs.message_subscribe(new_follower_ids)
# Test: group followers = Raoul + uid
group_fids = [follower.id for follower in group_pigs.message_follower_ids]
test_fids = new_follower_ids + [self.partner_admin_id]
self.assertEqual(set(test_fids), set(group_fids),
'message_subscribe: incorrect followers after subscribe')
# Do: Raoul message_post on Pigs
self._init_mock_build_email()
msg1_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
body=_body1, subject=_subject, partner_ids=[p_b_id, p_c_id],
attachment_ids=[attach1_id, attach2_id], attachments=_attachments,
type='comment', subtype='mt_comment')
msg = self.mail_message.browse(cr, uid, msg1_id)
msg_message_id = msg.message_id
msg_pids = [partner.id for partner in msg.notified_partner_ids]
msg_aids = [attach.id for attach in msg.attachment_ids]
sent_emails = self._build_email_kwargs_list
# Test: mail_message: subject and body not modified
self.assertEqual(_subject, msg.subject, 'message_post: mail.message subject incorrect')
self.assertEqual(_body1, msg.body, 'message_post: mail.message body incorrect')
# Test: mail_message: notified_partner_ids = group followers + partner_ids - author
test_pids = set([self.partner_admin_id, p_b_id, p_c_id])
self.assertEqual(test_pids, set(msg_pids), 'message_post: mail.message notified partners incorrect')
# Test: mail_message: attachments (4, attachment_ids + attachments)
test_aids = set([attach1_id, attach2_id])
msg_attach_names = set([attach.name for attach in msg.attachment_ids])
test_attach_names = set(['Attach1', 'Attach2', 'List1', 'List2'])
self.assertEqual(len(msg_aids), 4,
'message_post: mail.message wrong number of attachments')
self.assertEqual(msg_attach_names, test_attach_names,
'message_post: mail.message attachments incorrectly added')
self.assertTrue(test_aids.issubset(set(msg_aids)),
'message_post: mail.message attachments duplicated')
for attach in msg.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group',
'message_post: mail.message attachments were not linked to the document')
self.assertEqual(attach.res_id, group_pigs.id,
'message_post: mail.message attachments were not linked to the document')
if 'List' in attach.name:
self.assertIn((attach.name, attach.datas.decode('base64')), _attachments,
'message_post: mail.message attachment name / data incorrect')
dl_attach = self.mail_message.download_attachment(cr, user_raoul.id, id_message=msg.id, attachment_id=attach.id)
self.assertIn((dl_attach['filename'], dl_attach['base64'].decode('base64')), _attachments,
'message_post: mail.message download_attachment is incorrect')
# Test: followers: same as before (author was already subscribed)
group_pigs.refresh()
group_fids = [follower.id for follower in group_pigs.message_follower_ids]
test_fids = new_follower_ids + [self.partner_admin_id]
self.assertEqual(set(test_fids), set(group_fids),
'message_post: wrong followers after posting')
# Test: mail_mail: notifications have been deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg1_id)]),
'message_post: mail.mail notifications should have been auto-deleted!')
# Test: notifications emails: to a and b, c is email only, r is author
test_emailto = ['Administrator <a@a>', 'Bert Tartopoils <b@b>']
# test_emailto = ['"Followers of -Pigs-" <a@a>', '"Followers of -Pigs-" <b@b>']
self.assertEqual(len(sent_emails), 2,
'message_post: notification emails wrong number of send emails')
self.assertEqual(set([m['email_to'][0] for m in sent_emails]), set(test_emailto),
'message_post: notification emails wrong recipients (email_to)')
for sent_email in sent_emails:
self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <raoul@schlouby.fr>',
'message_post: notification email wrong email_from: should use alias of sender')
self.assertEqual(len(sent_email['email_to']), 1,
'message_post: notification email sent to more than one email address instead of a precise partner')
self.assertIn(sent_email['email_to'][0], test_emailto,
'message_post: notification email email_to incorrect')
self.assertEqual(sent_email['reply_to'], u'"YourCompany \\"Pigs\\" !ù $%-" <group+pigs@schlouby.fr>',
'message_post: notification email reply_to incorrect')
self.assertEqual(_subject, sent_email['subject'],
'message_post: notification email subject incorrect')
self.assertIn(_body1, sent_email['body'],
'message_post: notification email body incorrect')
self.assertIn('Pigs rules', sent_email['body_alternative'],
'message_post: notification email body alternative should contain the body')
self.assertNotIn('<p>', sent_email['body_alternative'],
'message_post: notification email body alternative still contains html')
self.assertFalse(sent_email['references'],
'message_post: references should be False when sending a message that is not a reply')
# Test: notification linked to this message = group followers = notified_partner_ids
notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg1_id)])
notif_pids = set([notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)])
self.assertEqual(notif_pids, test_pids,
'message_post: mail.message created mail.notification incorrect')
# Data: Pigs name back to normal
self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': 'Pigs'})
# --------------------------------------------------
# CASE2: reply + parent_id + parent notification
# --------------------------------------------------
# Data: remove alias_domain to check emails sent without the alias domain
param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])
self.registry('ir.config_parameter').unlink(cr, uid, param_ids)
# Do: Raoul message_post on Pigs
self._init_mock_build_email()
msg2_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
body=_body2, type='email', subtype='mt_comment',
partner_ids=[p_d_id], parent_id=msg1_id, attachment_ids=[attach3_id],
context={'mail_post_autofollow': True})
msg = self.mail_message.browse(cr, uid, msg2_id)
msg_pids = [partner.id for partner in msg.notified_partner_ids]
msg_aids = [attach.id for attach in msg.attachment_ids]
sent_emails = self._build_email_kwargs_list
# Test: mail_message: subject is False, body, parent_id is msg_id
self.assertEqual(msg.subject, False, 'message_post: mail.message subject incorrect')
self.assertEqual(msg.body, html_sanitize(_body2), 'message_post: mail.message body incorrect')
self.assertEqual(msg.parent_id.id, msg1_id, 'message_post: mail.message parent_id incorrect')
# Test: mail_message: notified_partner_ids = group followers
test_pids = [self.partner_admin_id, p_d_id]
self.assertEqual(set(test_pids), set(msg_pids), 'message_post: mail.message partners incorrect')
# Test: mail_message: notifications linked to this message = group followers = notified_partner_ids
notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg2_id)])
notif_pids = [notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)]
self.assertEqual(set(test_pids), set(notif_pids), 'message_post: mail.message notification partners incorrect')
# Test: mail_mail: notifications deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg2_id)]), 'mail.mail notifications should have been auto-deleted!')
# Test: emails send by server (to a, b, c, d)
test_emailto = [u'Administrator <a@a>', u'Bert Tartopoils <b@b>', u'Carine Poilvache <c@c>', u'D\xe9d\xe9 Grosbedon <d@d>']
# test_emailto = [u'"Followers of Pigs" <a@a>', u'"Followers of Pigs" <b@b>', u'"Followers of Pigs" <c@c>', u'"Followers of Pigs" <d@d>']
# self.assertEqual(len(sent_emails), 3, 'sent_email number of sent emails incorrect')
for sent_email in sent_emails:
self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <r@r>',
'message_post: notification email wrong email_from: should use email of sender when no alias domain set')
self.assertEqual(len(sent_email['email_to']), 1,
'message_post: notification email sent to more than one email address instead of a precise partner')
self.assertIn(sent_email['email_to'][0], test_emailto,
'message_post: notification email email_to incorrect')
self.assertEqual(email_split(sent_email['reply_to']), ['r@r'], # was '"Followers of Pigs" <r@r>', but makes no sense
'message_post: notification email reply_to incorrect: should have raoul email')
self.assertEqual(_mail_subject, sent_email['subject'],
'message_post: notification email subject incorrect')
self.assertIn(html_sanitize(_body2), sent_email['body'],
'message_post: notification email does not contain the body')
self.assertIn('Pigs rocks', sent_email['body_alternative'],
'message_post: notification email body alternative should contain the body')
self.assertNotIn('<p>', sent_email['body_alternative'],
'message_post: notification email body alternative still contains html')
self.assertIn(msg_message_id, sent_email['references'],
'message_post: notification email references lacks parent message message_id')
# Test: attachments + download
for attach in msg.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group',
'message_post: mail.message attachment res_model incorrect')
self.assertEqual(attach.res_id, self.group_pigs_id,
'message_post: mail.message attachment res_id incorrect')
# Test: Dédé has been notified -> should also have been notified of the parent message
msg = self.mail_message.browse(cr, uid, msg1_id)
msg_pids = set([partner.id for partner in msg.notified_partner_ids])
test_pids = set([self.partner_admin_id, p_b_id, p_c_id, p_d_id])
self.assertEqual(test_pids, msg_pids, 'message_post: mail.message parent notification not created')
# Do: reply to last message
msg3_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id, body='Test', parent_id=msg2_id)
msg = self.mail_message.browse(cr, uid, msg3_id)
# Test: check that its parent will be the first message
self.assertEqual(msg.parent_id.id, msg1_id, 'message_post did not flatten the thread structure')
def test_25_message_compose_wizard(self):
""" Tests designed for the mail.compose.message wizard. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
mail_compose = self.registry('mail.compose.message')
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 0 - Update existing users-partners
self.res_users.write(cr, uid, [uid], {'email': 'a@a'})
self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
# 1 - Bert Tartopoils, with email, should receive emails for comments and emails
p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
# 2 - Carine Poilvache, with email, should receive emails for emails
p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'always'})
# 3 - Dédé Grosbedon, without email, to test email verification; should receive emails for every message
p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
# 4 - Create a Bird mail.group, that will be used to test mass mailing
group_bird_id = self.mail_group.create(cr, uid,
{
'name': 'Bird',
'description': 'Bird resistance',
}, context={'mail_create_nolog': True})
group_bird = self.mail_group.browse(cr, uid, group_bird_id)
# 5 - Mail data
_subject = 'Pigs'
_body = 'Pigs <b>rule</b>'
_reply_subject = 'Re: %s' % _subject
_attachments = [
{'name': 'First', 'datas_fname': 'first.txt', 'datas': 'My first attachment'.encode('base64')},
{'name': 'Second', 'datas_fname': 'second.txt', 'datas': 'My second attachment'.encode('base64')}
]
_attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
# 6 - Subscribe Bert to Pigs
group_pigs.message_subscribe([p_b_id])
# --------------------------------------------------
# CASE1: wizard + partners + context keys
# --------------------------------------------------
# Do: Raoul wizard-composes on Pigs with auto-follow for partners, not for author
compose_id = mail_compose.create(cr, user_raoul.id,
{
'subject': _subject,
'body': _body,
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
})
compose = mail_compose.browse(cr, uid, compose_id)
# Test: mail.compose.message: composition_mode, model, res_id
self.assertEqual(compose.composition_mode, 'comment', 'compose wizard: mail.compose.message incorrect composition_mode')
self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
# Do: Post the comment
mail_compose.send_mail(cr, user_raoul.id, [compose_id], context={'mail_post_autofollow': True, 'mail_create_nosubscribe': True})
group_pigs.refresh()
message = group_pigs.message_ids[0]
# Test: mail_mail: notifications have been deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', message.id)]),'message_send: mail.mail message should have been auto-deleted!')
# Test: mail.group: followers (c and d added by auto follow key; raoul not added by nosubscribe key)
pigs_pids = [p.id for p in group_pigs.message_follower_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(pigs_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
# Test: mail.message: subject, body inside p
self.assertEqual(message.subject, _subject, 'compose wizard: mail.message incorrect subject')
self.assertEqual(message.body, '<p>%s</p>' % _body, 'compose wizard: mail.message incorrect body')
# Test: mail.message: notified_partner_ids = admin + bert (followers) + c + d (recipients)
msg_pids = [partner.id for partner in message.notified_partner_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(msg_pids), set(test_pids),
'compose wizard: mail.message notified_partner_ids incorrect')
# --------------------------------------------------
# CASE2: reply + attachments
# --------------------------------------------------
# Do: Reply with attachments
compose_id = mail_compose.create(cr, user_raoul.id,
{
'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])]
}, context={
'default_composition_mode': 'comment',
'default_res_id': self.group_pigs_id,
'default_parent_id': message.id
})
compose = mail_compose.browse(cr, uid, compose_id)
# Test: mail.compose.message: model, res_id, parent_id
self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
self.assertEqual(compose.parent_id.id, message.id, 'compose wizard: mail.compose.message incorrect parent_id')
# Test: mail.compose.message: subject as Re:.., body, parent_id
self.assertEqual(compose.subject, _reply_subject, 'compose wizard: mail.compose.message incorrect subject')
self.assertFalse(compose.body, 'compose wizard: mail.compose.message body should not contain parent message body')
self.assertEqual(compose.parent_id and compose.parent_id.id, message.id, 'compose wizard: mail.compose.message parent_id incorrect')
# Test: mail.compose.message: attachments
for attach in compose.attachment_ids:
self.assertIn((attach.datas_fname, attach.datas.decode('base64')), _attachments_test,
'compose wizard: mail.message attachment name / data incorrect')
# --------------------------------------------------
# CASE3: mass_mail on Pigs and Bird
# --------------------------------------------------
# Do: Compose in mass_mail_mode on pigs and bird
compose_id = mail_compose.create(
cr, user_raoul.id, {
'subject': _subject,
'body': '${object.description}',
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'mass_mail',
'default_model': 'mail.group',
'default_res_id': False,
'active_ids': [self.group_pigs_id, group_bird_id],
})
compose = mail_compose.browse(cr, uid, compose_id)
# Do: Post the comment, get created message for each group
mail_compose.send_mail(cr, user_raoul.id, [compose_id], context={
'default_res_id': -1,
'active_ids': [self.group_pigs_id, group_bird_id]
})
# check mail_mail
mail_mail_ids = self.mail_mail.search(cr, uid, [('subject', '=', _subject)])
for mail_mail in self.mail_mail.browse(cr, uid, mail_mail_ids):
self.assertEqual(set([p.id for p in mail_mail.recipient_ids]), set([p_c_id, p_d_id]),
'compose wizard: mail_mail mass mailing: mail.mail in mass mail incorrect recipients')
# check logged messages
group_pigs.refresh()
group_bird.refresh()
message1 = group_pigs.message_ids[0]
message2 = group_bird.message_ids[0]
# Test: Pigs and Bird did receive their message
test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
mail_ids = self.mail_mail.search(cr, uid, [('mail_message_id', '=', message2.id)])
mail_record_id = self.mail_mail.browse(cr, uid, mail_ids)[0]
self.assertTrue(mail_record_id, 'message_send: mail.mail message should be in the processing mail queue')
#check mass mail state...
test_mail_ids = self.mail_mail.search(cr, uid, [('state', '=', 'exception')])
self.assertNotIn(mail_ids, test_mail_ids, 'compose wizard: Mail sending Failed!!')
self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
# Test: mail.message: subject, body, subtype, notified partners (nobody + specific recipients)
self.assertEqual(message1.subject, _subject,
'compose wizard: message_post: mail.message in mass mail subject incorrect')
self.assertEqual(message1.body, '<p>%s</p>' % group_pigs.description,
'compose wizard: message_post: mail.message in mass mail body incorrect')
# self.assertEqual(set([p.id for p in message1.notified_partner_ids]), set([p_c_id, p_d_id]),
# 'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
self.assertEqual(message2.subject, _subject,
'compose wizard: message_post: mail.message in mass mail subject incorrect')
self.assertEqual(message2.body, '<p>%s</p>' % group_bird.description,
'compose wizard: message_post: mail.message in mass mail body incorrect')
# self.assertEqual(set([p.id for p in message2.notified_partner_ids]), set([p_c_id, p_d_id]),
# 'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
# Test: mail.group followers: author not added as follower in mass mail mode
pigs_pids = [p.id for p in group_pigs.message_follower_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(pigs_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
bird_pids = [p.id for p in group_bird.message_follower_ids]
test_pids = [self.partner_admin_id]
self.assertEqual(set(bird_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
# Do: Compose in mass_mail, coming from list_view, we have an active_domain that should be supported
compose_id = mail_compose.create(cr, user_raoul.id,
{
'subject': _subject,
'body': '${object.description}',
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'mass_mail',
'default_model': 'mail.group',
'default_res_id': False,
'active_ids': [self.group_pigs_id],
'active_domain': [('name', 'in', ['Pigs', 'Bird'])],
})
compose = mail_compose.browse(cr, uid, compose_id)
# Do: Post the comment, get created message for each group
mail_compose.send_mail(
cr, user_raoul.id, [compose_id], context={
'default_res_id': -1,
'active_ids': [self.group_pigs_id, group_bird_id]
})
group_pigs.refresh()
group_bird.refresh()
message1 = group_pigs.message_ids[0]
message2 = group_bird.message_ids[0]
# Test: Pigs and Bird did receive their message
test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
def test_30_needaction(self):
""" Tests for mail.message needaction. """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
na_admin_base = self.mail_message._needaction_count(cr, uid, domain=[])
na_demo_base = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
# Test: number of unread notification = needaction on mail.message
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_admin.partner_id.id),
('is_read', '=', False)
])
na_count = self.mail_message._needaction_count(cr, uid, domain=[])
self.assertEqual(len(notif_ids), na_count, 'unread notifications count does not match needaction count')
# Do: post 2 message on group_pigs as admin, 3 messages as demo user
for dummy in range(2):
group_pigs.message_post(body='My Body', subtype='mt_comment')
raoul_pigs = group_pigs.sudo(user_raoul)
for dummy in range(3):
raoul_pigs.message_post(body='My Demo Body', subtype='mt_comment')
# Test: admin has 3 new notifications (from demo), and 3 new needaction
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_admin.partner_id.id),
('is_read', '=', False)
])
self.assertEqual(len(notif_ids), na_admin_base + 3, 'Admin should have 3 new unread notifications')
na_admin = self.mail_message._needaction_count(cr, uid, domain=[])
na_admin_group = self.mail_message._needaction_count(cr, uid, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
self.assertEqual(na_admin, na_admin_base + 3, 'Admin should have 3 new needaction')
self.assertEqual(na_admin_group, 3, 'Admin should have 3 needaction related to Pigs')
# Test: demo has 0 new notifications (not a follower, not receiving its own messages), and 0 new needaction
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_raoul.partner_id.id),
('is_read', '=', False)
])
self.assertEqual(len(notif_ids), na_demo_base + 0, 'Demo should have 0 new unread notifications')
na_demo = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
na_demo_group = self.mail_message._needaction_count(cr, user_raoul.id, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
self.assertEqual(na_demo, na_demo_base + 0, 'Demo should have 0 new needaction')
self.assertEqual(na_demo_group, 0, 'Demo should have 0 needaction related to Pigs')
def test_40_track_field(self):
""" Testing auto tracking of fields. """
def _strip_string_spaces(body):
return body.replace(' ', '').replace('\n', '')
# Data: subscribe Raoul to Pigs, because he will change the public attribute and may loose access to the record
cr, uid = self.cr, self.uid
self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_raoul_id])
# Data: res.users.group, to test group_public_id automatic logging
group_system_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_system')
group_system_id = group_system_ref and group_system_ref[1] or False
# Data: custom subtypes
mt_private_id = self.mail_message_subtype.create(cr, uid, {'name': 'private', 'description': 'Private public'})
self.ir_model_data.create(cr, uid, {'name': 'mt_private', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_private_id})
mt_name_supername_id = self.mail_message_subtype.create(cr, uid, {'name': 'name_supername', 'description': 'Supername name'})
self.ir_model_data.create(cr, uid, {'name': 'mt_name_supername', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_name_supername_id})
mt_group_public_set_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public_set', 'description': 'Group set'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public_set', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_set_id})
mt_group_public_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public', 'description': 'Group changed'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_id})
# Data: alter mail_group model for testing purposes (test on classic, selection and many2one fields)
cls = type(self.mail_group)
self.assertNotIn('_track', cls.__dict__)
cls._track = {
'public': {
'mail.mt_private': lambda self, cr, uid, obj, ctx=None: obj.public == 'private',
},
'name': {
'mail.mt_name_supername': lambda self, cr, uid, obj, ctx=None: obj.name == 'supername',
},
'group_public_id': {
'mail.mt_group_public_set': lambda self, cr, uid, obj, ctx=None: obj.group_public_id,
'mail.mt_group_public': lambda self, cr, uid, obj, ctx=None: True,
},
}
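# For reference: _track maps a tracked field name to {subtype xml id: condition}.
# When the field changes on a record and condition(obj) is True, mail_thread
# posts a tracking message with that subtype (see mail_thread.message_track).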
visibility = {'public': 'onchange', 'name': 'always', 'group_public_id': 'onchange'}
for key in visibility:
self.assertFalse(hasattr(getattr(cls, key), 'track_visibility'))
getattr(cls, key).track_visibility = visibility[key]
@self.addCleanup
def cleanup():
delattr(cls, '_track')
for key in visibility:
del getattr(cls, key).track_visibility
# Test: change name -> always tracked, not related to a subtype
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 1, 'tracked: a message should have been produced')
# Test: first produced message: no subtype, name change tracked
last_msg = self.group_pigs.message_ids[-1]
self.assertFalse(last_msg.subtype_id, 'tracked: message should not have been linked to a subtype')
self.assertIn(u"Selectedgroupofusers\u2192Everyone", _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
self.assertIn('Pigs', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change name as supername, public as private -> 2 subtypes
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'name': 'supername', 'public': 'private'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 3, 'tracked: two messages should have been produced')
# Test: first produced message: mt_name_supername
last_msg = self.group_pigs.message_ids[-2]
self.assertEqual(last_msg.subtype_id.id, mt_private_id, 'tracked: message should be linked to mt_private subtype')
self.assertIn('Private public', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
# Test: second produced message: mt_name_supername
last_msg = self.group_pigs.message_ids[-3]
self.assertEqual(last_msg.subtype_id.id, mt_name_supername_id, 'tracked: message should be linked to mt_name_supername subtype')
self.assertIn('Supername name', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u"Everyone\u2192Invitedpeopleonly", _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked feature: message body does not hold always tracked field')
# Test: change public as public, group_public_id -> 2 subtypes, name always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public', 'group_public_id': group_system_id})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 5, 'tracked: two messages should have been produced')
# Test: first produced message: mt_group_public_set_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-4]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_set_id, 'tracked: message should be linked to mt_group_public_set_id')
self.assertIn('Group set', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u"Invitedpeopleonly\u2192Everyone", _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: second produced message: mt_group_public_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-5]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u"Invitedpeopleonly\u2192Everyone", _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change group_public_id to False -> 1 subtype, name always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'group_public_id': False})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: one message should have been produced')
# Test: first produced message: mt_group_public_set_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-6]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Administration/Settings\u2192', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change not tracked field, no tracking message
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'description': 'Dummy'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: No message should have been produced')
| agpl-3.0 |
wathen/PhD | MHD/FEniCS/ShiftCurlCurl/CppGradient/Efficient/CurlCurlSecondOrder.py | 1 | 5726 | import petsc4py, sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import os, inspect
from dolfin import *
import numpy
import ExactSol
import MatrixOperations as MO
import CheckPetsc4py as CP
import HiptmairPrecond
import HiptmairSetup
from timeit import default_timer as timer
m = 8
errL2b =numpy.zeros((m-1,1))
errCurlb =numpy.zeros((m-1,1))
l2border = numpy.zeros((m-1,1))
Curlborder =numpy.zeros((m-1,1))
ItsSave = numpy.zeros((m-1,1))
DimSave = numpy.zeros((m-1,1))
TimeSave = numpy.zeros((m-1,1))
NN = numpy.zeros((m-1,1))
Curlgrad = numpy.zeros((m-1,1))
Massgrad = numpy.zeros((m-1,1))
Laplgrad = numpy.zeros((m-1,1))
dim =3
for xx in xrange(1,m):
NN[xx-1] = xx+0
nn = int(2**(NN[xx-1][0]))
# nn = 1
omega = 1
if dim == 2:
        mesh = UnitSquareMesh(int(nn),int(nn))
# mesh = RectangleMesh(0.0, 0.0, 1.0, 1.5, int(nn), int(nn), 'left')
u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M2D(2,Show="yes", Mass = omega)
else:
mesh = UnitCubeMesh(int(nn),int(nn),int(nn))
u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M3D(1,Show="yes", Mass = omega)
order = 2
parameters['reorder_dofs_serial'] = False
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
parameters['reorder_dofs_serial'] = False
DimSave[xx-1] = Magnetic.dim()
print Magnetic.dim()
parameters['linear_algebra_backend'] = 'uBLAS'
# tic()
# C, P = HiptmairSetup.HiptmairMatrixSetupBoundary(mesh, Magnetic.dim(), Lagrange.dim(),dim)
# G, P = HiptmairSetup.HiptmairBCsetupBoundary(C,P,mesh)
# endTimeB = toc()
# print endTimeB
print "\n"
# tic()
# C, P = HiptmairSetup.HiptmairMatrixSetup(mesh, Magnetic.dim(), Lagrange.dim())
# G, P = HiptmairSetup.HiptmairBCsetup(C,P, mesh, [Magnetic,Lagrange])
# endTime = toc()
# print endTime
# ataaa
def boundary(x, on_boundary):
return on_boundary
bc = DirichletBC(Magnetic,u0, boundary)
bcu = DirichletBC(Lagrange, Expression(("0.0")), boundary)
(v) = TestFunction(Magnetic)
(u) = TrialFunction(Magnetic)
(p) = TrialFunction(Lagrange)
(q) = TestFunction(Lagrange)
a = inner(curl(u),curl(v))*dx + inner(u,v)*dx
L1 = inner(v, CurlMass)*dx
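    # Note (added commentary, not in the original script): the two lines above define the
    # weak form of the shifted curl-curl model problem: find u in H(curl), with the
    # tangential trace of u0 imposed on the boundary, such that
    #     (curl u, curl v) + (u, v) = (CurlMass, v)   for all test functions v,
    # where CurlMass is the manufactured right-hand side produced by ExactSol above.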
tic()
Acurl,b = assemble_system(a,L1,bc, form_compiler_parameters={"eliminate_zeros": True})
print "System assembled, time: ", toc()
tic()
A,b = CP.Assemble(Acurl,b)
x = b.duplicate()
print "PETSc system assembled, time: ", toc()
MatVec = 'yes'
if MatVec == "yes":
tic()
VecLagrange, kspMass, VectorLaplacian, ScalarLaplacian, B, BC = HiptmairSetup.HiptmairAnyOrder(Magnetic,Lagrange)
# del b1, b2
print "Hiptmair Laplacians BC assembled, time: ", toc()
ksp = PETSc.KSP().create()
ksp.setTolerances(1e-6)
ksp.setType('cg')
ksp.setOperators(A,A)
pc = ksp.getPC()
reshist = {}
def monitor(ksp, its, rnorm):
reshist[its] = rnorm
print its, ' ', rnorm
ksp.setMonitor(monitor)
pc.setType(PETSc.PC.Type.PYTHON)
kspVector, kspScalar, diag = HiptmairSetup.HiptmairKSPsetup(VectorLaplacian, ScalarLaplacian,A)
del A, VectorLaplacian, ScalarLaplacian
pc.setPythonContext(HiptmairPrecond.HiptmairApply([Magnetic,Lagrange,VecLagrange] ,B, kspMass, kspVector, kspScalar, diag, BC))
scale = b.norm()
b = b/scale
tic()
ksp.solve(b, x)
TimeSave[xx-1] = toc()
x = x*scale
print ksp.its
print TimeSave[xx-1]
ItsSave[xx-1] = ksp.its
print " \n\n\n\n"
else:
# tic()
C, P = HiptmairSetup.HiptmairMatrixSetupBoundary(mesh, Magnetic.dim(), Lagrange.dim(),dim)
G, P = HiptmairSetup.HiptmairBCsetupBoundary(C,P,mesh)
# endTimeB = toc()
# print endTimeB
print "\n"
tic()
ScalarLaplacian, b1 = assemble_system(inner(grad(p),grad(q))*dx,inner(p0,q)*dx,bcu)
VectorLaplacian, b2 = assemble_system(inner(grad(p),grad(q))*dx+inner(p,q)*dx,inner(p0,q)*dx,bcu)
del b1, b2
print "Hiptmair Laplacians BC assembled, time: ", toc()
tic()
VectorLaplacian = PETSc.Mat().createAIJ(size=VectorLaplacian.sparray().shape,csr=(VectorLaplacian.sparray().indptr, VectorLaplacian.sparray().indices, VectorLaplacian.sparray().data))
ScalarLaplacian = PETSc.Mat().createAIJ(size=ScalarLaplacian.sparray().shape,csr=(ScalarLaplacian.sparray().indptr, ScalarLaplacian.sparray().indices, ScalarLaplacian.sparray().data))
print "PETSc Laplacians assembled, time: ", toc()
ksp = PETSc.KSP().create()
ksp.setTolerances(1e-6)
ksp.setType('cg')
ksp.setOperators(A,A)
pc = ksp.getPC()
pc.setType(PETSc.PC.Type.PYTHON)
kspVector, kspScalar, diag = HiptmairSetup.HiptmairKSPsetup(VectorLaplacian, ScalarLaplacian,A)
del A, VectorLaplacian, ScalarLaplacian
pc.setPythonContext(HiptmairPrecond.GSvector(G, P, kspVector, kspScalar, diag))
scale = b.norm()
b = b/scale
tic()
ksp.solve(b, x)
TimeSave[xx-1] = toc()
x = x*scale
print ksp.its
print TimeSave[xx-1]
ItsSave[xx-1] = ksp.its
print " \n\n\n\n"
import pandas as pd
print "\n\n\n"
ItsTitlesB = ["l","B DoF","Time","Iterations"]
ItsValuesB = numpy.concatenate((NN,DimSave,TimeSave,ItsSave),axis=1)
ItsTableB= pd.DataFrame(ItsValuesB, columns = ItsTitlesB)
pd.set_option('precision',5)
print ItsTableB.to_latex()
if m !=2:
print numpy.abs((TimeSave[1:]/TimeSave[:-1]))/(2*dim)
| mit |
steedos/odoo7 | openerp/addons/document/__init__.py | 434 | 1128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import content_index
import std_index
import document
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/transformer/implementations/implementation_closed/tests/test_convtbc.py | 6 | 1787 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import unittest
from fairseq.modules import ConvTBC
import torch.nn as nn
class TestConvTBC(unittest.TestCase):
def test_convtbc(self):
# ksz, in_channels, out_channels
conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
# out_channels, in_channels, ksz
conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
conv_tbc.bias.data.copy_(conv1d.bias.data)
input_tbc = torch.randn(7, 2, 4, requires_grad=True)
input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
input1d.requires_grad = True
output_tbc = conv_tbc(input_tbc)
output1d = conv1d(input1d)
self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)
grad_tbc = torch.randn(output_tbc.size())
grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
output_tbc.backward(grad_tbc)
output1d.backward(grad1d)
self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == '__main__':
unittest.main()
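# Note (added commentary, not part of the original test): ConvTBC expects activations laid
# out as (time, batch, channels), while nn.Conv1d expects (batch, channels, time). For a
# tensor x_tbc of shape (T, B, C) the conversion used throughout the test is:
#     x_bct = x_tbc.transpose(0, 1).transpose(1, 2)       # (T, B, C) -> (B, C, T)
#     x_tbc_back = x_bct.transpose(1, 2).transpose(0, 1)  # (B, C, T) -> (T, B, C)
# and the ConvTBC weight of shape (kernel, in, out) maps to Conv1d's (out, in, kernel)
# via weight.transpose(0, 2), which is exactly what test_convtbc checks.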
| apache-2.0 |
heran7/edx-platform | common/lib/xmodule/xmodule/static_content.py | 14 | 6505 | # /usr/bin/env python
"""
This module has utility functions for gathering up the static content
that is defined by XModules and XModuleDescriptors (javascript and css)
"""
import logging
import hashlib
import os
import errno
import sys
from collections import defaultdict
from docopt import docopt
from path import path
from xmodule.x_module import XModuleDescriptor
LOG = logging.getLogger(__name__)
def write_module_styles(output_root):
"""Write all registered XModule css, sass, and scss files to output root."""
return _write_styles('.xmodule_display', output_root, _list_modules())
def write_module_js(output_root):
"""Write all registered XModule js and coffee files to output root."""
return _write_js(output_root, _list_modules())
def write_descriptor_styles(output_root):
"""Write all registered XModuleDescriptor css, sass, and scss files to output root."""
return _write_styles('.xmodule_edit', output_root, _list_descriptors())
def write_descriptor_js(output_root):
"""Write all registered XModuleDescriptor js and coffee files to output root."""
return _write_js(output_root, _list_descriptors())
def _list_descriptors():
"""Return a list of all registered XModuleDescriptor classes."""
return [
desc for desc in [
desc for (_, desc) in XModuleDescriptor.load_classes()
]
]
def _list_modules():
"""Return a list of all registered XModule classes."""
return [
desc.module_class
for desc
in _list_descriptors()
]
def _ensure_dir(directory):
"""Ensure that `directory` exists."""
try:
os.makedirs(directory)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def _write_styles(selector, output_root, classes):
"""
Write the css fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
css_fragments = defaultdict(set)
for class_ in classes:
class_css = class_.get_css()
for filetype in ('sass', 'scss', 'css'):
for idx, fragment in enumerate(class_css.get(filetype, [])):
css_fragments[idx, filetype, fragment].add(class_.__name__)
css_imports = defaultdict(set)
for (idx, filetype, fragment), classes in sorted(css_fragments.items()):
fragment_name = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
# Prepend _ so that sass just includes the files into a single file
filename = '_' + fragment_name
contents[filename] = fragment
for class_ in classes:
css_imports[class_].add(fragment_name)
module_styles_lines = []
module_styles_lines.append("@import 'bourbon/bourbon';")
module_styles_lines.append("@import 'bourbon/addons/button';")
for class_, fragment_names in css_imports.items():
module_styles_lines.append("""{selector}.xmodule_{class_} {{""".format(
class_=class_, selector=selector
))
module_styles_lines.extend(' @import "{0}";'.format(name) for name in fragment_names)
module_styles_lines.append('}')
contents['_module-styles.scss'] = '\n'.join(module_styles_lines)
_write_files(output_root, contents)
def _write_js(output_root, classes):
"""
Write the javascript fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
js_fragments = set()
for class_ in classes:
module_js = class_.get_javascript()
for filetype in ('coffee', 'js'):
for idx, fragment in enumerate(module_js.get(filetype, [])):
js_fragments.add((idx, filetype, fragment))
for idx, filetype, fragment in sorted(js_fragments):
filename = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
contents[filename] = fragment
_write_files(output_root, contents, {'.coffee': '.js'})
return [output_root / filename for filename in contents.keys()]
def _write_files(output_root, contents, generated_suffix_map=None):
"""
Write file contents to output root.
    Any files not listed in contents that exist in output_root will be deleted,
    unless they match one of the suffix patterns in `generated_suffix_map`.
output_root (path): The root directory to write the file contents in
contents (dict): A map from filenames to file contents to be written to the output_root
generated_suffix_map (dict): Optional. Maps file suffix to generated file suffix.
For any file in contents, if the suffix matches a key in `generated_suffix_map`,
then the same filename with the suffix replaced by the value from `generated_suffix_map`
will be ignored
"""
_ensure_dir(output_root)
to_delete = set(file.basename() for file in output_root.files()) - set(contents.keys())
if generated_suffix_map:
for output_file in contents.keys():
for suffix, generated_suffix in generated_suffix_map.items():
if output_file.endswith(suffix):
to_delete.discard(output_file.replace(suffix, generated_suffix))
for extra_file in to_delete:
(output_root / extra_file).remove_p()
for filename, file_content in contents.iteritems():
output_file = output_root / filename
not_file = not output_file.isfile()
# not_file is included to short-circuit this check, because
# read_md5 depends on the file already existing
write_file = not_file or output_file.read_md5() != hashlib.md5(file_content).digest() # pylint: disable=E1121
if write_file:
LOG.debug("Writing %s", output_file)
output_file.write_bytes(file_content)
else:
LOG.debug("%s unchanged, skipping", output_file)
def main():
"""
Generate
Usage: static_content.py <output_root>
"""
args = docopt(main.__doc__)
root = path(args['<output_root>'])
write_descriptor_js(root / 'descriptors/js')
write_descriptor_styles(root / 'descriptors/css')
write_module_js(root / 'modules/js')
write_module_styles(root / 'modules/css')
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 |
ivandevp/django | tests/utils_tests/test_datetime_safe.py | 207 | 2371 | import unittest
from datetime import (
date as original_date, datetime as original_datetime,
time as original_time,
)
from django.utils.datetime_safe import date, datetime, time
class DatetimeTests(unittest.TestCase):
def setUp(self):
self.just_safe = (1900, 1, 1)
self.just_unsafe = (1899, 12, 31, 23, 59, 59)
self.just_time = (11, 30, 59)
self.really_old = (20, 1, 1)
self.more_recent = (2006, 1, 1)
def test_compare_datetimes(self):
self.assertEqual(original_datetime(*self.more_recent), datetime(*self.more_recent))
self.assertEqual(original_datetime(*self.really_old), datetime(*self.really_old))
self.assertEqual(original_date(*self.more_recent), date(*self.more_recent))
self.assertEqual(original_date(*self.really_old), date(*self.really_old))
self.assertEqual(original_date(*self.just_safe).strftime('%Y-%m-%d'), date(*self.just_safe).strftime('%Y-%m-%d'))
self.assertEqual(original_datetime(*self.just_safe).strftime('%Y-%m-%d'), datetime(*self.just_safe).strftime('%Y-%m-%d'))
self.assertEqual(original_time(*self.just_time).strftime('%H:%M:%S'), time(*self.just_time).strftime('%H:%M:%S'))
def test_safe_strftime(self):
self.assertEqual(date(*self.just_unsafe[:3]).strftime('%Y-%m-%d (weekday %w)'), '1899-12-31 (weekday 0)')
self.assertEqual(date(*self.just_safe).strftime('%Y-%m-%d (weekday %w)'), '1900-01-01 (weekday 1)')
self.assertEqual(datetime(*self.just_unsafe).strftime('%Y-%m-%d %H:%M:%S (weekday %w)'), '1899-12-31 23:59:59 (weekday 0)')
self.assertEqual(datetime(*self.just_safe).strftime('%Y-%m-%d %H:%M:%S (weekday %w)'), '1900-01-01 00:00:00 (weekday 1)')
self.assertEqual(time(*self.just_time).strftime('%H:%M:%S AM'), '11:30:59 AM')
# %y will error before this date
self.assertEqual(date(*self.just_safe).strftime('%y'), '00')
self.assertEqual(datetime(*self.just_safe).strftime('%y'), '00')
self.assertEqual(date(1850, 8, 2).strftime("%Y/%m/%d was a %A"), '1850/08/02 was a Friday')
def test_zero_padding(self):
"""
Regression for #12524
Check that pre-1000AD dates are padded with zeros if necessary
"""
self.assertEqual(date(1, 1, 1).strftime("%Y/%m/%d was a %A"), '0001/01/01 was a Monday')
| bsd-3-clause |
CarlosRA97/explorer-interface | GPIO/ventanas.py | 1 | 1952 | #!/usr/bin/env python2.7
from Tkinter import *
import tkMessageBox
# from RPi.GPIO import *
# # from wiringpi2 import *
from time import sleep
pinesGPIO = [2,3,4,17,27,22,10,9,11,14,15,18,23,24,25,8,7]
pinesGPIO
# setmode(BCM)
# setwarnings(False)
def stp(pin):
setup(pin, OUT)
def on(pin):
output(pin,1)
def off(pin):
output(pin,0)
def repeat(pin,times):
counter = 0
while times >= counter:
output(pin,1)
sleep(.1)
output(pin,0)
sleep(.1)
counter += 1
def OnButtonClick(button):
    result = tkMessageBox.askyesno("GPIO PIN", "Do you want to turn pin %s on?" % button)
if result:
stp(button)
on(button)
else:
stp(button)
repeat(button,4)
off(button)
# Create the main window
raiz = Tk()
raiz.title('GPIO Pin')
raiz.geometry('180x550')
# Main frame inside the window
ventana = Frame(raiz)
ventana.grid(column=0, row=0, padx=(10,50), pady=(10,10))
ventana.columnconfigure(0,weight=1)
ventana.rowconfigure(0,weight=1)
# Text labels
tag = Label(ventana, text="Pines")
tag.grid(column=2, row=1, pady=(5,5))
#------------Buttons-----------#
def botonera():
rowGrid = 2
colGrid = 1
    # First column
for pin in pinesGPIO[:1+len(pinesGPIO)/2]:
_ = Button(ventana, text=str(pin))
_.grid(column=colGrid, row=rowGrid, padx=(5,5), pady=(5,5))
        _.config(command=lambda pin=pin: OnButtonClick(pin))  # bind pin now to avoid the late-binding closure bug
rowGrid += 1
    # Second column
colGrid = 3
rowGrid = 2
for pin in pinesGPIO[1+len(pinesGPIO)/2:]:
_ = Button(ventana, text=str(pin))
_.grid(column=colGrid, row=rowGrid, padx=(5,5), pady=(5,5))
        _.config(command=lambda pin=pin: OnButtonClick(pin))  # bind pin now to avoid the late-binding closure bug
rowGrid += 1
#------------------------------#
# Text input field
# entrada_txt = Entry(ventana, width=20, textvariable="")
# entrada_txt.grid(column=2, row=2)
# Start the event loop and call all the visual functions
botonera()
raiz.mainloop()
| gpl-2.0 |
onitake/ansible | lib/ansible/plugins/cliconf/__init__.py | 7 | 20946 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import abstractmethod
from functools import wraps
from ansible.plugins import AnsiblePlugin
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
from ansible.utils.display import Display
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
display = Display()
def enable_mode(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
prompt = self._connection.get_prompt()
if not to_text(prompt, errors='surrogate_or_strict').strip().endswith('#'):
raise AnsibleError('operation requires privilege escalation')
return func(self, *args, **kwargs)
return wrapped
class CliconfBase(AnsiblePlugin):
"""
A base class for implementing cli connections
.. note:: String inputs to :meth:`send_command` will be cast to byte strings
within this method and as such are not required to be made byte strings
beforehand. Please avoid using literal byte strings (``b'string'``) in
:class:`CliConfBase` plugins as this can lead to unexpected errors when
running on Python 3
List of supported rpc's:
:get_config: Retrieves the specified configuration from the device
:edit_config: Loads the specified commands into the remote device
:get: Execute specified command on remote device
:get_capabilities: Retrieves device information and supported rpc methods
:commit: Load configuration from candidate to running
:discard_changes: Discard changes to candidate datastore
Note: List of supported rpc's for remote device can be extracted from
output of get_capabilities()
:returns: Returns output received from remote device as byte string
Usage:
from ansible.module_utils.connection import Connection
conn = Connection()
conn.get('show lldp neighbors detail'')
conn.get_config('running')
conn.edit_config(['hostname test', 'netconf ssh'])
"""
__rpc__ = ['get_config', 'edit_config', 'get_capabilities', 'get', 'enable_response_logging', 'disable_response_logging']
def __init__(self, connection):
super(CliconfBase, self).__init__()
self._connection = connection
self.history = list()
self.response_logging = False
def _alarm_handler(self, signum, frame):
"""Alarm handler raised in case of command timeout """
display.display('closing shell due to command timeout (%s seconds).' % self._connection._play_context.timeout, log_only=True)
self.close()
def send_command(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, prompt_retry_check=False, check_all=False):
"""Executes a command over the device connection
This method will execute a command over the device connection and
return the results to the caller. This method will also perform
logging of any commands based on the `nolog` argument.
:param command: The command to send over the connection to the device
:param prompt: A single regex pattern or a sequence of patterns to evaluate the expected prompt from the command
:param answer: The answer to respond with if the prompt is matched.
:param sendonly: Bool value that will send the command but not wait for a result.
:param newline: Bool value that will append the newline character to the command
:param prompt_retry_check: Bool value for trying to detect more prompts
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:returns: The output from the device after executing the command
"""
kwargs = {
'command': to_bytes(command),
'sendonly': sendonly,
'newline': newline,
'prompt_retry_check': prompt_retry_check,
'check_all': check_all
}
if prompt is not None:
if isinstance(prompt, list):
kwargs['prompt'] = [to_bytes(p) for p in prompt]
else:
kwargs['prompt'] = to_bytes(prompt)
if answer is not None:
if isinstance(answer, list):
kwargs['answer'] = [to_bytes(p) for p in answer]
else:
kwargs['answer'] = to_bytes(answer)
resp = self._connection.send(**kwargs)
if not self.response_logging:
self.history.append(('*****', '*****'))
else:
self.history.append((kwargs['command'], resp))
return resp
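    # Illustrative call pattern (added commentary; the command and prompt strings below are
    # hypothetical and device specific):
    #   conn.send_command('copy running-config startup-config',
    #                     prompt=['Destination filename'], answer=['startup-config'])
    # A bare conn.send_command('show version') is the common case with no prompt handling.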
def get_base_rpc(self):
"""Returns list of base rpc method supported by remote device"""
return self.__rpc__
def get_history(self):
""" Returns the history file for all commands
This will return a log of all the commands that have been sent to
the device and all of the output received. By default, all commands
and output will be redacted unless explicitly configured otherwise.
:return: An ordered list of command, output pairs
"""
return self.history
def reset_history(self):
""" Resets the history of run commands
:return: None
"""
self.history = list()
def enable_response_logging(self):
"""Enable logging command response"""
self.response_logging = True
def disable_response_logging(self):
"""Disable logging command response"""
self.response_logging = False
@abstractmethod
def get_config(self, source='running', flags=None, format=None):
"""Retrieves the specified configuration from the device
This method will retrieve the configuration specified by source and
return it to the caller as a string. Subsequent calls to this method
will retrieve a new configuration from the device
:param source: The configuration source to return from the device.
This argument accepts either `running` or `startup` as valid values.
:param flags: For devices that support configuration filtering, this
keyword argument is used to filter the returned configuration.
            The use of this keyword argument is device dependent and will be
silently ignored on devices that do not support it.
:param format: For devices that support fetching different configuration
format, this keyword argument is used to specify the format in which
configuration is to be retrieved.
:return: The device configuration as specified by the source argument.
"""
pass
@abstractmethod
def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
"""Loads the candidate configuration into the network device
This method will load the specified candidate config into the device
and merge with the current configuration unless replace is set to
        True. If the device does not support config replace, an error
is returned.
:param candidate: The configuration to load into the device and merge
with the current running configuration
:param commit: Boolean value that indicates if the device candidate
configuration should be pushed in the running configuration or discarded.
:param replace: If the value is True/False it indicates if running configuration should be completely
replace by candidate configuration. If can also take configuration file path as value,
the file in this case should be present on the remote host in the mentioned path as a
prerequisite.
:param comment: Commit comment provided it is supported by remote host
:return: Returns a json string with contains configuration applied on remote host, the returned
response on executing configuration commands and platform relevant data.
{
"diff": "",
"response": [],
"request": []
}
"""
pass
@abstractmethod
def get(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, output=None, check_all=False):
"""Execute specified command on remote device
This method will retrieve the specified data and
return it to the caller as a string.
:param command: command in string format to be executed on remote device
:param prompt: the expected prompt generated by executing command, this can
be a string or a list of strings
:param answer: the string to respond to the prompt with
:param sendonly: bool to disable waiting for response, default is false
:param newline: bool to indicate if newline should be added at end of answer or not
:param output: For devices that support fetching command output in different
format, this keyword argument is used to specify the output in which
response is to be retrieved.
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:return: The output from the device after executing the command
"""
pass
@abstractmethod
def get_capabilities(self):
"""Returns the basic capabilities of the network device
This method will provide some basic facts about the device and
what capabilities it has to modify the configuration. The minimum
return from this method takes the following format.
eg:
{
'rpc': [list of supported rpcs],
'network_api': <str>, # the name of the transport
'device_info': {
'network_os': <str>,
'network_os_version': <str>,
'network_os_model': <str>,
'network_os_hostname': <str>,
'network_os_image': <str>,
'network_os_platform': <str>,
},
'device_operations': {
'supports_diff_replace': <bool>, # identify if config should be merged or replaced is supported
'supports_commit': <bool>, # identify if commit is supported by device or not
'supports_rollback': <bool>, # identify if rollback is supported or not
'supports_defaults': <bool>, # identify if fetching running config with default is supported
                'supports_commit_comment': <bool>, # identify if adding comment to commit is supported or not
                'supports_onbox_diff': <bool>, # identify if on-box diff capability is supported or not
                'supports_generate_diff': <bool>, # identify if diff capability is supported within plugin
                'supports_multiline_delimiter': <bool>, # identify if multiline delimiter is supported within config
                'supports_diff_match': <bool>, # identify if match is supported
                'supports_diff_ignore_lines': <bool>, # identify if ignore line in diff is supported
'supports_config_replace': <bool>, # identify if running config replace with candidate config is supported
'supports_admin': <bool>, # identify if admin configure mode is supported or not
'supports_commit_label': <bool>, # identify if commit label is supported or not
}
'format': [list of supported configuration format],
'diff_match': [list of supported match values],
'diff_replace': [list of supported replace values],
'output': [list of supported command output format]
}
:return: capability as json string
"""
pass
def commit(self, comment=None):
"""Commit configuration changes
This method will perform the commit operation on a previously loaded
candidate configuration that was loaded using `edit_config()`. If
there is a candidate configuration, it will be committed to the
active configuration. If there is not a candidate configuration, this
method should just silently return.
:return: None
"""
return self._connection.method_not_found("commit is not supported by network_os %s" % self._play_context.network_os)
def discard_changes(self):
"""Discard candidate configuration
This method will discard the current candidate configuration if one
is present. If there is no candidate configuration currently loaded,
then this method should just silently return
:returns: None
"""
return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._play_context.network_os)
def rollback(self, rollback_id, commit=True):
"""
:param rollback_id: The commit id to which configuration should be rollbacked
:param commit: Flag to indicate if changes should be committed or not
:return: Returns diff between before and after change.
"""
pass
def copy_file(self, source=None, destination=None, proto='scp', timeout=30):
"""Copies file over scp/sftp to remote device
:param source: Source file path
:param destination: Destination file path on remote device
:param proto: Protocol to be used for file transfer,
supported protocol: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
ssh = self._connection.paramiko_conn._connect_uncached()
if proto == 'scp':
if not HAS_SCP:
raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
out = scp.put(source, destination)
elif proto == 'sftp':
with ssh.open_sftp() as sftp:
sftp.put(source, destination)
def get_file(self, source=None, destination=None, proto='scp', timeout=30):
"""Fetch file over scp/sftp from remote device
:param source: Source file path
:param destination: Destination file path
:param proto: Protocol to be used for file transfer,
supported protocol: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
"""Fetch file over scp/sftp from remote device"""
ssh = self._connection.paramiko_conn._connect_uncached()
if proto == 'scp':
if not HAS_SCP:
raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
scp.get(source, destination)
elif proto == 'sftp':
with ssh.open_sftp() as sftp:
sftp.get(source, destination)
def get_diff(self, candidate=None, running=None, diff_match=None, diff_ignore_lines=None, path=None, diff_replace=None):
"""
Generate diff between candidate and running configuration. If the
remote host supports onbox diff capabilities ie. supports_onbox_diff in that case
candidate and running configurations are not required to be passed as argument.
In case if onbox diff capability is not supported candidate argument is mandatory
and running argument is optional.
:param candidate: The configuration which is expected to be present on remote host.
:param running: The base configuration which is used to generate diff.
:param diff_match: Instructs how to match the candidate configuration with current device configuration
Valid values are 'line', 'strict', 'exact', 'none'.
'line' - commands are matched line by line
'strict' - command lines are matched with respect to position
'exact' - command lines must be an equal match
'none' - will not compare the candidate configuration with the running configuration
:param diff_ignore_lines: Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
:param path: The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
:param diff_replace: Instructs on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the replace argument is
set to I(block) then the entire command block is pushed to the device in
configuration mode if any line is not correct.
:return: Configuration and/or banner diff in json format.
{
'config_diff': ''
}
"""
pass
def run_commands(self, commands=None, check_rc=True):
"""
Execute a list of commands on remote host and return the list of response
:param commands: The list of command that needs to be executed on remote host.
The individual command in list can either be a command string or command dict.
If the command is dict the valid keys are
{
'command': <command to be executed>
'prompt': <expected prompt on executing the command>,
'answer': <answer for the prompt>,
'output': <the format in which command output should be rendered eg: 'json', 'text'>,
'sendonly': <Boolean flag to indicate if it command execution response should be ignored or not>
}
:param check_rc: Boolean flag to check if returned response should be checked for error or not.
If check_rc is False the error output is appended in return response list, else if the
value is True an exception is raised.
:return: List of returned response
"""
pass
def check_edit_config_capability(self, operations, candidate=None, commit=True, replace=None, comment=None):
if not candidate and not replace:
raise ValueError("must provide a candidate or replace to load configuration")
if commit not in (True, False):
raise ValueError("'commit' must be a bool, got %s" % commit)
if replace and not operations['supports_replace']:
raise ValueError("configuration replace is not supported")
if comment and not operations.get('supports_commit_comment', False):
raise ValueError("commit comment is not supported")
if replace and not operations.get('supports_replace', False):
raise ValueError("configuration replace is not supported")
| gpl-3.0 |
mzdaniel/oh-mainline | mysite/profile/migrations/0034_create_data_import_attempt_model.py | 17 | 19454 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
import django.contrib.contenttypes
from django.contrib.contenttypes import *
class Migration:
def forwards(self, orm):
# Adding model 'DataImportAttempt'
db.create_table('profile_dataimportattempt', (
('source', orm['profile.dataimportattempt:source']),
('completed', orm['profile.dataimportattempt:completed']),
('id', orm['profile.dataimportattempt:id']),
))
db.send_create_signal('profile', ['DataImportAttempt'])
# Adding field 'ProjectExp.should_show_this'
db.add_column('profile_projectexp', 'should_show_this', orm['profile.projectexp:should_show_this'])
# Adding field 'ProjectExp.data_import_attempt'
db.add_column('profile_projectexp', 'data_import_attempt', orm['profile.projectexp:data_import_attempt'])
# Changing field 'Person.ohloh_grab_completed'
# (to signature: django.db.models.fields.BooleanField(default=False, blank=True))
db.alter_column('profile_person', 'ohloh_grab_completed', orm['profile.person:ohloh_grab_completed'])
# Changing field 'Person.user'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['auth.User'], unique=True))
db.alter_column('profile_person', 'user_id', orm['profile.person:user'])
# Changing field 'Person.gotten_name_from_ohloh'
# (to signature: django.db.models.fields.BooleanField(default=False, blank=True))
db.alter_column('profile_person', 'gotten_name_from_ohloh', orm['profile.person:gotten_name_from_ohloh'])
# Changing field 'Person.poll_on_next_web_view'
# (to signature: django.db.models.fields.BooleanField(default=True, blank=True))
db.alter_column('profile_person', 'poll_on_next_web_view', orm['profile.person:poll_on_next_web_view'])
# Changing field 'Link_Person_Tag.person'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.Person']))
db.alter_column('profile_link_person_tag', 'person_id', orm['profile.link_person_tag:person'])
# Changing field 'Link_Person_Tag.tag'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.Tag']))
db.alter_column('profile_link_person_tag', 'tag_id', orm['profile.link_person_tag:tag'])
# Changing field 'Tag.tag_type'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.TagType']))
db.alter_column('profile_tag', 'tag_type_id', orm['profile.tag:tag_type'])
# Changing field 'Link_ProjectExp_Tag.favorite'
# (to signature: django.db.models.fields.BooleanField(default=False, blank=True))
db.alter_column('profile_link_projectexp_tag', 'favorite', orm['profile.link_projectexp_tag:favorite'])
# Changing field 'Link_ProjectExp_Tag.project_exp'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.ProjectExp']))
db.alter_column('profile_link_projectexp_tag', 'project_exp_id', orm['profile.link_projectexp_tag:project_exp'])
# Changing field 'Link_ProjectExp_Tag.tag'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.Tag']))
db.alter_column('profile_link_projectexp_tag', 'tag_id', orm['profile.link_projectexp_tag:tag'])
# Changing field 'Link_Project_Tag.project'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['search.Project']))
db.alter_column('profile_link_project_tag', 'project_id', orm['profile.link_project_tag:project'])
# Changing field 'Link_Project_Tag.tag'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.Tag']))
db.alter_column('profile_link_project_tag', 'tag_id', orm['profile.link_project_tag:tag'])
# Changing field 'Link_SF_Proj_Dude_FM.project'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.SourceForgeProject']))
db.alter_column('profile_link_sf_proj_dude_fm', 'project_id', orm['profile.link_sf_proj_dude_fm:project'])
# Changing field 'Link_SF_Proj_Dude_FM.person'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.SourceForgePerson']))
db.alter_column('profile_link_sf_proj_dude_fm', 'person_id', orm['profile.link_sf_proj_dude_fm:person'])
# Changing field 'Link_SF_Proj_Dude_FM.is_admin'
# (to signature: django.db.models.fields.BooleanField(default=False, blank=True))
db.alter_column('profile_link_sf_proj_dude_fm', 'is_admin', orm['profile.link_sf_proj_dude_fm:is_admin'])
# Changing field 'ProjectExp.project'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['search.Project']))
db.alter_column('profile_projectexp', 'project_id', orm['profile.projectexp:project'])
# Changing field 'ProjectExp.person'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['profile.Person']))
db.alter_column('profile_projectexp', 'person_id', orm['profile.projectexp:person'])
def backwards(self, orm):
# Deleting model 'DataImportAttempt'
db.delete_table('profile_dataimportattempt')
# Deleting field 'ProjectExp.should_show_this'
db.delete_column('profile_projectexp', 'should_show_this')
# Deleting field 'ProjectExp.data_import_attempt'
db.delete_column('profile_projectexp', 'data_import_attempt_id')
# Changing field 'Person.ohloh_grab_completed'
# (to signature: models.BooleanField(default=False))
db.alter_column('profile_person', 'ohloh_grab_completed', orm['profile.person:ohloh_grab_completed'])
# Changing field 'Person.user'
# (to signature: models.ForeignKey(orm['auth.User'], unique=True))
db.alter_column('profile_person', 'user_id', orm['profile.person:user'])
# Changing field 'Person.gotten_name_from_ohloh'
# (to signature: models.BooleanField(default=False))
db.alter_column('profile_person', 'gotten_name_from_ohloh', orm['profile.person:gotten_name_from_ohloh'])
# Changing field 'Person.poll_on_next_web_view'
# (to signature: models.BooleanField(default=True))
db.alter_column('profile_person', 'poll_on_next_web_view', orm['profile.person:poll_on_next_web_view'])
# Changing field 'Link_Person_Tag.person'
# (to signature: models.ForeignKey(orm['profile.Person']))
db.alter_column('profile_link_person_tag', 'person_id', orm['profile.link_person_tag:person'])
# Changing field 'Link_Person_Tag.tag'
# (to signature: models.ForeignKey(orm['profile.Tag']))
db.alter_column('profile_link_person_tag', 'tag_id', orm['profile.link_person_tag:tag'])
# Changing field 'Tag.tag_type'
# (to signature: models.ForeignKey(orm['profile.TagType']))
db.alter_column('profile_tag', 'tag_type_id', orm['profile.tag:tag_type'])
# Changing field 'Link_ProjectExp_Tag.favorite'
# (to signature: models.BooleanField(default=False))
db.alter_column('profile_link_projectexp_tag', 'favorite', orm['profile.link_projectexp_tag:favorite'])
# Changing field 'Link_ProjectExp_Tag.project_exp'
# (to signature: models.ForeignKey(orm['profile.ProjectExp']))
db.alter_column('profile_link_projectexp_tag', 'project_exp_id', orm['profile.link_projectexp_tag:project_exp'])
# Changing field 'Link_ProjectExp_Tag.tag'
# (to signature: models.ForeignKey(orm['profile.Tag']))
db.alter_column('profile_link_projectexp_tag', 'tag_id', orm['profile.link_projectexp_tag:tag'])
# Changing field 'Link_Project_Tag.project'
# (to signature: models.ForeignKey(orm['search.Project']))
db.alter_column('profile_link_project_tag', 'project_id', orm['profile.link_project_tag:project'])
# Changing field 'Link_Project_Tag.tag'
# (to signature: models.ForeignKey(orm['profile.Tag']))
db.alter_column('profile_link_project_tag', 'tag_id', orm['profile.link_project_tag:tag'])
# Changing field 'Link_SF_Proj_Dude_FM.project'
# (to signature: models.ForeignKey(orm['profile.SourceForgeProject']))
db.alter_column('profile_link_sf_proj_dude_fm', 'project_id', orm['profile.link_sf_proj_dude_fm:project'])
# Changing field 'Link_SF_Proj_Dude_FM.person'
# (to signature: models.ForeignKey(orm['profile.SourceForgePerson']))
db.alter_column('profile_link_sf_proj_dude_fm', 'person_id', orm['profile.link_sf_proj_dude_fm:person'])
# Changing field 'Link_SF_Proj_Dude_FM.is_admin'
# (to signature: models.BooleanField(default=False))
db.alter_column('profile_link_sf_proj_dude_fm', 'is_admin', orm['profile.link_sf_proj_dude_fm:is_admin'])
# Changing field 'ProjectExp.project'
# (to signature: models.ForeignKey(orm['search.Project']))
db.alter_column('profile_projectexp', 'project_id', orm['profile.projectexp:project'])
# Changing field 'ProjectExp.person'
# (to signature: models.ForeignKey(orm['profile.Person']))
db.alter_column('profile_projectexp', 'person_id', orm['profile.projectexp:person'])
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profile.person': {
'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interested_in_working_on': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'ohloh_grab_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'poll_on_next_web_view': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'profile.link_person_tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.sourceforgeproject': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'unixname': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.TagType']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 19, 13, 39, 2, 840826)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 19, 13, 39, 2, 840676)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'profile.sourceforgeperson': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'profile.link_projectexp_tag': {
'Meta': {'unique_together': "[('tag', 'project_exp', 'source')]"},
'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_exp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.ProjectExp']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.dataimportattempt': {
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'search.project': {
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'profile.link_sf_proj_dude_fm': {
'Meta': {'unique_together': "[('person', 'project')]"},
'date_collected': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgePerson']"}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgeProject']"})
},
'profile.link_project_tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.tagtype': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'profile.projectexp': {
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'man_months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'person_role': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'should_show_this': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['profile']
| agpl-3.0 |
robcarver17/pysystemtrade | systems/provided/futures_chapter15/rules.py | 1 | 4311 | """
Trading rules for futures system
"""
from syscore.dateutils import ROOT_BDAYS_INYEAR
import pandas as pd
from sysquant.estimators.vol import robust_vol_calc
def ewmac(price, vol, Lfast, Lslow):
"""
Calculate the ewmac trading rule forecast, given a price and EWMA speeds Lfast, Lslow and vol_lookback
    Assumes that 'price' and vol are daily data
This version uses a precalculated price volatility, and does not do capping or scaling
:param price: The price or other series to use (assumed Tx1)
:type price: pd.Series
:param vol: The daily price unit volatility (NOT % vol)
:type vol: pd.Series aligned to price
:param Lfast: Lookback for fast in days
:type Lfast: int
:param Lslow: Lookback for slow in days
:type Lslow: int
:returns: pd.DataFrame -- unscaled, uncapped forecast
>>> from systems.tests.testdata import get_test_object_futures
>>> from systems.basesystem import System
>>> (rawdata, data, config)=get_test_object_futures()
>>> system=System( [rawdata], data, config)
>>>
>>> ewmac(rawdata.get_daily_prices("EDOLLAR"), rawdata.daily_returns_volatility("EDOLLAR"), 64, 256).tail(2)
2015-12-10 5.327019
2015-12-11 4.927339
Freq: B, dtype: float64
"""
# price: This is the stitched price series
# We can't use the price of the contract we're trading, or the volatility will be jumpy
# And we'll miss out on the rolldown. See
# https://qoppac.blogspot.com/2015/05/systems-building-futures-rolling.html
# We don't need to calculate the decay parameter, just use the span
# directly
fast_ewma = price.ewm(span=Lfast).mean()
slow_ewma = price.ewm(span=Lslow).mean()
raw_ewmac = fast_ewma - slow_ewma
return raw_ewmac / vol.ffill()
def ewmac_calc_vol(price, Lfast, Lslow, vol_days=35):
"""
Calculate the ewmac trading rule forecast, given a price and EWMA speeds Lfast, Lslow and vol_lookback
    Assumes that 'price' is daily data
This version recalculates the price volatility, and does not do capping or scaling
:param price: The price or other series to use (assumed Tx1)
:type price: pd.Series
:param Lfast: Lookback for fast in days
:type Lfast: int
:param Lslow: Lookback for slow in days
:type Lslow: int
:returns: pd.DataFrame -- unscaled, uncapped forecast
>>> from systems.tests.testdata import get_test_object_futures
>>> from systems.basesystem import System
>>> (rawdata, data, config)=get_test_object_futures()
>>> system=System( [rawdata], data, config)
>>>
>>> ewmac(rawdata.get_daily_prices("EDOLLAR"), rawdata.daily_returns_volatility("EDOLLAR"), 64, 256).tail(2)
2015-12-10 5.327019
2015-12-11 4.927339
Freq: B, dtype: float64
"""
# price: This is the stitched price series
# We can't use the price of the contract we're trading, or the volatility will be jumpy
# And we'll miss out on the rolldown. See
# https://qoppac.blogspot.com/2015/05/systems-building-futures-rolling.html
# We don't need to calculate the decay parameter, just use the span
# directly
fast_ewma = price.ewm(span=Lfast).mean()
slow_ewma = price.ewm(span=Lslow).mean()
raw_ewmac = fast_ewma - slow_ewma
vol = robust_vol_calc(price, vol_days)
return raw_ewmac / vol.ffill()
def carry(daily_ann_roll, vol, smooth_days=90):
"""
Old carry rule
"""
raise Exception("DEPRECATED: USE carry2")
def carry2(raw_carry, smooth_days=90):
"""
Calculate carry forecast, given that there exists a raw_carry() in rawdata
Assumes that everything is daily data
:param raw_carry: The annualised sharpe ratio of rolldown
:type raw_carry: pd.DataFrame (assumed Tx1)
>>> from systems.tests.testdata import get_test_object_futures
>>> from systems.basesystem import System
>>> (rawdata, data, config)=get_test_object_futures()
>>> system=System( [rawdata], data, config)
>>>
>>> carry2(rawdata.raw_carry("EDOLLAR")).tail(2)
2015-12-10 0.411686
2015-12-11 0.411686
Freq: B, dtype: float64
"""
smooth_carry = raw_carry.ewm(smooth_days).mean()
return smooth_carry
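# Illustrative sketch (added commentary, not part of the original module): the call
# pattern for the rules above on a synthetic random-walk price series; the 16/64 speeds
# and the series itself are assumptions for the example only.
def _ewmac_demo():
    import numpy as np
    price = pd.Series(np.cumsum(np.random.randn(500)) + 100.0, index=pd.bdate_range("2020-01-01", periods=500))
    # unscaled, uncapped forecast; volatility is recalculated inside ewmac_calc_vol
    return ewmac_calc_vol(price, Lfast=16, Lslow=64)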
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-3.0 |
tarzan0820/odoo | addons/account/tests/test_account_move_closed_period.py | 127 | 1623 | from datetime import date
from openerp.tests.common import TransactionCase
from openerp.osv.orm import except_orm
class TestPeriodState(TransactionCase):
"""
Forbid creation of Journal Entries for a closed period.
"""
def setUp(self):
super(TestPeriodState, self).setUp()
cr, uid = self.cr, self.uid
self.wizard_period_close = self.registry('account.period.close')
self.wizard_period_close_id = self.wizard_period_close.create(cr, uid, {'sure': 1})
_, self.sale_journal_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "sales_journal")
_, self.period_9_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "period_9")
def test_period_state(self):
cr, uid = self.cr, self.uid
self.wizard_period_close.data_save(cr, uid, [self.wizard_period_close_id], {
'lang': 'en_US',
'active_model': 'account.period',
'active_ids': [self.period_9_id],
'tz': False,
'active_id': self.period_9_id
})
with self.assertRaises(except_orm):
self.registry('account.move').create(cr, uid, {
'name': '/',
'period_id': self.period_9_id,
'journal_id': self.sale_journal_id,
'date': date.today(),
'line_id': [(0, 0, {
'name': 'foo',
'debit': 10,
}), (0, 0, {
'name': 'bar',
'credit': 10,
})]
})
| agpl-3.0 |
amirfefer/Cyber-Camera | server.py | 1 | 4344 | from flask import Flask, render_template, Response,send_file, request, session, redirect, url_for
import camera
import flask_httpauth
import config
import os
import io
import threading
import time
import hashlib
import logging
import datetime
import ssl
import cloud
app = Flask(__name__)
conf = config.Configuration()
logging.basicConfig(filename='app.log',level=logging.DEBUG)
auth = flask_httpauth.HTTPBasicAuth()
app.secret_key = os.urandom(24)
user = None
online = None
cmra = camera.VideoCamera(conf)
drop = cloud.DropObj(conf)
@auth.get_password
def get_pw(username):
global user
user = username
return conf.get('User')[username]
@auth.hash_password
def hash_pw(password):
return hashlib.sha224(password).hexdigest()
@app.route('/', methods=['GET', 'POST'])
@auth.login_required
def index():
cloud = False
auth = False
error = ''
if request.method == 'POST':
key = request.form['code']
drop.auth(key)
dropbox = '#'
else:
dropbox = drop.get_website()
if conf.get('Cloud')['token'] == 'none':
error = "You need to register your dropbox account first, go to settings tab."
if request.args.get('options') == 'record':
if request.args.has_key('cloud'):
cloud = True
recording = threading.Thread(target=cmra.record,args=[cloud,drop] )
recording.start()
session['options'] = 'record'
return '<IMG id="bg" SRC="/video_feed_record" width="320" height="240" >'
return render_template('index.html', online = online, dropbox = dropbox, error = error)
def gen(camera, save=False, vstart=False):
while True:
        frame = camera.get_frame(False, save, vstart)
yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n{0}\r\n\r\n'.format(frame))
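# gen() yields an endless multipart stream: each JPEG frame is sent as its own
# part, and the 'multipart/x-mixed-replace' responses below make the browser
# replace the previous frame with the next one, so a plain <IMG SRC="/video_feed">
# tag in the templates displays live video without any client-side code.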
@app.route('/audio', methods=['POST'])
@auth.login_required
def audio():
file = request.files['edition[audio]']
timestamp = str(time.time())
file.save("audio" + timestamp + ".wav")
cmra.playAudio(timestamp)
return ('', 204)
@app.route('/video_feed')
@auth.login_required
def video_feed():
if 'options' in session:
if session['options'] == 'record':
return Response(gen(cmra,False,True),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
return Response(gen(cmra),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/video_feed_record')
@auth.login_required
def video_feed2():
return Response(gen(cmra,False,True),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/get_frame')
@auth.login_required
def get_frame():
frame = cmra.get_frame(False)
if request.args.get('options') == 'save':
timestr = time.strftime("%Y%m%d-%H%M%S")
f = open(conf.get('File')['photos'] + 'image' + timestr +'.jpg', 'wb')
f.write(frame)
logging.info('Snapshot taken at ' + str(datetime.datetime.now()))
return ('', 204)
return send_file(io.BytesIO(frame))
@app.route('/stopV')
@auth.login_required
def stopV():
session.pop('options',None)
cmra.endVideo()
return '<IMG id="bg" SRC="/video_feed" width="320" height="240" >'
@app.route('/toggle_online',methods=['POST'])
@auth.login_required
def toggle_online():
global online
if 'submit' in request.form:
cmra.online = True
return redirect(url_for('index'))
sens = int(request.form['sensitive'])
method = request.form['method']
sound = True if 'chk-sound' in request.form else False
mail = True if 'chk-mail' in request.form else False
notify = True if 'chk-not' in request.form else False
online = threading.Thread(target=cmra.start, args=[sens, method, mail, sound, notify])
online.start()
return redirect(url_for('index'))
if __name__ == "__main__":
if conf.boolean('Connection','https'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(conf.get('Connection')['certificate'], conf.get('Connection')['key'])
app.run(threaded=True, host=conf.get('Connection')['ip'], port=int(conf.get('Connection')['port']) ,ssl_context=context)
else:
app.run(threaded=True,host=conf.get('Connection')['ip'], port=int(conf.get('Connection')['port']))
| gpl-3.0 |
whiteear/cloudbase-init | cloudbaseinit/tests/utils/test_dhcp.py | 7 | 7611 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netifaces
import socket
import struct
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.tests import testutils
from cloudbaseinit.utils import dhcp
class DHCPUtilsTests(unittest.TestCase):
def test_get_dhcp_request_data(self):
fake_mac_address = '010203040506'
fake_mac_address_b = bytearray.fromhex(fake_mac_address)
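        # The expected packet is a raw BOOTP/DHCP discover built field by field:
        # op/htype/hlen/hops, the 32-bit transaction id, secs and flags, the four
        # zeroed address fields (ciaddr/yiaddr/siaddr/giaddr), the MAC padded to a
        # 16-byte chaddr, 64 bytes of sname, 128 bytes of file, the magic cookie,
        # then options 53 (message type = discover), 60 (vendor class id),
        # 61 (client id) and 55 (parameter request list), closed by the end option.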
data = b'\x01'
data += b'\x01'
data += b'\x06'
data += b'\x00'
data += struct.pack('!L', 9999)
data += b'\x00\x00'
data += b'\x00\x00'
data += b'\x00\x00\x00\x00'
data += b'\x00\x00\x00\x00'
data += b'\x00\x00\x00\x00'
data += b'\x00\x00\x00\x00'
data += fake_mac_address_b
data += b'\x00' * 10
data += b'\x00' * 64
data += b'\x00' * 128
data += dhcp._DHCP_COOKIE
data += b'\x35\x01\x01'
data += b'\x3c' + struct.pack('b', len('fake id')) + 'fake id'.encode(
'ascii')
data += b'\x3d\x07\x01'
data += fake_mac_address_b
data += b'\x37' + struct.pack('b', len([100]))
data += struct.pack('b', 100)
data += dhcp._OPTION_END
response = dhcp._get_dhcp_request_data(
id_req=9999, mac_address=fake_mac_address,
requested_options=[100], vendor_id='fake id')
self.assertEqual(data, response)
@mock.patch('struct.unpack')
def _test_parse_dhcp_reply(self, mock_unpack, message_type,
id_reply, equals_cookie):
fake_data = 236 * b"1"
if equals_cookie:
fake_data += dhcp._DHCP_COOKIE + b'11'
else:
fake_data += b'111111'
fake_data += b'fake'
fake_data += dhcp._OPTION_END
mock_unpack.side_effect = [(message_type, None), (id_reply, None),
(100, None), (4, None)]
response = dhcp._parse_dhcp_reply(data=fake_data, id_req=9999)
if message_type != 2:
self.assertEqual((False, {}), response)
elif id_reply != 9999:
self.assertEqual((False, {}), response)
elif fake_data[236:240] != dhcp._DHCP_COOKIE:
self.assertEqual((False, {}), response)
else:
self.assertEqual((True, {100: b'fake'}), response)
def test_parse_dhcp_reply(self):
self._test_parse_dhcp_reply(message_type=2, id_reply=9999,
equals_cookie=True)
def test_parse_dhcp_reply_other_message_type(self):
self._test_parse_dhcp_reply(message_type=3, id_reply=9999,
equals_cookie=True)
def test_parse_dhcp_reply_other_reply(self):
self._test_parse_dhcp_reply(message_type=3, id_reply=111,
equals_cookie=True)
def test_parse_dhcp_reply_other_than_cookie(self):
self._test_parse_dhcp_reply(message_type=3, id_reply=111,
equals_cookie=False)
@mock.patch('netifaces.ifaddresses')
@mock.patch('netifaces.interfaces')
def test_get_mac_address_by_local_ip(self, mock_interfaces,
mock_ifaddresses):
fake_addresses = {}
fake_addresses[netifaces.AF_INET] = [{'addr': 'fake address'}]
fake_addresses[netifaces.AF_LINK] = [{'addr': 'fake mac'}]
mock_interfaces.return_value = ['fake interface']
mock_ifaddresses.return_value = fake_addresses
response = dhcp._get_mac_address_by_local_ip('fake address')
mock_interfaces.assert_called_once_with()
mock_ifaddresses.assert_called_once_with('fake interface')
self.assertEqual(fake_addresses[netifaces.AF_LINK][0]['addr'],
response)
@mock.patch('random.randint')
@mock.patch('socket.socket')
@mock.patch('cloudbaseinit.utils.dhcp._get_mac_address_by_local_ip')
@mock.patch('cloudbaseinit.utils.dhcp._get_dhcp_request_data')
@mock.patch('cloudbaseinit.utils.dhcp._parse_dhcp_reply')
def test_get_dhcp_options(self, mock_parse_dhcp_reply,
mock_get_dhcp_request_data,
mock_get_mac_address_by_local_ip, mock_socket,
mock_randint):
mock_randint.return_value = 'fake int'
mock_socket().getsockname.return_value = ['fake local ip']
mock_get_mac_address_by_local_ip.return_value = 'fake mac'
mock_get_dhcp_request_data.return_value = 'fake data'
mock_parse_dhcp_reply.return_value = (True, 'fake replied options')
response = dhcp.get_dhcp_options(
dhcp_host='fake host', requested_options=['fake option'])
mock_randint.assert_called_once_with(0, 2 ** 32 - 1)
mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM)
mock_socket().setsockopt.assert_called_once_with(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
mock_socket().bind.assert_called_once_with(('', 68))
mock_socket().settimeout.assert_called_once_with(5)
mock_socket().connect.assert_called_once_with(('fake host', 67))
mock_socket().getsockname.assert_called_once_with()
mock_get_mac_address_by_local_ip.assert_called_once_with(
'fake local ip')
mock_get_dhcp_request_data.assert_called_once_with('fake int',
'fake mac',
['fake option'],
'cloudbase-init')
mock_socket().send.assert_called_once_with('fake data')
mock_socket().recv.assert_called_once_with(1024)
mock_parse_dhcp_reply.assert_called_once_with(mock_socket().recv(),
'fake int')
mock_socket().close.assert_called_once_with()
self.assertEqual('fake replied options', response)
def test__bind_dhcp_client_socket_bind_succeeds(self):
mock_socket = mock.Mock()
dhcp._bind_dhcp_client_socket(mock_socket, 0, 0)
mock_socket.bind.assert_called_once_with(('', 68))
@mock.patch('time.sleep')
def test__bind_dhcp_client_socket(self, mock_time_sleep):
mock_socket = mock.Mock()
exc = socket.error()
exc.errno = 48
mock_socket.bind = mock.Mock(side_effect=exc)
with testutils.LogSnatcher('cloudbaseinit.utils.dhcp') as snatcher:
with self.assertRaises(socket.error):
dhcp._bind_dhcp_client_socket(
mock_socket, max_bind_attempts=4,
bind_retry_interval=mock.sentinel.bind_retry_interval)
expected_occurences = sum(
1 for item in snatcher.output
if item.startswith("Retrying to bind DHCP client port in "))
self.assertEqual(3, expected_occurences)
| apache-2.0 |
leedm777/ansible | lib/ansible/playbook/play.py | 6 | 12420 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import string_types
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.playbook.task import Task
from ansible.utils.vars import combine_vars
__all__ = ['Play']
class Play(Base, Taggable, Become):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
# Connection-Related Attributes
# TODO: generalize connection
_accelerate = FieldAttribute(isa='bool', default=False)
_accelerate_ipv6 = FieldAttribute(isa='bool', default=False)
_accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port
# Connection
_gather_facts = FieldAttribute(isa='bool', default=None)
_hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types)
_name = FieldAttribute(isa='string', default='')
# Variable Attributes
_vars = FieldAttribute(isa='dict', default=dict())
_vars_files = FieldAttribute(isa='list', default=[])
_vars_prompt = FieldAttribute(isa='list', default=[])
_vault_password = FieldAttribute(isa='string')
# Role Attributes
_roles = FieldAttribute(isa='list', default=[], priority=100)
# Block (Task) Lists Attributes
_handlers = FieldAttribute(isa='list', default=[])
_pre_tasks = FieldAttribute(isa='list', default=[])
_post_tasks = FieldAttribute(isa='list', default=[])
_tasks = FieldAttribute(isa='list', default=[])
# Flag/Setting Attributes
_any_errors_fatal = FieldAttribute(isa='bool', default=False)
_force_handlers = FieldAttribute(isa='bool')
_max_fail_percentage = FieldAttribute(isa='string', default='0')
_serial = FieldAttribute(isa='int', default=0)
_strategy = FieldAttribute(isa='string', default='linear')
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self.ROLE_CACHE = {}
def __repr__(self):
return self.get_name()
def get_name(self):
''' return the name of the Play '''
return self._attributes.get('name')
@staticmethod
def load(data, variable_manager=None, loader=None):
p = Play()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def preprocess_data(self, ds):
'''
Adjusts play datastructure to cleanup old/legacy items
'''
assert isinstance(ds, dict)
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
if 'vars_prompt' in ds and not isinstance(ds['vars_prompt'], list):
ds['vars_prompt'] = [ ds['vars_prompt'] ]
return super(Play, self).preprocess_data(ds)
def _load_hosts(self, attr, ds):
'''
Loads the hosts from the given datastructure, which might be a list
or a simple string. We also switch integers in this list back to strings,
as the YAML parser will turn things that look like numbers into numbers.
'''
if isinstance(ds, (string_types, int)):
ds = [ ds ]
if not isinstance(ds, list):
raise AnsibleParserError("'hosts' must be specified as a list or a single pattern", obj=ds)
# YAML parsing of things that look like numbers may have
# resulted in integers showing up in the list, so convert
# them back to strings to prevent problems
for idx,item in enumerate(ds):
if isinstance(item, int):
ds[idx] = "%s" % item
return ds
def _load_vars(self, attr, ds):
'''
Vars in a play can be specified either as a dictionary directly, or
        as a list of dictionaries. If the latter, this method will turn the
list into a single dictionary.
'''
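        # e.g. [{'a': 1}, {'b': 2}] is merged into {'a': 1, 'b': 2}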
try:
if isinstance(ds, dict):
return ds
elif isinstance(ds, list):
all_vars = dict()
for item in ds:
if not isinstance(item, dict):
raise ValueError
all_vars = combine_vars(all_vars, item)
return all_vars
elif ds is None:
return {}
else:
raise ValueError
except ValueError:
raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds)
def _load_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
def _load_pre_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
def _load_post_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
def _load_handlers(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
'''
return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)
def _load_roles(self, attr, ds):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
'''
if ds is None:
ds = []
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
return roles
def _post_validate_vars(self, attr, value, templar):
'''
Override post validation of vars on the play, as we don't want to
template these too early.
'''
return value
def _post_validate_vars_files(self, attr, value, templar):
'''
Override post validation of vars_files on the play, as we don't want to
template these too early.
'''
return value
# disable validation on various fields which will be validated later in other objects
def _post_validate_become(self, attr, value, templar):
return value
def _post_validate_become_user(self, attr, value, templar):
return value
def _post_validate_become_method(self, attr, value, templar):
return value
# FIXME: post_validation needs to ensure that become/su/sudo have only 1 set
def _compile_roles(self):
'''
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
D2 are merged first, followed by D1, and lastly by the tasks from
the parent role R last. This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.compile(play=self))
return block_list
def compile_roles_handlers(self):
'''
Handles the role handler compilation step, returning a flat list of Handlers
This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.get_handler_blocks())
return block_list
def compile(self):
'''
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
'''
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block.load(
data={'meta': 'flush_handlers'},
play=self,
variable_manager=self._variable_manager,
loader=self._loader
)
block_list = []
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def serialize(self):
data = super(Play, self).serialize()
roles = []
for role in self.get_roles():
roles.append(role.serialize())
data['roles'] = roles
return data
def deserialize(self, data):
super(Play, self).deserialize(data)
if 'roles' in data:
role_data = data.get('roles', [])
roles = []
for role in role_data:
r = Role()
r.deserialize(role)
roles.append(r)
setattr(self, 'roles', roles)
del data['roles']
def copy(self):
new_me = super(Play, self).copy()
new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
return new_me
| gpl-3.0 |
q1ang/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
owais/Fogger | fogger/FoggerWindow.py | 3 | 10132 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Owais Lone <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from os import path as op
import re
import requests
import urlparse
import tempfile
import threading
from BeautifulSoup import BeautifulSoup, SoupStrainer
import gettext
from gettext import gettext as _
gettext.textdomain('fogger')
from gi.repository import GLib, Gtk, Gdk, GdkPixbuf, GObject, Gio # pylint: disable=E0611
import logging
logger = logging.getLogger('fogger')
from fogger_lib import Window, IconChooserDialog, ConfirmDialog
from fogger_lib import FogAppManager
from fogger_lib.exceptions import BaseFogAppException
from fogger_lib.helpers import get_network_proxies
from fogger_lib.consts import DEFAULT_APP_ICON
from fogger_lib.BackgroundLoader import get_chameleonic_pixbuf_from_svg
from fogger.AboutFoggerDialog import AboutFoggerDialog
ICON_SIZE = Gtk.icon_size_register('FoggerIconSize', 80, 80)
GLib.threads_init()
# See fogger_lib.Window.py for more details about how this class works
class FoggerWindow(Window):
__gtype_name__ = "FoggerWindow"
def finish_initializing(self, builder): # pylint: disable=E1002
"""Set up the main window"""
super(FoggerWindow, self).finish_initializing(builder)
self.AboutDialog = AboutFoggerDialog
self.url = self.builder.get_object('url_entry')
self.name = self.builder.get_object('name_entry')
self.image = self.builder.get_object('image')
self.image_eb = self.builder.get_object('image_eb')
self.create_button = self.builder.get_object('create_button')
self.spinner = self.builder.get_object('spinner')
self.error_message = self.builder.get_object('error')
self.background_image = self.builder.get_object('bgimage')
self.icon = DEFAULT_APP_ICON
self.themed_icon = None
self.icon_selected = False
self.icon_theme = Gtk.IconTheme.get_default()
self.setup_drop_targets()
self.background_image.set_from_pixbuf(get_chameleonic_pixbuf_from_svg(
'background-app.svg'))
def validate_form(self, widget, data=None):
url = self.url.get_text()
name = self.name.get_text()
sensitive = url and name
self.create_button.set_sensitive(sensitive)
def setup_drop_targets(self):
self.drag_dest_set(Gtk.DestDefaults.ALL, [], Gdk.DragAction.MOVE)
self.connect("drag-data-received", self.on_drag_data_received)
self.drag_dest_add_uri_targets()
def on_drag_data_received(self, widget, context, x, y, data, info, time):
try:
path = data.get_uris()[0]
except IndexError:
return
else:
path = path.replace('file://', '')
self.setup_icon(path)
def on_cancel(self, widget, data=None):
self.destroy()
def on_url_changed(self, widget, data=None):
pass
def on_icon_clicked(self, widget, data=None):
icon_chooser = IconChooserDialog(self)
response = icon_chooser.run()
if response == Gtk.ResponseType.OK:
path = icon_chooser.get_filename()
self.setup_icon(path)
icon_chooser.destroy()
def on_name_changed(self, widget, data=None):
if self.icon_selected:
return
name = self.name.get_text().lower().strip().replace(' ', '-')
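        # Look for a themed icon matching the full hyphenated name first, then
        # progressively shorter prefixes (e.g. 'google-mail-web' is also tried
        # as 'google-mail' and 'google') until something matches.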
words = name.split('-')
subnames = []
for i, word in enumerate(words):
x = '-'.join(words[:(i + 1) * -1])
if x:
subnames.append(x)
search_strings = [name] + subnames
icon = self.icon_theme.choose_icon(search_strings, 0, Gtk.IconLookupFlags.GENERIC_FALLBACK)
if icon:
filename = icon.get_filename()
path, ext = op.splitext(filename)
_, themed_icon = op.split(path)
self.setup_icon(filename, themed_icon, False)
else:
self.setup_icon(DEFAULT_APP_ICON, None, False)
def setup_icon(self, path, name=None, selected=True):
pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
self.image.props.pixbuf = pixbuf.scale_simple(80, 80, GdkPixbuf.InterpType.BILINEAR)
self.icon = path
self.themed_icon = name
self.icon_selected = selected
def on_create(self, widget, data=None):
name = self.name.get_text()
manager = FogAppManager()
existing = manager.get_by_name(name)
if existing:
confirm = ConfirmDialog('Fogger', _('There\'s an app for that!'),
_('A fog app already exists by that name. '\
'Would you like to replace it with a new one?'),
existing.icon, self, _('Replace'))
response = confirm.run()
confirm.destroy()
if response != Gtk.ResponseType.YES:
self.name.grab_focus()
return
self.set_loading_url(True)
self.error_message.hide()
thread = threading.Thread(target=self.verify_url)
thread.daemon = True
thread.start()
def create_app(self, url, name):
manager = FogAppManager()
try:
app = manager.create(name, url, self.icon, self.themed_icon)
except BaseFogAppException:
logger.error("Error creating App %s" % url)
else:
app = Gio.DesktopAppInfo.new_from_filename(app.desktop_file)
app.launch([], Gio.AppLaunchContext())
self.destroy()
def set_loading_url(self, loading):
if loading:
self.spinner.show()
self.create_button.hide()
self.url.set_sensitive(False)
self.name.set_sensitive(False)
else:
self.spinner.hide()
self.create_button.show()
self.url.set_sensitive(True)
self.name.set_sensitive(True)
def set_error_message(self, message):
self.error_message.set_markup('<tt><small>%s</small></tt>' % message)
self.error_message.show()
def verify_url(self):
logger.debug('Fetching url')
url = self.url.get_text()
name = self.name.get_text()
verified = False
proxies = get_network_proxies()
try:
if url.startswith('file://'):
GObject.idle_add(self.set_loading_url, False)
GObject.idle_add(self.create_app, url, name)
return
elif not url.startswith(('http://', 'https://',)):
url = 'http://%s' % url
try:
logger.debug('starting')
response = requests.get(url, proxies=proxies)
verified = True
logger.debug('finishing')
except requests.RequestException:
logger.debug('Error downloading url %s' % url)
GObject.idle_add(self.set_loading_url, False)
GObject.idle_add(self.set_error_message,
_('The URL %s could not be reached.\nPlease double check'\
' the URL you provided and try again.' % url))
return
SkipIcon = type('SkipIcon', (Exception,), {})
if self.icon != DEFAULT_APP_ICON:
raise SkipIcon()
# Try to find the apple-touch-icon
logger.debug('parsing')
soup = BeautifulSoup(response.content, parseOnlyThese=SoupStrainer('link'))
icons = soup.findAll('link', rel=re.compile('^apple-touch-icon'))
logger.debug('finished parsing')
soup = BeautifulSoup(response.content)
if not icons:
logger.debug('No apple touch icon found')
raise SkipIcon()
icon = icons[0]
href = icon.attrMap.get('href', None)
if not href:
logger.debug('Bad apple touch icon')
raise SkipIcon()
icon_url = None
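            # Resolve the icon href against the page: root-relative paths are
            # joined with the page's scheme and host, absolute URLs are used
            # as-is, and anything else is treated as relative to the page URL.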
if href.startswith('/'):
parsed = urlparse.urlparse(url)
icon_url = urlparse.urljoin(
'%s://%s' % (parsed.scheme, parsed.netloc,), href)
else:
parsed = urlparse.urlparse(href)
if parsed.scheme:
icon_url = href
else:
icon_url = urlparse.urljoin(url, href)
ext = op.splitext(icon_url)[-1]
tmpf = tempfile.mktemp(ext)
logger.debug('temp file: %s' % tmpf)
headers = {'User-Agent': 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like'\
' Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko)'\
' Version/4.0.4 Mobile/7B334b Safari/531.21.10'}
try:
icon_bytes = requests.get(icon_url, headers=headers,
proxies=proxies).content
except requests.RequestException:
logger.debug('Error dowloading apple touch icon')
else:
handle = open(tmpf, 'w')
handle.write(icon_bytes)
handle.close()
self.setup_icon(tmpf)
except Exception, e:
logger.debug("Error", e)
finally:
GObject.idle_add(self.set_loading_url, False)
if verified:
GObject.idle_add(self.create_app, url, name)
| gpl-3.0 |
ligovirgo/seismon | seismon/input/__init__.py | 4 | 1034 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Michael Coughlin (2013)
#
# This file is part of SeisMon
#
# SeisMon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SeisMon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SeisMon. If not, see <http://www.gnu.org/licenses/>
"""Seismic activity Monitor
SeisMon is used to monitor and predict the impact of earthquakes on the
LIGO gravitational-wave detectors.
"""
__author__ = 'Michael Coughlin <[email protected]>'
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| gpl-3.0 |
shaunbrady/boto | boto/beanstalk/response.py | 153 | 28051 | """Classify responses from layer1 and strict type values."""
from datetime import datetime
from boto.compat import six
class BaseObject(object):
def __repr__(self):
result = self.__class__.__name__ + '{ '
counter = 0
for key, value in six.iteritems(self.__dict__):
# first iteration no comma
counter += 1
if counter > 1:
result += ', '
result += key + ': '
result += self._repr_by_type(value)
result += ' }'
return result
def _repr_by_type(self, value):
# Everything is either a 'Response', 'list', or 'None/str/int/bool'.
result = ''
if isinstance(value, Response):
result += value.__repr__()
elif isinstance(value, list):
result += self._repr_list(value)
else:
result += str(value)
return result
def _repr_list(self, array):
result = '['
for value in array:
result += ' ' + self._repr_by_type(value) + ','
# Check for trailing comma with a space.
if len(result) > 1:
result = result[:-1] + ' '
result += ']'
return result
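# For example, repr(ResponseMetadata({'RequestId': 'abc-123'})) renders as
# "ResponseMetadata{ request_id: abc-123 }"; nested Response objects and lists
# are expanded recursively by _repr_by_type and _repr_list above.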
class Response(BaseObject):
def __init__(self, response):
super(Response, self).__init__()
if response['ResponseMetadata']:
self.response_metadata = ResponseMetadata(response['ResponseMetadata'])
else:
self.response_metadata = None
class ResponseMetadata(BaseObject):
def __init__(self, response):
super(ResponseMetadata, self).__init__()
self.request_id = str(response['RequestId'])
class ApplicationDescription(BaseObject):
def __init__(self, response):
super(ApplicationDescription, self).__init__()
self.application_name = str(response['ApplicationName'])
self.configuration_templates = []
if response['ConfigurationTemplates']:
for member in response['ConfigurationTemplates']:
configuration_template = str(member)
self.configuration_templates.append(configuration_template)
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.description = str(response['Description'])
self.versions = []
if response['Versions']:
for member in response['Versions']:
version = str(member)
self.versions.append(version)
class ApplicationVersionDescription(BaseObject):
def __init__(self, response):
super(ApplicationVersionDescription, self).__init__()
self.application_name = str(response['ApplicationName'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.description = str(response['Description'])
if response['SourceBundle']:
self.source_bundle = S3Location(response['SourceBundle'])
else:
self.source_bundle = None
self.version_label = str(response['VersionLabel'])
class AutoScalingGroup(BaseObject):
def __init__(self, response):
super(AutoScalingGroup, self).__init__()
self.name = str(response['Name'])
class ConfigurationOptionDescription(BaseObject):
def __init__(self, response):
super(ConfigurationOptionDescription, self).__init__()
self.change_severity = str(response['ChangeSeverity'])
self.default_value = str(response['DefaultValue'])
self.max_length = int(response['MaxLength']) if response['MaxLength'] else None
self.max_value = int(response['MaxValue']) if response['MaxValue'] else None
self.min_value = int(response['MinValue']) if response['MinValue'] else None
self.name = str(response['Name'])
self.namespace = str(response['Namespace'])
if response['Regex']:
self.regex = OptionRestrictionRegex(response['Regex'])
else:
self.regex = None
self.user_defined = str(response['UserDefined'])
self.value_options = []
if response['ValueOptions']:
for member in response['ValueOptions']:
value_option = str(member)
self.value_options.append(value_option)
self.value_type = str(response['ValueType'])
class ConfigurationOptionSetting(BaseObject):
def __init__(self, response):
super(ConfigurationOptionSetting, self).__init__()
self.namespace = str(response['Namespace'])
self.option_name = str(response['OptionName'])
self.value = str(response['Value'])
class ConfigurationSettingsDescription(BaseObject):
def __init__(self, response):
super(ConfigurationSettingsDescription, self).__init__()
self.application_name = str(response['ApplicationName'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.deployment_status = str(response['DeploymentStatus'])
self.description = str(response['Description'])
self.environment_name = str(response['EnvironmentName'])
self.option_settings = []
if response['OptionSettings']:
for member in response['OptionSettings']:
option_setting = ConfigurationOptionSetting(member)
self.option_settings.append(option_setting)
self.solution_stack_name = str(response['SolutionStackName'])
self.template_name = str(response['TemplateName'])
class EnvironmentDescription(BaseObject):
def __init__(self, response):
super(EnvironmentDescription, self).__init__()
self.application_name = str(response['ApplicationName'])
self.cname = str(response['CNAME'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.description = str(response['Description'])
self.endpoint_url = str(response['EndpointURL'])
self.environment_id = str(response['EnvironmentId'])
self.environment_name = str(response['EnvironmentName'])
self.health = str(response['Health'])
if response['Resources']:
self.resources = EnvironmentResourcesDescription(response['Resources'])
else:
self.resources = None
self.solution_stack_name = str(response['SolutionStackName'])
self.status = str(response['Status'])
self.template_name = str(response['TemplateName'])
self.version_label = str(response['VersionLabel'])
class EnvironmentInfoDescription(BaseObject):
def __init__(self, response):
super(EnvironmentInfoDescription, self).__init__()
self.ec2_instance_id = str(response['Ec2InstanceId'])
self.info_type = str(response['InfoType'])
self.message = str(response['Message'])
self.sample_timestamp = datetime.fromtimestamp(response['SampleTimestamp'])
class EnvironmentResourceDescription(BaseObject):
def __init__(self, response):
super(EnvironmentResourceDescription, self).__init__()
self.auto_scaling_groups = []
if response['AutoScalingGroups']:
for member in response['AutoScalingGroups']:
auto_scaling_group = AutoScalingGroup(member)
self.auto_scaling_groups.append(auto_scaling_group)
self.environment_name = str(response['EnvironmentName'])
self.instances = []
if response['Instances']:
for member in response['Instances']:
instance = Instance(member)
self.instances.append(instance)
self.launch_configurations = []
if response['LaunchConfigurations']:
for member in response['LaunchConfigurations']:
launch_configuration = LaunchConfiguration(member)
self.launch_configurations.append(launch_configuration)
self.load_balancers = []
if response['LoadBalancers']:
for member in response['LoadBalancers']:
load_balancer = LoadBalancer(member)
self.load_balancers.append(load_balancer)
self.triggers = []
if response['Triggers']:
for member in response['Triggers']:
trigger = Trigger(member)
self.triggers.append(trigger)
class EnvironmentResourcesDescription(BaseObject):
def __init__(self, response):
super(EnvironmentResourcesDescription, self).__init__()
if response['LoadBalancer']:
self.load_balancer = LoadBalancerDescription(response['LoadBalancer'])
else:
self.load_balancer = None
class EventDescription(BaseObject):
def __init__(self, response):
super(EventDescription, self).__init__()
self.application_name = str(response['ApplicationName'])
self.environment_name = str(response['EnvironmentName'])
self.event_date = datetime.fromtimestamp(response['EventDate'])
self.message = str(response['Message'])
self.request_id = str(response['RequestId'])
self.severity = str(response['Severity'])
self.template_name = str(response['TemplateName'])
self.version_label = str(response['VersionLabel'])
class Instance(BaseObject):
def __init__(self, response):
super(Instance, self).__init__()
self.id = str(response['Id'])
class LaunchConfiguration(BaseObject):
def __init__(self, response):
super(LaunchConfiguration, self).__init__()
self.name = str(response['Name'])
class Listener(BaseObject):
def __init__(self, response):
super(Listener, self).__init__()
self.port = int(response['Port']) if response['Port'] else None
self.protocol = str(response['Protocol'])
class LoadBalancer(BaseObject):
def __init__(self, response):
super(LoadBalancer, self).__init__()
self.name = str(response['Name'])
class LoadBalancerDescription(BaseObject):
def __init__(self, response):
super(LoadBalancerDescription, self).__init__()
self.domain = str(response['Domain'])
self.listeners = []
if response['Listeners']:
for member in response['Listeners']:
listener = Listener(member)
self.listeners.append(listener)
self.load_balancer_name = str(response['LoadBalancerName'])
class OptionRestrictionRegex(BaseObject):
def __init__(self, response):
super(OptionRestrictionRegex, self).__init__()
self.label = response['Label']
self.pattern = response['Pattern']
class SolutionStackDescription(BaseObject):
def __init__(self, response):
super(SolutionStackDescription, self).__init__()
self.permitted_file_types = []
if response['PermittedFileTypes']:
for member in response['PermittedFileTypes']:
permitted_file_type = str(member)
self.permitted_file_types.append(permitted_file_type)
self.solution_stack_name = str(response['SolutionStackName'])
class S3Location(BaseObject):
def __init__(self, response):
super(S3Location, self).__init__()
self.s3_bucket = str(response['S3Bucket'])
self.s3_key = str(response['S3Key'])
class Trigger(BaseObject):
def __init__(self, response):
super(Trigger, self).__init__()
self.name = str(response['Name'])
class ValidationMessage(BaseObject):
def __init__(self, response):
super(ValidationMessage, self).__init__()
self.message = str(response['Message'])
self.namespace = str(response['Namespace'])
self.option_name = str(response['OptionName'])
self.severity = str(response['Severity'])
# These are the response objects layer2 uses, one for each layer1 api call.
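# Minimal sketch (hypothetical payload, not taken from the boto sources) of how
# the wrappers below are used: layer1 hands back plain nested dicts, and each
# *Response class coerces the fields it knows about into concrete python types.
#
#   raw = {'CheckDNSAvailabilityResponse': {
#       'ResponseMetadata': {'RequestId': 'abc-123'},
#       'CheckDNSAvailabilityResult': {
#           'FullyQualifiedCNAME': 'myapp.elasticbeanstalk.com',
#           'Available': True}}}
#   resp = CheckDNSAvailabilityResponse(raw)
#   resp.available               # -> True (bool)
#   resp.fully_qualified_cname   # -> 'myapp.elasticbeanstalk.com'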
class CheckDNSAvailabilityResponse(Response):
def __init__(self, response):
response = response['CheckDNSAvailabilityResponse']
super(CheckDNSAvailabilityResponse, self).__init__(response)
response = response['CheckDNSAvailabilityResult']
self.fully_qualified_cname = str(response['FullyQualifiedCNAME'])
self.available = bool(response['Available'])
# Our naming convention produces this class name but the API names it with more
# capitals.
class CheckDnsAvailabilityResponse(CheckDNSAvailabilityResponse): pass
class CreateApplicationResponse(Response):
def __init__(self, response):
response = response['CreateApplicationResponse']
super(CreateApplicationResponse, self).__init__(response)
response = response['CreateApplicationResult']
if response['Application']:
self.application = ApplicationDescription(response['Application'])
else:
self.application = None
class CreateApplicationVersionResponse(Response):
def __init__(self, response):
response = response['CreateApplicationVersionResponse']
super(CreateApplicationVersionResponse, self).__init__(response)
response = response['CreateApplicationVersionResult']
if response['ApplicationVersion']:
self.application_version = ApplicationVersionDescription(response['ApplicationVersion'])
else:
self.application_version = None
class CreateConfigurationTemplateResponse(Response):
def __init__(self, response):
response = response['CreateConfigurationTemplateResponse']
super(CreateConfigurationTemplateResponse, self).__init__(response)
response = response['CreateConfigurationTemplateResult']
self.application_name = str(response['ApplicationName'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.deployment_status = str(response['DeploymentStatus'])
self.description = str(response['Description'])
self.environment_name = str(response['EnvironmentName'])
self.option_settings = []
if response['OptionSettings']:
for member in response['OptionSettings']:
option_setting = ConfigurationOptionSetting(member)
self.option_settings.append(option_setting)
self.solution_stack_name = str(response['SolutionStackName'])
self.template_name = str(response['TemplateName'])
class CreateEnvironmentResponse(Response):
def __init__(self, response):
response = response['CreateEnvironmentResponse']
super(CreateEnvironmentResponse, self).__init__(response)
response = response['CreateEnvironmentResult']
self.application_name = str(response['ApplicationName'])
self.cname = str(response['CNAME'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.description = str(response['Description'])
self.endpoint_url = str(response['EndpointURL'])
self.environment_id = str(response['EnvironmentId'])
self.environment_name = str(response['EnvironmentName'])
self.health = str(response['Health'])
if response['Resources']:
self.resources = EnvironmentResourcesDescription(response['Resources'])
else:
self.resources = None
self.solution_stack_name = str(response['SolutionStackName'])
self.status = str(response['Status'])
self.template_name = str(response['TemplateName'])
self.version_label = str(response['VersionLabel'])
class CreateStorageLocationResponse(Response):
def __init__(self, response):
response = response['CreateStorageLocationResponse']
super(CreateStorageLocationResponse, self).__init__(response)
response = response['CreateStorageLocationResult']
self.s3_bucket = str(response['S3Bucket'])
class DeleteApplicationResponse(Response):
def __init__(self, response):
response = response['DeleteApplicationResponse']
super(DeleteApplicationResponse, self).__init__(response)
class DeleteApplicationVersionResponse(Response):
def __init__(self, response):
response = response['DeleteApplicationVersionResponse']
super(DeleteApplicationVersionResponse, self).__init__(response)
class DeleteConfigurationTemplateResponse(Response):
def __init__(self, response):
response = response['DeleteConfigurationTemplateResponse']
super(DeleteConfigurationTemplateResponse, self).__init__(response)
class DeleteEnvironmentConfigurationResponse(Response):
def __init__(self, response):
response = response['DeleteEnvironmentConfigurationResponse']
super(DeleteEnvironmentConfigurationResponse, self).__init__(response)
class DescribeApplicationVersionsResponse(Response):
def __init__(self, response):
response = response['DescribeApplicationVersionsResponse']
super(DescribeApplicationVersionsResponse, self).__init__(response)
response = response['DescribeApplicationVersionsResult']
self.application_versions = []
if response['ApplicationVersions']:
for member in response['ApplicationVersions']:
application_version = ApplicationVersionDescription(member)
self.application_versions.append(application_version)
class DescribeApplicationsResponse(Response):
def __init__(self, response):
response = response['DescribeApplicationsResponse']
super(DescribeApplicationsResponse, self).__init__(response)
response = response['DescribeApplicationsResult']
self.applications = []
if response['Applications']:
for member in response['Applications']:
application = ApplicationDescription(member)
self.applications.append(application)
class DescribeConfigurationOptionsResponse(Response):
def __init__(self, response):
response = response['DescribeConfigurationOptionsResponse']
super(DescribeConfigurationOptionsResponse, self).__init__(response)
response = response['DescribeConfigurationOptionsResult']
self.options = []
if response['Options']:
for member in response['Options']:
option = ConfigurationOptionDescription(member)
self.options.append(option)
self.solution_stack_name = str(response['SolutionStackName'])
class DescribeConfigurationSettingsResponse(Response):
def __init__(self, response):
response = response['DescribeConfigurationSettingsResponse']
super(DescribeConfigurationSettingsResponse, self).__init__(response)
response = response['DescribeConfigurationSettingsResult']
self.configuration_settings = []
if response['ConfigurationSettings']:
for member in response['ConfigurationSettings']:
configuration_setting = ConfigurationSettingsDescription(member)
self.configuration_settings.append(configuration_setting)
class DescribeEnvironmentResourcesResponse(Response):
def __init__(self, response):
response = response['DescribeEnvironmentResourcesResponse']
super(DescribeEnvironmentResourcesResponse, self).__init__(response)
response = response['DescribeEnvironmentResourcesResult']
if response['EnvironmentResources']:
self.environment_resources = EnvironmentResourceDescription(response['EnvironmentResources'])
else:
self.environment_resources = None
class DescribeEnvironmentsResponse(Response):
def __init__(self, response):
response = response['DescribeEnvironmentsResponse']
super(DescribeEnvironmentsResponse, self).__init__(response)
response = response['DescribeEnvironmentsResult']
self.environments = []
if response['Environments']:
for member in response['Environments']:
environment = EnvironmentDescription(member)
self.environments.append(environment)
class DescribeEventsResponse(Response):
def __init__(self, response):
response = response['DescribeEventsResponse']
super(DescribeEventsResponse, self).__init__(response)
response = response['DescribeEventsResult']
self.events = []
if response['Events']:
for member in response['Events']:
event = EventDescription(member)
self.events.append(event)
        self.next_token = str(response['NextToken'])
class ListAvailableSolutionStacksResponse(Response):
def __init__(self, response):
response = response['ListAvailableSolutionStacksResponse']
super(ListAvailableSolutionStacksResponse, self).__init__(response)
response = response['ListAvailableSolutionStacksResult']
self.solution_stack_details = []
if response['SolutionStackDetails']:
for member in response['SolutionStackDetails']:
solution_stack_detail = SolutionStackDescription(member)
self.solution_stack_details.append(solution_stack_detail)
self.solution_stacks = []
if response['SolutionStacks']:
for member in response['SolutionStacks']:
solution_stack = str(member)
self.solution_stacks.append(solution_stack)
class RebuildEnvironmentResponse(Response):
def __init__(self, response):
response = response['RebuildEnvironmentResponse']
super(RebuildEnvironmentResponse, self).__init__(response)
class RequestEnvironmentInfoResponse(Response):
def __init__(self, response):
response = response['RequestEnvironmentInfoResponse']
super(RequestEnvironmentInfoResponse, self).__init__(response)
class RestartAppServerResponse(Response):
def __init__(self, response):
response = response['RestartAppServerResponse']
super(RestartAppServerResponse, self).__init__(response)
class RetrieveEnvironmentInfoResponse(Response):
def __init__(self, response):
response = response['RetrieveEnvironmentInfoResponse']
super(RetrieveEnvironmentInfoResponse, self).__init__(response)
response = response['RetrieveEnvironmentInfoResult']
self.environment_info = []
if response['EnvironmentInfo']:
for member in response['EnvironmentInfo']:
environment_info = EnvironmentInfoDescription(member)
self.environment_info.append(environment_info)
class SwapEnvironmentCNAMEsResponse(Response):
def __init__(self, response):
response = response['SwapEnvironmentCNAMEsResponse']
super(SwapEnvironmentCNAMEsResponse, self).__init__(response)
class SwapEnvironmentCnamesResponse(SwapEnvironmentCNAMEsResponse): pass
class TerminateEnvironmentResponse(Response):
def __init__(self, response):
response = response['TerminateEnvironmentResponse']
super(TerminateEnvironmentResponse, self).__init__(response)
response = response['TerminateEnvironmentResult']
self.application_name = str(response['ApplicationName'])
self.cname = str(response['CNAME'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.description = str(response['Description'])
self.endpoint_url = str(response['EndpointURL'])
self.environment_id = str(response['EnvironmentId'])
self.environment_name = str(response['EnvironmentName'])
self.health = str(response['Health'])
if response['Resources']:
self.resources = EnvironmentResourcesDescription(response['Resources'])
else:
self.resources = None
self.solution_stack_name = str(response['SolutionStackName'])
self.status = str(response['Status'])
self.template_name = str(response['TemplateName'])
self.version_label = str(response['VersionLabel'])
class UpdateApplicationResponse(Response):
def __init__(self, response):
response = response['UpdateApplicationResponse']
super(UpdateApplicationResponse, self).__init__(response)
response = response['UpdateApplicationResult']
if response['Application']:
self.application = ApplicationDescription(response['Application'])
else:
self.application = None
class UpdateApplicationVersionResponse(Response):
def __init__(self, response):
response = response['UpdateApplicationVersionResponse']
super(UpdateApplicationVersionResponse, self).__init__(response)
response = response['UpdateApplicationVersionResult']
if response['ApplicationVersion']:
self.application_version = ApplicationVersionDescription(response['ApplicationVersion'])
else:
self.application_version = None
class UpdateConfigurationTemplateResponse(Response):
def __init__(self, response):
response = response['UpdateConfigurationTemplateResponse']
super(UpdateConfigurationTemplateResponse, self).__init__(response)
response = response['UpdateConfigurationTemplateResult']
self.application_name = str(response['ApplicationName'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.deployment_status = str(response['DeploymentStatus'])
self.description = str(response['Description'])
self.environment_name = str(response['EnvironmentName'])
self.option_settings = []
if response['OptionSettings']:
for member in response['OptionSettings']:
option_setting = ConfigurationOptionSetting(member)
self.option_settings.append(option_setting)
self.solution_stack_name = str(response['SolutionStackName'])
self.template_name = str(response['TemplateName'])
class UpdateEnvironmentResponse(Response):
def __init__(self, response):
response = response['UpdateEnvironmentResponse']
super(UpdateEnvironmentResponse, self).__init__(response)
response = response['UpdateEnvironmentResult']
self.application_name = str(response['ApplicationName'])
self.cname = str(response['CNAME'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.description = str(response['Description'])
self.endpoint_url = str(response['EndpointURL'])
self.environment_id = str(response['EnvironmentId'])
self.environment_name = str(response['EnvironmentName'])
self.health = str(response['Health'])
if response['Resources']:
self.resources = EnvironmentResourcesDescription(response['Resources'])
else:
self.resources = None
self.solution_stack_name = str(response['SolutionStackName'])
self.status = str(response['Status'])
self.template_name = str(response['TemplateName'])
self.version_label = str(response['VersionLabel'])
class ValidateConfigurationSettingsResponse(Response):
def __init__(self, response):
response = response['ValidateConfigurationSettingsResponse']
super(ValidateConfigurationSettingsResponse, self).__init__(response)
response = response['ValidateConfigurationSettingsResult']
self.messages = []
if response['Messages']:
for member in response['Messages']:
message = ValidationMessage(member)
self.messages.append(message)
| mit |
cginternals/gloperate | source/tests/googletest/googletest/scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| mit |
iut-ibk/DynaMind-ToolBox | DynaMind/scripts/TestModules/advanced_data_types.py | 2 | 1395 | from osgeo import ogr
from pydynamind import *
class AdvancedDataTypes(Module):
def __init__(self):
Module.__init__(self)
self.setIsGDALModule(True)
self.createParameter("elements", INT, "Number of elements")
self.elements = 100000
self.createParameter("append", BOOL, "true if append")
self.append = False
self.__container = ViewContainer()
def init(self):
if self.append:
self.__container = ViewContainer("component", NODE, MODIFY)
else:
self.__container = ViewContainer("component", NODE, WRITE)
self.__container.addAttribute("vector", DM.Attribute.DOUBLEVECTOR, WRITE)
self.__container.addAttribute("integer",DM.Attribute.INT, WRITE)
views = []
views.append(self.__container)
self.registerViewContainers(views)
#self.features = []
def run(self):
for i in range(self.elements):
f = self.__container.create_feature()
a = []
            for j in range(1000):
                a.append(j*1.0)
dm_set_double_list(f, "vector", a)
f.SetField("integer", i)
if i % 100000 == 0:
self.__container.sync()
self.__container.finalise() | gpl-2.0 |
zhjunlang/kbengine | kbe/src/lib/python/Tools/ssl/make_ssl_data.py | 34 | 2163 | #! /usr/bin/env python3
"""
This script should be called *manually* when we want to upgrade SSLError
`library` and `reason` mnemonics to a more recent OpenSSL version.
It takes two arguments:
- the path to the OpenSSL include files' directory
(e.g. openssl-1.0.1-beta3/include/openssl/)
- the path to the C file to be generated
(probably Modules/_ssl_data.h)
"""
import datetime
import os
import re
import sys
def parse_error_codes(h_file, prefix):
pat = re.compile(r"#define\W+(%s([\w]+))\W+(\d+)\b" % re.escape(prefix))
codes = []
with open(h_file, "r", encoding="latin1") as f:
for line in f:
match = pat.search(line)
if match:
code, name, num = match.groups()
num = int(num)
codes.append((code, name, num))
return codes
if __name__ == "__main__":
openssl_inc = sys.argv[1]
outfile = sys.argv[2]
use_stdout = outfile == '-'
f = sys.stdout if use_stdout else open(outfile, "w")
error_libraries = (
# (library code, mnemonic, error prefix, header file)
('ERR_LIB_PEM', 'PEM', 'PEM_R_', 'pem.h'),
('ERR_LIB_SSL', 'SSL', 'SSL_R_', 'ssl.h'),
('ERR_LIB_X509', 'X509', 'X509_R_', 'x509.h'),
)
def w(l):
f.write(l + "\n")
w("/* File generated by Tools/ssl/make_ssl_data.py */")
w("/* Generated on %s */" % datetime.datetime.now().isoformat())
w("")
w("static struct py_ssl_library_code library_codes[] = {")
for libcode, mnemo, _, _ in error_libraries:
w(' {"%s", %s},' % (mnemo, libcode))
w(' { NULL }')
w('};')
w("")
w("static struct py_ssl_error_code error_codes[] = {")
for libcode, _, prefix, h_file in error_libraries:
codes = parse_error_codes(os.path.join(openssl_inc, h_file), prefix)
for code, name, num in sorted(codes):
w(' #ifdef %s' % (code))
w(' {"%s", %s, %s},' % (name, libcode, code))
w(' #else')
w(' {"%s", %s, %d},' % (name, libcode, num))
w(' #endif')
w(' { NULL }')
w('};')
if not use_stdout:
f.close()
| lgpl-3.0 |
slitvinov/lammps-sph-multiphase | tools/moltemplate/src/nbody_Impropers.py | 35 | 2079 | from nbody_graph_search import Ugraph
# To find 4-body "improper" interactions,
# (by default, most of the time), we would use this subgraph:
# 3
# * 1st bond connects atoms 0 and 1
# | => 2nd bond connects atoms 0 and 2
# _.*._ 3rd bond connects atoms 0 and 3
# *' 0 `*
# 1 2
#
bond_pattern = Ugraph([(0,1), (0,2), (0,3)])
# (Ugraph atom indices begin at 0, not 1)
def canonical_order(match):
"""
When searching for atoms with matching bond patterns GraphMatcher
often returns redundant results. We must define a "canonical_order"
function which sorts the atoms and bonds in a way which is consistent
with the type of N-body interaction being considered.
The atoms (and bonds) in a candidate match are rearranged by the
canonical_order(). Then the re-ordered list of atom and bond ids is
tested against the list of atom/bond ids in the matches-found-so-far,
before it is added to the list of interactions found so far.
(For example, it does not make sense to define a separate 4-body improper-
angle interaction between atoms 1, 2, 3, 4 AND 1, 3, 2, 4.
The improper-angle is usually defined as the angle between planes formed
by atoms 1,2,3 & 2,3,4. Alternately, it may instead be defined as the
angle between the 1,2,3 plane and atom 4. Either way, this angle does
not change when swapping the middle pair of atoms so we arbitrarily
sort them so that the second atom has a lower atomID than the third atom.)
"""
atom0 = match[0][0]
atom1 = match[0][1]
atom2 = match[0][2]
atom3 = match[0][3]
    # match[1][0:3] contains the ID numbers for the 3 bonds
bond0 = match[1][0]
bond1 = match[1][1]
bond2 = match[1][2]
if atom1 <= atom2:
#return ((atom0,atom1,atom2,atom3), (bond0, bond1, bond2))
# But this is the same thing as:
return match
else:
return ((atom0,atom2,atom1,atom3), (bond1, bond0, bond2))
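# Illustrative sketch (hypothetical atom/bond ids): for a candidate match
# ((0, 5, 3, 8), (11, 12, 13)), atom 5 > atom 3, so canonical_order() swaps the middle
# pair of atoms and the first two bonds, returning ((0, 3, 5, 8), (12, 11, 13)).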
| gpl-2.0 |
auag92/n2dm | Asap-3.8.4/Projects/NanoparticleMC/resume_amc_gas.py | 1 | 2557 | #PBS -l nodes=20:ppn=4:opteron4
#PBS -q verylong
#PBS -N amc_n100_conv1
#PBS -m ae
import os
from montecarlo import SurfaceMonteCarloData
from ase.cluster.cubic import FaceCenteredCubic
from ase.cluster import data
from asap3.MonteCarlo.Metropolis import Metropolis
from asap3.MonteCarlo.Moves import SurfaceMove
from asap3 import EMT
import numpy as np
from ase.visualize import view
from resizecluster import resizecluster
from ase.io.trajectory import PickleTrajectory
import sys
from atommontecarlodata import AtomMonteCarloData
from ase.parallel import world
from AdsCalc import adscalc
from time import time,sleep
import pickle
# Change note: added gas option; check for indentation (tab vs. spaces) errors.
# Added resume option.
# Arguments:
filename = sys.argv[1]
temperature = float(sys.argv[2])
nsteps = int(sys.argv[3])
outdir= sys.argv[4]
tgas = float(sys.argv[5])
pgas = float(sys.argv[6])
species = "AuCO"
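# Illustrative invocation (hypothetical values): resume the simulations described in
# surfaces.smc at 1200 K for 50000 Monte Carlo steps, writing .amc files to ./amc_out,
# with a CO gas reservoir at 300 K and pressure 1e-2 (in whatever units adscalc expects):
#   python resume_amc_gas.py surfaces.smc 1200 50000 amc_out 300 1e-2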
def read_and_do_montecarlo(filename,use_gas):
d = SurfaceMonteCarloData()
d.read(filename)
print "Starting "+str(len(d))+" sims."
surfaces = data.fcc.surface_names
#for n in range(0,len(d)):
for n in range(world.rank,len(d),world.size):
file = outdir+"/a%05i.amc.gz" % n
if not os.path.exists(file):
layers = d[n][1] # Really d[n]["layers"]
atoms = FaceCenteredCubic(d.atomic_number,
surfaces, layers,latticeconstant=d.lattice_constant)
resizecluster(atoms, d.fitsize)
print "Resized number of atoms:", len(atoms)
do_monte_carlo(atoms,n,outdir,use_gas)
    world.barrier()  # Let the CPUs wait until all are in the same state.
def do_monte_carlo(atoms,iteration,outdir,use_gas):
tempcalc = EMT()
if use_gas==True:
atoms.set_calculator(adscalc(tempcalc,temperature=tgas,pressure=pgas,species=species))
else:
atoms.set_calculator(tempcalc)
Esmc = atoms.get_potential_energy()
mc = Metropolis(atoms=atoms,log=None)
surfmove =SurfaceMove()
mc.attach_move(surfmove)
outfilename = "a%05i.amc" % iteration
amcd = AtomMonteCarloData(atoms=atoms,surfmove=surfmove,temp=temperature,filename=outfilename,Esmc=Esmc)
mc.attach_observer(amcd.accept_move) #Because default event is at acceptmove
mc.attach_observer(amcd.reject_move, attime='reject')
amcd.accept_move() #We need to write the first configuration,
mc.run(nsteps, temp=temperature)
amcd.write(os.path.join(outdir,outfilename))
read_and_do_montecarlo(filename,True)
| mit |
nanditav/15712-TensorFlow | tensorflow/python/ops/nn_fused_batchnorm_test.py | 19 | 8762 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused_batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class BatchNormalizationTest(tf.test.TestCase):
def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = tf.transpose(x, [0, 2, 3, 1])
y = tf.nn.batch_normalization(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = tf.transpose(y, [0, 3, 1, 2])
return y.eval()
def _test_inference(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name='x')
scale = tf.constant(scale_val, name='scale')
offset = tf.constant(offset_val, name='offset')
mean = tf.constant(mean_val, name='mean')
var = tf.constant(var_val, name='variance')
epsilon = 0.001
y, _, _ = tf.nn.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=var,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val = sess.run(y)
y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
data_format)
self.assertAllClose(y_ref, y_val, atol=1e-3)
def _training_ref(self, x, scale, offset, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = tf.transpose(x, [0, 2, 3, 1])
mean, var = tf.nn.moments(x, [0, 1, 2], keep_dims=False)
y = tf.nn.batch_normalization(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = tf.transpose(y, [0, 3, 1, 2])
return y.eval(), mean.eval(), var.eval()
def _test_training(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name='x')
scale = tf.constant(scale_val, name='scale')
offset = tf.constant(offset_val, name='offset')
epsilon = 0.001
y, mean, var = tf.nn.fused_batch_norm(
x,
scale,
offset,
epsilon=epsilon,
data_format=data_format,
is_training=True)
y_val, mean_val, var_val = sess.run([y, mean, var])
y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset, epsilon,
data_format)
self.assertAllClose(y_ref, y_val, atol=1e-3)
self.assertAllClose(mean_ref, mean_val, atol=1e-3)
# This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
# the denominator in the formula to calculate variance, while
# tf.nn.fused_batch_norm has Bessel's correction built in.
sample_size = x_val.size / scale_val.size
var_ref = var_ref * sample_size / (max(sample_size - 1.0, 1.0))
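    # For example, with x_shape [1, 1, 6, 2] and scale_shape [2] (as in testTraining),
    # sample_size is 12 / 2 = 6, so the biased variance is rescaled by 6 / 5 here.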
self.assertAllClose(var_ref, var_val, atol=1e-3)
def _test_gradient(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu):
x = tf.constant(x_val, name='x')
scale = tf.constant(scale_val, name='scale')
offset = tf.constant(offset_val, name='offset')
y, _, _ = tf.nn.fused_batch_norm(
x, scale, offset, data_format=data_format)
err_x = tf.test.compute_gradient_error(x, x_shape, y, x_shape)
err_scale = tf.test.compute_gradient_error(scale, scale_shape, y, x_shape)
err_offset = tf.test.compute_gradient_error(offset, scale_shape, y,
x_shape)
err_tolerance = 1e-3
self.assertLess(err_x, err_tolerance)
self.assertLess(err_scale, err_tolerance)
self.assertLess(err_offset, err_tolerance)
def testInference(self):
x_shape = [1, 1, 6, 1]
if tf.test.is_gpu_available():
self._test_inference(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_inference(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if tf.test.is_gpu_available():
self._test_inference(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if tf.test.is_gpu_available():
self._test_inference(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
if tf.test.is_gpu_available():
self._test_inference(x_shape, [131], use_gpu=True, data_format='NCHW')
self._test_inference(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [6], use_gpu=False, data_format='NHWC')
def testTraining(self):
x_shape = [1, 1, 6, 1]
if tf.test.is_gpu_available():
self._test_training(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_training(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if tf.test.is_gpu_available():
self._test_training(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if tf.test.is_gpu_available():
self._test_training(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
if tf.test.is_gpu_available():
self._test_training(x_shape, [131], use_gpu=True, data_format='NCHW')
self._test_training(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [6], use_gpu=False, data_format='NHWC')
def testBatchNormGrad(self):
x_shape = [1, 1, 6, 1]
if tf.test.is_gpu_available():
self._test_gradient(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_gradient(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_gradient(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if tf.test.is_gpu_available():
self._test_gradient(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_gradient(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if tf.test.is_gpu_available():
self._test_gradient(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [7, 9, 13, 6]
if tf.test.is_gpu_available():
self._test_gradient(x_shape, [9], use_gpu=True, data_format='NCHW')
self._test_gradient(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_gradient(x_shape, [6], use_gpu=False, data_format='NHWC')
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
xin3liang/platform_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/thirdparty/coverage/control.py | 64 | 26459 | """Core control stuff for Coverage."""
import atexit, os, random, socket, sys
from coverage.annotate import AnnotateReporter
from coverage.backward import string_class
from coverage.codeunit import code_unit_factory, CodeUnit
from coverage.collector import Collector
from coverage.config import CoverageConfig
from coverage.data import CoverageData
from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
from coverage.files import PathAliases, find_python_files
from coverage.html import HtmlReporter
from coverage.misc import CoverageException, bool_or_none, join_regex
from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
class coverage(object):
"""Programmatic access to Coverage.
To use::
from coverage import coverage
cov = coverage()
cov.start()
#.. blah blah (run your code) blah blah ..
cov.stop()
cov.html_report(directory='covhtml')
"""
def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
source=None, omit=None, include=None):
"""
`data_file` is the base name of the data file to use, defaulting to
".coverage". `data_suffix` is appended (with a dot) to `data_file` to
create the final file name. If `data_suffix` is simply True, then a
suffix is created with the machine and process identity included.
`cover_pylib` is a boolean determining whether Python code installed
with the Python interpreter is measured. This includes the Python
standard library and any packages installed with the interpreter.
If `auto_data` is true, then any existing data file will be read when
coverage measurement starts, and data will be saved automatically when
measurement stops.
If `timid` is true, then a slower and simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions breaks the faster trace function.
If `branch` is true, then branch coverage will be measured in addition
to the usual statement coverage.
`config_file` determines what config file to read. If it is a string,
it is the name of the config file to read. If it is True, then a
standard file is read (".coveragerc"). If it is False, then no file is
read.
`source` is a list of file paths or package names. Only code located
in the trees indicated by the file paths or package names will be
measured.
`include` and `omit` are lists of filename patterns. Files that match
`include` will be measured, files that match `omit` will not. Each
will also accept a single string argument.
"""
from coverage import __version__
# A record of all the warnings that have been issued.
self._warnings = []
# Build our configuration from a number of sources:
# 1: defaults:
self.config = CoverageConfig()
# 2: from the coveragerc file:
if config_file:
if config_file is True:
config_file = ".coveragerc"
try:
self.config.from_file(config_file)
except ValueError:
_, err, _ = sys.exc_info()
raise CoverageException(
"Couldn't read config file %s: %s" % (config_file, err)
)
# 3: from environment variables:
self.config.from_environment('COVERAGE_OPTIONS')
env_data_file = os.environ.get('COVERAGE_FILE')
if env_data_file:
self.config.data_file = env_data_file
# 4: from constructor arguments:
if isinstance(omit, string_class):
omit = [omit]
if isinstance(include, string_class):
include = [include]
self.config.from_args(
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
source=source, omit=omit, include=include
)
self.auto_data = auto_data
self.atexit_registered = False
# _exclude_re is a dict mapping exclusion list names to compiled
# regexes.
self._exclude_re = {}
self._exclude_regex_stale()
self.file_locator = FileLocator()
# The source argument can be directories or package names.
self.source = []
self.source_pkgs = []
for src in self.config.source or []:
if os.path.exists(src):
self.source.append(self.file_locator.canonical_filename(src))
else:
self.source_pkgs.append(src)
self.omit = self._prep_patterns(self.config.omit)
self.include = self._prep_patterns(self.config.include)
self.collector = Collector(
self._should_trace, timid=self.config.timid,
branch=self.config.branch, warn=self._warn
)
# Suffixes are a bit tricky. We want to use the data suffix only when
# collecting data, not when combining data. So we save it as
# `self.run_suffix` now, and promote it to `self.data_suffix` if we
# find that we are collecting data later.
if data_suffix or self.config.parallel:
if not isinstance(data_suffix, string_class):
# if data_suffix=True, use .machinename.pid.random
data_suffix = True
else:
data_suffix = None
self.data_suffix = None
self.run_suffix = data_suffix
# Create the data file. We do this at construction time so that the
# data file will be written into the directory where the process
# started rather than wherever the process eventually chdir'd to.
self.data = CoverageData(
basename=self.config.data_file,
collector="coverage v%s" % __version__
)
# The dirs for files considered "installed with the interpreter".
self.pylib_dirs = []
if not self.config.cover_pylib:
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
for m in (atexit, os, random, socket):
if hasattr(m, "__file__"):
m_dir = self._canonical_dir(m.__file__)
if m_dir not in self.pylib_dirs:
self.pylib_dirs.append(m_dir)
# To avoid tracing the coverage code itself, we skip anything located
# where we are.
self.cover_dir = self._canonical_dir(__file__)
# The matchers for _should_trace, created when tracing starts.
self.source_match = None
self.pylib_match = self.cover_match = None
self.include_match = self.omit_match = None
# Only _harvest_data once per measurement cycle.
self._harvested = False
# Set the reporting precision.
Numbers.set_precision(self.config.precision)
# When tearing down the coverage object, modules can become None.
# Saving the modules as object attributes avoids problems, but it is
# quite ad-hoc which modules need to be saved and which references
# need to use the object attributes.
self.socket = socket
self.os = os
self.random = random
def _canonical_dir(self, f):
"""Return the canonical directory of the file `f`."""
return os.path.split(self.file_locator.canonical_filename(f))[0]
def _source_for_file(self, filename):
"""Return the source file for `filename`."""
if not filename.endswith(".py"):
if filename[-4:-1] == ".py":
filename = filename[:-1]
return filename
def _should_trace(self, filename, frame):
"""Decide whether to trace execution in `filename`
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
Returns a canonicalized filename if it should be traced, False if it
should not.
"""
if os is None:
return False
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# filenames like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
return False
if filename.endswith(".html"):
# Jinja and maybe other templating systems compile templates into
# Python code, but use the template filename as the filename in
# the compiled code. Of course, those filenames are useless later
# so don't bother collecting. TODO: How should we really separate
# out good file extensions from bad?
return False
self._check_for_packages()
# Compiled Python files have two filenames: frame.f_code.co_filename is
# the filename at the time the .pyc was compiled. The second name is
# __file__, which is where the .pyc was actually loaded from. Since
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
dunder_file = frame.f_globals.get('__file__')
if dunder_file:
filename = self._source_for_file(dunder_file)
# Jython reports the .class file to the tracer, use the source file.
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
canonical = self.file_locator.canonical_filename(filename)
# If the user specified source or include, then that's authoritative
# about the outer bound of what to measure and we don't have to apply
# any canned exclusions. If they didn't, then we have to exclude the
# stdlib and coverage.py directories.
if self.source_match:
if not self.source_match.match(canonical):
return False
elif self.include_match:
if not self.include_match.match(canonical):
return False
else:
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
if self.pylib_match and self.pylib_match.match(canonical):
return False
# We exclude the coverage code itself, since a little of it will be
# measured otherwise.
if self.cover_match and self.cover_match.match(canonical):
return False
# Check the file against the omit pattern.
if self.omit_match and self.omit_match.match(canonical):
return False
return canonical
# To log what should_trace returns, change this to "if 1:"
if 0:
_real_should_trace = _should_trace
def _should_trace(self, filename, frame): # pylint: disable=E0102
"""A logging decorator around the real _should_trace function."""
ret = self._real_should_trace(filename, frame)
print("should_trace: %r -> %r" % (filename, ret))
return ret
def _warn(self, msg):
"""Use `msg` as a warning."""
self._warnings.append(msg)
sys.stderr.write("Coverage.py warning: %s\n" % msg)
def _prep_patterns(self, patterns):
"""Prepare the file patterns for use in a `FnmatchMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
as-is. If it does not start with a wildcard, then it is made
absolute with the current directory.
If `patterns` is None, an empty list is returned.
"""
patterns = patterns or []
prepped = []
for p in patterns or []:
if p.startswith("*") or p.startswith("?"):
prepped.append(p)
else:
prepped.append(self.file_locator.abs_file(p))
return prepped
def _check_for_packages(self):
"""Update the source_match matcher with latest imported packages."""
# Our self.source_pkgs attribute is a list of package names we want to
# measure. Each time through here, we see if we've imported any of
# them yet. If so, we add its file to source_match, and we don't have
# to look for that package any more.
if self.source_pkgs:
found = []
for pkg in self.source_pkgs:
try:
mod = sys.modules[pkg]
except KeyError:
continue
found.append(pkg)
try:
pkg_file = mod.__file__
except AttributeError:
self._warn("Module %s has no Python source." % pkg)
else:
d, f = os.path.split(pkg_file)
if f.startswith('__init__.'):
# This is actually a package, return the directory.
pkg_file = d
else:
pkg_file = self._source_for_file(pkg_file)
pkg_file = self.file_locator.canonical_filename(pkg_file)
self.source.append(pkg_file)
self.source_match.add(pkg_file)
for pkg in found:
self.source_pkgs.remove(pkg)
def use_cache(self, usecache):
"""Control the use of a data file (incorrectly called a cache).
`usecache` is true or false, whether to read and write data on disk.
"""
self.data.usefile(usecache)
def load(self):
"""Load previously-collected coverage data from the data file."""
self.collector.reset()
self.data.read()
def start(self):
"""Start measuring code coverage."""
if self.run_suffix:
# Calling start() means we're running code, so use the run_suffix
# as the data_suffix when we eventually save the data.
self.data_suffix = self.run_suffix
if self.auto_data:
self.load()
# Save coverage data when Python exits.
if not self.atexit_registered:
atexit.register(self.save)
self.atexit_registered = True
# Create the matchers we need for _should_trace
if self.source or self.source_pkgs:
self.source_match = TreeMatcher(self.source)
else:
if self.cover_dir:
self.cover_match = TreeMatcher([self.cover_dir])
if self.pylib_dirs:
self.pylib_match = TreeMatcher(self.pylib_dirs)
if self.include:
self.include_match = FnmatchMatcher(self.include)
if self.omit:
self.omit_match = FnmatchMatcher(self.omit)
self._harvested = False
self.collector.start()
def stop(self):
"""Stop measuring code coverage."""
self.collector.stop()
self._harvest_data()
def erase(self):
"""Erase previously-collected coverage data.
This removes the in-memory data collected in this session as well as
discarding the data file.
"""
self.collector.reset()
self.data.erase()
def clear_exclude(self, which='exclude'):
"""Clear the exclude list."""
setattr(self.config, which + "_list", [])
self._exclude_regex_stale()
def exclude(self, regex, which='exclude'):
"""Exclude source lines from execution consideration.
A number of lists of regular expressions are maintained. Each list
selects lines that are treated differently during reporting.
`which` determines which list is modified. The "exclude" list selects
lines that are not considered executable at all. The "partial" list
indicates lines with branches that are not taken.
`regex` is a regular expression. The regex is added to the specified
list. If any of the regexes in the list is found in a line, the line
is marked for special treatment during reporting.
"""
excl_list = getattr(self.config, which + "_list")
excl_list.append(regex)
self._exclude_regex_stale()
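    # Illustrative usage (a sketch): mark "pragma: no cover" lines as non-executable
    # and treat "if DEBUG:" branches as partial:
    #   cov.exclude(r'#\s*pragma: no cover')
    #   cov.exclude(r'if DEBUG:', which='partial')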
def _exclude_regex_stale(self):
"""Drop all the compiled exclusion regexes, a list was modified."""
self._exclude_re.clear()
def _exclude_regex(self, which):
"""Return a compiled regex for the given exclusion list."""
if which not in self._exclude_re:
excl_list = getattr(self.config, which + "_list")
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
def get_exclude_list(self, which='exclude'):
"""Return a list of excluded regex patterns.
`which` indicates which list is desired. See `exclude` for the lists
that are available, and their meaning.
"""
return getattr(self.config, which + "_list")
def save(self):
"""Save the collected coverage data to the data file."""
data_suffix = self.data_suffix
if data_suffix is True:
# If data_suffix was a simple true value, then make a suffix with
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
data_suffix = "%s.%s.%06d" % (
self.socket.gethostname(), self.os.getpid(),
self.random.randint(0, 99999)
)
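            # Illustrative result (hypothetical host and pid): data_suffix becomes
            # something like 'myhost.12345.067890', so the data is written to a file
            # named '.coverage.myhost.12345.067890'.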
self._harvest_data()
self.data.write(suffix=data_suffix)
def combine(self):
"""Combine together a number of similarly-named coverage data files.
All coverage data files whose name starts with `data_file` (from the
coverage() constructor) will be read, and combined together into the
current measurements.
"""
aliases = None
if self.config.paths:
aliases = PathAliases(self.file_locator)
for paths in self.config.paths.values():
result = paths[0]
for pattern in paths[1:]:
aliases.add(pattern, result)
self.data.combine_parallel_data(aliases=aliases)
def _harvest_data(self):
"""Get the collected data and reset the collector.
Also warn about various problems collecting data.
"""
if not self._harvested:
self.data.add_line_data(self.collector.get_line_data())
self.data.add_arc_data(self.collector.get_arc_data())
self.collector.reset()
# If there are still entries in the source_pkgs list, then we never
# encountered those packages.
for pkg in self.source_pkgs:
self._warn("Module %s was never imported." % pkg)
# Find out if we got any data.
summary = self.data.summary()
if not summary:
self._warn("No data was collected.")
# Find files that were never executed at all.
for src in self.source:
for py_file in find_python_files(src):
self.data.touch_file(py_file)
self._harvested = True
# Backward compatibility with version 1.
def analysis(self, morf):
"""Like `analysis2` but doesn't return excluded line numbers."""
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
def analysis2(self, morf):
"""Analyze a module.
`morf` is a module or a filename. It will be analyzed to determine
its coverage statistics. The return value is a 5-tuple:
* The filename for the module.
* A list of line numbers of executable statements.
* A list of line numbers of excluded statements.
* A list of line numbers of statements not run (missing from
execution).
* A readable formatted string of the missing line numbers.
The analysis uses the source file itself and the current measured
coverage data.
"""
analysis = self._analyze(morf)
return (
analysis.filename, analysis.statements, analysis.excluded,
analysis.missing, analysis.missing_formatted()
)
def _analyze(self, it):
"""Analyze a single morf or code unit.
Returns an `Analysis` object.
"""
if not isinstance(it, CodeUnit):
it = code_unit_factory(it, self.file_locator)[0]
return Analysis(self, it)
def report(self, morfs=None, show_missing=True, ignore_errors=None,
file=None, # pylint: disable=W0622
omit=None, include=None
):
"""Write a summary report to `file`.
Each module in `morfs` is listed, with counts of statements, executed
statements, missing statements, and a list of lines missed.
`include` is a list of filename patterns. Modules whose filenames
match those patterns will be included in the report. Modules matching
`omit` will not be included in the report.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
reporter = SummaryReporter(
self, show_missing, self.config.ignore_errors
)
reporter.report(morfs, outfile=file, config=self.config)
def annotate(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None):
"""Annotate a list of modules.
Each module in `morfs` is annotated. The source is written to a new
file, named with a ",cover" suffix, with each line prefixed with a
marker to indicate the coverage of the line. Covered lines have ">",
excluded lines have "-", and missing lines have "!".
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
reporter = AnnotateReporter(self, self.config.ignore_errors)
reporter.report(morfs, config=self.config, directory=directory)
def html_report(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None):
"""Generate an HTML report.
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
html_dir=directory,
)
reporter = HtmlReporter(self, self.config.ignore_errors)
reporter.report(morfs, config=self.config)
def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
omit=None, include=None):
"""Generate an XML report of coverage results.
The report is compatible with Cobertura reports.
Each module in `morfs` is included in the report. `outfile` is the
path to write the file to, "-" will write to stdout.
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
xml_output=outfile,
)
file_to_close = None
if self.config.xml_output:
if self.config.xml_output == '-':
outfile = sys.stdout
else:
outfile = open(self.config.xml_output, "w")
file_to_close = outfile
try:
reporter = XmlReporter(self, self.config.ignore_errors)
reporter.report(morfs, outfile=outfile, config=self.config)
finally:
if file_to_close:
file_to_close.close()
def sysinfo(self):
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
import platform, re
try:
implementation = platform.python_implementation()
except AttributeError:
implementation = "unknown"
info = [
('version', covmod.__version__),
('coverage', covmod.__file__),
('cover_dir', self.cover_dir),
('pylib_dirs', self.pylib_dirs),
('tracer', self.collector.tracer_name()),
('data_path', self.data.filename),
('python', sys.version.replace('\n', '')),
('platform', platform.platform()),
('implementation', implementation),
('cwd', os.getcwd()),
('path', sys.path),
('environment', [
("%s = %s" % (k, v)) for k, v in os.environ.items()
if re.search("^COV|^PY", k)
]),
]
return info
def process_startup():
"""Call this at Python startup to perhaps measure coverage.
If the environment variable COVERAGE_PROCESS_START is defined, coverage
measurement is started. The value of the variable is the config file
to use.
There are two ways to configure your Python installation to invoke this
function when Python starts:
#. Create or append to sitecustomize.py to add these lines::
import coverage
coverage.process_startup()
#. Create a .pth file in your Python installation containing::
import coverage; coverage.process_startup()
"""
cps = os.environ.get("COVERAGE_PROCESS_START")
if cps:
cov = coverage(config_file=cps, auto_data=True)
if os.environ.get("COVERAGE_COVERAGE"):
# Measuring coverage within coverage.py takes yet more trickery.
cov.cover_dir = "Please measure coverage.py!"
cov.start()
| bsd-3-clause |
gaurav/phylo2owl | tests/test_alt_inputs.py | 1 | 2038 | #!/usr/bin/env python
"""
test_alt_inputs.py: Test whether phylo2owl supports multiple input types,
including NEXUS and NexML. All of these file types should return exactly the
same RDF/XML output.
"""
import libphylo2owl
import pytest
import os
def test_newick_convert_to_OWL(path_tre):
""" Test all .tre files by comparing them to the corresponding .owl file. """
# This might seem redundant, but it tests that '--format newick' works.
path_owl = path_tre[:-4] + '.owl'
if os.path.isfile(path_tre):
compare_example_file(path_tre, 'Newick', path_owl)
else:
pytest.skip("Newick file '{0}' does not have a comparable OWL file at {1}.".format(
path_tre,
path_owl
))
def test_nexus_convert_to_OWL(path_nex):
""" Test all .nex files by comparing them to the corresponding .owl file. """
path_owl = path_nex[:-4] + '.owl'
if os.path.isfile(path_owl):
compare_example_file(path_nex, 'NEXUS', path_owl)
else:
pytest.skip("Nexus file '{0}' does not have a comparable OWL file at {1}.".format(
path_nex,
path_owl
))
def test_nexml_convert_to_OWL(path_nexml):
""" Test all .nexml files by comparing them to the corresponding .owl file. """
path_owl = path_nexml[:-6] + '.owl'
if os.path.isfile(path_owl):
compare_example_file(path_nexml, 'NeXML', path_owl)
else:
pytest.skip("NeXML file '{0}' does not have a comparable OWL file at {1}.".format(
path_nexml,
path_owl
))
def compare_example_file(input_file, input_format, expected_output_file):
"""
For a given input file and format, generate OWL output, and then compare
it with the expected output file to make sure it's identical.
"""
(rc, stdout, stderr) = libphylo2owl.exec_phylo2owl([input_file, "--format", input_format])
assert rc == 0
with open(expected_output_file) as f:
expected_output = f.read()
assert expected_output == stdout
| mit |
Adward-R/SwayMini | lib/python2.7/site-packages/pip/locations.py | 117 | 7778 | """Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import getpass
import os
import os.path
import site
import sys
import tempfile
from distutils import sysconfig
from distutils.command.install import install, SCHEME_KEYS
from pip.compat import get_path_uid, WINDOWS
from pip.utils import appdirs
from pip import exceptions
# Hack for flake8
install
# CA Bundle Locations
CA_BUNDLE_PATHS = [
# Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt",
# Fedora/RHEL
"/etc/pki/tls/certs/ca-bundle.crt",
# OpenSUSE
"/etc/ssl/ca-bundle.pem",
# OpenBSD
"/etc/ssl/cert.pem",
# FreeBSD/DragonFly
"/usr/local/share/certs/ca-root-nss.crt",
# Homebrew on OSX
"/usr/local/etc/openssl/cert.pem",
]
# Attempt to locate a CA Bundle that we can pass into requests, we have a list
# of possible ones from various systems. If we cannot find one then we'll set
# this to None so that we default to whatever requests is setup to handle.
#
# Note to Downstream: If you wish to disable this autodetection and simply use
# whatever requests does (likely you've already patched
# requests.certs.where()) then simply edit this line so
# that it reads ``CA_BUNDLE_PATH = None``.
CA_BUNDLE_PATH = next((x for x in CA_BUNDLE_PATHS if os.path.exists(x)), None)
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
"""
Write the pip delete marker file into this directory.
"""
filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
with open(filepath, 'w') as marker_fp:
marker_fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
"""
Return True if we're running inside a virtualenv, False otherwise.
"""
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
return False
def virtualenv_no_global():
"""
Return True if in a venv and no system site packages.
"""
# this mirrors the logic in virtualenv.py for locating the
# no-global-site-packages.txt file
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
if running_under_virtualenv() and os.path.isfile(no_global_file):
return True
def __get_username():
""" Returns the effective username of the current process. """
if WINDOWS:
return getpass.getuser()
import pwd
return pwd.getpwuid(os.geteuid()).pw_name
def _get_build_prefix():
""" Returns a safe build_prefix """
path = os.path.join(
tempfile.gettempdir(),
'pip_build_%s' % __get_username().replace(' ', '_')
)
if WINDOWS:
""" on windows(tested on 7) temp dirs are isolated """
return path
try:
os.mkdir(path)
write_delete_marker_file(path)
except OSError:
file_uid = None
try:
# raises OSError for symlinks
# https://github.com/pypa/pip/pull/935#discussion_r5307003
file_uid = get_path_uid(path)
except OSError:
file_uid = None
if file_uid != os.geteuid():
msg = (
"The temporary folder for building (%s) is either not owned by"
" you, or is a symlink." % path
)
print(msg)
print(
"pip will not work until the temporary folder is either "
"deleted or is a real directory owned by your user account."
)
raise exceptions.InstallationError(msg)
return path
if running_under_virtualenv():
build_prefix = os.path.join(sys.prefix, 'build')
src_prefix = os.path.join(sys.prefix, 'src')
else:
# Note: intentionally NOT using mkdtemp
# See https://github.com/pypa/pip/issues/906 for plan to move to mkdtemp
build_prefix = _get_build_prefix()
# FIXME: keep src in cwd for now (it is not a temporary folder)
try:
src_prefix = os.path.join(os.getcwd(), 'src')
except OSError:
# In case the current working directory has been renamed or deleted
sys.exit(
"The folder you are executing pip from can no longer be found."
)
# under Mac OS X + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
build_prefix = os.path.abspath(os.path.realpath(build_prefix))
src_prefix = os.path.abspath(src_prefix)
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_site = site.USER_SITE
user_dir = os.path.expanduser('~')
if WINDOWS:
bin_py = os.path.join(sys.prefix, 'Scripts')
bin_user = os.path.join(user_site, 'Scripts')
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.ini'
legacy_storage_dir = os.path.join(user_dir, 'pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
else:
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.conf'
legacy_storage_dir = os.path.join(user_dir, '.pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
# Forcing to use /usr/local/bin for standard Mac OS X framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
site_config_files = [
os.path.join(path, config_basename)
for path in appdirs.site_config_dirs('pip')
]
def distutils_scheme(dist_name, user=False, home=None, root=None,
isolated=False):
"""
Return a distutils install scheme
"""
from distutils.dist import Distribution
scheme = {}
if isolated:
extra_dist_args = {"script_args": ["--no-user-cfg"]}
else:
extra_dist_args = {}
dist_args = {'name': dist_name}
dist_args.update(extra_dist_args)
d = Distribution(dist_args)
d.parse_config_files()
i = d.get_command_obj('install', create=True)
# NOTE: setting user or home has the side-effect of creating the home dir
# or user base for installations during finalize_options()
# ideally, we'd prefer a scheme class that has no side-effects.
i.user = user or i.user
i.home = home or i.home
i.root = root or i.root
i.finalize_options()
for key in SCHEME_KEYS:
scheme[key] = getattr(i, 'install_' + key)
if i.install_lib is not None:
# install_lib takes precedence over purelib and platlib
scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
if running_under_virtualenv():
scheme['headers'] = os.path.join(
sys.prefix,
'include',
'site',
'python' + sys.version[:3],
dist_name,
)
if root is not None:
scheme["headers"] = os.path.join(
root,
os.path.abspath(scheme["headers"])[1:],
)
return scheme
| apache-2.0 |
precedenceguo/mxnet | python/mxnet/contrib/text/embedding.py | 12 | 34598 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=consider-iterating-dictionary
# pylint: disable=super-init-not-called
"""Text token embeddings."""
from __future__ import absolute_import
from __future__ import print_function
import io
import logging
import os
import tarfile
import warnings
import zipfile
from . import _constants as C
from . import vocab
from ... import ndarray as nd
from ... import registry
def register(embedding_cls):
"""Registers a new token embedding.
Once an embedding is registered, we can create an instance of this embedding with
:func:`~mxnet.contrib.text.embedding.create`.
Examples
--------
>>> @mxnet.contrib.text.embedding.register
... class MyTextEmbed(mxnet.contrib.text.embedding._TokenEmbedding):
... def __init__(self, pretrained_file_name='my_pretrain_file'):
... pass
    >>> embed = mxnet.contrib.text.embedding.create('MyTextEmbed')
    >>> print(type(embed))
    <class '__main__.MyTextEmbed'>
"""
register_text_embedding = registry.get_register_func(_TokenEmbedding, 'token embedding')
return register_text_embedding(embedding_cls)
def create(embedding_name, **kwargs):
"""Creates an instance of token embedding.
Creates a token embedding instance by loading embedding vectors from an externally hosted
pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
`embedding_name` and `pretrained_file_name`, use
`mxnet.contrib.text.embedding.get_pretrained_file_names()`.
Parameters
----------
embedding_name : str
The token embedding name (case-insensitive).
Returns
-------
An instance of `mxnet.contrib.text.glossary._TokenEmbedding`:
A token embedding instance that loads embedding vectors from an externally hosted
pre-trained token embedding file.
"""
create_text_embedding = registry.get_create_func(_TokenEmbedding, 'token embedding')
return create_text_embedding(embedding_name, **kwargs)
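# Illustrative usage (a sketch; the pretrained file name is hypothetical here and must
# be one of the names returned by get_pretrained_file_names('glove')):
#   glove = create('glove', pretrained_file_name='glove.6B.50d.txt')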
def get_pretrained_file_names(embedding_name=None):
"""Get valid token embedding names and their pre-trained file names.
To load token embedding vectors from an externally hosted pre-trained token embedding file,
such as those of GloVe and FastText, one should use
`mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`.
This method returns all the valid names of `pretrained_file_name` for the specified
`embedding_name`. If `embedding_name` is set to None, this method returns all the valid
names of `embedding_name` with their associated `pretrained_file_name`.
Parameters
----------
embedding_name : str or None, default None
The pre-trained token embedding name.
Returns
-------
dict or list:
A list of all the valid pre-trained token embedding file names (`pretrained_file_name`)
        for the specified token embedding name (`embedding_name`). If the token embedding name is
set to None, returns a dict mapping each valid token embedding name to a list of valid
pre-trained files (`pretrained_file_name`). They can be plugged into
`mxnet.contrib.text.embedding.create(embedding_name,
pretrained_file_name)`.
"""
text_embedding_reg = registry.get_registry(_TokenEmbedding)
if embedding_name is not None:
if embedding_name not in text_embedding_reg:
raise KeyError('Cannot find `embedding_name` %s. Use '
'`get_pretrained_file_names('
'embedding_name=None).keys()` to get all the valid embedding '
'names.' % embedding_name)
return list(text_embedding_reg[embedding_name].pretrained_file_name_sha1.keys())
else:
return {embedding_name: list(embedding_cls.pretrained_file_name_sha1.keys())
for embedding_name, embedding_cls in registry.get_registry(_TokenEmbedding).items()}
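# Illustrative sketch (the exact lists depend on the registered embeddings and the
# hosted files):
#   get_pretrained_file_names().keys()    -> e.g. dict_keys(['glove', 'fasttext'])
#   get_pretrained_file_names('fasttext') -> e.g. ['wiki.simple.vec', ...]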
class _TokenEmbedding(vocab.Vocabulary):
"""Token embedding base class.
To load token embeddings from an externally hosted pre-trained token embedding file, such as
those of GloVe and FastText, use
:func:`~mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`.
To get all the available `embedding_name` and `pretrained_file_name`, use
:func:`~mxnet.contrib.text.embedding.get_pretrained_file_names()`.
Alternatively, to load embedding vectors from a custom pre-trained token embedding file, use
:class:`~mxnet.contrib.text.embedding.CustomEmbedding`.
Moreover, to load composite embedding vectors, such as to concatenate embedding vectors, use
:class:`~mxnet.contrib.text.embedding.CompositeEmbedding`.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
token embedding vector initialized by `init_unknown_vec`.
If a token is encountered multiple times in the pre-trained token embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
The indexed tokens in a text token embedding may come from a vocabulary or from the loaded
embedding vectors. In the former case, only the indexed tokens in a vocabulary are associated
with the loaded embedding vectors, such as loaded from a pre-trained token embedding file. In
the later case, all the tokens from the loaded embedding vectors, such as loaded from a
pre-trained token embedding file, are taken as the indexed tokens of the embedding.
Properties
----------
token_to_idx : dict mapping str to int
A dict mapping each token to its index integer.
idx_to_token : list of strs
A list of indexed tokens where the list indices and the token indices are aligned.
unknown_token : hashable object
The representation for any unknown token. In other words, any unknown token will be indexed
as the same representation.
reserved_tokens : list of strs or None
A list of reserved tokens that will always be indexed.
vec_len : int
The length of the embedding vector for each token.
idx_to_vec : mxnet.ndarray.NDArray
For all the indexed tokens in this embedding, this NDArray maps each token's index to an
embedding vector. The largest valid index maps to the initialized embedding vector for every
reserved token, such as an unknown_token token and a padding token.
"""
def __init__(self, **kwargs):
super(_TokenEmbedding, self).__init__(**kwargs)
@classmethod
def _get_download_file_name(cls, pretrained_file_name):
return pretrained_file_name
@classmethod
def _get_pretrained_file_url(cls, pretrained_file_name):
repo_url = os.environ.get('MXNET_GLUON_REPO', C.APACHE_REPO_URL)
embedding_cls = cls.__name__.lower()
url_format = '{repo_url}gluon/embeddings/{cls}/{file_name}'
return url_format.format(repo_url=repo_url, cls=embedding_cls,
file_name=cls._get_download_file_name(pretrained_file_name))
@classmethod
def _get_pretrained_file(cls, embedding_root, pretrained_file_name):
from ...gluon.utils import check_sha1, download
embedding_cls = cls.__name__.lower()
embedding_root = os.path.expanduser(embedding_root)
url = cls._get_pretrained_file_url(pretrained_file_name)
embedding_dir = os.path.join(embedding_root, embedding_cls)
pretrained_file_path = os.path.join(embedding_dir, pretrained_file_name)
downloaded_file = os.path.basename(url)
downloaded_file_path = os.path.join(embedding_dir, downloaded_file)
expected_file_hash = cls.pretrained_file_name_sha1[pretrained_file_name]
if hasattr(cls, 'pretrained_archive_name_sha1'):
expected_downloaded_hash = \
cls.pretrained_archive_name_sha1[downloaded_file]
else:
expected_downloaded_hash = expected_file_hash
if not os.path.exists(pretrained_file_path) \
or not check_sha1(pretrained_file_path, expected_file_hash):
download(url, downloaded_file_path, sha1_hash=expected_downloaded_hash)
ext = os.path.splitext(downloaded_file)[1]
if ext == '.zip':
with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
zf.extractall(embedding_dir)
elif ext == '.gz':
with tarfile.open(downloaded_file_path, 'r:gz') as tar:
tar.extractall(path=embedding_dir)
return pretrained_file_path
def _load_embedding(self, pretrained_file_path, elem_delim, init_unknown_vec, encoding='utf8'):
"""Load embedding vectors from the pre-trained token embedding file.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
        token embedding vector initialized by `init_unknown_vec`.
        If a token is encountered multiple times in the pre-trained token embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
"""
pretrained_file_path = os.path.expanduser(pretrained_file_path)
if not os.path.isfile(pretrained_file_path):
raise ValueError('`pretrained_file_path` must be a valid path to '
'the pre-trained token embedding file.')
logging.info('Loading pre-trained token embedding vectors from %s', pretrained_file_path)
vec_len = None
all_elems = []
tokens = set()
loaded_unknown_vec = None
line_num = 0
with io.open(pretrained_file_path, 'r', encoding=encoding) as f:
for line in f:
line_num += 1
elems = line.rstrip().split(elem_delim)
assert len(elems) > 1, 'At line %d of the pre-trained text embedding file: the ' \
'data format of the pre-trained token embedding file %s ' \
'is unexpected.' % (line_num, pretrained_file_path)
token, elems = elems[0], [float(i) for i in elems[1:]]
if token == self.unknown_token and loaded_unknown_vec is None:
loaded_unknown_vec = elems
tokens.add(self.unknown_token)
elif token in tokens:
warnings.warn('At line %d of the pre-trained token embedding file: the '
'embedding vector for token %s has been loaded and a duplicate '
'embedding for the same token is seen and skipped.' %
(line_num, token))
elif len(elems) == 1:
warnings.warn('At line %d of the pre-trained text embedding file: token %s '
'with 1-dimensional vector %s is likely a header and is '
'skipped.' % (line_num, token, elems))
else:
if vec_len is None:
vec_len = len(elems)
                        # Reserve a vector slot for the unknown token at the very beginning because
# the unknown index is 0.
all_elems.extend([0] * vec_len)
else:
assert len(elems) == vec_len, \
'At line %d of the pre-trained token embedding file: the dimension ' \
'of token %s is %d but the dimension of previous tokens is %d. ' \
'Dimensions of all the tokens must be the same.' \
% (line_num, token, len(elems), vec_len)
all_elems.extend(elems)
self._idx_to_token.append(token)
self._token_to_idx[token] = len(self._idx_to_token) - 1
tokens.add(token)
self._vec_len = vec_len
self._idx_to_vec = nd.array(all_elems).reshape((-1, self.vec_len))
if loaded_unknown_vec is None:
self._idx_to_vec[C.UNKNOWN_IDX] = init_unknown_vec(shape=self.vec_len)
else:
self._idx_to_vec[C.UNKNOWN_IDX] = nd.array(loaded_unknown_vec)
def _index_tokens_from_vocabulary(self, vocabulary):
self._token_to_idx = vocabulary.token_to_idx.copy() \
if vocabulary.token_to_idx is not None else None
self._idx_to_token = vocabulary.idx_to_token[:] \
if vocabulary.idx_to_token is not None else None
self._unknown_token = vocabulary.unknown_token
self._reserved_tokens = vocabulary.reserved_tokens[:] \
if vocabulary.reserved_tokens is not None else None
def _set_idx_to_vec_by_embeddings(self, token_embeddings, vocab_len, vocab_idx_to_token):
"""Sets the mapping between token indices and token embedding vectors.
Parameters
----------
token_embeddings : instance or list `mxnet.contrib.text.embedding._TokenEmbedding`
One or multiple pre-trained token embeddings to load. If it is a list of multiple
embeddings, these embedding vectors will be concatenated for each token.
vocab_len : int
Length of vocabulary whose tokens are indexed in the token embedding.
vocab_idx_to_token: list of str
A list of indexed tokens in the vocabulary. These tokens are indexed in the token
embedding.
"""
new_vec_len = sum(embed.vec_len for embed in token_embeddings)
new_idx_to_vec = nd.zeros(shape=(vocab_len, new_vec_len))
col_start = 0
# Concatenate all the embedding vectors in token_embeddings.
for embed in token_embeddings:
col_end = col_start + embed.vec_len
            # Concatenate vectors of the unknown token.
new_idx_to_vec[0, col_start:col_end] = embed.idx_to_vec[0]
new_idx_to_vec[1:, col_start:col_end] = embed.get_vecs_by_tokens(vocab_idx_to_token[1:])
col_start = col_end
self._vec_len = new_vec_len
self._idx_to_vec = new_idx_to_vec
def _build_embedding_for_vocabulary(self, vocabulary):
if vocabulary is not None:
assert isinstance(vocabulary, vocab.Vocabulary), \
'The argument `vocabulary` must be an instance of ' \
'mxnet.contrib.text.vocab.Vocabulary.'
# Set _idx_to_vec so that indices of tokens from vocabulary are associated with the
# loaded token embedding vectors.
self._set_idx_to_vec_by_embeddings([self], len(vocabulary), vocabulary.idx_to_token)
# Index tokens from vocabulary.
self._index_tokens_from_vocabulary(vocabulary)
@property
def vec_len(self):
return self._vec_len
@property
def idx_to_vec(self):
return self._idx_to_vec
def get_vecs_by_tokens(self, tokens, lower_case_backup=False):
"""Look up embedding vectors of tokens.
Parameters
----------
tokens : str or list of strs
A token or a list of tokens.
lower_case_backup : bool, default False
If False, each token in the original case will be looked up; if True, each token in the
original case will be looked up first, if not found in the keys of the property
`token_to_idx`, the token in the lower case will be looked up.
Returns
-------
mxnet.ndarray.NDArray:
The embedding vector(s) of the token(s). According to numpy conventions, if `tokens` is
a string, returns a 1-D NDArray of shape `self.vec_len`; if `tokens` is a list of
strings, returns a 2-D NDArray of shape=(len(tokens), self.vec_len).
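        Examples
        --------
        A sketch only; the shapes depend on which pre-trained file was loaded (here a
        hypothetical 50-dimensional embedding bound to the name `embed`).
            >>> embed.get_vecs_by_tokens('hello').shape  # doctest: +SKIP
            (50,)
            >>> embed.get_vecs_by_tokens(['hello', 'world']).shape  # doctest: +SKIP
            (2, 50)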
"""
to_reduce = False
if not isinstance(tokens, list):
tokens = [tokens]
to_reduce = True
if not lower_case_backup:
indices = [self.token_to_idx.get(token, C.UNKNOWN_IDX) for token in tokens]
else:
indices = [self.token_to_idx[token] if token in self.token_to_idx
else self.token_to_idx.get(token.lower(), C.UNKNOWN_IDX)
for token in tokens]
vecs = nd.Embedding(nd.array(indices), self.idx_to_vec, self.idx_to_vec.shape[0],
self.idx_to_vec.shape[1])
return vecs[0] if to_reduce else vecs
def update_token_vectors(self, tokens, new_vectors):
"""Updates embedding vectors for tokens.
Parameters
----------
tokens : str or a list of strs
A token or a list of tokens whose embedding vector are to be updated.
new_vectors : mxnet.ndarray.NDArray
An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be equal
to the number of `tokens` and its width must be equal to the dimension of embeddings of
the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If `tokens` is a list
of multiple strings, it must be 2-D.
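        Examples
        --------
        A sketch, assuming `embed` is a loaded embedding with 5-dimensional vectors and
        that 'hello' is one of its indexed tokens.
            >>> embed.update_token_vectors('hello', nd.arange(5))  # doctest: +SKIP
            >>> embed.update_token_vectors(['hello'], nd.ones((1, 5)))  # doctest: +SKIP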
"""
assert self.idx_to_vec is not None, 'The property `idx_to_vec` has not been properly set.'
if not isinstance(tokens, list) or len(tokens) == 1:
assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) in [1, 2], \
'`new_vectors` must be a 1-D or 2-D NDArray if `tokens` is a singleton.'
if not isinstance(tokens, list):
tokens = [tokens]
if len(new_vectors.shape) == 1:
new_vectors = new_vectors.expand_dims(0)
else:
assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) == 2, \
'`new_vectors` must be a 2-D NDArray if `tokens` is a list of multiple strings.'
assert new_vectors.shape == (len(tokens), self.vec_len), \
            'The length of new_vectors must be equal to the number of tokens and the width of ' \
            'new_vectors must be equal to the dimension of embeddings of the glossary.'
indices = []
for token in tokens:
if token in self.token_to_idx:
indices.append(self.token_to_idx[token])
else:
raise ValueError('Token %s is unknown. To update the embedding vector for an '
'unknown token, please specify it explicitly as the '
'`unknown_token` %s in `tokens`. This is to avoid unintended '
'updates.' % (token, self.idx_to_token[C.UNKNOWN_IDX]))
self._idx_to_vec[nd.array(indices)] = new_vectors
@classmethod
def _check_pretrained_file_names(cls, pretrained_file_name):
"""Checks if a pre-trained token embedding file name is valid.
Parameters
----------
pretrained_file_name : str
The pre-trained token embedding file.
"""
embedding_name = cls.__name__.lower()
if pretrained_file_name not in cls.pretrained_file_name_sha1:
raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid '
'pretrained files for embedding %s: %s' %
(pretrained_file_name, embedding_name, embedding_name,
', '.join(cls.pretrained_file_name_sha1.keys())))
@register
class GloVe(_TokenEmbedding):
"""The GloVe word embedding.
GloVe is an unsupervised learning algorithm for obtaining vector representations for words.
Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and
the resulting representations showcase interesting linear substructures of the word vector
space. (Source from https://nlp.stanford.edu/projects/glove/)
Reference:
GloVe: Global Vectors for Word Representation.
Jeffrey Pennington, Richard Socher, and Christopher D. Manning.
https://nlp.stanford.edu/pubs/glove.pdf
Website:
https://nlp.stanford.edu/projects/glove/
To get the updated URLs to the externally hosted pre-trained token embedding
files, visit https://nlp.stanford.edu/projects/glove/
License for pre-trained embeddings:
https://opendatacommons.org/licenses/pddl/
Parameters
----------
pretrained_file_name : str, default 'glove.840B.300d.txt'
The name of the pre-trained token embedding file.
embedding_root : str, default os.path.join('~', '.mxnet', 'embeddings')
The root directory for storing embedding-related files.
init_unknown_vec : callback
The callback used to initialize the embedding vector for the unknown token.
vocabulary : :class:`~mxnet.contrib.text.vocab.Vocabulary`, default None
It contains the tokens to index. Each indexed token will be associated with the loaded
embedding vectors, such as loaded from a pre-trained token embedding file. If None, all the
tokens from the loaded embedding vectors, such as loaded from a pre-trained token embedding
file, will be indexed.
Properties
----------
token_to_idx : dict mapping str to int
A dict mapping each token to its index integer.
idx_to_token : list of strs
A list of indexed tokens where the list indices and the token indices are aligned.
unknown_token : hashable object
The representation for any unknown token. In other words, any unknown token will be indexed
as the same representation.
reserved_tokens : list of strs or None
A list of reserved tokens that will always be indexed.
vec_len : int
The length of the embedding vector for each token.
idx_to_vec : mxnet.ndarray.NDArray
For all the indexed tokens in this embedding, this NDArray maps each token's index to an
embedding vector. The largest valid index maps to the initialized embedding vector for every
reserved token, such as an unknown_token token and a padding token.
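    Examples
    --------
    A usage sketch; the pre-trained file is downloaded on first use, so it is not run here.
        >>> from mxnet.contrib import text
        >>> glove = text.embedding.create('glove', pretrained_file_name='glove.6B.50d.txt')  # doctest: +SKIP
        >>> glove.vec_len  # doctest: +SKIP
        50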
"""
# Map a pre-trained token embedding archive file and its SHA-1 hash.
pretrained_archive_name_sha1 = C.GLOVE_PRETRAINED_FILE_SHA1
# Map a pre-trained token embedding file and its SHA-1 hash.
pretrained_file_name_sha1 = C.GLOVE_PRETRAINED_ARCHIVE_SHA1
@classmethod
def _get_download_file_name(cls, pretrained_file_name):
# Map a pre-trained embedding file to its archive to download.
src_archive = {archive.split('.')[1]: archive for archive in
GloVe.pretrained_archive_name_sha1.keys()}
archive = src_archive[pretrained_file_name.split('.')[1]]
return archive
def __init__(self, pretrained_file_name='glove.840B.300d.txt',
embedding_root=os.path.join('~', '.mxnet', 'embeddings'),
init_unknown_vec=nd.zeros, vocabulary=None, **kwargs):
GloVe._check_pretrained_file_names(pretrained_file_name)
super(GloVe, self).__init__(**kwargs)
pretrained_file_path = GloVe._get_pretrained_file(embedding_root, pretrained_file_name)
self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)
if vocabulary is not None:
self._build_embedding_for_vocabulary(vocabulary)
@register
class FastText(_TokenEmbedding):
"""The fastText word embedding.
FastText is an open-source, free, lightweight library that allows users to learn text
representations and text classifiers. It works on standard, generic hardware. Models can later
be reduced in size to even fit on mobile devices. (Source from https://fasttext.cc/)
References:
Enriching Word Vectors with Subword Information.
Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov.
https://arxiv.org/abs/1607.04606
Bag of Tricks for Efficient Text Classification.
Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov.
https://arxiv.org/abs/1607.01759
FastText.zip: Compressing text classification models.
Armand Joulin, Edouard Grave, Piotr Bojanowski, Matthijs Douze, Herve Jegou,
and Tomas Mikolov.
https://arxiv.org/abs/1612.03651
For 'wiki.multi' embeddings:
Word Translation Without Parallel Data
Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer,
and Herve Jegou.
https://arxiv.org/abs/1710.04087
Website:
https://fasttext.cc/
To get the updated URLs to the externally hosted pre-trained token embedding files, visit
https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md
License for pre-trained embeddings:
https://creativecommons.org/licenses/by-sa/3.0/
Parameters
----------
    pretrained_file_name : str, default 'wiki.simple.vec'
The name of the pre-trained token embedding file.
embedding_root : str, default os.path.join('~', '.mxnet', 'embeddings')
The root directory for storing embedding-related files.
init_unknown_vec : callback
The callback used to initialize the embedding vector for the unknown token.
vocabulary : :class:`~mxnet.contrib.text.vocab.Vocabulary`, default None
It contains the tokens to index. Each indexed token will be associated with the loaded
embedding vectors, such as loaded from a pre-trained token embedding file. If None, all the
tokens from the loaded embedding vectors, such as loaded from a pre-trained token embedding
file, will be indexed.
Properties
----------
token_to_idx : dict mapping str to int
A dict mapping each token to its index integer.
idx_to_token : list of strs
A list of indexed tokens where the list indices and the token indices are aligned.
unknown_token : hashable object
The representation for any unknown token. In other words, any unknown token will be indexed
as the same representation.
reserved_tokens : list of strs or None
A list of reserved tokens that will always be indexed.
vec_len : int
The length of the embedding vector for each token.
idx_to_vec : mxnet.ndarray.NDArray
For all the indexed tokens in this embedding, this NDArray maps each token's index to an
embedding vector. The largest valid index maps to the initialized embedding vector for every
reserved token, such as an unknown_token token and a padding token.
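    Examples
    --------
    A usage sketch; the pre-trained file is downloaded on first use, so it is not run here.
        >>> from mxnet.contrib import text
        >>> fasttext = text.embedding.create('fasttext', pretrained_file_name='wiki.simple.vec')  # doctest: +SKIP
        >>> fasttext.vec_len  # doctest: +SKIP
        300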
"""
# Map a pre-trained token embedding archive file and its SHA-1 hash.
pretrained_archive_name_sha1 = C.FAST_TEXT_ARCHIVE_SHA1
# Map a pre-trained token embedding file and its SHA-1 hash.
pretrained_file_name_sha1 = C.FAST_TEXT_FILE_SHA1
@classmethod
def _get_download_file_name(cls, pretrained_file_name):
# Map a pre-trained embedding file to its archive to download.
return '.'.join(pretrained_file_name.split('.')[:-1])+'.zip'
def __init__(self, pretrained_file_name='wiki.simple.vec',
embedding_root=os.path.join('~', '.mxnet', 'embeddings'),
init_unknown_vec=nd.zeros, vocabulary=None, **kwargs):
FastText._check_pretrained_file_names(pretrained_file_name)
super(FastText, self).__init__(**kwargs)
pretrained_file_path = FastText._get_pretrained_file(embedding_root, pretrained_file_name)
self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)
if vocabulary is not None:
self._build_embedding_for_vocabulary(vocabulary)
class CustomEmbedding(_TokenEmbedding):
"""User-defined token embedding.
This is to load embedding vectors from a user-defined pre-trained text embedding file.
Denote by '[ed]' the argument `elem_delim`. Denote by [v_ij] the j-th element of the token
    embedding vector for [token_i]. The expected format of a custom pre-trained token embedding file
is:
'[token_1][ed][v_11][ed][v_12][ed]...[ed][v_1k]\\\\n[token_2][ed][v_21][ed][v_22][ed]...[ed]
[v_2k]\\\\n...'
where k is the length of the embedding vector `vec_len`.
Parameters
----------
pretrained_file_path : str
The path to the custom pre-trained token embedding file.
elem_delim : str, default ' '
The delimiter for splitting a token and every embedding vector element value on the same
line of the custom pre-trained token embedding file.
encoding : str, default 'utf8'
The encoding scheme for reading the custom pre-trained token embedding file.
init_unknown_vec : callback
The callback used to initialize the embedding vector for the unknown token.
vocabulary : :class:`~mxnet.contrib.text.vocab.Vocabulary`, default None
It contains the tokens to index. Each indexed token will be associated with the loaded
embedding vectors, such as loaded from a pre-trained token embedding file. If None, all the
tokens from the loaded embedding vectors, such as loaded from a pre-trained token embedding
file, will be indexed.
Properties
----------
token_to_idx : dict mapping str to int
A dict mapping each token to its index integer.
idx_to_token : list of strs
A list of indexed tokens where the list indices and the token indices are aligned.
unknown_token : hashable object
The representation for any unknown token. In other words, any unknown token will be indexed
as the same representation.
reserved_tokens : list of strs or None
A list of reserved tokens that will always be indexed.
vec_len : int
The length of the embedding vector for each token.
idx_to_vec : mxnet.ndarray.NDArray
For all the indexed tokens in this embedding, this NDArray maps each token's index to an
embedding vector. The largest valid index maps to the initialized embedding vector for every
reserved token, such as an unknown_token token and a padding token.
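    Examples
    --------
    A sketch using a hypothetical space-delimited file 'my_embed.txt' whose lines look like
    'hello 0.1 0.2 0.3'.
        >>> from mxnet.contrib import text
        >>> my_embed = text.embedding.CustomEmbedding('my_embed.txt', elem_delim=' ')  # doctest: +SKIP
        >>> my_embed.get_vecs_by_tokens('hello')  # doctest: +SKIP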
"""
def __init__(self, pretrained_file_path, elem_delim=' ', encoding='utf8',
init_unknown_vec=nd.zeros, vocabulary=None, **kwargs):
super(CustomEmbedding, self).__init__(**kwargs)
self._load_embedding(pretrained_file_path, elem_delim, init_unknown_vec, encoding)
if vocabulary is not None:
self._build_embedding_for_vocabulary(vocabulary)
class CompositeEmbedding(_TokenEmbedding):
"""Composite token embeddings.
For each indexed token in a vocabulary, multiple embedding vectors, such as concatenated
multiple embedding vectors, will be associated with it. Such embedding vectors can be loaded
from externally hosted or custom pre-trained token embedding files, such as via token embedding
instances.
Parameters
----------
vocabulary : :class:`~mxnet.contrib.text.vocab.Vocabulary`
For each indexed token in a vocabulary, multiple embedding vectors, such as concatenated
multiple embedding vectors, will be associated with it.
token_embeddings : instance or list of `mxnet.contrib.text.embedding._TokenEmbedding`
One or multiple pre-trained token embeddings to load. If it is a list of multiple
embeddings, these embedding vectors will be concatenated for each token.
Properties
----------
token_to_idx : dict mapping str to int
A dict mapping each token to its index integer.
idx_to_token : list of strs
A list of indexed tokens where the list indices and the token indices are aligned.
unknown_token : hashable object
The representation for any unknown token. In other words, any unknown token will be indexed
as the same representation.
reserved_tokens : list of strs or None
A list of reserved tokens that will always be indexed.
vec_len : int
The length of the embedding vector for each token.
idx_to_vec : mxnet.ndarray.NDArray
For all the indexed tokens in this embedding, this NDArray maps each token's index to an
embedding vector. The largest valid index maps to the initialized embedding vector for every
reserved token, such as an unknown_token token and a padding token.
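    Examples
    --------
    A sketch that concatenates two pre-trained embeddings over the tokens of an existing
    vocabulary `my_vocab` (assumed to be a mxnet.contrib.text.vocab.Vocabulary instance).
        >>> from mxnet.contrib import text
        >>> glove = text.embedding.create('glove', pretrained_file_name='glove.6B.50d.txt')  # doctest: +SKIP
        >>> fasttext = text.embedding.create('fasttext', pretrained_file_name='wiki.simple.vec')  # doctest: +SKIP
        >>> composite = text.embedding.CompositeEmbedding(my_vocab, [glove, fasttext])  # doctest: +SKIP
        >>> composite.vec_len == glove.vec_len + fasttext.vec_len  # doctest: +SKIP
        True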
"""
def __init__(self, vocabulary, token_embeddings):
# Sanity checks.
assert isinstance(vocabulary, vocab.Vocabulary), \
'The argument `vocabulary` must be an instance of ' \
            'mxnet.contrib.text.vocab.Vocabulary.'
if not isinstance(token_embeddings, list):
token_embeddings = [token_embeddings]
for embed in token_embeddings:
assert isinstance(embed, _TokenEmbedding), \
'The argument `token_embeddings` must be an instance or a list of instances ' \
                'of `mxnet.contrib.text.embedding._TokenEmbedding` whose embedding vectors will be ' \
                'loaded or concatenated-then-loaded to map to the indexed tokens.'
# Index tokens.
self._index_tokens_from_vocabulary(vocabulary)
        # Set _idx_to_vec so that indices of tokens from `vocabulary` are associated with token
        # embedding vectors from `token_embeddings`.
self._set_idx_to_vec_by_embeddings(token_embeddings, len(self), self.idx_to_token)
| apache-2.0 |
anrl/gini | backend/src/gloader/xml/dom/ext/reader/Sgmlop.py | 10 | 10310 | import string, re, types, sys
from xml.parsers import sgmlop
from xml.dom import implementation
from xml.dom import Node
from xml.dom import NotSupportedErr
from xml.dom import EMPTY_NAMESPACE
from xml.dom import XML_NAMESPACE, XMLNS_NAMESPACE  # used by XmlParser below
from xml.dom.ext import SplitQName  # used by XmlParser below
from xml.dom.html import HTML_DTD, HTML_CHARACTER_ENTITIES
DEFAULT_CHARSET = 'ISO-8859-1'
_root = '(?P<root>[a-zA-Z][a-zA-Z0-9]*)'
_quoted = '("[^"]*")|' + "('[^']*')"
_sysId = r'\s*(?P<system%d>' + _quoted + ')'
_pubId = r'\s*PUBLIC\s*(?P<public>' + _quoted + '(' + (_sysId % 1) + ')?)'
_sysId = 'SYSTEM' + (_sysId % 2)
_doctype = re.compile('DOCTYPE ' + _root + '(%s|%s)?' % (_pubId, _sysId), re.I)
try:
unicode()
except:
from xml.unicode.iso8859 import wstring
wstring.install_alias('ISO-8859-1', 'ISO_8859-1:1987')
def unicode(str, encoding='US-ASCII'):
"""Create a UTF-8 string"""
try:
return wstring.decode(string.upper(encoding), str).utf8()
except:
return str
def unichr(char):
"""Create a UTF-8 string from a Unicode character code"""
try:
return wstring.chr(char).utf8()
except:
return char
class SgmlopParser:
def __init__(self, entities=None):
self.entities = {'amp' : '&',
'apos' : "'",
'lt' : '<',
'gt' : '>',
'quot' : '"',
}
entities and self.entities.update(entities)
def initParser(self, parser):
self._parser = parser
self._parser.register(self)
return
def initState(self, ownerDoc=None):
        raise NotImplementedError('initState: ownerDoc=%s' % ownerDoc)
def parse(self, stream):
self._parser.parse(stream.read())
return
def handle_special(self, data):
"""Handles <!...> directives"""
raise NotImplementedError('handle_special: data=%s' % data)
def handle_proc(self, target, data):
"""Handles processing instructions."""
raise NotImplementedError('handle_proc: target=%s, data=%s' % (target, data))
def finish_starttag(self, tagname, attrs):
"""
In XML mode attrs is a dictionary, otherwise a list.
"""
raise NotImplementedError('finish_starttag: name=%s' % tagname)
def finish_endtag(self, tagname):
raise NotImplementedError('finish_endtag: name=%s' % tagname)
def handle_entityref(self, name):
if self.entities.has_key(name):
self.handle_data(self.entities[name])
else:
self.unknown_entityref(name)
return
#Handled internally in sgmlop, but can be overridden
#def handle_charref(self, char):
# # char is a string number
# # either DDD or xHHH
# if char[0] == 'x':
# self.handle_data(chr(eval('0' + char)))
# else:
# self.handle_data(chr(int(char)))
# return
def handle_cdata(self, data):
raise NotImplementedError('handle_cdata: data=%s' % data)
def handle_data(self, data):
raise NotImplementedError('handle_data: data=%s' % data)
def handle_comment(self, data):
raise NotImplementedError('handle_comment: data=%s' % data)
def unknown_endtag(self, name): pass
def unknown_entityref(self, name): pass
g_reCharset = re.compile(r'charset\s*=\s*(?P<charset>[a-zA-Z0-9_\-]+)')
HTML_ENTITIES = {}
for (char, name) in HTML_CHARACTER_ENTITIES.items():
HTML_ENTITIES[name] = unichr(char)
class HtmlParser(SgmlopParser):
def __init__(self):
SgmlopParser.__init__(self, HTML_ENTITIES)
def initParser(self):
SgmlopParser.initParser(self, sgmlop.SGMLParser())
def initState(self, ownerDoc=None, charset=''):
self._ownerDoc = ownerDoc or implementation.createHTMLDocument('')
self._charset = charset or DEFAULT_CHARSET
self.rootNode = self._ownerDoc.createDocumentFragment()
self._stack = [self.rootNode]
self._hasHtml = 0
return
def handle_special(self, data):
# This would be a doctype, but HTML DOMs do not use them
return
def handle_proc(self, target, data):
# HTML DOMs do not support processing instructions either.
return
def finish_starttag(self, tagname, attrs):
unicodeTagName = unicode(tagname, self._charset)
lowerTagName = string.lower(unicodeTagName)
if not HTML_DTD.has_key(lowerTagName):
# Skip any tags not defined in HTML 4.01
return
element = self._ownerDoc.createElementNS(EMPTY_NAMESPACE, unicodeTagName)
# Allows for multiple META tags in a document
if lowerTagName == 'meta':
lowered = map(lambda (name, value):
(string.lower(name), string.lower(value)),
attrs)
if ('http-equiv', 'content-type') in lowered:
for (name, value) in lowered:
if name == 'content':
match = g_reCharset.search(value)
if match:
self._charset = match.group('charset')
# Add any attributes to the tag
for (name, value) in attrs:
element.setAttributeNS(EMPTY_NAMESPACE, unicode(name, self._charset),
unicode(value, self._charset))
# Look for its parent
for i in range(1, len(self._stack)):
parent = self._stack[-i]
if lowerTagName in HTML_DTD[string.lower(parent.tagName)]:
parent.appendChild(element)
if i > 1:
self._stack = self._stack[:-i+1]
if HTML_DTD[lowerTagName]:
self._stack.append(element)
return
# no parent found
if not self._hasHtml and lowerTagName == 'html':
self._stack[0].appendChild(element)
self._stack.append(element)
self._hasHtml = 1
return
def finish_endtag(self, tagname):
uppercase = string.upper(unicode(tagname, self._charset))
# Look for opening tag
for i in range(1, len(self._stack)):
element = self._stack[-i]
if uppercase == element.tagName:
self._stack = self._stack[:-i]
break
return
def handle_entityref(self, name):
if self.entities.has_key(name):
unidata = self.entities[name]
node = self._stack[-1]
text_node = node.lastChild or node
if text_node.nodeType == Node.TEXT_NODE:
text_node.appendData(unidata)
else:
node.appendChild(self._ownerDoc.createTextNode(unidata))
else:
self.unknown_entityref(name)
return
def handle_data(self, data):
unidata = unicode(data, self._charset)
node = self._stack[-1]
text_node = node.lastChild or node
if text_node.nodeType == Node.TEXT_NODE:
text_node.appendData(unidata)
else:
node.appendChild(self._ownerDoc.createTextNode(unidata))
return
def handle_charref(self, value):
# Can't rely on sgmlop to handle charrefs itself: it can't
# report Unicode (since it won't know the document encoding),
# and it may encounter non-ASCII characters
if value[0] == 'x':
value = int(value[1:], 16)
else:
value = int(value)
unidata = unichr(value)
node = self._stack[-1]
text_node = node.lastChild or node
if text_node.nodeType == Node.TEXT_NODE:
text_node.appendData(unidata)
else:
node.appendChild(self._ownerDoc.createTextNode(unidata))
return
def handle_comment(self, data):
comment = self._ownerDoc.createComment(data)
self._stack[-1].appendChild(comment)
return
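# A minimal usage sketch of HtmlParser (for orientation only; `source` stands for any object
# with a read() method that returns HTML text):
#
#     parser = HtmlParser()
#     parser.initParser()           # attach a fresh sgmlop.SGMLParser
#     parser.initState()            # create the owner HTML document and root fragment
#     parser.parse(source)          # feed source.read() to sgmlop
#     fragment = parser.rootNode    # DocumentFragment holding the parsed nodes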
class XmlParser(SgmlopParser):
def initParser(self):
SgmlopParser.initParser(self, sgmlop.XMLParser())
def initState(self, ownerDoc=None):
self._ownerDoc = None
#Set up the stack which keeps track of the nesting of DOM nodes.
if ownerDoc:
self._ownerDoc = ownerDoc
#Create a docfrag to hold all the generated nodes.
self._rootNode = self._ownerDoc.createDocumentFragment()
self._stack = [self._rootNode]
else:
self._rootNode = None
self._stack = []
self._dt = None
self._xmlDecl = None
self._orphanedNodes = []
self._namespaces = {'xml': XML_NAMESPACE}
self._namespaceStack = []
self._currText = ''
return
def finish_starttag(self, tagname, attrs):
old_nss = {}
del_nss = []
split_attrs = {}
for (name, value) in attrs.items():
(prefix, local) = SplitQName(name)
split_attrs[(prefix, local, name)] = value
if local == 'xmlns':
if self._namespaces.has_key(prefix):
old_nss[prefix] = self._namespaces[prefix]
else:
del_nss.append(prefix)
if prefix or value:
self._namespaces[prefix] = value
else:
del_nss.append(prefix)
self._namespaceStack.append((old_nss, del_nss))
(prefix, local) = SplitQName(tagname)
namespace = self._namespaces.get(prefix, None)
element = self._ownerDoc.createElementNS(namespace, tagname)
for ((prefix, local, name), value) in split_attrs.items():
if local == 'xmlns':
namespace = XMLNS_NAMESPACE
else:
namespace = self._namespaces.get(prefix, None)
attr = self._ownerDoc.createAttributeNS(namespace, name)
attr.value = value
element.setAttributeNodeNS(attr)
self._stack.append(element)
def finish_endtag(self, tagname):
element = self._stack.pop()
(old_nss, del_nss) = self._namespaceStack.pop()
self._namespaces.update(old_nss)
for prefix in del_nss:
del self._namespaces[prefix]
self._stack[-1].appendChild(element)
return
| mit |
rshriram/api | python/istio_api/mcp/v1alpha1/resource_pb2.py | 1 | 3194 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mcp/v1alpha1/resource.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from mcp.v1alpha1 import metadata_pb2 as mcp_dot_v1alpha1_dot_metadata__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mcp/v1alpha1/resource.proto',
package='istio.mcp.v1alpha1',
syntax='proto3',
serialized_options=_b('Z\031istio.io/api/mcp/v1alpha1\250\342\036\001'),
serialized_pb=_b('\n\x1bmcp/v1alpha1/resource.proto\x12\x12istio.mcp.v1alpha1\x1a\x19google/protobuf/any.proto\x1a\x14gogoproto/gogo.proto\x1a\x1bmcp/v1alpha1/metadata.proto\"^\n\x08Resource\x12.\n\x08metadata\x18\x01 \x01(\x0b\x32\x1c.istio.mcp.v1alpha1.Metadata\x12\"\n\x04\x62ody\x18\x02 \x01(\x0b\x32\x14.google.protobuf.AnyB\x1fZ\x19istio.io/api/mcp/v1alpha1\xa8\xe2\x1e\x01\x62\x06proto3')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,gogoproto_dot_gogo__pb2.DESCRIPTOR,mcp_dot_v1alpha1_dot_metadata__pb2.DESCRIPTOR,])
_RESOURCE = _descriptor.Descriptor(
name='Resource',
full_name='istio.mcp.v1alpha1.Resource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='metadata', full_name='istio.mcp.v1alpha1.Resource.metadata', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='body', full_name='istio.mcp.v1alpha1.Resource.body', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=129,
serialized_end=223,
)
_RESOURCE.fields_by_name['metadata'].message_type = mcp_dot_v1alpha1_dot_metadata__pb2._METADATA
_RESOURCE.fields_by_name['body'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['Resource'] = _RESOURCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Resource = _reflection.GeneratedProtocolMessageType('Resource', (_message.Message,), dict(
DESCRIPTOR = _RESOURCE,
__module__ = 'mcp.v1alpha1.resource_pb2'
# @@protoc_insertion_point(class_scope:istio.mcp.v1alpha1.Resource)
))
_sym_db.RegisterMessage(Resource)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
blacklin/kbengine | kbe/src/lib/python/Lib/test/test_unpack.py | 174 | 2619 | doctests = """
Unpack tuple
>>> t = (1, 2, 3)
>>> a, b, c = t
>>> a == 1 and b == 2 and c == 3
True
Unpack list
>>> l = [4, 5, 6]
>>> a, b, c = l
>>> a == 4 and b == 5 and c == 6
True
Unpack implied tuple
>>> a, b, c = 7, 8, 9
>>> a == 7 and b == 8 and c == 9
True
Unpack string... fun!
>>> a, b, c = 'one'
>>> a == 'o' and b == 'n' and c == 'e'
True
Unpack generic sequence
>>> class Seq:
... def __getitem__(self, i):
... if i >= 0 and i < 3: return i
... raise IndexError
...
>>> a, b, c = Seq()
>>> a == 0 and b == 1 and c == 2
True
Single element unpacking, with extra syntax
>>> st = (99,)
>>> sl = [100]
>>> a, = st
>>> a
99
>>> b, = sl
>>> b
100
Now for some failures
Unpacking non-sequence
>>> a, b, c = 7
Traceback (most recent call last):
...
TypeError: 'int' object is not iterable
Unpacking tuple of wrong size
>>> a, b = t
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking tuple of wrong size
>>> a, b = l
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking sequence too short
>>> a, b, c, d = Seq()
Traceback (most recent call last):
...
ValueError: need more than 3 values to unpack
Unpacking sequence too long
>>> a, b = Seq()
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking a sequence where the test for too long raises a different kind of
error
>>> class BozoError(Exception):
... pass
...
>>> class BadSeq:
... def __getitem__(self, i):
... if i >= 0 and i < 3:
... return i
... elif i == 3:
... raise BozoError
... else:
... raise IndexError
...
Trigger code while not expecting an IndexError (unpack sequence too long, wrong
error)
>>> a, b, c, d, e = BadSeq()
Traceback (most recent call last):
...
test.test_unpack.BozoError
Trigger code while expecting an IndexError (unpack sequence too short, wrong
error)
>>> a, b, c = BadSeq()
Traceback (most recent call last):
...
test.test_unpack.BozoError
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=False):
from test import support
from test import test_unpack
support.run_doctest(test_unpack, verbose)
if __name__ == "__main__":
test_main(verbose=True)
| lgpl-3.0 |
abloomston/sympy | sympy/integrals/tests/test_quadrature.py | 82 | 20020 | from sympy.core import S
from sympy.integrals.quadrature import (gauss_legendre, gauss_laguerre,
gauss_hermite, gauss_gen_laguerre,
gauss_chebyshev_t, gauss_chebyshev_u,
gauss_jacobi)
def test_legendre():
x, w = gauss_legendre(1, 17)
assert [str(r) for r in x] == ['0']
assert [str(r) for r in w] == ['2.0000000000000000']
x, w = gauss_legendre(2, 17)
assert [str(r) for r in x] == ['-0.57735026918962576',
'0.57735026918962576']
assert [str(r) for r in w] == ['1.0000000000000000', '1.0000000000000000']
x, w = gauss_legendre(3, 17)
assert [str(r) for r in x] == ['-0.77459666924148338', '0',
'0.77459666924148338']
assert [str(r) for r in w] == ['0.55555555555555556',
'0.88888888888888889', '0.55555555555555556']
x, w = gauss_legendre(4, 17)
assert [str(r) for r in x] == ['-0.86113631159405258',
'-0.33998104358485626', '0.33998104358485626',
'0.86113631159405258']
assert [str(r) for r in w] == ['0.34785484513745386',
'0.65214515486254614', '0.65214515486254614',
'0.34785484513745386']
def test_legendre_precise():
x, w = gauss_legendre(3, 40)
assert [str(r) for r in x] == \
['-0.7745966692414833770358530799564799221666', '0',
'0.7745966692414833770358530799564799221666']
assert [str(r) for r in w] == \
['0.5555555555555555555555555555555555555556',
'0.8888888888888888888888888888888888888889',
'0.5555555555555555555555555555555555555556']
def test_laguerre():
x, w = gauss_laguerre(1, 17)
assert [str(r) for r in x] == ['1.0000000000000000']
assert [str(r) for r in w] == ['1.0000000000000000']
x, w = gauss_laguerre(2, 17)
assert [str(r) for r in x] == ['0.58578643762690495',
'3.4142135623730950']
assert [str(r) for r in w] == ['0.85355339059327376',
'0.14644660940672624']
x, w = gauss_laguerre(3, 17)
assert [str(r) for r in x] == [
'0.41577455678347908',
'2.2942803602790417',
'6.2899450829374792',
]
assert [str(r) for r in w] == [
'0.71109300992917302',
'0.27851773356924085',
'0.010389256501586136',
]
x, w = gauss_laguerre(4, 17)
assert [str(r) for r in x] == ['0.32254768961939231', '1.7457611011583466',
'4.5366202969211280', '9.3950709123011331']
assert [str(r) for r in w] == ['0.60315410434163360',
'0.35741869243779969', '0.038887908515005384',
'0.00053929470556132745']
x, w = gauss_laguerre(5, 17)
assert [str(r) for r in x] == ['0.26356031971814091', '1.4134030591065168',
'3.5964257710407221', '7.0858100058588376', '12.640800844275783']
assert [str(r) for r in w] == ['0.52175561058280865',
'0.39866681108317593', '0.075942449681707595',
'0.0036117586799220485', '2.3369972385776228e-5']
def test_laguerre_precise():
x, w = gauss_laguerre(3, 40)
assert [str(r) for r in x] == \
['0.4157745567834790833115338731282744735466',
'2.294280360279041719822050361359593868960',
'6.289945082937479196866415765512131657493']
assert [str(r) for r in w] == \
['0.7110930099291730154495901911425944313094',
'0.2785177335692408488014448884567264810349',
'0.01038925650158613574896492040067908765572']
def test_hermite():
x, w = gauss_hermite(1, 17)
assert [str(r) for r in x] == ['0']
assert [str(r) for r in w] == ['1.7724538509055160']
x, w = gauss_hermite(2, 17)
assert [str(r) for r in x] == ['-0.70710678118654752',
'0.70710678118654752']
assert [str(r) for r in w] == ['0.88622692545275801',
'0.88622692545275801']
x, w = gauss_hermite(3, 17)
assert [str(r) for r in x] == [
'-1.2247448713915890',
'0',
'1.2247448713915890']
assert [str(r) for r in w] == [
'0.29540897515091934',
'1.1816359006036774',
'0.29540897515091934']
x, w = gauss_hermite(4, 17)
assert [str(r) for r in x] == [
'-1.6506801238857846',
'-0.52464762327529032',
'0.52464762327529032',
'1.6506801238857846'
]
assert [str(r) for r in w] == [
'0.081312835447245177',
'0.80491409000551284',
'0.80491409000551284',
'0.081312835447245177'
]
x, w = gauss_hermite(5, 17)
assert [str(r) for r in x] == [
'-2.0201828704560856',
'-0.95857246461381851',
'0',
'0.95857246461381851',
'2.0201828704560856'
]
assert [str(r) for r in w] == [
'0.019953242059045913',
'0.39361932315224116',
'0.94530872048294188',
'0.39361932315224116',
'0.019953242059045913'
]
def test_hermite_precise():
x, w = gauss_hermite(3, 40)
assert [str(r) for r in x] == [
'-1.224744871391589049098642037352945695983',
'0',
'1.224744871391589049098642037352945695983'
]
assert [str(r) for r in w] == [
'0.2954089751509193378830279138901908637996',
'1.181635900603677351532111655560763455198',
'0.2954089751509193378830279138901908637996'
]
def test_gen_laguerre():
x, w = gauss_gen_laguerre(1, -S.Half, 17)
assert [str(r) for r in x] == ['0.50000000000000000']
assert [str(r) for r in w] == ['1.7724538509055160']
x, w = gauss_gen_laguerre(2, -S.Half, 17)
assert [str(r) for r in x] == ['0.27525512860841095',
'2.7247448713915890']
assert [str(r) for r in w] == ['1.6098281800110257',
'0.16262567089449035']
x, w = gauss_gen_laguerre(3, -S.Half, 17)
assert [str(r) for r in x] == ['0.19016350919348813',
'1.7844927485432516',
'5.5253437422632603']
assert [str(r) for r in w] == ['1.4492591904487850',
'0.31413464064571329',
'0.0090600198110176913']
x, w = gauss_gen_laguerre(4, -S.Half, 17)
assert [str(r) for r in x] == ['0.14530352150331709',
'1.3390972881263614',
'3.9269635013582872',
'8.5886356890120343']
assert [str(r) for r in w] ==['1.3222940251164826',
'0.41560465162978376',
'0.034155966014826951',
'0.00039920814442273524']
x, w = gauss_gen_laguerre(5, -S.Half, 17)
assert [str(r) for r in x] ==['0.11758132021177814',
'1.0745620124369040',
'3.0859374437175500',
'6.4147297336620305',
'11.807189489971737']
assert [str(r) for r in w] ==['1.2217252674706516',
'0.48027722216462937',
'0.067748788910962126',
'0.0026872914935624654',
'1.5280865710465241e-5']
x, w = gauss_gen_laguerre(1, 2, 17)
assert [str(r) for r in x] ==['3.0000000000000000']
assert [str(r) for r in w] == ['2.0000000000000000']
x, w = gauss_gen_laguerre(2, 2, 17)
assert [str(r) for r in x] == ['2.0000000000000000',
'6.0000000000000000']
assert [str(r) for r in w] ==['1.5000000000000000',
'0.50000000000000000']
x, w = gauss_gen_laguerre(3, 2, 17)
assert [str(r) for r in x] ==['1.5173870806774125',
'4.3115831337195203',
'9.1710297856030672']
assert [str(r) for r in w] ==['1.0374949614904253',
'0.90575000470306537',
'0.056755033806509347']
x, w = gauss_gen_laguerre(4, 2, 17)
assert [str(r) for r in x] ==['1.2267632635003021',
'3.4125073586969460',
'6.9026926058516134',
'12.458036771951139']
assert [str(r) for r in w] ==['0.72552499769865438',
'1.0634242919791946',
'0.20669613102835355',
'0.0043545792937974889']
x, w = gauss_gen_laguerre(5, 2, 17)
assert [str(r) for r in x] ==['1.0311091440933816',
'2.8372128239538217',
'5.6202942725987079',
'9.6829098376640271',
'15.828473921690062']
assert [str(r) for r in w] == ['0.52091739683509184',
'1.0667059331592211',
'0.38354972366693113',
'0.028564233532974658',
'0.00026271280578124935']
def test_gen_laguerre_precise():
x, w = gauss_gen_laguerre(3, -S.Half, 40)
assert [str(r) for r in x] ==['0.1901635091934881328718554276203028970878',
'1.784492748543251591186722461957367638500',
'5.525343742263260275941422110422329464413']
assert [str(r) for r in w] == ['1.449259190448785048183829411195134343108',
'0.3141346406457132878326231270167565378246',
'0.009060019811017691281714945129254301865020']
x, w = gauss_gen_laguerre(3, 2, 40)
assert [str(r) for r in x] == ['1.517387080677412495020323111016672547482',
'4.311583133719520302881184669723530562299',
'9.171029785603067202098492219259796890218']
assert [str(r) for r in w] ==['1.037494961490425285817554606541269153041',
'0.9057500047030653669269785048806009945254',
'0.05675503380650934725546688857812985243312']
def test_chebyshev_t():
x, w = gauss_chebyshev_t(1, 17)
assert [str(r) for r in x] == ['0']
assert [str(r) for r in w] == ['3.1415926535897932']
x, w = gauss_chebyshev_t(2, 17)
assert [str(r) for r in x] == ['0.70710678118654752',
'-0.70710678118654752']
assert [str(r) for r in w] == ['1.5707963267948966',
'1.5707963267948966']
x, w = gauss_chebyshev_t(3, 17)
assert [str(r) for r in x] == ['0.86602540378443865',
'0',
'-0.86602540378443865']
assert [str(r) for r in w] == ['1.0471975511965977',
'1.0471975511965977',
'1.0471975511965977']
x, w = gauss_chebyshev_t(4, 17)
assert [str(r) for r in x] == ['0.92387953251128676',
'0.38268343236508977',
'-0.38268343236508977',
'-0.92387953251128676']
assert [str(r) for r in w] == ['0.78539816339744831',
'0.78539816339744831',
'0.78539816339744831',
'0.78539816339744831']
x, w = gauss_chebyshev_t(5, 17)
assert [str(r) for r in x] == ['0.95105651629515357',
'0.58778525229247313',
'0',
'-0.58778525229247313',
'-0.95105651629515357']
assert [str(r) for r in w] == ['0.62831853071795865',
'0.62831853071795865',
'0.62831853071795865',
'0.62831853071795865',
'0.62831853071795865']
def test_chebyshev_t_precise():
x, w = gauss_chebyshev_t(3, 40)
assert [str(r) for r in x] == [
'0.8660254037844386467637231707529361834714',
'0',
'-0.8660254037844386467637231707529361834714']
assert [str(r) for r in w] == [
'1.047197551196597746154214461093167628066',
'1.047197551196597746154214461093167628066',
'1.047197551196597746154214461093167628066']
def test_chebyshev_u():
x, w = gauss_chebyshev_u(1, 17)
assert [str(r) for r in x] == ['0']
assert [str(r) for r in w] == ['1.5707963267948966']
x, w = gauss_chebyshev_u(2, 17)
assert [str(r) for r in x] == ['0.50000000000000000',
'-0.50000000000000000']
assert [str(r) for r in w] == ['0.78539816339744831',
'0.78539816339744831']
x, w = gauss_chebyshev_u(3, 17)
assert [str(r) for r in x] == ['0.70710678118654752',
'0',
'-0.70710678118654752']
assert [str(r) for r in w] == ['0.39269908169872415',
'0.78539816339744831',
'0.39269908169872415']
x, w = gauss_chebyshev_u(4, 17)
assert [str(r) for r in x] == ['0.80901699437494742',
'0.30901699437494742',
'-0.30901699437494742',
'-0.80901699437494742']
assert [str(r) for r in w] == ['0.21707871342270599',
'0.56831944997474231',
'0.56831944997474231',
'0.21707871342270599']
x, w = gauss_chebyshev_u(5, 17)
assert [str(r) for r in x] == ['0.86602540378443865',
'0.50000000000000000',
'0',
'-0.50000000000000000',
'-0.86602540378443865']
assert [str(r) for r in w] == ['0.13089969389957472',
'0.39269908169872415',
'0.52359877559829887',
'0.39269908169872415',
'0.13089969389957472']
def test_chebyshev_u_precise():
x, w = gauss_chebyshev_u(3, 40)
assert [str(r) for r in x] == [
'0.7071067811865475244008443621048490392848',
'0',
'-0.7071067811865475244008443621048490392848']
assert [str(r) for r in w] == [
'0.3926990816987241548078304229099378605246',
'0.7853981633974483096156608458198757210493',
'0.3926990816987241548078304229099378605246']
def test_jacobi():
x, w = gauss_jacobi(1, -S.Half, S.Half, 17)
assert [str(r) for r in x] == ['0.50000000000000000']
assert [str(r) for r in w] == ['3.1415926535897932']
x, w = gauss_jacobi(2, -S.Half, S.Half, 17)
assert [str(r) for r in x] == ['-0.30901699437494742',
'0.80901699437494742']
assert [str(r) for r in w] == ['0.86831485369082398',
'2.2732777998989693']
x, w = gauss_jacobi(3, -S.Half, S.Half, 17)
assert [str(r) for r in x] == ['-0.62348980185873353',
'0.22252093395631440',
'0.90096886790241913']
assert [str(r) for r in w] == ['0.33795476356635433',
'1.0973322242791115',
'1.7063056657443274']
x, w = gauss_jacobi(4, -S.Half, S.Half, 17)
assert [str(r) for r in x] == ['-0.76604444311897804',
'-0.17364817766693035',
'0.50000000000000000',
'0.93969262078590838']
assert [str(r) for r in w] == ['0.16333179083642836',
'0.57690240318269103',
'1.0471975511965977',
'1.3541609083740761']
x, w = gauss_jacobi(5, -S.Half, S.Half, 17)
assert [str(r) for r in x] == ['-0.84125353283118117',
'-0.41541501300188643',
'0.14231483827328514',
'0.65486073394528506',
'0.95949297361449739']
assert [str(r) for r in w] == ['0.090675770007435371',
'0.33391416373675607',
'0.65248870981926643',
'0.94525424081394926',
'1.1192597692123861']
x, w = gauss_jacobi(1, 2, 3, 17)
assert [str(r) for r in x] == ['0.14285714285714286']
assert [str(r) for r in w] == ['1.0666666666666667']
x, w = gauss_jacobi(2, 2, 3, 17)
assert [str(r) for r in x] == ['-0.24025307335204215',
'0.46247529557426437']
assert [str(r) for r in w] == ['0.48514624517838660',
'0.58152042148828007']
x, w = gauss_jacobi(3, 2, 3, 17)
assert [str(r) for r in x] == ['-0.46115870378089762',
'0.10438533038323902',
'0.62950064612493132']
assert [str(r) for r in w] == ['0.17937613502213266',
'0.61595640991147154',
'0.27133412173306246']
x, w = gauss_jacobi(4, 2, 3, 17)
assert [str(r) for r in x] == ['-0.59903470850824782',
'-0.14761105199952565',
'0.32554377081188859',
'0.72879429738819258']
assert [str(r) for r in w] == ['0.067809641836772187',
'0.38956404952032481',
'0.47995970868024150',
'0.12933326662932816']
x, w = gauss_jacobi(5, 2, 3, 17)
assert [str(r) for r in x] == ['-0.69045775012676106',
'-0.32651993134900065',
'0.082337849552034905',
'0.47517887061283164',
'0.79279429464422850']
assert [str(r) for r in w] ==['0.027410178066337099',
'0.21291786060364828',
'0.43908437944395081',
'0.32220656547221822',
'0.065047683080512268']
def test_jacobi_precise():
x, w = gauss_jacobi(3, -S.Half, S.Half, 40)
assert [str(r) for r in x] == [
'-0.6234898018587335305250048840042398106323',
'0.2225209339563144042889025644967947594664',
'0.9009688679024191262361023195074450511659']
assert [str(r) for r in w] == [
'0.3379547635663543330553835737094171534907',
'1.097332224279111467485302294320899710461',
'1.706305665744327437921957515249186020246']
x, w = gauss_jacobi(3, 2, 3, 40)
assert [str(r) for r in x] == [
'-0.4611587037808976179121958105554375981274',
'0.1043853303832390210914918407615869143233',
'0.6295006461249313240934312425211234110769']
assert [str(r) for r in w] == [
'0.1793761350221326596137764371503859752628',
'0.6159564099114715430909548532229749439714',
'0.2713341217330624639619353762933057474325']
| bsd-3-clause |
mozilla/verbatim | vendor/lib/python/django/contrib/flatpages/models.py | 410 | 1134 | from django.db import models
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
class FlatPage(models.Model):
url = models.CharField(_('URL'), max_length=100, db_index=True)
title = models.CharField(_('title'), max_length=200)
content = models.TextField(_('content'), blank=True)
enable_comments = models.BooleanField(_('enable comments'))
template_name = models.CharField(_('template name'), max_length=70, blank=True,
help_text=_("Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use 'flatpages/default.html'."))
registration_required = models.BooleanField(_('registration required'), help_text=_("If this is checked, only logged-in users will be able to view the page."))
sites = models.ManyToManyField(Site)
class Meta:
db_table = 'django_flatpage'
verbose_name = _('flat page')
verbose_name_plural = _('flat pages')
ordering = ('url',)
def __unicode__(self):
return u"%s -- %s" % (self.url, self.title)
def get_absolute_url(self):
return self.url
| gpl-2.0 |
FHannes/intellij-community | python/testData/inspections/PyCompatibilityInspection/asyncComprehensions.py | 12 | 2070 | <warning descr="Python versions < 3.5 do not support this syntax">async</warning> def asyncgen():
<warning descr="Python version 3.5 does not support 'yield' inside async functions">yield 10</warning>
<warning descr="Python versions < 3.5 do not support this syntax">async</warning> def run():
<warning descr="Python version 2.4, 2.5, 2.6, 3.0 do not support set comprehensions">{i <warning descr="Python version 3.5 does not support 'async' inside comprehensions and generator expressions">async</warning> for i in asyncgen()}</warning>
[i <warning descr="Python version 3.5 does not support 'async' inside comprehensions and generator expressions">async</warning> for i in asyncgen()]
<warning descr="Python version 2.4, 2.5, 2.6, 3.0 do not support dictionary comprehensions">{i: i ** 2 <warning descr="Python version 3.5 does not support 'async' inside comprehensions and generator expressions">async</warning> for i in asyncgen()}</warning>
(i ** 2 <warning descr="Python version 3.5 does not support 'async' inside comprehensions and generator expressions">async</warning> for i in asyncgen())
list(i <warning descr="Python version 3.5 does not support 'async' inside comprehensions and generator expressions">async</warning> for i in asyncgen())
dataset = <warning descr="Python version 2.4, 2.5, 2.6, 3.0 do not support set comprehensions">{data for line in gen()
<warning descr="Python version 3.5 does not support 'async' inside comprehensions and generator expressions">async</warning> for data in line
if check(data)}</warning>
dataset = <warning descr="Python version 2.4, 2.5, 2.6, 3.0 do not support set comprehensions">{data <warning descr="Python version 3.5 does not support 'async' inside comprehensions and generator expressions">async</warning> for line in asyncgen()
<warning descr="Python version 3.5 does not support 'async' inside comprehensions and generator expressions">async</warning> for data in line
if check(data)}</warning> | apache-2.0 |
chandrikas/sm | tests/test_LVHDoISCSISR.py | 2 | 3031 | import unittest
import mock
import SR
import LVHDoISCSISR
import xs_errors
import testlib
from test_ISCSISR import NonInitingISCSISR
class RandomError(Exception):
pass
class NonInitingLVHDoISCSISR(LVHDoISCSISR.LVHDoISCSISR):
"""
Helper class; Creates dummy LVHDoISCSISR object.
Add attributes/methods as appropriate.
"""
def __init__(self, extra_dconf=None, extra_params=None):
from SRCommand import SRCommand
from DummySR import DRIVER_INFO
self.mpath = "false"
self.dconf = {
'target': 'target',
'localIQN': 'localIQN',
'targetIQN': 'targetIQN',
'SCSIid': 'SCSIid'
}
self.srcmd = mock.Mock(spec=SRCommand(DRIVER_INFO))
self.srcmd.dconf = self.dconf
self.original_srcmd = self.srcmd
self.srcmd.params = {'command': 'command'}
self.srcmd.dconf.update(extra_dconf or {})
self.srcmd.params.update(extra_params or {})
class TestLVHDoISCSISR_load(unittest.TestCase):
"""
Tests for 'LVHDoISCSISR.load()'
"""
def setUp(self):
patchers = [
mock.patch(
'BaseISCSI.BaseISCSISR',
return_value=NonInitingISCSISR()
),
mock.patch('util._convertDNS', return_value='127.0.0.1'),
mock.patch('SR.driver'),
]
map(lambda patcher: patcher.start(), patchers)
map(lambda patcher: self.addCleanup(patcher.stop), patchers)
self.lvhd_o_iscsi_sr = NonInitingLVHDoISCSISR(
{'targetIQN': '*'},
{'command': 'sr_create'}
)
self.fake_uuid = 'deadbeef'
@mock.patch('iscsilib.ensure_daemon_running_ok')
@testlib.with_context
def test_1st_try_block_raise_XenError(
self,
context,
mock_iscsilib_ensure_daemon_running_ok):
context.setup_error_codes()
mock_iscsilib_ensure_daemon_running_ok.side_effect = xs_errors.XenError(
'ISCSIInitiator',
'Raise XenError'
)
with self.assertRaises(SR.SROSError) as cm:
self.lvhd_o_iscsi_sr.load(self.fake_uuid)
self.assertEqual(cm.exception.errno, 70)
self.assertEqual(
str(cm.exception),
'Failed to set ISCSI initiator [opterr=Raise XenError]'
)
@mock.patch('iscsilib.ensure_daemon_running_ok')
@testlib.with_context
def test_1st_try_block_raise_RandomError(
self,
context,
mock_iscsilib_ensure_daemon_running_ok):
context.setup_error_codes()
mock_iscsilib_ensure_daemon_running_ok.side_effect = RandomError(
'Raise RandomError'
)
with self.assertRaises(SR.SROSError) as cm:
self.lvhd_o_iscsi_sr.load(self.fake_uuid)
self.assertEqual(cm.exception.errno, 202)
self.assertEqual(
str(cm.exception),
'General backend error [opterr=Raise RandomError]'
)
| lgpl-2.1 |
garvitr/sympy | sympy/vector/scalar.py | 42 | 1640 | from sympy.core.symbol import Symbol
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import prettyForm
class BaseScalar(Symbol):
"""
A coordinate symbol/base scalar.
Ideally, users should not instantiate this class.
"""
def __new__(cls, name, index, system, pretty_str, latex_str):
from sympy.vector.coordsysrect import CoordSysCartesian
obj = super(BaseScalar, cls).__new__(cls, name)
if not isinstance(system, CoordSysCartesian):
raise TypeError("system should be a CoordSysCartesian")
if index not in range(0, 3):
raise ValueError("Invalid index specified.")
#The _id is used for equating purposes, and for hashing
obj._id = (index, system)
obj._name = name
obj._pretty_form = u(pretty_str)
obj._latex_form = latex_str
obj._system = system
return obj
def _latex(self, printer=None):
return self._latex_form
def _pretty(self, printer=None):
return prettyForm(self._pretty_form)
@property
def system(self):
return self._system
def __eq__(self, other):
#Check if the other object is a BaseScalar of same index
#and coordinate system
if isinstance(other, BaseScalar):
if other._id == self._id:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self._id.__hash__()
def __str__(self, printer=None):
return self._name
__repr__ = __str__
_sympystr = __str__
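# Usage sketch (illustrative, not part of this module): BaseScalar instances are
# normally created by a coordinate system rather than instantiated directly, e.g.
#
#   from sympy.vector import CoordSysCartesian
#   N = CoordSysCartesian('N')
#   N.x, N.y, N.z    # BaseScalar objects representing the base coordinates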
| bsd-3-clause |
channsoden/hannsoden-bioinformatics | WholeGenomePhylogeny/partition_finder_tools.py | 1 | 5531 | # defunct partition finder functions
import os
import sys
import shutil
import subprocess as sp
# Note: submit, job_wait, and fasta_to_phylip are used below but not defined in
# this file; they are assumed to be provided elsewhere.
def configure_PF(alignment_file, user_tree = '', branchlengths = 'linked', models='GTR+G', criteria = 'aicc', partition = '', search = 'kmeans'):
# Create a partition_finder.cfg file
cfg = open('partition_finder.cfg', 'w')
cfg.write("# ALIGNMENT FILE #\n")
cfg.write("alignment = {};\n".format(os.path.basename(alignment_file)))
if user_tree:
# Link the user tree into the working directory if necessary
treebase = os.path.basename(user_tree)
if not treebase in os.listdir('.'):
os.symlink(user_tree, treebase)
cfg.write("user_tree_topology = {};\n".format(treebase))
cfg.write("\n")
cfg.write("# BRANCHLENGTHS #\n")
cfg.write("branchlengths = {};\n".format(branchlengths))
cfg.write("\n")
cfg.write("# MODELS OF EVOLUTION #\n")
cfg.write("models = {};\n".format(models))
cfg.write("model_selection = {};\n".format(criteria))
cfg.write("\n")
cfg.write("# DATA BLOCKS #\n")
cfg.write("[data_blocks]\n")
if partition:
        exit('configure_pf(): Configuring PF with a user-defined partition is not yet implemented. Only the kmeans algorithm is implemented at this point.')
else:
with open(alignment_file, 'r') as fh:
genomesize = int(fh.readline().strip().split()[1])
cfg.write("genome = 1-{};\n".format(genomesize))
cfg.write("\n")
cfg.write("# SCHEMES #\n")
cfg.write("[schemes]\n")
cfg.write("search = {};\n".format(search))
cfg.write("\n")
cfg.write("# user schemes (see manual)\n")
cfg.close()
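# Illustrative usage sketch (not part of the original module); the path below is
# hypothetical. configure_PF() is expected to be called from the directory in
# which partition_finder.cfg should be written:
#
#   configure_PF('/path/to/alignment.phy', branchlengths='linked',
#                models='GTR+G', criteria='aicc', search='kmeans')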
def partition_finder(args):
# Run Partition Finder 2
# Using more than one thread does not seem to make a difference, at least on my system with the current version.
PFpath = '/global/scratch/hannsode/pkgs/partitionfinder/PartitionFinder.py'
basedir = os.getcwd()
os.chdir(args.output)
command = '{0} {1} {2} --raxml'.format(sys.executable, PFpath, os.getcwd())
partitioning = sp.Popen(command.split())
os.chdir(basedir)
return partitioning.wait()
def get_scheme(output):
# Pulls the partitioning scheme suitable for RAxML/ExaML out of the results of a PartitionFinder analysis.
with open(output+'/analysis/best_scheme.txt', 'r') as best_scheme:
subsets = [line for line in best_scheme if line.startswith('DNA, Subset')]
outfile = output+'.best_scheme.partition'
with open(outfile, 'w') as fh:
fh.writelines(subsets)
return outfile
def partition(args, alignment):
basedir = os.getcwd()
try:
os.mkdir('3_partitioning')
except OSError:
pass
os.chdir('3_partitioning')
try:
os.mkdir(args.output)
except OSError:
pass
phylip = fasta_to_phylip(alignment)
# PartitionFinder must be run from a unique subdirectory
    # because the names of its intermediate and output files
    # are hardcoded. This allows running multiple
    # instances of WGP from the same directory.
phypath = basedir+'/3_partitioning/'+phylip
link = basedir+'/3_partitioning/'+args.output+'/'+phylip
if not (os.path.islink(link) and os.path.realpath(link) == phypath):
try:
os.remove(link)
except OSError:
pass
os.symlink(phypath, link)
os.chdir(args.output)
configure_PF(phylip)
os.chdir(basedir+'/3_partitioning')
input_size = os.stat(phylip).st_size
if input_size > 300 * 10 ** 6:
ID = submit('{} {} {} 20'.format(sys.executable, __file__, basedir+'/'+args.output+'.args.pickle'),
partition = 'savio_bigmem',
account = 'co_rosalind',
qos = 'savio_lowprio',
time = '12:0:0',
job_name = 'partitionfinder',
cpus_per_task = 20,
mem_per_cpu = '25600',
modules = ['raxml/8.1.17'])
elif input_size > 50 * 10 ** 6:
ID = submit('{} {} {} 24'.format(sys.executable, __file__, basedir+'/'+args.output+'.args.pickle'),
partition = 'savio2_bigmem',
account = 'co_rosalind',
qos = 'savio_lowprio',
time = '12:0:0',
job_name = 'partitionfinder',
cpus_per_task = 24,
mem_per_cpu = '5300',
modules = ['raxml/8.1.17'])
else:
ID = submit('{} {} {} 20'.format(sys.executable, __file__, basedir+'/'+args.output+'.args.pickle'),
partition = 'savio',
account = 'co_rosalind',
qos = 'rosalind_savio_normal',
time = '12:0:0',
job_name = 'partitionfinder',
cpus_per_task = 20,
mem_per_cpu = '3000',
modules = ['raxml/8.1.17'])
job_wait(ID)
outfile = 'partitionfinder_'+str(ID)+'.out'
errfile = 'partitionfinder_'+str(ID)+'.err'
partition_file = get_scheme(args.output)
os.chdir(basedir)
cleanup(logs=[outfile, errfile], trashdir=basedir+'/3_partitioning/'+args.output)
return basedir+'/3_partitioning/'+partition_file, phypath
def cleanup(logs=[], trashdir=None):
if logs and not os.path.isdir('3_partitioning/logs'):
os.mkdir('3_partitioning/logs')
[os.rename('3_partitioning/'+log, '3_partitioning/logs/'+log) for log in logs]
if trashdir:
shutil.rmtree(trashdir)
| gpl-3.0 |
GeoCat/QGIS | python/plugins/processing/algs/grass7/ext/r_li_padrange_ascii.py | 5 | 1412 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_padrange_ascii.py
----------------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .r_li import checkMovingWindow, configFile, moveOutputTxtFile
def checkParameterValuesBeforeExecuting(alg):
return checkMovingWindow(alg, True)
def processCommand(alg, parameters):
configFile(alg, parameters, True)
def processOutputs(alg):
moveOutputTxtFile(alg)
| gpl-2.0 |
erget/tnsmaster | tests/test_aliasFinder.py | 2 | 1116 | import os
from unittest import TestCase
from antlr4 import FileStream, CommonTokenStream, ParseTreeWalker
from tnsnames.aliasFinder import AliasFinder
from tnsnames.tnsnamesLexer import tnsnamesLexer
from tnsnames.tnsnamesParser import tnsnamesParser
__author__ = 'dirkfuchs'
class TestAliasFinder(TestCase):
_tnsnames_file = None
def setUp(self):
path = os.path.dirname(os.path.abspath(__file__))
self._tnsnames_file = '{0}/testFiles/tnsnames.ora'.format(path)
def test_get_aliases(self):
input_file_stream = FileStream(self._tnsnames_file)
lexer = tnsnamesLexer(input_file_stream)
stream = CommonTokenStream(lexer)
parser = tnsnamesParser(stream)
tree = parser.tnsnames()
listener = AliasFinder()
walker = ParseTreeWalker()
walker.walk(listener, tree)
expected_aliases = ['LSNR_FRED', 'LSNR_WILMA', 'lsnr_barney', 'alias_1', 'alias_2.world',
'alias3.dunbar-it.co.uk', 'someother_alias', 'someother_alias2']
self.assertListEqual(listener.get_aliases, expected_aliases)
| mit |
DualSpark/ansible | lib/ansible/parsing/yaml/loader.py | 234 | 1877 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from _yaml import CParser, CEmitter
HAVE_PYYAML_C = True
except ImportError:
HAVE_PYYAML_C = False
from yaml.resolver import Resolver
from ansible.parsing.yaml.constructor import AnsibleConstructor
if HAVE_PYYAML_C:
class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None):
CParser.__init__(self, stream)
AnsibleConstructor.__init__(self, file_name=file_name)
Resolver.__init__(self)
else:
from yaml.composer import Composer
from yaml.reader import Reader
from yaml.scanner import Scanner
from yaml.parser import Parser
class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
AnsibleConstructor.__init__(self, file_name=file_name)
Resolver.__init__(self)
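# Usage sketch (illustrative only, not part of this module):
#
#   with open('playbook.yml') as f:
#       loader = AnsibleLoader(f, file_name='playbook.yml')
#       data = loader.get_single_data()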
| gpl-3.0 |
Panos512/invenio | modules/bibcatalog/lib/bibcatalog_system.py | 17 | 8332 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Provide a "ticket" interface with a request tracker.
Please see the help/hacking/bibcatalog-api page for details.
This is a base class that cannot be instantiated.
"""
from invenio.webuser import get_user_preferences
class BibCatalogSystem(object):
""" A template class for ticket support."""
TICKET_ATTRIBUTES = ['ticketid', 'priority', 'recordid', 'subject', 'text', 'creator', 'owner', 'date', 'status', 'queue', 'url_display', 'url_modify', 'url_close', 'created']
def check_system(self, uid=None):
"""Check connectivity. Return a string describing the error or an empty str
@param uid: invenio user id. optional
@type uid: number
@return: empty string on success. Otherwise a string describing error.
@rtype: string
"""
raise NotImplementedError("This class cannot be instantiated")
def ticket_search(self, uid, recordid=-1, subject="", text="", creator="", owner="", \
date_from="", date_until="", status="", priority="", queue=""):
"""Search for tickets based on various criteria. Return an array of ticket numbers
@param uid: invenio user id.
@type uid: number
@param recordid: search criteria - ticket contains this record id.
@type recordid: number
@param subject: search criteria - ticket has this subject (substr).
@type subject: string
@param text: search criteria - ticket has this text in body (substr).
@type text: string
@param creator: search criteria - ticket creator's id.
@type creator: number
@param owner: search criteria - ticket owner's id.
@type owner: number
        @param date_from: search criteria - ticket created starting from this date. Example: '2009-01-24'
        @type date_from: date in yyyy-mm-dd format
        @param date_until: search criteria - ticket created up until this date. Example: '2009-01-24'
        @type date_until: date in yyyy-mm-dd format
@param status: search criteria - ticket has this status. Example: 'resolved'.
@type status: string
@param priority: search criteria - ticket priority number.
@type priority: number
@param queue: search criteria - specific queue to search within
@type queue: string
"""
raise NotImplementedError("This class cannot be instantiated")
    def ticket_submit(self, uid=None, subject="", recordid=-1, text="", queue="", priority="", owner="", requestor=""):
        """Submit a ticket. Return the ticket number on success, otherwise None.
@param uid: invenio user id. optional
@type uid: number
@param subject: set this as the ticket's subject.
@type subject: string
@param recordid: ticket concerns this record.
@type recordid: number
@param text: ticket body.
@type text: string
@param queue: the queue for this ticket (if supported).
@type queue: string
@param priority: ticket priority.
@type priority: number
@param owner: set ticket owner to this uid.
@type owner: number
@param requestor: set ticket requestor to this email.
@type requestor: string
@return: new ticket id or None
"""
raise NotImplementedError("This class cannot be instantiated")
def ticket_assign(self, uid, ticketid, to_user):
"""assign a ticket to a user. Return 1 on success
@param uid: invenio user id
@type uid: number
@param ticketid: ticket id
@type ticketid: number
@param to_user: assign ticket to this user
@type to_user: number
@return: 1 on success, 0 otherwise
@rtype: number
"""
raise NotImplementedError("This class cannot be instantiated")
def ticket_steal(self, uid, ticketid):
"""Steal a ticket from a user.
@param uid: invenio user id
@type uid: number
@param ticketid: ticket id
@type ticketid: number
@return: 1 on success, 0 otherwise
@rtype: number
"""
raise NotImplementedError("This class cannot be instantiated")
def ticket_set_attribute(self, uid, ticketid, attribute, new_value):
"""set an attribute of a ticket. Return 1 on success
@param uid: invenio user id
@type uid: number
@param ticketid: ticket id
@type ticketid: number
@param attribute: This is a member of TICKET_ATTRIBUTES.
@type attribute: string
@param new_value: new value for this attribute.
@type new_value: string
@return: 1 on success, 0 otherwise
@rtype: number
"""
raise NotImplementedError("This class cannot be instantiated")
def ticket_get_attribute(self, uid, ticketid, attribute):
"""return an attribute
@param uid: invenio user id
@type uid: number
@param ticketid: ticket id
@type ticketid: number
@param attribute: attribute name.
@type attribute: string
@return: the value of the attribute, or None if the ticket or attribute does not exist
@rtype: string
"""
raise NotImplementedError("This class cannot be instantiated")
def ticket_get_info(self, uid, ticketid, attributes = None):
"""Return the attributes of a ticket as a dictionary whose fields are TICKET_ATTRIBUTES.
@param uid: user id
@type uid: number
@param ticketid: ticket id
@type ticketid: number
@param attributes: a list of attributes, each in TICKET_ATTRIBUTES.
@type attributes: list
@return: dictionary whose fields are TICKET_ATTRIBUTES
@rtype: dictionary
"""
raise NotImplementedError("This class cannot be instantiated")
def get_queues(self, uid):
"""Return a list of all available queues
@param uid: user id
@type uid: number
@return: list whose every element is a dictionary representing a queue
e.g {'id': '35', 'name': 'Admins'}
@rtype: list
"""
raise NotImplementedError("This class cannot be instantiated")
def ticket_comment(self, uid, ticketid, comment):
"""Submit a comment to specified ticket. Accepts multi-line text.
@param uid: user id
@type uid: number
@param ticketid: ticket id
@type ticketid: number
@param comment: the comment to send.
@type comment: string
@return: 1 on success, otherwise 0
@rtype: int
"""
raise NotImplementedError("This class cannot be instantiated")
def get_bibcat_from_prefs(uid):
"""gets username and pw from user prefs as a tuple.
if not successfull, returns None
@param uid: user id
@type uid: number
@return: ('bibcatalog_username', 'bibcatalog_password')
@rtype: tuple
"""
user_pref = get_user_preferences(uid)
if not user_pref.has_key('bibcatalog_username'):
return (None, None)
if not user_pref.has_key('bibcatalog_password'):
return (None, None)
return (user_pref['bibcatalog_username'], user_pref['bibcatalog_password'])
| gpl-2.0 |
doug-fish/horizon | openstack_dashboard/test/integration_tests/tests/test_sahara_job_binaries.py | 50 | 3397 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.pages.project.data_processing\
import jobbinariespage
from openstack_dashboard.test.integration_tests.tests import decorators
JOB_BINARY_INTERNAL = {
# Size of binary name is limited to 50 characters
jobbinariespage.JobbinariesPage.BINARY_NAME:
helpers.gen_random_resource_name(resource='jobbinary',
timestamp=False)[0:50],
jobbinariespage.JobbinariesPage.BINARY_STORAGE_TYPE:
"Internal database",
jobbinariespage.JobbinariesPage.BINARY_URL: None,
jobbinariespage.JobbinariesPage.INTERNAL_BINARY:
"*Create a script",
jobbinariespage.JobbinariesPage.BINARY_PATH: None,
jobbinariespage.JobbinariesPage.SCRIPT_NAME:
helpers.gen_random_resource_name(resource='scriptname',
timestamp=False),
jobbinariespage.JobbinariesPage.SCRIPT_TEXT: "test_script_text",
jobbinariespage.JobbinariesPage.USERNAME: None,
jobbinariespage.JobbinariesPage.PASSWORD: None,
jobbinariespage.JobbinariesPage.DESCRIPTION: "test description"
}
@decorators.services_required("sahara")
class TestSaharaJobBinary(helpers.TestCase):
def _sahara_create_delete_job_binary(self, job_binary_template):
job_name = \
job_binary_template[jobbinariespage.JobbinariesPage.BINARY_NAME]
# create job binary
job_binary_pg = self.home_pg.go_to_dataprocessing_jobbinariespage()
self.assertFalse(job_binary_pg.is_job_binary_present(job_name),
"Job binary was present in the binaries table"
" before its creation.")
job_binary_pg.create_job_binary(**job_binary_template)
# verify that job is created without problems
self.assertFalse(job_binary_pg.is_error_message_present(),
"Error message occurred during binary job creation.")
self.assertTrue(job_binary_pg.is_job_binary_present(job_name),
"Job binary is not in the binaries job table after"
" its creation.")
# delete binary job
job_binary_pg.delete_job_binary(job_name)
# verify that job was successfully deleted
self.assertFalse(job_binary_pg.is_error_message_present(),
"Error message occurred during binary job deletion.")
self.assertFalse(job_binary_pg.is_job_binary_present(job_name),
"Job binary was not removed from binaries job table.")
def test_sahara_create_delete_job_binary_internaldb(self):
"""Test the creation of a Job Binary in the Internal DB."""
self._sahara_create_delete_job_binary(JOB_BINARY_INTERNAL)
| apache-2.0 |
jounex/hue | desktop/core/ext-py/Django-1.6.10/django/db/backends/__init__.py | 41 | 47980 | import datetime
import time
from django.db.utils import DatabaseError
try:
from django.utils.six.moves import _thread as thread
except ImportError:
from django.utils.six.moves import _dummy_thread as thread
from collections import namedtuple
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.signals import connection_created
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseErrorWrapper
from django.utils.functional import cached_property
from django.utils.importlib import import_module
from django.utils import six
from django.utils import timezone
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Savepoint management related attributes
self.savepoint_state = 0
# Transaction management related attributes
self.autocommit = False
self.transaction_state = []
# Tracks if the connection is believed to be in transaction. This is
# set somewhat aggressively, as the DBAPI doesn't make it easy to
# deduce if the connection is in transaction or not.
self._dirty = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# List of savepoints created by 'atomic'
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
# Thread-safety related attributes
self.allow_thread_sharing = allow_thread_sharing
self._thread_ident = thread.get_ident()
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.alias)
##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Returns a dict of parameters suitable for get_new_connection."""
raise NotImplementedError
def get_new_connection(self, conn_params):
"""Opens a connection to the database."""
raise NotImplementedError
def init_connection_state(self):
"""Initializes the database connection settings."""
raise NotImplementedError
def create_cursor(self):
"""Creates a cursor. Assumes that a connection is established."""
raise NotImplementedError
##### Backend-specific methods for creating connections #####
def connect(self):
"""Connects to the database. Assumes that the connection is closed."""
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.needs_rollback = False
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.init_connection_state()
if self.settings_dict['AUTOCOMMIT']:
self.set_autocommit(True)
connection_created.send(sender=self.__class__, connection=self)
def ensure_connection(self):
"""
Guarantees that a connection to the database is established.
"""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
##### Backend-specific wrappers for PEP-249 connection methods #####
def _cursor(self):
self.ensure_connection()
with self.wrap_database_errors:
return self.create_cursor()
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""
Creates a cursor, opening a connection if necessary.
"""
self.validate_thread_sharing()
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def commit(self):
"""
Commits a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
self.set_clean()
def rollback(self):
"""
Rolls back a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
self.set_clean()
def close(self):
"""
Closes the connection to the database.
"""
self.validate_thread_sharing()
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
self.set_clean()
##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
##### Generic savepoint management methods #####
def savepoint(self):
"""
Creates a savepoint inside the current transaction. Returns an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
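    # Illustrative usage pattern (not part of this class): savepoint(),
    # savepoint_rollback() and savepoint_commit() are typically paired, e.g.
    #
    #   sid = connection.savepoint()
    #   try:
    #       ...  # queries that may fail
    #   except Exception:
    #       connection.savepoint_rollback(sid)
    #   else:
    #       connection.savepoint_commit(sid)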
def savepoint_rollback(self, sid):
"""
Rolls back to a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Releases a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
def clean_savepoints(self):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError
##### Generic transaction management methods #####
def enter_transaction_management(self, managed=True, forced=False):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
If you switch off transaction management and there is a pending
        commit/rollback, the data will be committed, unless "forced" is True.
"""
self.validate_no_atomic_block()
self.transaction_state.append(managed)
if not managed and self.is_dirty() and not forced:
self.commit()
self.set_clean()
if managed == self.get_autocommit():
self.set_autocommit(not managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
self.validate_no_atomic_block()
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError(
"This code isn't under transaction management")
if self.transaction_state:
managed = self.transaction_state[-1]
else:
managed = not self.settings_dict['AUTOCOMMIT']
if self._dirty:
self.rollback()
if managed == self.get_autocommit():
self.set_autocommit(not managed)
raise TransactionManagementError(
"Transaction managed block ended with pending COMMIT/ROLLBACK")
if managed == self.get_autocommit():
self.set_autocommit(not managed)
def get_autocommit(self):
"""
Check the autocommit state.
"""
self.ensure_connection()
return self.autocommit
def set_autocommit(self, autocommit):
"""
Enable or disable autocommit.
"""
self.validate_no_atomic_block()
self.ensure_connection()
self._set_autocommit(autocommit)
self.autocommit = autocommit
def get_rollback(self):
"""
Get the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""
Raise an error if an atomic block is active.
"""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block.")
def abort(self):
"""
Roll back any ongoing transaction and clean the transaction state
stack.
"""
if self._dirty:
self.rollback()
while self.transaction_state:
self.leave_transaction_management()
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
Sets a dirty flag for the current thread and code streak. This can be used
to decide in a managed block of code to decide whether there are open
changes waiting for commit.
"""
if not self.get_autocommit():
self._dirty = True
def set_clean(self):
"""
Resets a dirty flag for the current thread and code streak. This can be used
to decide in a managed block of code to decide whether a commit or rollback
should happen.
"""
self._dirty = False
self.clean_savepoints()
##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Context manager that disables foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
##### Connection termination handling #####
def is_usable(self):
"""
Tests if the database connection is usable.
This function may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method")
def close_if_unusable_or_obsolete(self):
"""
Closes the current connection if unrecoverable errors have occurred,
or if it outlived its maximum age.
"""
if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
self.close()
return
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
if self.close_at is not None and time.time() >= self.close_at:
self.close()
return
##### Thread safety handling #####
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if not (self.allow_thread_sharing
or self._thread_ident == thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
##### Miscellaneous #####
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def make_debug_cursor(self, cursor):
"""
Creates a cursor that logs all queries in self.queries.
"""
return util.CursorDebugWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provides a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
cursor = self.cursor()
try:
yield cursor
finally:
cursor.close()
if must_close:
self.close()
def _start_transaction_under_autocommit(self):
"""
Only required when autocommits_when_autocommit_is_off = True.
"""
raise NotImplementedError
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"Confirm support for transactions"
try:
# Make sure to run inside a managed transaction block,
            # otherwise autocommit will cause the confirmation to
# fail.
self.connection.enter_transaction_management()
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection.commit()
finally:
self.connection.leave_transaction_management()
return count == 0
@cached_property
def supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
return True
except NotImplementedError:
return False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError()
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
raise NotImplementedError()
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import force_text
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = dict((to_unicode(k), to_unicode(v)) for k, v in params.items())
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Returns the value to use for the LIMIT when we want "LIMIT
        infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError()
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import force_text
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
return [first, second]
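    # For example (illustrative only): year_lookup_bounds_for_date_field(2009)
    # returns [datetime.date(2009, 1, 1), datetime.date(2009, 12, 31)].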
def year_lookup_bounds_for_datetime_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
return [first, second]
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent type
that is compatible with the field type.
"""
if value is None or field is None:
return value
internal_type = field.get_internal_type()
if internal_type == 'FloatField':
return float(value)
elif (internal_type and (internal_type.endswith('IntegerField')
or internal_type == 'AutoField')):
return int(value)
return value
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def modify_insert_params(self, placeholders, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
# Structure returned by the DB-API cursor.description interface (PEP 249)
FieldInfo = namedtuple('FieldInfo',
'name type_code display_size internal_size precision scale null_ok'
)
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self, cursor=None):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
do NOT use database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
if cursor is None:
cursor = self.connection.cursor()
return sorted(self.get_table_list(cursor))
def get_table_list(self, cursor):
"""
Returns an unsorted list of names of all tables that exist in the
database.
"""
raise NotImplementedError
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
tables = list(tables)
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = list(map(self.table_name_converter, tables))
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table.
"""
for column in six.iteritems(self.get_indexes(cursor, table_name)):
if column[1]['primary_key']:
return column[0]
return None
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
| apache-2.0 |
gdietz/OpenMEE | edit_funnel_plot_form.py | 1 | 1408 | '''
Created on Dec 12, 2013
@author: george
'''
import sys
from PyQt4 import QtCore, QtGui
from PyQt4.Qt import *
from common_wizard_pages.funnel_page import FunnelPage
class EditFunnelPlotForm(QDialog):
def __init__(self, funnel_params, parent=None):
super(EditFunnelPlotForm, self).__init__(parent)
self.funnelpage = FunnelPage(old_funnel_params=funnel_params)
self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
self.buttonbox.accepted.connect(self.accept)
self.buttonbox.rejected.connect(self.reject)
        self.ok_button = self.buttonbox.button(QDialogButtonBox.Ok)
self.funnelpage.completeChanged.connect(self.setenable_OK)
vlayout = QVBoxLayout()
vlayout.addWidget(self.funnelpage)
vlayout.addWidget(self.buttonbox)
self.setLayout(vlayout)
self.adjustSize()
def setenable_OK(self):
if self.funnelpage.isComplete():
self.ok_button.setEnabled(True)
else:
self.ok_button.setEnabled(False)
def get_params(self):
return self.funnelpage.get_parameters()
if __name__ == "__main__":
app = QApplication(sys.argv)
form = EditFunnelPlotForm()
form.show()
form.raise_()
sys.exit(app.exec_()) | gpl-3.0 |
errx/django | django/db/migrations/writer.py | 7 | 11484 | from __future__ import unicode_literals
import datetime
import inspect
import decimal
import collections
from importlib import import_module
import os
import types
from django.apps import apps
from django.db import models
from django.db.migrations.loader import MigrationLoader
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils import six
class SettingsReference(str):
"""
Special subclass of string which actually references a current settings
value. It's treated as the value in memory, but serializes out to a
settings.NAME attribute reference.
"""
def __new__(self, value, setting_name):
return str.__new__(self, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
class OperationWriter(object):
indentation = 2
def __init__(self, operation):
self.operation = operation
self.buff = []
def serialize(self):
imports = set()
name, args, kwargs = self.operation.deconstruct()
argspec = inspect.getargspec(self.operation.__init__)
normalized_kwargs = inspect.getcallargs(self.operation.__init__, *args, **kwargs)
self.feed('migrations.%s(' % name)
self.indent()
for arg_name in argspec.args[1:]:
arg_value = normalized_kwargs[arg_name]
if (arg_name in self.operation.serialization_expand_args and
isinstance(arg_value, (list, tuple, dict))):
if isinstance(arg_value, dict):
self.feed('%s={' % arg_name)
self.indent()
for key, value in arg_value.items():
arg_string, arg_imports = MigrationWriter.serialize(value)
self.feed('%s: %s,' % (repr(key), arg_string))
imports.update(arg_imports)
self.unindent()
self.feed('},')
else:
self.feed('%s=[' % arg_name)
self.indent()
for item in arg_value:
arg_string, arg_imports = MigrationWriter.serialize(item)
self.feed('%s,' % arg_string)
imports.update(arg_imports)
self.unindent()
self.feed('],')
else:
arg_string, arg_imports = MigrationWriter.serialize(arg_value)
self.feed('%s=%s,' % (arg_name, arg_string))
imports.update(arg_imports)
self.unindent()
self.feed('),')
return self.render(), imports
def indent(self):
self.indentation += 1
def unindent(self):
self.indentation -= 1
def feed(self, line):
self.buff.append(' ' * (self.indentation * 4) + line)
def render(self):
return '\n'.join(self.buff)
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"replaces_str": "",
}
imports = set()
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
# Format dependencies and write out swappable dependencies right
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1])
imports.add("from django.conf import settings")
else:
dependencies.append(" %s," % repr(dependency))
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely
imports.discard("from django.db import models")
items["imports"] = "\n".join(imports) + "\n" if imports else ""
# If there's a replaces, make a string for it
if self.migration.replaces:
items['replaces_str'] = "\n replaces = %s\n" % repr(self.migration.replaces)
return (MIGRATION_TEMPLATE % items).encode("utf8")
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
basedir = os.path.dirname(migrations_module.__file__)
except ImportError:
app_config = apps.get_app_config(self.migration.app_label)
migrations_package_basename = migrations_package_name.split(".")[-1]
# Alright, see if it's a direct submodule of the app
if '%s.%s' % (app_config.name, migrations_package_basename) == migrations_package_name:
basedir = os.path.join(app_config.path, migrations_package_basename)
else:
raise ImportError("Cannot open migrations module %s for app %s" % (migrations_package_name, self.migration.app_label))
return os.path.join(basedir, self.filename)
@classmethod
def serialize_deconstructed(cls, path, args, kwargs):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = set(["from django.db import models"])
name = "models.%s" % name
else:
imports = set(["import %s" % module])
name = path
strings = []
for arg in args:
arg_string, arg_imports = cls.serialize(arg)
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in kwargs.items():
arg_string, arg_imports = cls.serialize(arg)
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@classmethod
def serialize(cls, value):
"""
Serializes the value to a string that's parsable by Python, along
with any needed imports to make that string work.
More advanced than repr() as it can encode things
like datetime.datetime.now.
"""
# Sequences
if isinstance(value, (list, set, tuple)):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
if isinstance(value, set):
format = "set([%s])"
elif isinstance(value, tuple):
format = "(%s)" if len(value) > 1 else "(%s,)"
else:
format = "[%s]"
return format % (", ".join(strings)), imports
# Dictionaries
elif isinstance(value, dict):
imports = set()
strings = []
for k, v in value.items():
k_string, k_imports = cls.serialize(k)
v_string, v_imports = cls.serialize(v)
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
# Datetimes
elif isinstance(value, datetime.datetime):
if value.tzinfo is not None:
raise ValueError("Cannot serialize datetime values with timezones. Either use a callable value for default or remove the timezone.")
return repr(value), set(["import datetime"])
# Dates
elif isinstance(value, datetime.date):
return repr(value), set(["import datetime"])
# Settings references
elif isinstance(value, SettingsReference):
return "settings.%s" % value.setting_name, set(["from django.conf import settings"])
# Simple types
elif isinstance(value, six.integer_types + (float, six.binary_type, six.text_type, bool, type(None))):
return repr(value), set()
# Promise
elif isinstance(value, Promise):
return repr(force_text(value)), set()
# Decimal
elif isinstance(value, decimal.Decimal):
return repr(value), set(["from decimal import Decimal"])
# Django fields
elif isinstance(value, models.Field):
attr_name, path, args, kwargs = value.deconstruct()
return cls.serialize_deconstructed(path, args, kwargs)
# Anything that knows how to deconstruct itself.
elif hasattr(value, 'deconstruct'):
return cls.serialize_deconstructed(*value.deconstruct())
# Functions
elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
# @classmethod?
if getattr(value, "__self__", None) and isinstance(value.__self__, type):
klass = value.__self__
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, value.__name__), set(["import %s" % module])
elif value.__name__ == '<lambda>':
raise ValueError("Cannot serialize function: lambda")
elif value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % value)
else:
module = value.__module__
return "%s.%s" % (module, value.__name__), set(["import %s" % module])
# Classes
elif isinstance(value, type):
special_cases = [
(models.Model, "models.Model", []),
]
for case, string, imports in special_cases:
if case is value:
return string, set(imports)
if hasattr(value, "__module__"):
module = value.__module__
return "%s.%s" % (module, value.__name__), set(["import %s" % module])
# Other iterables
elif isinstance(value, collections.Iterable):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
format = "(%s)" if len(strings) > 1 else "(%s,)"
return format % (", ".join(strings)), imports
# Uh oh.
else:
raise ValueError("Cannot serialize: %r\nThere are some values Django cannot serialize into migration files.\nFor more, see https://docs.djangoproject.com/en/dev/topics/migrations/#migration-serializing" % value)
MIGRATION_TEMPLATE = """\
# encoding: utf8
from django.db import models, migrations
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| bsd-3-clause |
Opticalp/instrumentall | testsuite/python/dataBufferTest.py | 1 | 4601 | # -*- coding: utf-8 -*-
## @file testsuite/python/dataBufferTest.py
## @date jul. 2016
## @author PhRG - opticalp.fr
##
## Test the features of the DataProxy
#
# Copyright (c) 2016 Ph. Renaud-Goud / Opticalp
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def myMain(baseDir):
"""Main function. Run the tests. """
print("Test the DataProxy class. ")
from instru import *
fac = Factory("DataGenFactory")
print("Retrieved factory: " + fac.name)
print("Create module from intDataGen factory")
mod1 = fac.select("int32").create("intGenerator")
print("module " + mod1.name + " created (" + mod1.internalName + ") ")
print("Set output value to 314")
mod1.setParameterValue("value", 314)
print("Run module")
task = runModule(mod1)
task.wait()
print("Return value is: " + str(mod1.outPort("data").getDataValue()))
if mod1.outPort("data").getDataValue() != 314 :
raise RuntimeError("Wrong return value: 314 expected. ")
fac = Factory("DemoRootFactory")
print("Retrieved factory: " + fac.name)
print("Create module from leafForwarder factory")
mod2 = fac.select("branch").select("leafForwarder").create("mod2")
print("module " + mod2.name + " created. ")
print("Bind the output of mod1 (data gen) to the forwarder")
bind(mod1.outPorts()[0], mod2.inPorts()[0])
print('2 loggers creation using the constructor: DataLogger("DataPocoLogger")')
logger = DataLogger("DataPocoLogger")
logger1 = DataLogger("DataPocoLogger")
print("Register the loggers to mod1 and mod2 output")
mod1.outPorts()[0].register(logger)
mod2.outPorts()[0].register(logger1)
print("mod1 port#0 targets (outports): ")
targets = mod1.outPorts()[0].getTargetPorts()
for target in targets:
print ( " " + target.name + ", from module: " +
target.parent().name )
print("mod1 port#0 data loggers: ")
loggers = mod1.outPorts()[0].loggers()
for logger in loggers:
print (" Logger: " + logger.name +
" (" + logger.description + ")" +
" on port: " + logger.portSource().name +
" of module: " + logger.portSource().parent().name )
print("Run module mod1")
runModule(mod1)
waitAll()
# query the possible DataProxy class names for DataProxy creation
proxyClasses = dataProxyClasses() # DataManager::dataProxyClasses()
print("Available data proxy classes: ")
for proxyClass in proxyClasses:
print(" - " + proxyClass + ": " + proxyClasses[proxyClass])
print('Proxy creation using the constructor: DataProxy("DataBuffer")')
proxy = DataProxy("DataBuffer")
print(" - Name: " + proxy.name)
print(" - Description: " + proxy.description)
print("Bind the output of mod1 (data gen) to the forwarder via the proxy")
bind(mod1.outPorts()[0], mod2.inPorts()[0], proxy)
print("Run module mod1")
runModule(mod1)
print("End of script dataBufferTest.py")
# main body
import sys
import os
from os.path import dirname
if len(sys.argv) >= 1:
# probably called from InstrumentAll
checker = os.path.basename(sys.argv[0])
if checker == "instrumentall" or checker == "instrumentall.exe":
print("current script: ",os.path.realpath(__file__))
baseDir = dirname(dirname(__file__))
myMain(baseDir)
exit(0)
print("Presumably not called from InstrumentAll >> Exiting...")
exit("This script has to be launched from inside InstrumentAll")
| mit |
rabipanda/tensorflow | tensorflow/python/ops/distributions/special_math.py | 14 | 15497 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = [
"erfinv",
"ndtr",
"ndtri",
"log_ndtr",
"log_cdf_laplace",
]
# log_ndtr uses different functions over the ranges
# (-infty, lower](lower, upper](upper, infty)
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = -20
LOGNDTR_FLOAT32_LOWER = -10
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a slightly
# conservative value, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT64_UPPER = 8
LOGNDTR_FLOAT32_UPPER = 5
def ndtr(x, name="ndtr"):
"""Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
1 / x
ndtr(x) = ---------- | exp(-0.5 t**2) dt
sqrt(2 pi) /-inf
= 0.5 (1 + erf(x / sqrt(2)))
= 0.5 erfc(x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return _ndtr(x)
def _ndtr(x):
"""Implements ndtr core logic."""
half_sqrt_2 = constant_op.constant(
0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
w = x * half_sqrt_2
z = math_ops.abs(w)
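  # Branching note (added for clarity, not in the original): for small |x| the
  # direct 1 + erf(w) form is used; for larger |x| the complementary function
  # erfc is used instead, which keeps precision where erf(w) is close to +/-1.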
y = array_ops.where(math_ops.less(z, half_sqrt_2),
1. + math_ops.erf(w),
array_ops.where(math_ops.greater(w, 0.),
2. - math_ops.erfc(z),
math_ops.erfc(z)))
return 0.5 * y
def ndtri(p, name="ndtri"):
"""The inverse of the CDF of the Normal distribution function.
Returns x such that the area under the pdf from minus infinity to x is equal
to p.
A piece-wise rational approximation is done for the function.
This is a port of the implementation in netlib.
Args:
p: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtri").
Returns:
x: `Tensor` with `dtype=p.dtype`.
Raises:
TypeError: if `p` is not floating-type.
"""
with ops.name_scope(name, values=[p]):
p = ops.convert_to_tensor(p, name="p")
if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"p.dtype=%s is not handled, see docstring for supported types."
% p.dtype)
return _ndtri(p)
def _ndtri(p):
"""Implements ndtri core logic."""
# Constants used in piece-wise rational approximations. Taken from the cephes
# library:
# https://github.com/scipy/scipy/blob/master/scipy/special/cephes/ndtri.c
p0 = list(reversed([-5.99633501014107895267E1,
9.80010754185999661536E1,
-5.66762857469070293439E1,
1.39312609387279679503E1,
-1.23916583867381258016E0]))
q0 = list(reversed([1.0,
1.95448858338141759834E0,
4.67627912898881538453E0,
8.63602421390890590575E1,
-2.25462687854119370527E2,
2.00260212380060660359E2,
-8.20372256168333339912E1,
1.59056225126211695515E1,
-1.18331621121330003142E0]))
p1 = list(reversed([4.05544892305962419923E0,
3.15251094599893866154E1,
5.71628192246421288162E1,
4.40805073893200834700E1,
1.46849561928858024014E1,
2.18663306850790267539E0,
-1.40256079171354495875E-1,
-3.50424626827848203418E-2,
-8.57456785154685413611E-4]))
q1 = list(reversed([1.0,
1.57799883256466749731E1,
4.53907635128879210584E1,
4.13172038254672030440E1,
1.50425385692907503408E1,
2.50464946208309415979E0,
-1.42182922854787788574E-1,
-3.80806407691578277194E-2,
-9.33259480895457427372E-4]))
p2 = list(reversed([3.23774891776946035970E0,
6.91522889068984211695E0,
3.93881025292474443415E0,
1.33303460815807542389E0,
2.01485389549179081538E-1,
1.23716634817820021358E-2,
3.01581553508235416007E-4,
2.65806974686737550832E-6,
6.23974539184983293730E-9]))
q2 = list(reversed([1.0,
6.02427039364742014255E0,
3.67983563856160859403E0,
1.37702099489081330271E0,
2.16236993594496635890E-1,
1.34204006088543189037E-2,
3.28014464682127739104E-4,
2.89247864745380683936E-6,
6.79019408009981274425E-9]))
def _create_polynomial(var, coeffs):
"""Compute n_th order polynomial via Horner's method."""
if not coeffs:
return 0.
return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var
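  # Illustrative example (added note, names c0/c1/c2 are hypothetical):
  # _create_polynomial(z, [c0, c1, c2]) evaluates c0 + c1*z + c2*z**2 via
  # Horner's nested form c0 + z*(c1 + z*c2).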
maybe_complement_p = array_ops.where(p > 1. - np.exp(-2.), 1. - p, p)
# Write in an arbitrary value in place of 0 for p since 0 will cause NaNs
# later on. The result from the computation when p == 0 is not used so any
# number that doesn't result in NaNs is fine.
one_half = constant_op.constant(0.5, dtype=p.dtype)
sanitized_mcp = array_ops.where(
maybe_complement_p <= 0.,
array_ops.fill(array_ops.shape(p), one_half),
maybe_complement_p)
# Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).
w = sanitized_mcp - 0.5
ww = w ** 2
x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)
/ _create_polynomial(ww, q0))
x_for_big_p *= -np.sqrt(2. * np.pi)
# Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),
# where z = sqrt(-2. * log(p)), and P/Q are chosen between two different
  # arrays based on whether p < exp(-32).
z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp))
first_term = z - math_ops.log(z) / z
second_term_small_p = (_create_polynomial(1. / z, p2)
/ _create_polynomial(1. / z, q2)) / z
second_term_otherwise = (_create_polynomial(1. / z, p1)
/ _create_polynomial(1. / z, q1)) / z
x_for_small_p = first_term - second_term_small_p
x_otherwise = first_term - second_term_otherwise
x = array_ops.where(sanitized_mcp > np.exp(-2.),
x_for_big_p,
array_ops.where(z >= 8.0, x_for_small_p, x_otherwise))
x = array_ops.where(p > 1. - np.exp(-2.), x, -x)
infinity_scalar = constant_op.constant(np.inf, dtype=p.dtype)
infinity = array_ops.fill(array_ops.shape(p), infinity_scalar)
x_nan_replaced = array_ops.where(
p <= 0.0, -infinity, array_ops.where(p >= 1.0, infinity, x))
return x_nan_replaced
def log_ndtr(x, series_order=3, name="log_ndtr"):
"""Log Normal distribution function.
For details of the Normal distribution function see `ndtr`.
This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
using an asymptotic series. Specifically:
- For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
`log(1-x) ~= -x, x << 1`.
- For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
and take a log.
- For `x <= lower_segment`, we use the series approximation of erf to compute
the log CDF directly.
The `lower_segment` is set based on the precision of the input:
```
lower_segment = { -20, x.dtype=float64
{ -10, x.dtype=float32
upper_segment = { 8, x.dtype=float64
{ 5, x.dtype=float32
```
When `x < lower_segment`, the `ndtr` asymptotic series approximation is:
```
ndtr(x) = scale * (1 + sum) + R_N
scale = exp(-0.5 x**2) / (-x sqrt(2 pi))
sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}
R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})
```
where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
[double-factorial](https://en.wikipedia.org/wiki/Double_factorial).
Args:
x: `Tensor` of type `float32`, `float64`.
series_order: Positive Python `integer`. Maximum depth to
evaluate the asymptotic expansion. This is the `N` above.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
log_ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
TypeError: if `series_order` is a not Python `integer.`
ValueError: if `series_order` is not in `[0, 30]`.
"""
if not isinstance(series_order, int):
raise TypeError("series_order must be a Python integer.")
if series_order < 0:
raise ValueError("series_order must be non-negative.")
if series_order > 30:
raise ValueError("series_order must be <= 30.")
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype == np.float64:
lower_segment = LOGNDTR_FLOAT64_LOWER
upper_segment = LOGNDTR_FLOAT64_UPPER
elif x.dtype.as_numpy_dtype == np.float32:
lower_segment = LOGNDTR_FLOAT32_LOWER
upper_segment = LOGNDTR_FLOAT32_UPPER
else:
raise TypeError("x.dtype=%s is not supported." % x.dtype)
# The basic idea here was ported from py/scipy/special/cephes/ndtr.c.
# We copy the main idea, with a few changes
# * For x >> 1, and X ~ Normal(0, 1),
# Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
# which extends the range of validity of this function.
# * We use one fixed series_order for all of 'x', rather than adaptive.
# * Our docstring properly reflects that this is an asymptotic series, not a
# Taylor series. We also provided a correct bound on the remainder.
# * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
# x=0. This happens even though the branch is unchosen because when x=0
# the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
# regardless of whether dy is finite. Note that the minimum is a NOP if
# the branch is chosen.
return array_ops.where(
math_ops.greater(x, upper_segment),
-_ndtr(-x), # log(1-x) ~= -x, x << 1
array_ops.where(math_ops.greater(x, lower_segment),
math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
_log_ndtr_lower(math_ops.minimum(x, lower_segment),
series_order)))
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = math_ops.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * math.log(2. * math.pi)
return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
if series_order <= 0:
return 1.
x_2 = math_ops.square(x)
even_sum = 0.
odd_sum = 0.
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
if n % 2:
odd_sum += _double_factorial(2 * n - 1) / x_2n
else:
even_sum += _double_factorial(2 * n - 1) / x_2n
x_2n *= x_2
return 1. + even_sum - odd_sum
def erfinv(x, name="erfinv"):
"""The inverse function for erf, the error function.
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="erfinv").
Returns:
x: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return ndtri((x + 1.0) / 2.0) / np.sqrt(2)
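# The line above relies on the identity erfinv(x) = ndtri((x + 1) / 2) / sqrt(2),
# which follows from ndtr(z) = 0.5 * (1 + erf(z / sqrt(2))) (explanatory note
# added here, not part of the original source).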
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
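# Illustrative check (added note): _double_factorial(5) -> 15 (= 5 * 3 * 1),
# and _double_factorial(0) -> 1.0, the empty product returned by np.prod.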
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
    # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
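    # Sanity note (added for clarity): at x = 0 both branches agree and give
    # log(0.5), matching L(0) = 0.5 for the standard Laplace distribution.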
return array_ops.where(x < 0., lower_solution, upper_solution)
| apache-2.0 |
CatoTH/OpenSlides | server/tests/example_data_generator/management/commands/create-example-data.py | 7 | 12105 | from textwrap import dedent
from typing import Optional
from django.contrib.auth.hashers import make_password
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db.utils import IntegrityError
from django.utils.crypto import get_random_string
from openslides.agenda.models import Item, ListOfSpeakers
from openslides.assignments.models import Assignment
from openslides.motions.models import Motion
from openslides.topics.models import Topic
from openslides.users.models import Group, User
from openslides.utils.startup import run_startup_hooks
MOTION_NUMBER_OF_PARAGRAPHS = 4
LOREM_IPSUM = [
"""\
<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod
tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea
commodi consequat. Quis aute iure reprehenderit in voluptate velit esse
cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat
cupiditat non proident, sunt in culpa qui officia deserunt mollit anim
id est laborum.</p>""".replace(
"\n", " "
),
"""\
<p>Sed ut perspiciatis, unde omnis iste natus error sit voluptatem
accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae
ab illo inventore veritatis et quasi architecto beatae vitae dicta
sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit,
aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos,
qui ratione voluptatem sequi nesciunt, neque porro quisquam est, qui
dolorem ipsum, quia dolor sit amet consectetur adipisci[ng] velit, sed
quia non numquam [do] eius modi tempora inci[di]dunt, ut labore et
dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam,
quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut
aliquid ex ea commodi consequatur? Quis autem vel eum iure
reprehenderit, qui in ea voluptate velit esse, quam nihil molestiae
consequatur, vel illum, qui dolorem eum fugiat, quo voluptas nulla
pariatur?</p>""".replace(
"\n", " "
),
"""\
<p>At vero eos et accusamus et iusto odio dignissimos ducimus, qui
blanditiis praesentium voluptatum deleniti atque corrupti, quos dolores
et quas molestias excepturi sint, obcaecati cupiditate non provident,
similique sunt in culpa, qui officia deserunt mollitia animi, id est
laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita
distinctio. Nam libero tempore, cum soluta nobis est eligendi optio,
cumque nihil impedit, quo minus id, quod maxime placeat, facere
possimus, omnis voluptas assumenda est, omnis dolor repellendus.
Temporibus autem quibusdam et aut officiis debitis aut rerum
necessitatibus saepe eveniet, ut et voluptates repudiandae sint et
molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente
delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut
perferendis doloribus asperiores repellat…</p>""".replace(
"\n", " "
),
]
DEFAULT_NUMBER = 100
STAFF_USER_USERNAME = "admin{}"
DEFAULT_USER_USERNAME = "user{}"
PASSWORD = "password"
class Command(BaseCommand):
"""
Command to create example data for OpenSlides.
"""
help = "Create example data for OpenSlides."
chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
def add_arguments(self, parser):
"""
Adds arguments to the command parser. The default values for the apps
are set by DEFAULT_NUMBER.
"""
parser.add_argument(
"--only",
action="store_true",
help="Only the given objects are created i. e. all defaults are set to 0.",
)
parser.add_argument(
"-t",
"--topics",
type=int,
help=f"Number of topics to be created (default {DEFAULT_NUMBER}).",
)
parser.add_argument(
"-m",
"--motions",
type=int,
help=f"Number of motions to be created (default {DEFAULT_NUMBER}).",
)
parser.add_argument(
"-a",
"--assignments",
type=int,
help=f"Number of assignments to be created (default {DEFAULT_NUMBER}).",
)
parser.add_argument(
"-u",
"--users",
nargs=2,
type=int,
help=dedent(
f"""
Number of users to be created. The first number of users is added \
to the group "Staff" (default {DEFAULT_NUMBER}). The second number \
of users is not added to any group (default {DEFAULT_NUMBER}).
"""
),
)
def handle(self, *args, **options):
run_startup_hooks()
self.create_topics(options)
self.create_motions(options)
self.create_assignments(options)
self.create_users(options)
@transaction.atomic
def create_topics(self, options):
number_of_topics = options["topics"]
if number_of_topics is None and not options["only"]:
number_of_topics = DEFAULT_NUMBER
if number_of_topics is not None and number_of_topics > 0:
self.stdout.write(f"Start creating {number_of_topics} topcis ...")
current_topics = list(Topic.objects.values_list("id", flat=True))
new_topics = []
for i in range(number_of_topics):
new_topics.append(Topic(title=get_random_string(20, self.chars)))
Topic.objects.bulk_create(new_topics)
items = []
lists_of_speakers = []
for topic in Topic.objects.exclude(pk__in=current_topics):
items.append(Item(content_object=topic, type=Item.AGENDA_ITEM))
lists_of_speakers.append(ListOfSpeakers(content_object=topic))
Item.objects.bulk_create(items)
ListOfSpeakers.objects.bulk_create(lists_of_speakers)
self.stdout.write(
self.style.SUCCESS(f"{number_of_topics} topcis successfully created.")
)
elif number_of_topics is not None and number_of_topics < 0:
raise CommandError("Number for topics must not be negative.")
@transaction.atomic
def create_motions(self, options):
number_of_motions = options["motions"]
if number_of_motions is None and not options["only"]:
number_of_motions = DEFAULT_NUMBER
if number_of_motions is not None and number_of_motions > 0:
self.stdout.write(f"Start creating {number_of_motions} motions ...")
text = ""
for i in range(MOTION_NUMBER_OF_PARAGRAPHS):
text += dedent(LOREM_IPSUM[i % 3])
for i in range(number_of_motions):
motion = Motion(title=get_random_string(20, self.chars), text=text)
motion.save(skip_autoupdate=True)
self.stdout.write(
self.style.SUCCESS(f"{number_of_motions} motions successfully created.")
)
elif number_of_motions is not None and number_of_motions < 0:
raise CommandError("Number for motions must not be negative.")
@transaction.atomic
def create_assignments(self, options):
number_of_assignments = options["assignments"]
if number_of_assignments is None and not options["only"]:
number_of_assignments = DEFAULT_NUMBER
if number_of_assignments is not None and number_of_assignments > 0:
self.stdout.write(f"Start creating {number_of_assignments} assignments ...")
current_assignments = list(Assignment.objects.values_list("id", flat=True))
new_assignments = []
for i in range(number_of_assignments):
new_assignments.append(
Assignment(title=get_random_string(20, self.chars), open_posts=1)
)
Assignment.objects.bulk_create(new_assignments)
items = []
lists_of_speakers = []
for assignment in Assignment.objects.exclude(pk__in=current_assignments):
items.append(Item(content_object=assignment))
lists_of_speakers.append(ListOfSpeakers(content_object=assignment))
Item.objects.bulk_create(items)
ListOfSpeakers.objects.bulk_create(lists_of_speakers)
self.stdout.write(
self.style.SUCCESS(
f"{number_of_assignments} assignments successfully created."
)
)
elif number_of_assignments is not None and number_of_assignments < 0:
raise CommandError("Number for assignments must not be negative.")
def create_users(self, options):
self.create_staff_users(options)
self.create_default_users(options)
@transaction.atomic
def create_staff_users(self, options):
if options["users"] is None and not options["only"]:
staff_users: Optional[int] = DEFAULT_NUMBER
elif options["users"] is None:
staff_users = None
else:
staff_users = options["users"][0]
if staff_users is not None and staff_users > 0:
self.stdout.write(f"Start creating {staff_users} staff users ...")
group_staff = Group.objects.get(name="Staff")
hashed_password = make_password(PASSWORD)
current_users = list(User.objects.values_list("id", flat=True))
new_users = []
for i in range(staff_users):
new_users.append(
User(
username=STAFF_USER_USERNAME.format(i),
default_password=PASSWORD,
password=hashed_password,
)
)
try:
User.objects.bulk_create(new_users)
except IntegrityError:
self.stdout.write(
"FAILED: The requested staff users to create are already existing..."
)
else:
for user in User.objects.exclude(pk__in=current_users):
user.groups.add(group_staff)
self.stdout.write(
self.style.SUCCESS(
f"{staff_users} staff users successfully created."
)
)
elif staff_users is not None and staff_users < 0:
raise CommandError("Number for staff users must not be negative.")
@transaction.atomic
def create_default_users(self, options):
if options["users"] is None and not options["only"]:
default_users: Optional[int] = DEFAULT_NUMBER
elif options["users"] is None:
default_users = None
else:
default_users = options["users"][1]
if default_users is not None and default_users > 0:
self.stdout.write(f"Start creating {default_users} default users ...")
hashed_password = make_password(PASSWORD)
new_users = []
for i in range(default_users):
new_users.append(
User(
username=DEFAULT_USER_USERNAME.format(i),
default_password=PASSWORD,
password=hashed_password,
)
)
try:
User.objects.bulk_create(new_users)
except IntegrityError:
self.stdout.write(
"FAILED: The requested staff users to create are already existing..."
)
else:
self.stdout.write(
self.style.SUCCESS(
f"{default_users} default users successfully created."
)
)
elif default_users is not None and default_users < 0:
raise CommandError("Number for default users must not be negative.")
| mit |
pcm17/tensorflow | tensorflow/python/kernel_tests/dynamic_partition_op_test.py | 97 | 6138 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DynamicPartition op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DynamicPartitionTest(test.TestCase):
def testSimpleOneDimensional(self):
with self.test_session() as sess:
data = constant_op.constant([0, 13, 2, 39, 4, 17])
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertAllEqual([0, 13], partition_vals[0])
self.assertAllEqual([17], partition_vals[1])
self.assertAllEqual([2, 4], partition_vals[2])
self.assertAllEqual([39], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` vectors of unknown length.
self.assertEqual([None], partitions[0].get_shape().as_list())
self.assertEqual([None], partitions[1].get_shape().as_list())
self.assertEqual([None], partitions[2].get_shape().as_list())
self.assertEqual([None], partitions[3].get_shape().as_list())
def testSimpleTwoDimensional(self):
with self.test_session() as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14], [15, 16, 17]])
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
self.assertAllEqual([[15, 16, 17]], partition_vals[1])
self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
self.assertAllEqual([[9, 10, 11]], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` matrices with an unknown number of rows, and 3 columns.
self.assertEqual([None, 3], partitions[0].get_shape().as_list())
self.assertEqual([None, 3], partitions[1].get_shape().as_list())
self.assertEqual([None, 3], partitions[2].get_shape().as_list())
self.assertEqual([None, 3], partitions[3].get_shape().as_list())
def testHigherRank(self):
np.random.seed(7)
with self.test_session() as sess:
for n in 2, 3:
for shape in (4,), (4, 5), (4, 5, 2):
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
for extra_shape in (), (6,), (6, 7):
data = np.random.randn(*(shape + extra_shape))
partitions_t = constant_op.constant(partitions, dtype=dtypes.int32)
data_t = constant_op.constant(data)
outputs = data_flow_ops.dynamic_partition(
data_t, partitions_t, num_partitions=n)
self.assertEqual(n, len(outputs))
outputs_val = sess.run(outputs)
for i, output in enumerate(outputs_val):
self.assertAllEqual(output, data[partitions == i])
# Test gradients
outputs_grad = [7 * output for output in outputs_val]
grads = gradients_impl.gradients(outputs, [data_t, partitions_t],
outputs_grad)
self.assertEqual(grads[1], None) # Partitions has no gradients
self.assertAllEqual(7 * data, sess.run(grads[0]))
def testErrorIndexOutOfRange(self):
with self.test_session() as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
indices = constant_op.constant([0, 2, 99, 2, 2])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
sess.run(partitions)
def testScalarIndexOutOfRange(self):
with self.test_session() as sess:
bad = 17
data = np.zeros(5)
partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
sess.run(partitions)
def testHigherRankIndexOutOfRange(self):
with self.test_session() as sess:
shape = (2, 3)
indices = array_ops.placeholder(shape=shape, dtype=np.int32)
data = np.zeros(shape + (5,))
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=7)
for i in xrange(2):
for j in xrange(3):
bad = np.zeros(shape, dtype=np.int32)
bad[i, j] = 17
with self.assertRaisesOpError(
r"partitions\[%d,%d\] = 17 is not in \[0, 7\)" % (i, j)):
sess.run(partitions, feed_dict={indices: bad})
def testErrorWrongDimsIndices(self):
data = constant_op.constant([[0], [1], [2]])
indices = constant_op.constant([[0], [0]])
with self.assertRaises(ValueError):
data_flow_ops.dynamic_partition(data, indices, num_partitions=4)
if __name__ == "__main__":
test.main()
| apache-2.0 |
apehua/pilas | pilas/colisiones.py | 5 | 4860 | # -*- encoding: utf-8 -*-
# pilas engine - a video game framework.
#
# copyright 2010 - hugo ruscitti
# license: lgplv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# website - http://www.pilas-engine.com.ar
from . import utils
import pilas
class Colisiones:
"Administra todas las colisiones entre actores."
def __init__(self):
self.colisiones = []
def verificar_colisiones(self):
for x in self.colisiones:
self._verificar_colisiones_en_tupla(x)
def _verificar_colisiones_en_tupla(self, tupla):
"Toma dos grupos de actores y analiza colisiones entre ellos."
(grupo_a, grupo_b, funcion_a_llamar) = tupla
for a in grupo_a:
for b in grupo_b:
try:
if id(a) != id(b) and utils.colisionan(a, b):
funcion_a_llamar(a, b)
                        # check whether either of the two objects died in the collision.
if a not in pilas.escena_actual().actores:
if a in grupo_a:
list.remove(grupo_a, a)
if b not in pilas.escena_actual().actores:
if b in grupo_b:
list.remove(grupo_b, b)
except Exception as e:
list.remove(grupo_a, a)
raise e
def verificar_colisiones_fisicas(self, id_actor_a, id_actor_b):
for x in self.colisiones:
self._verificar_colisiones_fisicas_en_tupla(x, id_actor_a, id_actor_b)
def _verificar_colisiones_fisicas_en_tupla(self, tupla, id_actor_a, id_actor_b):
"Toma dos grupos de actores y analiza colisiones entre ellos."
(grupo_a, grupo_b, funcion_a_llamar) = tupla
for a in grupo_a:
for b in grupo_b:
try:
if self._es_objeto_fisico_con_actor_asociado(a):
a_id = a.figura.id
else:
a_id = a.id
if self._es_objeto_fisico_con_actor_asociado(b):
b_id = b.figura.id
else:
b_id = b.id
if a_id == id_actor_a and b_id == id_actor_b:
funcion_a_llamar(a, b)
                        # check whether either of the two objects died in the collision.
if (self._es_objeto_fisico_con_actor_asociado(a)):
if a not in pilas.escena_actual().actores:
if a in grupo_a:
list.remove(grupo_a, a)
if (self._es_objeto_fisico_con_actor_asociado(b)):
if b not in pilas.escena_actual().actores:
if b in grupo_b:
list.remove(grupo_b, b)
except Exception as e:
list.remove(grupo_a, a)
raise e
def _es_objeto_fisico_con_actor_asociado(self, objeto):
        # Check whether the object has the "figura" property set.
        # This property is set by the Imitar (imitate) ability.
return hasattr(objeto, 'figura')
def agregar(self, grupo_a, grupo_b, funcion_a_llamar):
"Agrega dos listas de actores para analizar colisiones."
if not isinstance(grupo_a, list):
grupo_a = [grupo_a]
if not isinstance(grupo_b, list):
grupo_b = [grupo_b]
self.colisiones.append((grupo_a, grupo_b, funcion_a_llamar))
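        # Illustrative usage sketch (names are hypothetical): given an instance
        # `colisiones` of this class, colisiones.agregar(ship, enemies, on_hit)
        # arranges for on_hit(ship, enemy) to be called whenever they overlap,
        # via verificar_colisiones() above.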
def eliminar_colisiones_con_actor(self, actor):
for x in self.colisiones:
grupo_a = x[0]
grupo_b = x[1]
#funcion_a_llamar = x[2]
if actor in grupo_a:
                # If the actor was the only one in this group, remove the collision entry.
if len(grupo_a) == 1:
self.colisiones.remove(x)
else:
                    # If there is more than one actor, remove this actor from the list.
                    grupo_a.remove(actor)
break
if actor in grupo_b:
                # If the actor was the only one in this group, remove the collision entry.
if len(grupo_b) == 1:
self.colisiones.remove(x)
else:
                    # If there is more than one actor, remove this actor from the list.
                    grupo_b.remove(actor)
break
def obtener_colisiones(self, actor, grupo_de_actores):
"Retorna una lista de los actores que colisionan con uno en particular."
lista_de_colisiones = []
for a in grupo_de_actores:
if id(actor) != id(a) and utils.colisionan(actor, a):
lista_de_colisiones.append(a)
return lista_de_colisiones
| lgpl-3.0 |
jmesteve/openerp | openerp/tests/test_osv.py | 446 | 4722 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.osv.query import Query
class QueryTestCase(unittest.TestCase):
def test_basic_query(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_product", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product" LEFT JOIN "res_user" as "product_product__user_id" ON ("product_product"."user_id" = "product_product__user_id"."id"),"product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_query_chained_explicit_joins(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # CHAINED outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id")""".strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_mixed_query_chained_explicit_implicit_joins(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # CHAINED outer join
query.tables.append('"account.account"')
query.where_clause.append("product_category.expense_account_id = account_account.id") # additional implicit join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id"),"account.account" """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id AND product_category.expense_account_id = account_account.id""".strip())
def test_raise_missing_lhs(self):
query = Query()
query.tables.append('"product_product"')
self.assertRaises(AssertionError, query.add_join, ("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
adityacs/ansible | lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py | 48 | 4083 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Russell Teague <rteague2 () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: vmware_vsan_cluster
short_description: Configure VSAN clustering on an ESXi host
description:
- This module can be used to configure VSAN clustering on an ESXi host
version_added: 2.0
author: "Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_uuid:
description:
- Desired cluster UUID
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure VMware VSAN Cluster
hosts: deploy_node
gather_facts: False
tags:
- vsan
tasks:
- name: Configure VSAN on first host
vmware_vsan_cluster:
hostname: "{{ groups['esxi'][0] }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
register: vsan_cluster
- name: Configure VSAN on remaining hosts
vmware_vsan_cluster:
hostname: "{{ item }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
with_items: "{{ groups['esxi'][1:] }}"
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def create_vsan_cluster(host_system, new_cluster_uuid):
host_config_manager = host_system.configManager
vsan_system = host_config_manager.vsanSystem
vsan_config = vim.vsan.host.ConfigInfo()
vsan_config.enabled = True
if new_cluster_uuid is not None:
vsan_config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
vsan_config.clusterInfo.uuid = new_cluster_uuid
vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
vsan_config.storageInfo.autoClaimStorage = True
task = vsan_system.UpdateVsan_Task(vsan_config)
changed, result = wait_for_task(task)
host_status = vsan_system.QueryHostStatus()
cluster_uuid = host_status.uuid
return changed, result, cluster_uuid
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
new_cluster_uuid = module.params['cluster_uuid']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = host.keys()[0]
changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
UManPychron/pychron | pychron/image/video.py | 2 | 10139 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from __future__ import print_function
import os
import time
from threading import Thread, Lock, Event
from numpy import uint16
from skimage.io import imsave
from traits.api import Any, Bool, Float, List, Str, Int, Enum
from pychron.core.yaml import yload
from pychron.globals import globalv
from pychron.image.image import Image
from .cv_wrapper import get_capture_device
def convert_to_video(path, fps, name_filter='snapshot%03d.jpg',
ffmpeg=None,
output=None):
"""
path: path to directory containing list of images
    equivalent command line:
    $ ffmpeg -r 25 -i /path/to/snapshot%03d.jpg output.avi
"""
import subprocess
if output is None:
output = os.path.join(path, '{}.avi'.format(path))
if os.path.exists(output):
return
frame_rate = str(fps)
# codec = '{}'.format('x264') # H.264
path = str(os.path.join(path, name_filter))
if ffmpeg is None or not os.path.isfile(ffmpeg):
ffmpeg = '/usr/local/bin/ffmpeg'
# print 'calling {}, frame_rate={} '.format(ffmpeg, frame_rate)
call_args = [ffmpeg, '-r', frame_rate, '-i', path, output]
subprocess.call(call_args)
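# Usage sketch (added for illustration; the directory below is hypothetical):
# stitch the numbered snapshots in ``/tmp/run01-images`` into
# ``/tmp/run01-images.avi`` at 25 fps using the default snapshot name filter.
#
#     convert_to_video('/tmp/run01-images', 25)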
BIT_8 = 2 ** 8 - 1
BIT_16 = 2 ** 16 - 1
def pil_save(src, p):
head, ext = os.path.splitext(p)
if src.dtype == uint16:
        # assume it's a pylon mono12 frame
# for tiff need to rescale image to 16bit
# for jpg need to rescale to 8bit and change dtype
src = src / 4095
if ext == '.jpg':
src = (src * BIT_8).astype('uint8')
else:
src = (src * BIT_16).astype('uint16')
imsave(p, src)
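# Sketch of the rescaling behaviour above (added; the array values are made up
# for illustration): a saturated 12-bit frame is written as 255 for JPEG and
# as 65535 for other formats.
#
#     from numpy import full
#     frame = full((4, 4), 4095, dtype='uint16')
#     pil_save(frame, '/tmp/frame.jpg')   # stored as uint8, pixel value 255
#     pil_save(frame, '/tmp/frame.tif')   # stored as uint16, pixel value 65535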
class Video(Image):
"""
class for accessing a streaming camera.
"""
cap = Any
track_mouse = Bool
mouse_x = Float
mouse_y = Float
users = List
_recording = Bool(False)
_lock = None
_prev_frame = None
_stop_recording_event = None
_save_ok_event = None
_last_get = None
output_path = Str
output_pic_mode = Enum('jpg', 'tif')
ffmpeg_path = Str
fps = Int
identifier = 0
max_recording_duration = Float
@property
def pixel_depth(self):
pd = 255
if hasattr(self.cap, 'pixel_depth'):
pd = self.cap.pixel_depth
return pd
def is_recording(self):
return self._recording
def is_open(self):
return self.cap is not None
def load_configuration(self, p):
if os.path.isfile(p):
cfg = yload(p)
gen = cfg.get('General')
if gen:
self.swap_rb = gen.get('swap_rb', False)
self.hflip = gen.get('hflip', False)
self.vflip = gen.get('vflip', False)
self.rotate = gen.get('rotate', False)
vid = cfg.get('Video')
if vid:
self.output_pic_mode = vid.get('output_pic_mode', 'jpg')
self.ffmpeg_path = vid.get('ffmpeg_path', '')
self.fps = vid.get('fps')
self.max_recording_duration = vid.get('max_recording_duration', 30)
if hasattr(self.cap, 'load_configuration'):
self.cap.load_configuration(cfg)
def open(self, user=None, identifier=None, force=False):
"""
get a camera/capture device
"""
self._lock = Lock()
self.width = 640
self.height = 480
if self.cap is None or force:
if globalv.video_test:
self.cap = 1
else:
if identifier is None:
identifier = self.identifier
if isinstance(identifier, str):
if identifier.startswith('pvs'):
self.cap = self._get_remote_device(identifier)
elif identifier.startswith('basler_pylon'):
_, i = identifier.split(':')
self.cap = self._get_balser_pylon_device(i)
elif identifier.startswith('pylon'):
_, i = identifier.split(':')
self.cap = self._get_pylon_device(i)
# identifier is a url
else:
# ideally an identifier is passed in
try:
self.cap = get_capture_device()
self.cap.open(int(identifier) if identifier else 0)
except Exception as e:
print('video.open', e)
self.cap = None
if user not in self.users:
self.users.append(user)
def close(self, user=None, force=False):
"""
        remove user from user list.
if user list is empty release/close the capture device
"""
if force and self.cap:
if not isinstance(self.cap, int):
self.cap.release()
self.cap = None
return
if user in self.users:
i = self.users.index(user)
self.users.pop(i)
if not self.users:
if self.cap is not None:
self.cap.release()
self.cap = None
def get_image_data(self, cmap=None, **kw):
return self.get_frame(**kw)
# return asarray(frame)
# if frame is not None:
# return asarray(frame[:, :])
def start_recording(self, path, renderer=None):
self._stop_recording_event = Event()
self.output_path = path
if self.cap is None:
self.open()
if self.cap is not None:
self._recording = True
t = Thread(target=self._ffmpeg_record, args=(path, self._stop_recording_event, renderer))
t.start()
def stop_recording(self, wait=False):
"""
"""
if self._stop_recording_event is not None:
self._stop_recording_event.set()
self._recording = False
if wait:
self._save_ok_event = Event()
return self._ready_to_save()
def record_frame(self, path, crop=None, **kw):
"""
"""
src = self.get_frame(**kw)
if src is not None:
self.save(path, src=src)
return src.clone()
# private
def _ready_to_save(self, timeout=120):
if self._save_ok_event:
st = time.time()
while not self._save_ok_event.is_set():
time.sleep(0.5)
if timeout and time.time() - st > timeout:
return
return True
def _ffmpeg_record(self, path, stop, renderer=None):
"""
use ffmpeg to stitch a directory of jpegs into a video
max_duration: recording will stop after max_duration minutes
"""
root = os.path.dirname(path)
name = os.path.basename(path)
name, _ext = os.path.splitext(name)
image_dir = os.path.join(root, '{}-images'.format(name))
cnt = 0
while os.path.exists(image_dir):
image_dir = os.path.join(root, '{}-images-{:03d}'.format(name, cnt))
cnt += 1
os.mkdir(image_dir)
cnt = 0
if renderer is None:
def renderer(p):
frame = self.get_cached_frame()
if frame is not None:
pil_save(frame, p)
fps_1 = 1 / self.fps
ext = self.output_pic_mode
max_duration = self.max_recording_duration * 60
start = time.time()
while not stop.is_set():
st = time.time()
if max_duration and st - start > max_duration:
break
renderer(os.path.join(image_dir, 'image_{:05d}.{}'.format(cnt, ext)))
cnt += 1
time.sleep(max(0, fps_1 - (time.time() - st)))
self._convert_to_video(image_dir, name_filter='image_%05d.{}'.format(ext), output=path)
if self._save_ok_event:
self._save_ok_event.set()
def _get_balser_pylon_device(self, identifier):
from .basler_pylon_camera import BaslerPylonCamera
cam = BaslerPylonCamera(identifier)
if cam.open():
return cam
def _get_pylon_device(self, identifier):
from .pylon_camera import PylonCamera
cam = PylonCamera(identifier)
if cam.open():
return cam
def _get_remote_device(self, url):
from pychron.image.video_source import VideoSource
vs = VideoSource()
vs.set_url(url)
vs.on_trait_change(self._update_fps, 'fps')
return vs
def _update_fps(self, fps):
self.fps = fps
def _get_frame(self, lock=True, **kw):
cap = self.cap
if globalv.video_test:
p = globalv.video_test_path
self.load(p, swap_rb=True)
f = self.source_frame
return f
elif cap is not None:
s, img = self.cap.read()
if s:
return img
def _convert_to_video(self, path, name_filter='snapshot%03d.jpg', output=None):
ffmpeg = self.ffmpeg_path
convert_to_video(path, self.fps, name_filter, ffmpeg, output)
# =================== EOF =================================================
| apache-2.0 |
softak/webfaction_demo | vendor-local/lib/python/celery/tests/test_app/test_app_amqp.py | 32 | 3364 | from __future__ import absolute_import
from __future__ import with_statement
from mock import Mock
from celery.app.amqp import MSG_OPTIONS, extract_msg_options
from celery.tests.utils import AppCase
class TestMsgOptions(AppCase):
def test_MSG_OPTIONS(self):
self.assertTrue(MSG_OPTIONS)
def test_extract_msg_options(self):
testing = {"mandatory": True, "routing_key": "foo.xuzzy"}
result = extract_msg_options(testing)
self.assertEqual(result["mandatory"], True)
self.assertEqual(result["routing_key"], "foo.xuzzy")
class test_TaskPublisher(AppCase):
def test__exit__(self):
publisher = self.app.amqp.TaskPublisher(self.app.broker_connection())
publisher.release = Mock()
with publisher:
pass
publisher.release.assert_called_with()
def test_ensure_declare_queue(self, q="x1242112"):
publisher = self.app.amqp.TaskPublisher(Mock())
self.app.amqp.queues.add(q, q, q)
publisher._declare_queue(q, retry=True)
self.assertTrue(publisher.connection.ensure.call_count)
def test_ensure_declare_exchange(self, e="x9248311"):
publisher = self.app.amqp.TaskPublisher(Mock())
publisher._declare_exchange(e, "direct", retry=True)
self.assertTrue(publisher.connection.ensure.call_count)
def test_retry_policy(self):
pub = self.app.amqp.TaskPublisher(Mock())
pub.delay_task("tasks.add", (2, 2), {},
retry_policy={"frobulate": 32.4})
def test_publish_no_retry(self):
pub = self.app.amqp.TaskPublisher(Mock())
pub.delay_task("tasks.add", (2, 2), {}, retry=False, chord=123)
self.assertFalse(pub.connection.ensure.call_count)
class test_PublisherPool(AppCase):
def test_setup_nolimit(self):
L = self.app.conf.BROKER_POOL_LIMIT
self.app.conf.BROKER_POOL_LIMIT = None
try:
delattr(self.app, "_pool")
except AttributeError:
pass
self.app.amqp.__dict__.pop("publisher_pool", None)
try:
pool = self.app.amqp.publisher_pool
self.assertEqual(pool.limit, self.app.pool.limit)
self.assertFalse(pool._resource.queue)
r1 = pool.acquire()
r2 = pool.acquire()
r1.release()
r2.release()
r1 = pool.acquire()
r2 = pool.acquire()
finally:
self.app.conf.BROKER_POOL_LIMIT = L
def test_setup(self):
L = self.app.conf.BROKER_POOL_LIMIT
self.app.conf.BROKER_POOL_LIMIT = 2
try:
delattr(self.app, "_pool")
except AttributeError:
pass
self.app.amqp.__dict__.pop("publisher_pool", None)
try:
pool = self.app.amqp.publisher_pool
self.assertEqual(pool.limit, self.app.pool.limit)
self.assertTrue(pool._resource.queue)
p1 = r1 = pool.acquire()
p2 = r2 = pool.acquire()
delattr(r1.connection, "_producer_chan")
r1.release()
r2.release()
r1 = pool.acquire()
r2 = pool.acquire()
self.assertIs(p2, r1)
self.assertIs(p1, r2)
r1.release()
r2.release()
finally:
self.app.conf.BROKER_POOL_LIMIT = L
| bsd-3-clause |
anitahitouch/mediadrop | mediadrop/lib/compat/__init__.py | 10 | 10467 | # This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
__all__ = [
'ElementTree',
'all',
'any',
'chain',
'defaultdict',
'inet_aton',
'max',
'md5',
'namedtuple',
'SEEK_END',
'sha1',
'wraps',
]
try:
from functools import wraps
except ImportError:
from mediadrop.lib.compat.functional import wraps
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
any = any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
all = all
except NameError:
def all(iterable):
for element in iterable:
if not element:
return False
return True
try:
import os
# os.SEEK_* constants were added in Python 2.5
SEEK_END = os.SEEK_END
except AttributeError:
SEEK_END = 2
try:
max([1], key=lambda x:x)
max = max
except TypeError:
max24 = max
# Re-implement a python-only version of keyed max() for py2.4
def max(iterable, key=None, *args):
if key is None:
return max24(iterable, *args)
else:
            if args:
                # max() was called as max(a, b, ..., key=fn); fold the extra
                # positional values into the iterable before scanning it.
                iterable = (iterable,) + args
            first = True
            cur_val = None
            cur_obj = None
for x in iterable:
y = key(x)
if first or y > cur_val:
cur_obj = x
cur_val = y
first = False
return cur_obj
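    # Illustrative check (added, not part of the upstream recipe): with a key
    # function the backport mirrors the builtin, e.g.
    #     max(['pear', 'fig', 'apple'], key=len)  ->  'apple'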
try:
from collections import namedtuple
except ImportError:
# Backported for py2.4 and 2.5 by Raymond Hettinger
# http://code.activestate.com/recipes/500261/
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
        >>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not min(c.isalnum() or c == '_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not min(c.isalnum() or c == '_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new dict which maps field names to their values'
return dict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
if verbose:
print template
# Execute the template string in a temporary namespace
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
        # where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
try:
from collections import defaultdict
except:
# Backported for py2.4 by Jason Kirtland
# http://code.activestate.com/recipes/523034/
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
from itertools import chain
try:
chain.from_iterable
except AttributeError:
# New in version 2.6: Alternate constructor for chain().
# Gets chained inputs from a single iterable arg that is evaluated lazily.
# NOTE: itertools is written in C so we can't monkeypatch it.
_chain = chain
def chain(*iterables):
return _chain(*iterables)
def _chain_from_iterable(iterables):
for it in iterables:
for element in it:
yield element
chain.from_iterable = _chain_from_iterable
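    # Illustrative behaviour (added): with either the builtin or the backport,
    #     list(chain.from_iterable([[1, 2], [3]]))  ->  [1, 2, 3]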
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
from socket import inet_aton as _inet_aton
def inet_aton(ip_string):
# On some 64 bit platforms, with some versions of Python, socket.inet_aton
# returns the a full 64 bit register, rather than the 32 bit value.
# The result of this is that the returned bit string is right-padded with
# 32 bits (4 chars) of zeroes. See:
# http://bugs.python.org/issue767150
# http://bugs.python.org/issue1008086
# This wrapper ensures the result is always truncated to the first 32 bits.
return _inet_aton(ip_string)[:4]
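# Example (added for illustration): the wrapper always yields exactly four
# bytes, even on platforms affected by the padding issue described above.
#
#     >>> inet_aton('127.0.0.1')
#     '\x7f\x00\x00\x01'
#     >>> len(inet_aton('10.0.0.1'))
#     4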
| gpl-3.0 |
yesudeep/mils-secure | app/rapi.py | 2 | 18254 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import configuration as config
import logging
from google.appengine.api import users, memcache
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp.util import run_wsgi_app
from lovely.jsonrpc import wsgi
import models
from utils import dec, parse_iso_datetime_string, get_iso_datetime_string, queue_task, queue_mail_task
from data.countries import COUNTRY_NAME_ISO_ALPHA_3_TABLE
from datetime import datetime
from itertools import izip
logging.basicConfig(level=logging.DEBUG)
# Keep the timeout short because the admin expects the "freshest" data at "all" times.
DEFAULT_CACHE_TIMEOUT = 5 # seconds
def toggle_active(key):
item = db.get(db.Key(key))
item.is_active = not item.is_active
item.put()
return item.is_active
def toggle_starred(key):
item = db.get(db.Key(key))
item.is_starred = not item.is_starred
item.put()
return item.is_starred
def toggle_deleted(key):
item = db.get(db.Key(key))
item.is_deleted = not item.is_deleted
item.put()
return item.is_deleted
def toggle_premium(key):
item = db.get(db.Key(key))
item.is_premium = not item.is_premium
item.put()
return item.is_premium
def toggle_draft(key):
item = db.get(db.Key(key))
item.is_draft = not item.is_draft
item.put()
return item.is_draft
def toggle_keys_active(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = not item.is_active
item_list.append(item)
db.put(item_list)
return keys
def toggle_keys_starred(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_starred = not item.is_starred
item_list.append(item)
db.put(item_list)
return keys
def activate_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = True
item_list.append(item)
db.put(item_list)
return keys
def activate_user_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = True
item.wants_activation = False
item_list.append(item)
queue_mail_task(url='/worker/mail/account_activation_notification/' + key, method='GET')
db.put(item_list)
return keys
def deactivate_user_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = False
item.wants_activation = False
item_list.append(item)
db.put(item_list)
return keys
def deactivate_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = False
item_list.append(item)
db.put(item_list)
return keys
def publish_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_draft = False
item_list.append(item)
db.put(item_list)
return keys
def draft_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_draft = True
item_list.append(item)
db.put(item_list)
return keys
def regularize_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_premium = False
item_list.append(item)
db.put(item_list)
return keys
def premiumize_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_premium = True
item_list.append(item)
db.put(item_list)
return keys
def star_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_starred = True
item_list.append(item)
db.put(item_list)
return keys
def unstar_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_starred = False
item_list.append(item)
db.put(item_list)
return keys
def delete_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_deleted = True
item_list.append(item)
db.put(item_list)
return keys
def undelete_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_deleted = False
item_list.append(item)
db.put(item_list)
return keys
delete_users = delete_keys
undelete_users = undelete_keys
star_users = star_keys
unstar_users = unstar_keys
def get_person_from_user(key):
cache_key = 'json.get_person_from_user(' + key + ')'
cached_result = memcache.get(cache_key)
if cached_result:
return cached_result
else:
user = db.get(db.Key(key))
person = user.people_singleton[0]
host_info = db.Query(models.UserHostInformation).filter('user = ', user).get() #user.host_information_set
phones = []
for phone in person.phones:
phones.append(dict(
key = str(phone.key()),
phone_number = phone.number,
phone_type = phone.phone_type
))
addresses = []
for address in person.addresses:
addresses.append(dict(
key = str(address.key()),
address_type = address.address_type,
#apartment = address.apartment,
#state_province = address.state_province,
#city = address.city,
#zip_code = address.zip_code,
#street_name = address.street_name,
#country_code = address.country,
#country_name = COUNTRY_NAME_ISO_ALPHA_3_TABLE.get(address.country, 'Unknown Country'),
#landmark = address.landmark,
#nearest_railway_line = address.nearest_railway_line,
address_line = address.address_line
))
corporate_email = user.corporate_email
if not corporate_email:
corporate_email = ''
retval = dict(
key = str(person.key()),
user_key = str(person.user.key()),
signin_email = user.signin_email,
corporate_email = corporate_email,
first_name = person.first_name,
last_name = person.last_name,
gender = person.gender,
company = person.company,
designation = person.designation,
graduation_year = person.graduation_year,
t_shirt_size = person.t_shirt_size,
birthdate = get_iso_datetime_string(person.birthdate),
addresses = addresses,
phones = phones,
is_student = person.is_student,
when_created = get_iso_datetime_string(user.when_created),
http_user_agent = host_info.http_user_agent
)
memcache.set(cache_key, retval, DEFAULT_CACHE_TIMEOUT)
return retval
def get_users():
cache_key = 'api.get_users'
cached_user_list = memcache.get(cache_key)
if cached_user_list:
return cached_user_list
else:
user_list = []
users = models.User.all().order('nickname').fetch(models.FETCH_ALL_VALUES)
for user in users:
person = user.people_singleton[0]
user_list.append(dict(username=user.username,
email=user.email,
signin_email=user.signin_email,
corporate_email=user.corporate_email,
nickname=user.nickname,
key=str(user.key()),
is_active=user.is_active,
is_deleted=user.is_deleted,
is_starred=user.is_starred,
wants_activation=user.wants_activation,
is_premium=user.is_premium,
auth_provider=user.auth_provider,
person_key=str(person.key()),
graduation_year=person.graduation_year,
when_created=get_iso_datetime_string(user.when_created)
))
memcache.set(cache_key, user_list, DEFAULT_CACHE_TIMEOUT)
return user_list
def get_books():
book_list = []
books = models.Book.all().order('title').fetch(models.FETCH_ALL_VALUES)
for book in books:
book_list.append(dict(title=book.title,
isbn_10=book.isbn_10,
isbn_13=book.isbn_13,
author_name=book.author_name,
key=str(book.key()),
is_active=book.is_active,
is_starred=book.is_starred,
is_deleted=book.is_deleted,
info_url=book.info_url
))
return book_list
def get_book(key):
book = db.get(db.Key(key))
return dict(key=str(book.key()),
title=book.title,
author_name=book.author_name,
isbn_10=book.isbn_10,
isbn_13=book.isbn_13,
is_active=book.is_active,
is_starred=book.is_starred,
is_deleted=book.is_deleted,
info_url=book.info_url
)
def is_openlibrary_cover_available(isbn):
isbn = str(isbn)
cache_key = 'cover_for_' + isbn
cached_value = memcache.get(cache_key)
if cached_value in (True, False):
return cached_value
else:
from google.appengine.api import urlfetch
cover_url = 'http://covers.openlibrary.org/b/isbn/' + isbn + '-S.jpg?default=false'
result = urlfetch.fetch(cover_url)
retval = False
if result.status_code == 200:
retval = True
memcache.set(cache_key, retval, DEFAULT_CACHE_TIMEOUT)
return retval
def save_book(key='', title='', author_name='', isbn_10='', isbn_13='', info_url=''):
if key:
book = db.get(db.Key(key))
else:
book = models.Book()
book.title = title
book.author_name = author_name
book.isbn_10 = isbn_10
book.isbn_13 = isbn_13
if info_url:
book.info_url = info_url
book.put()
return dict(key=str(book.key()),
title=book.title,
author_name=book.author_name,
isbn_10=book.isbn_10,
isbn_13=book.isbn_13,
is_active=book.is_active,
is_starred=book.is_starred,
is_deleted=book.is_deleted,
info_url=book.info_url
)
def get_articles():
cache_key = 'api.get_articles'
cached_articles = memcache.get(cache_key)
if cached_articles:
return cached_articles
else:
articles_list = []
articles = models.Article.all().order('-when_published').fetch(models.FETCH_ALL_VALUES)
for article in articles:
articles_list.append(dict(title=article.title,
is_draft=article.is_draft,
when_published=get_iso_datetime_string(article.when_published),
when_created=get_iso_datetime_string(article.when_created),
key=str(article.key()),
author_nickname=article.author.nickname(),
author_email=article.author.email(),
is_starred=article.is_starred,
is_deleted=article.is_deleted
))
memcache.set(cache_key, articles_list, DEFAULT_CACHE_TIMEOUT)
return articles_list
def get_article(key):
article = db.get(db.Key(key))
return dict(title=article.title,
is_draft=article.is_draft,
key=str(article.key()),
when_published=get_iso_datetime_string(article.when_published),
when_created=get_iso_datetime_string(article.when_created),
author_nickname=article.author.nickname(),
author_email=article.author.email(),
content=article.content,
is_starred=article.is_starred,
is_deleted=article.is_deleted
)
def get_article_content(key):
article = db.get(db.Key(key))
return dict(key=key,
content=article.content
)
def save_article(key='', title='', content='', is_draft=''):
if key:
article = db.get(db.Key(key))
else:
article = models.Article()
article.title = title
article.content = content
article.is_draft = is_draft
article.author = users.get_current_user()
article.put()
return dict(title=article.title,
is_draft=article.is_draft,
key=str(article.key()),
when_published=get_iso_datetime_string(article.when_published),
when_created=get_iso_datetime_string(article.when_created),
author_nickname=article.author.nickname(),
author_email=article.author.email(),
#content=article.content,
is_starred=article.is_starred,
is_deleted=article.is_deleted
)
def save_training_program(key='', title='', venue='', faculty='',
when_from='',
when_to='',
when_registration_ends='',
participation_counts=[],
participation_fees=[]):
if key:
training_program = db.get(db.Key(key))
else:
        training_program = models.TrainingProgram()
training_program.title = title
training_program.venue = venue
training_program.faculty = faculty
training_program.when_from = parse_iso_datetime_string(when_from)
training_program.when_to = parse_iso_datetime_string(when_to)
training_program.when_registration_ends = parse_iso_datetime_string(when_registration_ends)
training_program.put()
fees = []
for count, fee in izip(participation_counts, participation_fees):
tpfee = models.TrainingProgramFee()
tpfee.for_participation_count = count
if '.' in fee:
fee_integer, fee_fraction = fee.split('.')
else:
fee_integer, fee_fraction = fee, '0'
tpfee.fee_integer = dec(fee_integer)
tpfee.fee_fraction = dec(fee_fraction)
tpfee.training_program = training_program
fees.append(tpfee)
db.put(fees)
def get_training_program(key):
cache_key = 'api.get_training_program.json.' + key
cached_value = memcache.get(cache_key)
if cached_value:
return cached_value
else:
training_program = db.get(db.Key(key))
fees = [fee.to_json_dict('fee_integer', 'fee_fraction', 'for_participants_count') for fee in training_program.fees]
training_program_json_dict = training_program.to_json_dict(
'title',
'venue',
'when_from',
'when_to',
'when_registration_ends',
'max_participants',
'faculty',
'is_starred',
'is_deleted',
'is_active'
)
training_program_json_dict['fees'] = fees
memcache.set(cache_key, training_program_json_dict, DEFAULT_CACHE_TIMEOUT)
return training_program_json_dict
def get_training_programs():
cache_key = 'api.get_training_programs'
cached_values = memcache.get(cache_key)
if cached_values:
return cached_values
else:
training_programs = models.TrainingProgram.get_all()
training_programs_list = []
for training_program in training_programs:
fees = [fee.to_json_dict('fee_integer', 'fee_fraction', 'for_participants_count') for fee in training_program.fees]
training_program_json_dict = training_program.to_json_dict(
'title',
'venue',
'when_from',
'when_to',
'when_registration_ends',
'max_participants',
'faculty',
'is_starred',
'is_deleted',
'is_active')
training_program_json_dict['fees'] = fees
training_programs_list.append(training_program_json_dict)
memcache.set(cache_key, training_programs_list, DEFAULT_CACHE_TIMEOUT)
return training_programs_list
def main():
application = wsgi.WSGIJSONRPCApplication()
application.register_method(activate_keys, 'activate_keys')
application.register_method(deactivate_keys, 'deactivate_keys')
application.register_method(star_keys, 'star_keys')
application.register_method(unstar_keys, 'unstar_keys')
application.register_method(delete_keys, 'delete_keys')
application.register_method(undelete_keys, 'undelete_keys')
application.register_method(toggle_starred, 'toggle_starred')
application.register_method(toggle_active, 'toggle_active')
application.register_method(toggle_deleted, 'toggle_deleted')
application.register_method(toggle_premium, 'toggle_premium')
application.register_method(toggle_draft, 'toggle_draft')
application.register_method(regularize_keys, 'regularize_keys')
application.register_method(premiumize_keys, 'premiumize_keys')
application.register_method(publish_keys, 'publish_keys')
application.register_method(draft_keys, 'draft_keys')
application.register_method(toggle_keys_starred, 'toggle_keys_starred')
application.register_method(toggle_keys_active, 'toggle_keys_active')
application.register_method(activate_user_keys, 'activate_user_keys')
application.register_method(deactivate_user_keys, 'deactivate_user_keys')
application.register_method(get_person_from_user, 'get_person_from_user')
application.register_method(get_users, 'get_users')
application.register_method(get_articles, 'get_articles')
application.register_method(get_article, 'get_article')
application.register_method(save_article, 'save_article')
application.register_method(get_article_content, 'get_article_content')
application.register_method(get_books, 'get_books')
application.register_method(get_book, 'get_book')
application.register_method(save_book, 'save_book')
application.register_method(is_openlibrary_cover_available, 'is_cover_available')
application.register_method(get_training_programs, 'get_training_programs')
application.register_method(get_training_program, 'get_training_program')
application.register_method(save_training_program, 'save_training_program')
run_wsgi_app(application)
if __name__ == '__main__':
main()
| mit |
dezynetechnologies/odoo | addons/auth_crypt/__init__.py | 435 | 1050 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import auth_crypt
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
keerts/home-assistant | homeassistant/components/zoneminder.py | 3 | 3271 | """
Support for ZoneMinder.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zoneminder/
"""
import logging
from urllib.parse import urljoin
import requests
import voluptuous as vol
from homeassistant.const import (
CONF_PATH, CONF_HOST, CONF_SSL, CONF_PASSWORD, CONF_USERNAME)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_PATH = '/zm/'
DEFAULT_SSL = False
DEFAULT_TIMEOUT = 10
DOMAIN = 'zoneminder'
LOGIN_RETRIES = 2
ZM = {}
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the ZoneMinder component."""
global ZM
ZM = {}
conf = config[DOMAIN]
if conf[CONF_SSL]:
schema = 'https'
else:
schema = 'http'
url = urljoin('{}://{}'.format(schema, conf[CONF_HOST]), conf[CONF_PATH])
username = conf.get(CONF_USERNAME, None)
password = conf.get(CONF_PASSWORD, None)
ZM['url'] = url
ZM['username'] = username
ZM['password'] = password
return login()
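# Illustrative configuration.yaml entry matching CONFIG_SCHEMA above (added;
# the host and credentials are placeholders, not values from the source):
#
#   zoneminder:
#     host: zm.example.org
#     ssl: false
#     path: /zm/
#     username: admin
#     password: secret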
# pylint: disable=no-member
def login():
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if ZM['username']:
login_post['username'] = ZM['username']
if ZM['password']:
login_post['password'] = ZM['password']
req = requests.post(ZM['url'] + '/index.php', data=login_post)
ZM['cookies'] = req.cookies
    # Login calls return a 200 response on both failure and success.
# The only way to tell if you logged in correctly is to issue an api call.
req = requests.get(
ZM['url'] + 'api/host/getVersion.json', cookies=ZM['cookies'],
timeout=DEFAULT_TIMEOUT)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def _zm_request(method, api_url, data=None):
"""Perform a Zoneminder request."""
# Since the API uses sessions that expire, sometimes we need to re-auth
# if the call fails.
for _ in range(LOGIN_RETRIES):
req = requests.request(
method, urljoin(ZM['url'], api_url), data=data,
cookies=ZM['cookies'], timeout=DEFAULT_TIMEOUT)
if not req.ok:
login()
else:
break
else:
_LOGGER.exception("Unable to get API response from ZoneMinder")
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while attempting to '
'decode "%s"', req.text)
# pylint: disable=no-member
def get_state(api_url):
"""Get a state from the ZoneMinder API service."""
return _zm_request('get', api_url)
# pylint: disable=no-member
def change_state(api_url, post_data):
"""Update a state using the Zoneminder API."""
return _zm_request('post', api_url, data=post_data)
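# Usage sketch (added; not part of the component): once setup() has logged in,
# platform code fetches JSON endpoints through the shared session, e.g.
#
#     version_info = get_state('api/host/getVersion.json')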
| apache-2.0 |
zubair-arbi/edx-platform | common/lib/xmodule/xmodule/tests/test_annotatable_module.py | 172 | 5909 | """Module annotatable tests"""
import unittest
from lxml import etree
from mock import Mock
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.annotatable_module import AnnotatableModule
from opaque_keys.edx.locations import Location
from . import get_test_system
class AnnotatableModuleTestCase(unittest.TestCase):
sample_xml = '''
<annotatable display_name="Iliad">
<instructions>Read the text.</instructions>
<p>
<annotation body="first">Sing</annotation>,
<annotation title="goddess" body="second">O goddess</annotation>,
<annotation title="anger" body="third" highlight="blue">the anger of Achilles son of Peleus</annotation>,
that brought <i>countless</i> ills upon the Achaeans. Many a brave soul did it send
hurrying down to Hades, and many a hero did it yield a prey to dogs and
<div style="font-weight:bold"><annotation body="fourth" problem="4">vultures</annotation>, for so were the counsels
of Jove fulfilled from the day on which the son of Atreus, king of men, and great
Achilles, first fell out with one another.</div>
</p>
<annotation title="footnote" body="the end">The Iliad of Homer by Samuel Butler</annotation>
</annotatable>
'''
def setUp(self):
super(AnnotatableModuleTestCase, self).setUp()
self.annotatable = AnnotatableModule(
Mock(),
get_test_system(),
DictFieldData({'data': self.sample_xml}),
ScopeIds(None, None, None, Location('org', 'course', 'run', 'category', 'name', None))
)
def test_annotation_data_attr(self):
el = etree.fromstring('<annotation title="bar" body="foo" problem="0">test</annotation>')
expected_attr = {
'data-comment-body': {'value': 'foo', '_delete': 'body'},
'data-comment-title': {'value': 'bar', '_delete': 'title'},
'data-problem-id': {'value': '0', '_delete': 'problem'}
}
actual_attr = self.annotatable._get_annotation_data_attr(0, el)
self.assertIsInstance(actual_attr, dict)
self.assertDictEqual(expected_attr, actual_attr)
def test_annotation_class_attr_default(self):
xml = '<annotation title="x" body="y" problem="0">test</annotation>'
el = etree.fromstring(xml)
expected_attr = {'class': {'value': 'annotatable-span highlight'}}
actual_attr = self.annotatable._get_annotation_class_attr(0, el)
self.assertIsInstance(actual_attr, dict)
self.assertDictEqual(expected_attr, actual_attr)
def test_annotation_class_attr_with_valid_highlight(self):
xml = '<annotation title="x" body="y" problem="0" highlight="{highlight}">test</annotation>'
for color in self.annotatable.highlight_colors:
el = etree.fromstring(xml.format(highlight=color))
value = 'annotatable-span highlight highlight-{highlight}'.format(highlight=color)
expected_attr = {
'class': {
'value': value,
'_delete': 'highlight'
}
}
actual_attr = self.annotatable._get_annotation_class_attr(0, el)
self.assertIsInstance(actual_attr, dict)
self.assertDictEqual(expected_attr, actual_attr)
def test_annotation_class_attr_with_invalid_highlight(self):
xml = '<annotation title="x" body="y" problem="0" highlight="{highlight}">test</annotation>'
for invalid_color in ['rainbow', 'blink', 'invisible', '', None]:
el = etree.fromstring(xml.format(highlight=invalid_color))
expected_attr = {
'class': {
'value': 'annotatable-span highlight',
'_delete': 'highlight'
}
}
actual_attr = self.annotatable._get_annotation_class_attr(0, el)
self.assertIsInstance(actual_attr, dict)
self.assertDictEqual(expected_attr, actual_attr)
def test_render_annotation(self):
expected_html = '<span class="annotatable-span highlight highlight-yellow" data-comment-title="x" data-comment-body="y" data-problem-id="0">z</span>'
expected_el = etree.fromstring(expected_html)
actual_el = etree.fromstring('<annotation title="x" body="y" problem="0" highlight="yellow">z</annotation>')
self.annotatable._render_annotation(0, actual_el)
self.assertEqual(expected_el.tag, actual_el.tag)
self.assertEqual(expected_el.text, actual_el.text)
self.assertDictEqual(dict(expected_el.attrib), dict(actual_el.attrib))
def test_render_content(self):
content = self.annotatable._render_content()
el = etree.fromstring(content)
self.assertEqual('div', el.tag, 'root tag is a div')
expected_num_annotations = 5
actual_num_annotations = el.xpath('count(//span[contains(@class,"annotatable-span")])')
self.assertEqual(expected_num_annotations, actual_num_annotations, 'check number of annotations')
def test_get_html(self):
context = self.annotatable.get_html()
for key in ['display_name', 'element_id', 'content_html', 'instructions_html']:
self.assertIn(key, context)
def test_extract_instructions(self):
xmltree = etree.fromstring(self.sample_xml)
expected_xml = u"<div>Read the text.</div>"
actual_xml = self.annotatable._extract_instructions(xmltree)
self.assertIsNotNone(actual_xml)
self.assertEqual(expected_xml.strip(), actual_xml.strip())
xmltree = etree.fromstring('<annotatable>foo</annotatable>')
actual = self.annotatable._extract_instructions(xmltree)
self.assertIsNone(actual)
| agpl-3.0 |
yaybu/touchdown | touchdown/goals/get_signin_url.py | 1 | 1515 | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import errors
from touchdown.core.goals import Goal, register
class GetSigninUrl(Goal):
""" Generate short-lived access urls """
name = "get-signin-url"
mutator = False
def get_plan_class(self, resource):
plan_class = resource.meta.get_plan("get-signin-url")
if not plan_class:
plan_class = resource.meta.get_plan("null")
return plan_class
@classmethod
def setup_argparse(cls, parser):
parser.add_argument(
"resource",
metavar="RESOURCE",
type=str,
help="The resource to create a signin url for",
)
def execute(self, resource):
resources = self.collect_as_dict("get-signin-url")
if resource not in resources:
raise errors.Error('No such resource "{}"'.format(resource))
self.ui.echo(resources[resource].get_signin_url())
register(GetSigninUrl)
| apache-2.0 |
kdeloach/GeositeFramework | tools/JsTestTools/phantomjs/python/phantom.py | 14 | 13346 | '''
This file is part of the PyPhantomJS project.
Copyright (C) 2011 James Roe <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import codecs
from utils import version_major, version_minor, version_patch
from plugincontroller import Bunch, do_action
from csconverter import CSConverter
from math import ceil, floor
from time import sleep as usleep
from webpage import WebPage
from networkaccessmanager import NetworkAccessManager
from PyQt4.QtCore import pyqtProperty, pyqtSlot, Qt, QObject, QRect, \
SLOT, QTimer, QUrl, QFileInfo, QDir, QSize, \
QSizeF, QTime, QEventLoop, qDebug
from PyQt4.QtGui import QPalette, QDesktopServices, qApp, QPrinter, \
QImage, QPainter, QRegion, QApplication, qRgba
from PyQt4.QtWebKit import QWebSettings, QWebPage
from PyQt4.QtNetwork import QNetworkProxy, QNetworkProxyFactory
# Different defaults.
# OSX: 72, X11: 75(?), Windows: 96
pdf_dpi = 72
class Phantom(QObject):
def __init__(self, args, parent=None):
QObject.__init__(self, parent)
# variable declarations
self.m_loadStatus = self.m_state = ''
self.m_var = self.m_paperSize = self.m_loadScript_cache = {}
self.m_verbose = args.verbose
self.m_page = WebPage(self)
self.m_clipRect = QRect()
# setup the values from args
self.m_script = args.script.read()
self.m_scriptFile = args.script.name
self.m_scriptDir = os.path.dirname(args.script.name) + '/'
self.m_args = args.script_args
self.m_upload_file = args.upload_file
autoLoadImages = False if args.load_images == 'no' else True
pluginsEnabled = True if args.load_plugins == 'yes' else False
args.script.close()
do_action('PhantomInitPre', Bunch(locals()))
palette = self.m_page.palette()
palette.setBrush(QPalette.Base, Qt.transparent)
self.m_page.setPalette(palette)
if not args.proxy:
QNetworkProxyFactory.setUseSystemConfiguration(True)
else:
proxy = QNetworkProxy(QNetworkProxy.HttpProxy, args.proxy[0], int(args.proxy[1]))
QNetworkProxy.setApplicationProxy(proxy)
self.m_page.settings().setAttribute(QWebSettings.AutoLoadImages, autoLoadImages)
self.m_page.settings().setAttribute(QWebSettings.PluginsEnabled, pluginsEnabled)
self.m_page.settings().setAttribute(QWebSettings.FrameFlatteningEnabled, True)
self.m_page.settings().setAttribute(QWebSettings.OfflineStorageDatabaseEnabled, True)
self.m_page.settings().setAttribute(QWebSettings.LocalStorageEnabled, True)
self.m_page.settings().setLocalStoragePath(QDesktopServices.storageLocation(QDesktopServices.DataLocation))
self.m_page.settings().setOfflineStoragePath(QDesktopServices.storageLocation(QDesktopServices.DataLocation))
# Ensure we have a document.body.
self.m_page.mainFrame().setHtml('<html><body></body></html>')
self.m_page.mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
self.m_page.mainFrame().setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
m_netAccessMan = NetworkAccessManager(args.disk_cache, args.ignore_ssl_errors, self)
self.m_page.setNetworkAccessManager(m_netAccessMan)
# inject our properties and slots into javascript
self.m_page.mainFrame().javaScriptWindowObjectCleared.connect(self.inject)
self.m_page.loadFinished.connect(self.finish)
do_action('PhantomInitPost', Bunch(locals()))
def execute(self):
if self.m_script.startswith('#!'):
self.m_script = '//' + self.m_script
if self.m_scriptFile.lower().endswith('.coffee'):
coffee = CSConverter(self)
self.m_script = coffee.convert(self.m_script)
self.m_page.mainFrame().evaluateJavaScript(self.m_script)
def finish(self, success):
self.m_loadStatus = 'success' if success else 'fail'
self.m_page.mainFrame().evaluateJavaScript(self.m_script)
def inject(self):
self.m_page.mainFrame().addToJavaScriptWindowObject('phantom', self)
def renderPdf(self, fileName):
p = QPrinter()
p.setOutputFormat(QPrinter.PdfFormat)
p.setOutputFileName(fileName)
p.setResolution(pdf_dpi)
paperSize = self.m_paperSize
if not len(paperSize):
pageSize = QSize(self.m_page.mainFrame().contentsSize())
paperSize['width'] = str(pageSize.width()) + 'px'
paperSize['height'] = str(pageSize.height()) + 'px'
paperSize['border'] = '0px'
if paperSize.get('width') and paperSize.get('height'):
sizePt = QSizeF(ceil(self.stringToPointSize(paperSize['width'])),
ceil(self.stringToPointSize(paperSize['height'])))
p.setPaperSize(sizePt, QPrinter.Point)
elif 'format' in paperSize:
orientation = QPrinter.Landscape if paperSize.get('orientation') and paperSize['orientation'].lower() == 'landscape' else QPrinter.Portrait
orientation = QPrinter.Orientation(orientation)
p.setOrientation(orientation)
formats = {
'A3': QPrinter.A3,
'A4': QPrinter.A4,
'A5': QPrinter.A5,
'Legal': QPrinter.Legal,
'Letter': QPrinter.Letter,
'Tabloid': QPrinter.Tabloid
}
p.setPaperSize(QPrinter.A4) # fallback
for format, size in formats.items():
if format.lower() == paperSize['format'].lower():
p.setPaperSize(size)
break
else:
return False
border = floor(self.stringToPointSize(paperSize['border'])) if paperSize.get('border') else 0
p.setPageMargins(border, border, border, border, QPrinter.Point)
self.m_page.mainFrame().print_(p)
return True
def returnValue(self):
return self.m_returnValue
def stringToPointSize(self, string):
units = (
('mm', 72 / 25.4),
('cm', 72 / 2.54),
('in', 72.0),
('px', 72.0 / pdf_dpi / 2.54),
('', 72.0 / pdf_dpi / 2.54)
)
for unit, format in units:
if string.endswith(unit):
value = string.rstrip(unit)
return float(value) * format
return 0
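    # Worked example (added for clarity): under the conversion table above with
    # pdf_dpi = 72, '10mm' -> 10 * 72 / 25.4 ~= 28.35 points and
    # '100px' -> 100 * 72.0 / 72 / 2.54 ~= 39.37 points.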
##
# Properties and methods exposed to JavaScript
##
@pyqtProperty('QStringList')
def args(self):
return self.m_args
@pyqtProperty('QVariantMap')
def clipRect(self):
result = {
'width': self.m_clipRect.width(),
'height': self.m_clipRect.height(),
'top': self.m_clipRect.top(),
'left': self.m_clipRect.left()
}
return result
@clipRect.setter
def clipRect(self, size):
names = ('width', 'height', 'top', 'left')
for item in names:
try:
globals()[item] = int(size[item])
if globals()[item] < 0:
if item not in ('top', 'left'):
globals()[item] = 0
except KeyError:
globals()[item] = getattr(self.m_clipRect, item)()
self.m_clipRect = QRect(left, top, width, height)
@pyqtProperty(str)
def content(self):
return self.m_page.mainFrame().toHtml()
@content.setter
def content(self, content):
self.m_page.mainFrame().setHtml(content)
@pyqtSlot()
@pyqtSlot(int)
def exit(self, code=0):
self.m_returnValue = code
self.m_page.loadFinished.disconnect(self.finish)
QTimer.singleShot(0, qApp, SLOT('quit()'))
@pyqtProperty(str)
def loadStatus(self):
return self.m_loadStatus
@pyqtSlot(str, result=bool)
def loadScript(self, script):
if script in self.m_loadScript_cache:
self.m_page.mainFrame().evaluateJavaScript(self.m_loadScript_cache[script])
return True
scriptFile = script
try:
script = codecs.open(self.m_scriptDir + script, encoding='utf-8')
script = script.read()
except IOError:
return False
if script.startswith('#!'):
script = '//' + script
if scriptFile.lower().endswith('.coffee'):
coffee = CSConverter(self)
script = coffee.convert(script)
self.m_loadScript_cache[scriptFile] = script
self.m_page.mainFrame().evaluateJavaScript(script)
return True
@pyqtSlot(str, name='open')
def open_(self, address):
qDebug('Opening address %s' % address)
self.m_page.triggerAction(QWebPage.Stop)
self.m_loadStatus = 'loading'
self.m_page.mainFrame().setUrl(QUrl(address))
@pyqtProperty('QVariantMap')
def paperSize(self):
return self.m_paperSize
@paperSize.setter
def paperSize(self, size):
self.m_paperSize = size
@pyqtSlot(str, result=bool)
def render(self, fileName):
fileInfo = QFileInfo(fileName)
path = QDir()
path.mkpath(fileInfo.absolutePath())
if fileName.lower().endswith('.pdf'):
return self.renderPdf(fileName)
viewportSize = QSize(self.m_page.viewportSize())
pageSize = QSize(self.m_page.mainFrame().contentsSize())
bufferSize = QSize()
if not self.m_clipRect.isEmpty():
bufferSize = self.m_clipRect.size()
else:
bufferSize = self.m_page.mainFrame().contentsSize()
if pageSize == '':
return False
image = QImage(bufferSize, QImage.Format_ARGB32)
image.fill(qRgba(255, 255, 255, 0))
p = QPainter(image)
p.setRenderHint(QPainter.Antialiasing, True)
p.setRenderHint(QPainter.TextAntialiasing, True)
p.setRenderHint(QPainter.SmoothPixmapTransform, True)
self.m_page.setViewportSize(pageSize)
if not self.m_clipRect.isEmpty():
p.translate(-self.m_clipRect.left(), -self.m_clipRect.top())
self.m_page.mainFrame().render(p, QRegion(self.m_clipRect))
else:
self.m_page.mainFrame().render(p)
p.end()
self.m_page.setViewportSize(viewportSize)
return image.save(fileName)
@pyqtSlot('QWebElement', str)
def setFormInputFile(self, el, fileTag):
self.m_page.m_nextFileTag = fileTag
el.evaluateJavaScript('''(function(target){
var evt = document.createEvent('MouseEvents');
evt.initMouseEvent("click", true, true, window,
0, 0, 0, 0, 0, false, false, false, false, 0, null);
target.dispatchEvent(evt);})(this);''')
@pyqtSlot(int)
def sleep(self, ms):
startTime = QTime.currentTime()
while True:
QApplication.processEvents(QEventLoop.AllEvents, 25)
if startTime.msecsTo(QTime.currentTime()) > ms:
break
usleep(0.005)
@pyqtProperty(str)
def state(self):
return self.m_state
@state.setter
def state(self, value):
self.m_state = value
@pyqtProperty(str)
def userAgent(self):
return self.m_page.m_userAgent
@userAgent.setter
def userAgent(self, ua):
self.m_page.m_userAgent = ua
@pyqtSlot(str, result='QVariant')
@pyqtSlot(int, result='QVariant')
@pyqtSlot(str, 'QVariant')
@pyqtSlot(int, 'QVariant')
def ctx(self, name, value=None):
if not value:
return self.m_var.get(name)
self.m_var[name] = value
@pyqtProperty('QVariantMap')
def version(self):
version = {
'major': version_major,
'minor': version_minor,
'patch': version_patch
}
return version
@pyqtProperty('QVariantMap')
def viewportSize(self):
size = self.m_page.viewportSize()
result = {
'width': size.width(),
'height': size.height()
}
return result
@viewportSize.setter
def viewportSize(self, size):
names = ('width', 'height')
for item in names:
try:
globals()[item] = int(size[item])
if globals()[item] < 0:
globals()[item] = 0
except KeyError:
globals()[item] = getattr(self.m_page.viewportSize(), item)()
self.m_page.setViewportSize(QSize(width, height))
do_action('Phantom', Bunch(locals()))
| gpl-3.0 |
iivic/BoiseStateX | cms/djangoapps/contentstore/views/tests/test_course_index.py | 7 | 35847 | """
Unit tests for getting the list of courses and the course outline.
"""
import ddt
import json
import lxml
import datetime
import mock
import pytz
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url, reverse_library_url, add_instructor, reverse_usage_url
from contentstore.views.course import (
course_outline_initial_state, reindex_course_and_check_access, _deprecated_blocks_info
)
from contentstore.views.item import create_xblock_info, VisibilityState
from course_action_state.managers import CourseRerunUIStateManager
from course_action_state.models import CourseRerunState
from opaque_keys.edx.locator import CourseLocator
from search.api import perform_search
from student.auth import has_course_author_access
from student.tests.factories import UserFactory
from util.date_utils import get_default_time_display
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, LibraryFactory
class TestCourseIndex(CourseTestCase):
"""
Unit tests for getting the list of courses and the course outline.
"""
def setUp(self):
"""
Add a course with odd characters in the fields
"""
super(TestCourseIndex, self).setUp()
# had a problem where index showed course but has_access failed to retrieve it for non-staff
self.odd_course = CourseFactory.create(
org='test.org_1-2',
number='test-2.3_course',
display_name='dotted.course.name-2',
)
def check_index_and_outline(self, authed_client):
"""
Test getting the list of courses and then pulling up their outlines
"""
index_url = '/home/'
index_response = authed_client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
course_link_eles = parsed_html.find_class('course-link')
self.assertGreaterEqual(len(course_link_eles), 2)
for link in course_link_eles:
self.assertRegexpMatches(
link.get("href"),
'course/{}'.format(settings.COURSE_KEY_PATTERN)
)
# now test that url
outline_response = authed_client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
# ensure it has the expected 2 self referential links
outline_parsed = lxml.html.fromstring(outline_response.content)
outline_link = outline_parsed.find_class('course-link')[0]
self.assertEqual(outline_link.get("href"), link.get("href"))
course_menu_link = outline_parsed.find_class('nav-course-courseware-outline')[0]
self.assertEqual(course_menu_link.find("a").get("href"), link.get("href"))
def test_libraries_on_course_index(self):
"""
Test getting the list of libraries from the course listing page
"""
# Add a library:
lib1 = LibraryFactory.create()
index_url = '/home/'
index_response = self.client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
library_link_elements = parsed_html.find_class('library-link')
self.assertEqual(len(library_link_elements), 1)
link = library_link_elements[0]
self.assertEqual(
link.get("href"),
reverse_library_url('library_handler', lib1.location.library_key),
)
# now test that url
outline_response = self.client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
self.assertEqual(outline_response.status_code, 200)
def test_is_staff_access(self):
"""
Test that people with is_staff see the courses and can navigate into them
"""
self.check_index_and_outline(self.client)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
outline_url = reverse_course_url('course_handler', self.course.id)
# register a non-staff member and try to delete the course branch
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_course_staff_access(self):
"""
Make and register course_staff and ensure they can access the courses
"""
course_staff_client, course_staff = self.create_non_staff_authed_user_client()
for course in [self.course, self.odd_course]:
permission_url = reverse_course_url('course_team_handler', course.id, kwargs={'email': course_staff.email})
self.client.post(
permission_url,
data=json.dumps({"role": "staff"}),
content_type="application/json",
HTTP_ACCEPT="application/json",
)
# test access
self.check_index_and_outline(course_staff_client)
def test_json_responses(self):
outline_url = reverse_course_url('course_handler', self.course.id)
chapter = ItemFactory.create(parent_location=self.course.location, category='chapter', display_name="Week 1")
lesson = ItemFactory.create(parent_location=chapter.location, category='sequential', display_name="Lesson 1")
subsection = ItemFactory.create(
parent_location=lesson.location,
category='vertical',
display_name='Subsection 1'
)
ItemFactory.create(parent_location=subsection.location, category="video", display_name="My Video")
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], unicode(self.course.location))
self.assertEqual(json_response['display_name'], self.course.display_name)
self.assertTrue(json_response['published'])
self.assertIsNone(json_response['visibility_state'])
# Now verify the first child
children = json_response['child_info']['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], unicode(chapter.location))
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(json_response['published'])
self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
self.assertTrue(len(first_child_response['child_info']['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def test_notifications_handler_get(self):
state = CourseRerunUIStateManager.State.FAILED
action = CourseRerunUIStateManager.ACTION
should_display = True
# try when no notification exists
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': 1,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
        # verify that a 400 is returned when the notification does not exist
self.assertEquals(resp.status_code, 400)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(
course_key=self.course.id,
new_state=state,
allow_not_found=True
)
CourseRerunState.objects.update_should_display(
entry_id=rerun_state.id,
user=UserFactory(),
should_display=should_display
)
# try to get information on this notification
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
self.assertEquals(json_response['state'], state)
self.assertEquals(json_response['action'], action)
self.assertEquals(json_response['should_display'], should_display)
def test_notifications_handler_dismiss(self):
state = CourseRerunUIStateManager.State.FAILED
should_display = True
rerun_course_key = CourseLocator(org='testx', course='test_course', run='test_run')
# add an instructor to this course
user2 = UserFactory()
add_instructor(rerun_course_key, self.user, user2)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(
course_key=rerun_course_key,
new_state=state,
allow_not_found=True
)
CourseRerunState.objects.update_should_display(
entry_id=rerun_state.id,
user=user2,
should_display=should_display
)
# try to get information on this notification
notification_dismiss_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.delete(notification_dismiss_url)
self.assertEquals(resp.status_code, 200)
with self.assertRaises(CourseRerunState.DoesNotExist):
            # dismissed notifications are deleted
CourseRerunState.objects.get(id=rerun_state.id)
self.assertFalse(has_course_author_access(user2, rerun_course_key))
def assert_correct_json_response(self, json_response):
"""
Asserts that the JSON response is syntactically consistent
"""
self.assertIsNotNone(json_response['display_name'])
self.assertIsNotNone(json_response['id'])
self.assertIsNotNone(json_response['category'])
self.assertTrue(json_response['published'])
if json_response.get('child_info', None):
for child_response in json_response['child_info']['children']:
self.assert_correct_json_response(child_response)
def test_course_updates_invalid_url(self):
"""
Tests the error conditions for the invalid course updates URL.
"""
# Testing the response code by passing slash separated course id whose format is valid but no course
# having this id exists.
invalid_course_key = '{}_blah_blah_blah'.format(self.course.id)
course_updates_url = reverse_course_url('course_info_handler', invalid_course_key)
response = self.client.get(course_updates_url)
self.assertEqual(response.status_code, 404)
# Testing the response code by passing split course id whose format is valid but no course
# having this id exists.
split_course_key = CourseLocator(org='orgASD', course='course_01213', run='Run_0_hhh_hhh_hhh')
course_updates_url_split = reverse_course_url('course_info_handler', split_course_key)
response = self.client.get(course_updates_url_split)
self.assertEqual(response.status_code, 404)
# Testing the response by passing split course id whose format is invalid.
invalid_course_id = 'invalid.course.key/{}'.format(split_course_key)
course_updates_url_split = reverse_course_url('course_info_handler', invalid_course_id)
response = self.client.get(course_updates_url_split)
self.assertEqual(response.status_code, 404)
def test_course_index_invalid_url(self):
"""
Tests the error conditions for the invalid course index URL.
"""
# Testing the response code by passing slash separated course key, no course
# having this key exists.
invalid_course_key = '{}_some_invalid_run'.format(self.course.id)
course_outline_url = reverse_course_url('course_handler', invalid_course_key)
response = self.client.get_html(course_outline_url)
self.assertEqual(response.status_code, 404)
# Testing the response code by passing split course key, no course
# having this key exists.
split_course_key = CourseLocator(org='invalid_org', course='course_01111', run='Run_0_invalid')
course_outline_url_split = reverse_course_url('course_handler', split_course_key)
response = self.client.get_html(course_outline_url_split)
self.assertEqual(response.status_code, 404)
@ddt.ddt
class TestCourseOutline(CourseTestCase):
"""
Unit tests for the course outline.
"""
def setUp(self):
"""
        Set up for the course outline tests.
"""
super(TestCourseOutline, self).setUp()
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1"
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category="video", display_name="My Video"
)
def test_json_responses(self):
"""
Verify the JSON responses returned for the course.
"""
outline_url = reverse_course_url('course_handler', self.course.id)
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], unicode(self.course.location))
self.assertEqual(json_response['display_name'], self.course.display_name)
self.assertTrue(json_response['published'])
self.assertIsNone(json_response['visibility_state'])
# Now verify the first child
children = json_response['child_info']['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], unicode(self.chapter.location))
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(json_response['published'])
self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
self.assertTrue(len(first_child_response['child_info']['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def assert_correct_json_response(self, json_response):
"""
Asserts that the JSON response is syntactically consistent
"""
self.assertIsNotNone(json_response['display_name'])
self.assertIsNotNone(json_response['id'])
self.assertIsNotNone(json_response['category'])
self.assertTrue(json_response['published'])
if json_response.get('child_info', None):
for child_response in json_response['child_info']['children']:
self.assert_correct_json_response(child_response)
def test_course_outline_initial_state(self):
course_module = modulestore().get_item(self.course.location)
course_structure = create_xblock_info(
course_module,
include_child_info=True,
include_children_predicate=lambda xblock: not xblock.category == 'vertical'
)
# Verify that None is returned for a non-existent locator
self.assertIsNone(course_outline_initial_state('no-such-locator', course_structure))
# Verify that the correct initial state is returned for the test chapter
chapter_locator = unicode(self.chapter.location)
initial_state = course_outline_initial_state(chapter_locator, course_structure)
self.assertEqual(initial_state['locator_to_show'], chapter_locator)
expanded_locators = initial_state['expanded_locators']
self.assertIn(unicode(self.sequential.location), expanded_locators)
self.assertIn(unicode(self.vertical.location), expanded_locators)
def test_start_date_on_page(self):
"""
Verify that the course start date is included on the course outline page.
"""
def _get_release_date(response):
"""Return the release date from the course page"""
parsed_html = lxml.html.fromstring(response.content)
return parsed_html.find_class('course-status')[0].find_class('status-release-value')[0].text_content()
def _assert_settings_link_present(response):
"""
Asserts there's a course settings link on the course page by the course release date.
"""
parsed_html = lxml.html.fromstring(response.content)
settings_link = parsed_html.find_class('course-status')[0].find_class('action-edit')[0].find('a')
self.assertIsNotNone(settings_link)
self.assertEqual(settings_link.get('href'), reverse_course_url('settings_handler', self.course.id))
outline_url = reverse_course_url('course_handler', self.course.id)
response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
# A course with the default release date should display as "Unscheduled"
self.assertEqual(_get_release_date(response), 'Unscheduled')
_assert_settings_link_present(response)
self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
self.assertEqual(_get_release_date(response), get_default_time_display(self.course.start))
_assert_settings_link_present(response)
def _create_test_data(self, course_module, create_blocks=False, publish=True, block_types=None):
"""
        Create data for the test.
"""
if create_blocks:
for block_type in block_types:
ItemFactory.create(
parent_location=self.vertical.location,
category=block_type,
display_name='{} Problem'.format(block_type)
)
if not publish:
self.store.unpublish(self.vertical.location, self.user.id)
course_module.advanced_modules.extend(block_types)
def _verify_deprecated_info(self, course_id, advanced_modules, info, deprecated_block_types):
"""
Verify deprecated info.
"""
expected_blocks = []
for block_type in deprecated_block_types:
expected_blocks.append(
[
reverse_usage_url('container_handler', self.vertical.location),
'{} Problem'.format(block_type)
]
)
self.assertEqual(info['block_types'], deprecated_block_types)
self.assertEqual(
info['block_types_enabled'],
any(component in advanced_modules for component in deprecated_block_types)
)
self.assertItemsEqual(info['blocks'], expected_blocks)
self.assertEqual(
info['advance_settings_url'],
reverse_course_url('advanced_settings_handler', course_id)
)
@ddt.data(
{'publish': True},
{'publish': False},
)
@ddt.unpack
def test_verify_deprecated_warning_message_with_single_feature(self, publish):
"""
Verify deprecated warning info for single deprecated feature.
"""
block_types = settings.DEPRECATED_BLOCK_TYPES
course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish)
info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(
course_module.id,
course_module.advanced_modules,
info,
block_types
)
def test_verify_deprecated_warning_message_with_multiple_features(self):
"""
Verify deprecated warning info for multiple deprecated features.
"""
block_types = ['peergrading', 'combinedopenended', 'openassessment']
course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types)
info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types)
@ddt.data(
{'delete_vertical': True},
{'delete_vertical': False},
)
@ddt.unpack
def test_deprecated_blocks_list_updated_correctly(self, delete_vertical):
"""
        Verify that the deprecated blocks list shown on the banner is updated correctly.
        Here is the scenario:
            The list of deprecated blocks shown on the banner contains published
            and un-published blocks. That list should be updated when we delete
            un-published block(s). The behavior should be the same whether we delete
            an unpublished vertical or an unpublished problem.
"""
block_types = ['peergrading']
course_module = modulestore().get_item(self.course.location)
vertical1 = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Vert1 Subsection1'
)
problem1 = ItemFactory.create(
parent_location=vertical1.location,
category='peergrading',
display_name='peergrading problem in vert1',
publish_item=False
)
info = _deprecated_blocks_info(course_module, block_types)
# info['blocks'] should be empty here because there is nothing
# published or un-published present
self.assertEqual(info['blocks'], [])
vertical2 = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Vert2 Subsection1'
)
ItemFactory.create(
parent_location=vertical2.location,
category='peergrading',
display_name='peergrading problem in vert2',
            publish_item=True
)
# At this point CourseStructure will contain both the above
# published and un-published verticals
info = _deprecated_blocks_info(course_module, block_types)
self.assertItemsEqual(
info['blocks'],
[
[reverse_usage_url('container_handler', vertical1.location), 'peergrading problem in vert1'],
[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']
]
)
# Delete the un-published vertical or problem so that CourseStructure updates its data
if delete_vertical:
self.store.delete_item(vertical1.location, self.user.id)
else:
self.store.delete_item(problem1.location, self.user.id)
info = _deprecated_blocks_info(course_module, block_types)
# info['blocks'] should only contain the info about vertical2 which is published.
# There shouldn't be any info present about un-published vertical1
self.assertEqual(
info['blocks'],
[[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']]
)
class TestCourseReIndex(CourseTestCase):
"""
Unit tests for the course outline.
"""
SUCCESSFUL_RESPONSE = _("Course has been successfully reindexed.")
def setUp(self):
"""
        Set up for the course outline tests.
"""
super(TestCourseReIndex, self).setUp()
self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
modulestore().update_item(self.course, self.user.id)
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1"
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category="video", display_name="My Video"
)
self.html = ItemFactory.create(
parent_location=self.vertical.location, category="html", display_name="My HTML",
data="<div>This is my unique HTML content</div>",
)
def test_reindex_course(self):
"""
Verify that course gets reindexed.
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
        # verify the success message and a 200 status are returned
self.assertIn(self.SUCCESSFUL_RESPONSE, response.content)
self.assertEqual(response.status_code, 200)
response = self.client.post(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.content, '')
self.assertEqual(response.status_code, 405)
self.client.logout()
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 302)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
        # register a non-staff member and try to trigger a reindex
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_content_type_none(self):
"""
Test json content type is set if none is selected
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
response = self.client.get(index_url, {}, CONTENT_TYPE=None)
        # verify the success message and a 200 status are returned
self.assertIn(self.SUCCESSFUL_RESPONSE, response.content)
self.assertEqual(response.status_code, 200)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_reindex_course_search_index_error(self, mock_index_dictionary):
"""
Test json response with mocked error data for html
"""
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
index_url = reverse_course_url('course_search_index_handler', self.course.id)
# Start manual reindex and check error in response
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 500)
def test_reindex_json_responses(self):
"""
Test json response with real data
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# Start manual reindex
reindex_course_and_check_access(self.course.id, self.user)
# Check results remain the same
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
@mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary')
def test_reindex_video_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for video
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_reindex_html_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for html
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary')
def test_reindex_seq_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for sequence
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course')
def test_reindex_no_item(self, mock_get_course):
"""
Test system logs an error if no item found.
"""
# set mocked exception response
err = ItemNotFoundError
mock_get_course.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
def test_reindex_no_permissions(self):
        # try to reindex as a user without course author access
user2 = UserFactory()
with self.assertRaises(PermissionDenied):
reindex_course_and_check_access(self.course.id, user2)
def test_indexing_responses(self):
"""
Test do_course_reindex response with real data
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# Start manual reindex
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
# Check results are the same following reindex
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
@mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary')
def test_indexing_video_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for video
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_indexing_html_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for html
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary')
def test_indexing_seq_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for sequence
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course')
def test_indexing_no_item(self, mock_get_course):
"""
Test system logs an error if no item found.
"""
# set mocked exception response
err = ItemNotFoundError
mock_get_course.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
| agpl-3.0 |
nfjinjing/shadowsocks | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
global loaded, libcrypto, buf
libcrypto = util.find_library(('crypto', 'eay32'),
'EVP_get_cipherbyname',
'libcrypto')
if libcrypto is None:
raise Exception('libcrypto(OpenSSL) not found')
libcrypto.EVP_get_cipherbyname.restype = c_void_p
libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
c_char_p, c_char_p, c_int)
libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int)
libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
libcrypto.OpenSSL_add_all_ciphers()
buf = create_string_buffer(buf_size)
loaded = True
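# Editorial note (not part of the original module): load_cipher() below builds the
# OpenSSL EVP constructor name from the cipher string by prefixing 'EVP_' and
# replacing '-' with '_', e.g. 'aes-128-cfb' -> libcrypto.EVP_aes_128_cfb. It is only
# used as a fallback when EVP_get_cipherbyname() cannot resolve the name.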
def load_cipher(cipher_name):
func_name = 'EVP_' + cipher_name.replace('-', '_')
if bytes != str:
func_name = str(func_name, 'utf-8')
cipher = getattr(libcrypto, func_name, None)
if cipher:
cipher.restype = c_void_p
return cipher()
return None
class OpenSSLCrypto(object):
def __init__(self, cipher_name, key, iv, op):
self._ctx = None
if not loaded:
load_openssl()
cipher_name = common.to_bytes(cipher_name)
cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
if not cipher:
cipher = load_cipher(cipher_name)
if not cipher:
raise Exception('cipher %s not found in libcrypto' % cipher_name)
key_ptr = c_char_p(key)
iv_ptr = c_char_p(iv)
self._ctx = libcrypto.EVP_CIPHER_CTX_new()
if not self._ctx:
raise Exception('can not create cipher context')
r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
key_ptr, iv_ptr, c_int(op))
if not r:
self.clean()
raise Exception('can not initialize cipher context')
def update(self, data):
global buf_size, buf
cipher_out_len = c_long(0)
l = len(data)
if buf_size < l:
buf_size = l * 2
buf = create_string_buffer(buf_size)
libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
byref(cipher_out_len), c_char_p(data), l)
# buf is copied to a str object when we access buf.raw
return buf.raw[:cipher_out_len.value]
def __del__(self):
self.clean()
def clean(self):
if self._ctx:
libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
libcrypto.EVP_CIPHER_CTX_free(self._ctx)
ciphers = {
'aes-128-cfb': (16, 16, OpenSSLCrypto),
'aes-192-cfb': (24, 16, OpenSSLCrypto),
'aes-256-cfb': (32, 16, OpenSSLCrypto),
'aes-128-ofb': (16, 16, OpenSSLCrypto),
'aes-192-ofb': (24, 16, OpenSSLCrypto),
'aes-256-ofb': (32, 16, OpenSSLCrypto),
'aes-128-ctr': (16, 16, OpenSSLCrypto),
'aes-192-ctr': (24, 16, OpenSSLCrypto),
'aes-256-ctr': (32, 16, OpenSSLCrypto),
'aes-128-cfb8': (16, 16, OpenSSLCrypto),
'aes-192-cfb8': (24, 16, OpenSSLCrypto),
'aes-256-cfb8': (32, 16, OpenSSLCrypto),
'aes-128-cfb1': (16, 16, OpenSSLCrypto),
'aes-192-cfb1': (24, 16, OpenSSLCrypto),
'aes-256-cfb1': (32, 16, OpenSSLCrypto),
'bf-cfb': (16, 8, OpenSSLCrypto),
'camellia-128-cfb': (16, 16, OpenSSLCrypto),
'camellia-192-cfb': (24, 16, OpenSSLCrypto),
'camellia-256-cfb': (32, 16, OpenSSLCrypto),
'cast5-cfb': (16, 8, OpenSSLCrypto),
'des-cfb': (8, 8, OpenSSLCrypto),
'idea-cfb': (16, 8, OpenSSLCrypto),
'rc2-cfb': (16, 8, OpenSSLCrypto),
'rc4': (16, 0, OpenSSLCrypto),
'seed-cfb': (16, 16, OpenSSLCrypto),
}
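# Editorial sketch (not part of the original module): each `ciphers` entry maps a
# cipher name to (key_length, iv_length, implementation_class). A hedged usage
# example, mirroring run_method() below with a 32-byte key and 16-byte IV:
#
#     cipher = OpenSSLCrypto('aes-256-cfb', b'k' * 32, b'i' * 16, 1)    # op=1: encrypt
#     decipher = OpenSSLCrypto('aes-256-cfb', b'k' * 32, b'i' * 16, 0)  # op=0: decrypt
#     assert decipher.update(cipher.update(b'hello')) == b'hello'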
def run_method(method):
cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
run_method('aes-128-cfb')
def test_aes_256_cfb():
run_method('aes-256-cfb')
def test_aes_128_cfb8():
run_method('aes-128-cfb8')
def test_aes_256_ofb():
run_method('aes-256-ofb')
def test_aes_256_ctr():
run_method('aes-256-ctr')
def test_bf_cfb():
run_method('bf-cfb')
def test_rc4():
run_method('rc4')
if __name__ == '__main__':
test_aes_128_cfb()
| apache-2.0 |
elijah513/django | tests/check_framework/test_security.py | 242 | 17428 | from django.conf import settings
from django.core.checks.security import base, csrf, sessions
from django.test import SimpleTestCase
from django.test.utils import override_settings
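# Editorial note (not part of the original test module): each check function under
# django.core.checks.security returns a list of warnings; an empty list means the
# configuration passed the check, which is what the assertEqual(self.func(None), [])
# assertions below verify.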
class CheckSessionCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_secure
return check_session_cookie_secure
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=[])
def test_session_cookie_secure_with_installed_app(self):
"""
Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is
in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_with_middleware(self):
"""
Warn if SESSION_COOKIE_SECURE is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [sessions.W011])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_both(self):
"""
If SESSION_COOKIE_SECURE is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_true(self):
"""
If SESSION_COOKIE_SECURE is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckSessionCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_httponly
return check_session_cookie_httponly
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=[])
def test_session_cookie_httponly_with_installed_app(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions"
is in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W013])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=[],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_with_middleware(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [sessions.W014])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_both(self):
"""
If SESSION_COOKIE_HTTPONLY is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W015])
@override_settings(
SESSION_COOKIE_HTTPONLY=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_true(self):
"""
If SESSION_COOKIE_HTTPONLY is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckCSRFMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_middleware
return check_csrf_middleware
@override_settings(MIDDLEWARE_CLASSES=[])
def test_no_csrf_middleware(self):
"""
Warn if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [csrf.W003])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"])
def test_with_csrf_middleware(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_secure
return check_csrf_cookie_secure
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE_CLASSES but
CSRF_COOKIE_SECURE isn't True.
"""
self.assertEqual(self.func(None), [csrf.W016])
@override_settings(MIDDLEWARE_CLASSES=[], CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES, even if
CSRF_COOKIE_SECURE is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=True)
def test_with_csrf_cookie_secure_true(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_httponly
return check_csrf_cookie_httponly
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE_CLASSES but
CSRF_COOKIE_HTTPONLY isn't True.
"""
self.assertEqual(self.func(None), [csrf.W017])
@override_settings(MIDDLEWARE_CLASSES=[], CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES, even if
CSRF_COOKIE_HTTPONLY is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=True)
def test_with_csrf_cookie_httponly_true(self):
self.assertEqual(self.func(None), [])
class CheckSecurityMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_security_middleware
return check_security_middleware
@override_settings(MIDDLEWARE_CLASSES=[])
def test_no_security_middleware(self):
"""
Warn if SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [base.W001])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"])
def test_with_security_middleware(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecurityTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts
return check_sts
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=0)
def test_no_sts(self):
"""
Warn if SECURE_HSTS_SECONDS isn't > 0.
"""
self.assertEqual(self.func(None), [base.W004])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_HSTS_SECONDS=0)
    def test_no_sts_no_middleware(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=3600)
def test_with_sts(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecuritySubdomainsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts_include_subdomains
return check_sts_include_subdomains
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains(self):
"""
Warn if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True.
"""
self.assertEqual(self.func(None), [base.W005])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
    def test_no_sts_subdomains_no_middleware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None)
def test_no_sts_subdomains_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
SECURE_HSTS_SECONDS=3600)
def test_with_sts_subdomains(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_options_middleware
return check_xframe_options_middleware
@override_settings(MIDDLEWARE_CLASSES=[])
def test_middleware_not_installed(self):
"""
Warn if XFrameOptionsMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [base.W002])
@override_settings(MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"])
def test_middleware_installed(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsDenyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_deny
return check_xframe_deny
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='SAMEORIGIN',
)
def test_x_frame_options_not_deny(self):
"""
Warn if XFrameOptionsMiddleware is in MIDDLEWARE_CLASSES but
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [base.W019])
@override_settings(MIDDLEWARE_CLASSES=[], X_FRAME_OPTIONS='SAMEORIGIN')
def test_middleware_not_installed(self):
"""
No error if XFrameOptionsMiddleware isn't in MIDDLEWARE_CLASSES even if
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='DENY',
)
def test_xframe_deny(self):
self.assertEqual(self.func(None), [])
class CheckContentTypeNosniffTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_content_type_nosniff
return check_content_type_nosniff
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff(self):
"""
Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True.
"""
self.assertEqual(self.func(None), [base.W006])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff_no_middleware(self):
"""
Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and
SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_with_content_type_nosniff(self):
self.assertEqual(self.func(None), [])
class CheckXssFilterTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xss_filter
return check_xss_filter
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter(self):
"""
Warn if SECURE_BROWSER_XSS_FILTER isn't True.
"""
self.assertEqual(self.func(None), [base.W007])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter_no_middleware(self):
"""
Don't warn if SECURE_BROWSER_XSS_FILTER isn't True and
SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=True)
def test_with_xss_filter(self):
self.assertEqual(self.func(None), [])
class CheckSSLRedirectTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_ssl_redirect
return check_ssl_redirect
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect(self):
"""
Warn if SECURE_SSL_REDIRECT isn't True.
"""
self.assertEqual(self.func(None), [base.W008])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_SSL_REDIRECT=False)
    def test_no_ssl_redirect_no_middleware(self):
"""
Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=True)
def test_with_ssl_redirect(self):
self.assertEqual(self.func(None), [])
class CheckSecretKeyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_secret_key
return check_secret_key
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'ab')
def test_okay_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertGreater(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(self.func(None), [])
@override_settings(SECRET_KEY='')
def test_empty_secret_key(self):
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_missing_secret_key(self):
del settings.SECRET_KEY
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_none_secret_key(self):
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'a')
def test_low_length_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH - 1)
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY='abcd' * 20)
def test_low_entropy_secret_key(self):
self.assertGreater(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertLess(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(self.func(None), [base.W009])
class CheckDebugTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_debug
return check_debug
@override_settings(DEBUG=True)
def test_debug_true(self):
"""
Warn if DEBUG is True.
"""
self.assertEqual(self.func(None), [base.W018])
@override_settings(DEBUG=False)
def test_debug_false(self):
self.assertEqual(self.func(None), [])
class CheckAllowedHostsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_allowed_hosts
return check_allowed_hosts
@override_settings(ALLOWED_HOSTS=[])
def test_allowed_hosts_empty(self):
self.assertEqual(self.func(None), [base.W020])
@override_settings(ALLOWED_HOSTS=['.example.com', ])
def test_allowed_hosts_set(self):
self.assertEqual(self.func(None), [])
| bsd-3-clause |
hawkrives/project-e.a.s.t. | syllogizmos/truthfinder.py | 1 | 1254 | """
truthfinder.py - given a statement and a list of truths, determine whether or
not the statement is confirmed (always true), plausible
(sometimes true), or busted (never true). Thanks Mythbusters.
"""
from .constants import CONFIRMED, BUSTED, PLAUSIBLE
from .utils import invert
def searchForTruthInner(subject, object, truths, contradictions):
'''
The truth-finding function that will recursively determine whether or not
a statement is true.
'''
if subject == object:
return True
if subject in truths:
for word in truths[subject]:
if searchForTruthInner(word, object, truths, contradictions):
return True
elif searchForTruthInner('%s %s' % (word, subject), object, truths, contradictions):
return True
return False
def searchForTruth(subject, object, truths, contradictions):
'''
    Classify a statement as CONFIRMED, BUSTED, or PLAUSIBLE by recursively
    searching the known truths for the statement and for its inverted form.
'''
if searchForTruthInner(subject, object, truths, contradictions):
return CONFIRMED
elif searchForTruthInner(subject, invert(object), truths, contradictions):
return BUSTED
return PLAUSIBLE
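# Illustrative sketch (added for clarity, not part of the original module). The shape
# of `truths` is assumed from searchForTruthInner's lookups: each subject maps to a
# list of things it is known to be, e.g.:
#
#     truths = {'socrates': ['man'], 'man': ['mortal']}
#     searchForTruth('socrates', 'mortal', truths, {})   # -> CONFIRMED
#
# A statement whose inverted form (see utils.invert) can be derived instead returns
# BUSTED, and anything that can be proven neither way returns PLAUSIBLE.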
| mit |
zycdragonball/tensorflow | tensorflow/python/framework/subscribe_test.py | 46 | 9259 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
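# Editorial note (not part of the original test module): as exercised below,
# subscribe.subscribe(tensor, side_effects) returns an identity-like tensor wired so
# that the given side-effect ops (here py_func callbacks) run whenever the subscribed
# value is evaluated; subscribing the same tensor again extends the existing
# subscription identity op rather than creating a new one.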
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
def testSideEffect(self):
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
with ops.control_dependencies([c]):
d = constant_op.constant(42)
n = math_ops.negative(c)
shared = []
def sub(t):
shared.append(t)
return t
c = subscribe.subscribe(c,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.test_session() as sess:
c_out = sess.run([c])
n_out = sess.run([n])
d_out = sess.run([d])
self.assertEquals(n_out, [-2])
self.assertEquals(c_out, [2])
self.assertEquals(d_out, [42])
self.assertEquals(shared, [2, 2, 2])
def testSupportedTypes(self):
"""Confirm that supported types are correctly detected and handled."""
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
def sub(t):
return t
# Tuples.
subscribed = subscribe.subscribe(
(a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, tuple)
self._ExpectSubscribedIdentities(subscribed)
# Lists.
subscribed = subscribe.subscribe(
[a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, list)
self._ExpectSubscribedIdentities(subscribed)
# Dictionaries.
subscribed = subscribe.subscribe({
'first': a,
'second': b
}, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, dict)
self._ExpectSubscribedIdentities(subscribed.values())
# Namedtuples.
# pylint: disable=invalid-name
TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
# pylint: enable=invalid-name
pair = TensorPair(a, b)
subscribed = subscribe.subscribe(
pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, TensorPair)
self._ExpectSubscribedIdentities(subscribed)
# Expect an exception to be raised for unsupported types.
with self.assertRaisesRegexp(TypeError, 'has invalid type'):
subscribe.subscribe(c.name,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
def testCaching(self):
"""Confirm caching of control output is recalculated between calls."""
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.control_dependencies([a]):
c = constant_op.constant(42)
shared = {}
def sub(t):
shared[t] = shared.get(t, 0) + 1
return t
a = subscribe.subscribe(a,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with ops.control_dependencies([b]):
d = constant_op.constant(11)
# If it was using outdated cached control_outputs then
# evaling would not trigger the new subscription.
b = subscribe.subscribe(b,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.test_session() as sess:
c_out = sess.run([c])
d_out = sess.run([d])
self.assertEquals(c_out, [42])
self.assertEquals(d_out, [11])
self.assertEquals(shared, {2: 1, 1: 1})
def testIsSubscribedIdentity(self):
"""Confirm subscribed identity ops are correctly detected."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
idop = array_ops.identity(c)
c_sub = subscribe.subscribe(c, [])
self.assertFalse(subscribe._is_subscribed_identity(a))
self.assertFalse(subscribe._is_subscribed_identity(c))
self.assertFalse(subscribe._is_subscribed_identity(idop))
self.assertTrue(subscribe._is_subscribed_identity(c_sub))
def testSubscribeExtend(self):
"""Confirm side effect are correctly added for different input types."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
shared = {}
def sub(t, name):
shared[name] = shared.get(name, 0) + 1
return t
# Subscribe with a first side effect graph, passing an unsubscribed tensor.
sub_graph1 = lambda t: sub(t, 'graph1')
c_sub = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))
# Add a second side effect graph, passing the tensor returned by the
# previous call to subscribe().
sub_graph2 = lambda t: sub(t, 'graph2')
c_sub2 = subscribe.subscribe(
c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))
# Add a third side effect graph, passing the original tensor.
sub_graph3 = lambda t: sub(t, 'graph3')
c_sub3 = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))
# Make sure there's only one identity op matching the source tensor's name.
graph_ops = ops.get_default_graph().get_operations()
name_prefix = c.op.name + '/subscription/Identity'
identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
self.assertEquals(1, len(identity_ops))
# Expect the objects returned by subscribe() to reference the same tensor.
self.assertIs(c_sub, c_sub2)
self.assertIs(c_sub, c_sub3)
# Expect the three side effect graphs to have been evaluated.
with self.test_session() as sess:
sess.run([c_sub])
self.assertIn('graph1', shared)
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.Variable(0.0)
v2 = variables.Variable(4.0)
add = math_ops.add(v1, v2)
assign_v1 = v1.assign(3.0)
shared = []
def sub(t):
shared.append(t)
return t
v1_sub = subscribe.subscribe(
v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertTrue(subscribe._is_subscribed_identity(v1_sub))
with self.test_session() as sess:
# Initialize the variables first.
sess.run([v1.initializer])
sess.run([v2.initializer])
# Expect the side effects to be triggered when evaluating the add op as
# it will read the value of the variable.
sess.run([add])
self.assertEquals(1, len(shared))
# Expect the side effect not to be triggered when evaluating the assign
# op as it will not access the 'read' output of the variable.
sess.run([assign_v1])
self.assertEquals(1, len(shared))
sess.run([add])
self.assertEquals(2, len(shared))
# Make sure the values read from the variable match the expected ones.
self.assertEquals([0.0, 3.0], shared)
def testResourceType(self):
"""Confirm that subscribe correctly handles tensors with 'resource' type."""
tensor_array = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name='test',
size=3,
infer_shape=False)
writer = tensor_array.write(0, [[4.0, 5.0]])
reader = writer.read(0)
shared = []
def sub(t):
shared.append(t)
return t
# TensorArray's handle output tensor has a 'resource' type and cannot be
# subscribed as it's not 'numpy compatible' (see dtypes.py).
# Expect that the original tensor is returned when subscribing to it.
tensor_array_sub = subscribe.subscribe(
tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIs(tensor_array_sub, tensor_array.handle)
self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))
with self.test_session() as sess:
sess.run([reader])
self.assertEquals(0, len(shared))
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
miconof/CouchPotatoServer | libs/tornado/platform/posix.py | 352 | 1859 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Posix implementations of platform-specific functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import fcntl
import os
from tornado.platform import interface
def set_close_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
class Waker(interface.Waker):
def __init__(self):
r, w = os.pipe()
_set_nonblocking(r)
_set_nonblocking(w)
set_close_exec(r)
set_close_exec(w)
self.reader = os.fdopen(r, "rb", 0)
self.writer = os.fdopen(w, "wb", 0)
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.write(b"x")
except IOError:
pass
def consume(self):
try:
while True:
result = self.reader.read()
if not result:
break
except IOError:
pass
def close(self):
self.reader.close()
self.writer.close()
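# Editorial sketch (not part of the original module): Waker implements the usual
# self-pipe pattern. An event loop selects on waker.fileno(); wake() writes a byte to
# interrupt the select, and the loop then calls consume() to drain the pipe. A hedged
# example (select imported separately, shown for illustration only):
#
#     waker = Waker()
#     select.select([waker.fileno()], [], [])   # unblocks once waker.wake() is called
#     waker.consume()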
| gpl-3.0 |
jordotech/sherri_satchmo | satchmo/apps/payment/modules/cybersource/processor.py | 5 | 10404 | from django.template import Context, loader
from payment.modules.base import BasePaymentProcessor, ProcessorResult
from satchmo_utils.numbers import trunc_decimal
from django.utils.translation import ugettext_lazy as _
import urllib2
try:
from xml.etree.ElementTree import fromstring
except ImportError:
from elementtree.ElementTree import fromstring
# Response codes available at:
# http://apps.cybersource.com/library/documentation/sbc/api_guide/SB_API.pdf
CYBERSOURCE_RESPONSES = {
'100' : 'Successful transaction.',
'101' : 'The request is missing one or more required fields.',
'102' : 'One or more fields in the request contains invalid data.',
'104' : 'The merchantReferenceCode sent with this authorization request matches the merchantReferenceCode of another authorization request that you sent in the last 15 minutes.',
'150' : 'Error: General system failure. ',
'151' : 'Error: The request was received but there was a server timeout. This error does not include timeouts between the client and the server.',
'152' : 'Error: The request was received, but a service did not finish running in time.',
'201' : 'The issuing bank has questions about the request. You do not receive an authorization code in the reply message, but you might receive one verbally by calling the processor.',
'202' : 'Expired card. You might also receive this if the expiration date you provided does not match the date the issuing bank has on file.',
'203' : 'General decline of the card. No other information provided by the issuing bank.',
'204' : 'Insufficient funds in the account.',
'205' : 'Stolen or lost card.',
'207' : 'Issuing bank unavailable.',
'208' : 'Inactive card or card not authorized for card-not-present transactions.',
'210' : 'The card has reached the credit limit. ',
'211' : 'Invalid card verification number.',
'221' : 'The customer matched an entry on the processor\'s negative file.',
'231' : 'Invalid account number.',
'232' : 'The card type is not accepted by the payment processor.',
'233' : 'General decline by the processor.',
'234' : 'There is a problem with your CyberSource merchant configuration.',
'235' : 'The requested amount exceeds the originally authorized amount. Occurs, for example, if you try to capture an amount larger than the original authorization amount. This reason code only applies if you are processing a capture through the API.',
'236' : 'Processor Failure',
'238' : 'The authorization has already been captured. This reason code only applies if you are processing a capture through the API.',
'239' : 'The requested transaction amount must match the previous transaction amount. This reason code only applies if you are processing a capture or credit through the API.',
'240' : 'The card type sent is invalid or does not correlate with the credit card number.',
'241' : 'The request ID is invalid. This reason code only applies when you are processing a capture or credit through the API.',
'242' : 'You requested a capture through the API, but there is no corresponding, unused authorization record. Occurs if there was not a previously successful authorization request or if the previously successful authorization has already been used by another capture request. This reason code only applies when you are processing a capture through the API.',
'243' : 'The transaction has already been settled or reversed.',
'246' : 'The capture or credit is not voidable because the capture or credit information has already been submitted to your processor. Or, you requested a void for a type of transaction that cannot be voided. This reason code applies only if you are processing a void through the API.',
'247' : 'You requested a credit for a capture that was previously voided. This reason code applies only if you are processing a void through the API.',
'250' : 'Error: The request was received, but there was a timeout at the payment processor.',
'520' : 'The authorization request was approved by the issuing bank but declined by CyberSource based on your Smart Authorization settings.',
}
class PaymentProcessor(BasePaymentProcessor):
"""
Cybersource payment processing module
You must have an account with Cybersource in order to use this module
"""
def __init__(self, settings):
super(PaymentProcessor, self).__init__('cybersource', settings)
self.contents = ''
if settings.LIVE.value:
self.testflag = 'FALSE'
self.connection = settings.CONNECTION.value
else:
self.testflag = 'TRUE'
self.connection = settings.CONNECTION_TEST.value
self.configuration = {
'merchantID' : settings.MERCHANT_ID.value,
'password' : settings.TRANKEY.value,
}
def prepare_content(self, order, amount):
self.bill_to = {
'firstName' : order.bill_first_name,
'lastName' : order.bill_last_name,
'street1': order.full_bill_street,
'city': order.bill_city,
'state' : order.bill_state,
'postalCode' : order.bill_postal_code,
'country': order.bill_country,
'email' : order.contact.email,
'phoneNumber' : order.contact.primary_phone,
# Can add additional info here if you want to but it's not required
}
exp = order.credit_card.expirationDate.split('/')
self.card = {
'accountNumber' : order.credit_card.decryptedCC,
'expirationMonth' : exp[0],
'expirationYear' : exp[1],
'cvNumber' : order.credit_card.ccv
}
currency = self.settings.CURRENCY_CODE.value
currency = currency.replace("_", "")
self.purchase_totals = {
'currency' : currency,
'grandTotalAmount' : trunc_decimal(amount, 2),
}
def capture_payment(self, testing=False, order=None, amount=None):
"""
Creates and sends XML representation of transaction to Cybersource
"""
if not order:
order = self.order
if order.paid_in_full:
self.log_extra('%s is paid in full, no capture attempted.', order)
self.record_payment()
return ProcessorResult(self.key, True, _("No charge needed, paid in full."))
self.log_extra('Capturing payment for %s', order)
if amount is None:
amount = order.balance
self.prepare_content(order, amount)
invoice = "%s" % order.id
failct = order.paymentfailures.count()
if failct > 0:
invoice = "%s_%i" % (invoice, failct)
        # The XML format is very simple, so using ElementTree for generation would be overkill
t = loader.get_template('shop/checkout/cybersource/request.xml')
c = Context({
'config' : self.configuration,
'merchantReferenceCode' : invoice,
'billTo' : self.bill_to,
'purchaseTotals' : self.purchase_totals,
'card' : self.card,
})
request = t.render(c)
conn = urllib2.Request(url=self.connection, data=request)
try:
f = urllib2.urlopen(conn)
except urllib2.HTTPError, e:
# we probably didn't authenticate properly
# make sure the 'v' in your account number is lowercase
return ProcessorResult(self.key, False, 'Problem parsing results')
all_results = f.read()
tree = fromstring(all_results)
parsed_results = tree.getiterator('{urn:schemas-cybersource-com:transaction-data-1.26}reasonCode')
try:
reason_code = parsed_results[0].text
        except IndexError:
return ProcessorResult(self.key, False, 'Problem parsing results')
response_text = CYBERSOURCE_RESPONSES.get(reason_code, 'Unknown Failure')
if reason_code == '100':
payment = self.record_payment(order=order, amount=amount,
transaction_id="", reason_code=reason_code)
return ProcessorResult(self.key, True, response_text, payment=payment)
else:
payment = self.record_failure(order=order, amount=amount,
transaction_id="", reason_code=reason_code,
details=response_text)
return ProcessorResult(self.key, False, response_text)
if __name__ == "__main__":
"""
For testing purposes only.
Allows this module to be run as a script to test the connection
"""
import os
from livesettings import config_get_group
# Set up some dummy classes to mimic classes being passed through Satchmo
class testContact(object):
pass
class testCC(object):
pass
class testOrder(object):
def __init__(self):
self.contact = testContact()
self.credit_card = testCC()
def order_success(self):
pass
if not os.environ.has_key("DJANGO_SETTINGS_MODULE"):
os.environ["DJANGO_SETTINGS_MODULE"]="satchmo_store.settings"
settings_module = os.environ['DJANGO_SETTINGS_MODULE']
settingsl = settings_module.split('.')
settings = __import__(settings_module, {}, {}, settingsl[-1])
sampleOrder = testOrder()
sampleOrder.id = '1234'
sampleOrder.contact.first_name = 'Chris'
sampleOrder.contact.last_name = 'Smith'
sampleOrder.contact.primary_phone = '801-555-9242'
sampleOrder.contact.email = '[email protected]'
sampleOrder.full_bill_street = '123 Main Street'
sampleOrder.bill_postal_code = '12345'
sampleOrder.bill_state = 'TN'
sampleOrder.bill_city = 'Some City'
sampleOrder.bill_country = 'US'
sampleOrder.total = "27.00"
sampleOrder.balance = "27.00"
sampleOrder.credit_card.decryptedCC = '6011000000000012'
sampleOrder.credit_card.expirationDate = "10/09"
sampleOrder.credit_card.ccv = "144"
cybersource_settings = config_get_group('PAYMENT_CYBERSOURCE')
if cybersource_settings.LIVE.value:
print "Warning. You are submitting a live order. CYBERSOURCE system is set LIVE."
processor = PaymentProcessor(cybersource_settings)
processor.prepare_data(sampleOrder)
results = processor.process(testing=True)
print results
| bsd-3-clause |
flavioarchilli/root-web-monitoring | webmonitor/FlaskWithJobResolvers.py | 2 | 2887 | from flask import Flask
class ExistingJobResolverError(Exception):
pass
class FlaskWithJobResolvers(Flask):
"""A Flask app that manages the resolution of task names in to job names.
A task name is that received by the Jobs API from the client.
The workers need to know where this job actually is though, i.e. what
method it corresponds to and what module that method lives in.
The task of a *job resolver* is to take a task name, as a string, and
return a full, dotted import-like path to the method.
For example, the job resolver below just adds a module name to the task:
        def module_resolver(task_name):
            return 'foo.{0}'.format(task_name)
Given a task name of 'bar', this resolver returns 'foo.bar', and the worker
process calls the method 'bar' inside the module 'foo'.
If a job resolver can't resolve a task name to a method, it must return
None.
This application class manages job resolvers by holding references to them
as a list of methods.
For example, to add the above module_resolver, do
app.add_job_resolver(module_resolver)
You cannot add the same resolver twice.
To remove it, do
app.remove_job_resolver(module_resolver)
    The application resolves a task name into a 'module.method'-type string
by interrogating all job resolvers it has
app.resolve_job_name('bar')
This would return 'foo.bar', if the module_resolver was added to the app.
Resolvers are interrogated in the order they were added, i.e. the most
recently added resolver will be called last, the least recent first.
"""
def __init__(self, *args, **kwargs):
super(FlaskWithJobResolvers, self).__init__(*args, **kwargs)
self._job_resolvers = []
def job_resolvers(self):
return self._job_resolvers
def add_job_resolver(self, job_resolver):
for r in self.job_resolvers():
if job_resolver == r:
raise ExistingJobResolverError
self._job_resolvers.append(job_resolver)
def remove_job_resolver(self, job_resolver):
"""Remove job_resolver from the list of job resolvers.
Keyword arguments:
job_resolver -- Function reference of the job resolver to be removed.
"""
for i, r in enumerate(self.job_resolvers()):
if job_resolver == r:
del self._job_resolvers[i]
def resolve_job(self, name):
"""Attempt to resolve the task name in to a job name.
If no job resolver can resolve the task, i.e. they all return None,
return None.
Keyword arguments:
name -- Name of the task to be resolved.
"""
for r in self.job_resolvers():
resolved_name = r(name)
if resolved_name is not None:
return resolved_name
return None
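if __name__ == '__main__':
    # Minimal usage sketch; the module name 'foo' and the task name 'bar' are
    # illustrative placeholders, not names defined anywhere in webmonitor.
    app = FlaskWithJobResolvers(__name__)
    def module_resolver(task_name):
        return 'foo.{0}'.format(task_name)
    app.add_job_resolver(module_resolver)
    assert app.resolve_job('bar') == 'foo.bar'
    app.remove_job_resolver(module_resolver)
    assert app.resolve_job('bar') is None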
| mit |
C00kiie/Youtube-Mp3-telegram-bot | youtube_dl/extractor/buzzfeed.py | 47 | 3655 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from .facebook import FacebookIE
class BuzzFeedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P<id>[^?#]+)'
_TESTS = [{
'url': 'http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia',
'info_dict': {
'id': 'this-angry-ram-destroys-a-punching-bag-like-a-boss',
'title': 'This Angry Ram Destroys A Punching Bag Like A Boss',
'description': 'Rambro!',
},
'playlist': [{
'info_dict': {
'id': 'aVCR29aE_OQ',
'ext': 'mp4',
'title': 'Angry Ram destroys a punching bag..',
'description': 'md5:c59533190ef23fd4458a5e8c8c872345',
'upload_date': '20141024',
'uploader_id': 'Buddhanz1',
'uploader': 'Angry Ram',
}
}]
}, {
'url': 'http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia',
'params': {
'skip_download': True, # Got enough YouTube download tests
},
'info_dict': {
'id': 'look-at-this-cute-dog-omg',
'description': 're:Munchkin the Teddy Bear is back ?!',
'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill',
},
'playlist': [{
'info_dict': {
'id': 'mVmBL8B-In0',
'ext': 'mp4',
'title': 're:Munchkin the Teddy Bear gets her exercise',
'description': 'md5:28faab95cda6e361bcff06ec12fc21d8',
'upload_date': '20141124',
'uploader_id': 'CindysMunchkin',
'uploader': 're:^Munchkin the',
},
}]
}, {
'url': 'http://www.buzzfeed.com/craigsilverman/the-most-adorable-crash-landing-ever#.eq7pX0BAmK',
'info_dict': {
'id': 'the-most-adorable-crash-landing-ever',
'title': 'Watch This Baby Goose Make The Most Adorable Crash Landing',
'description': 'This gosling knows how to stick a landing.',
},
'playlist': [{
'md5': '763ca415512f91ca62e4621086900a23',
'info_dict': {
'id': '971793786185728',
'ext': 'mp4',
'title': 'We set up crash pads so that the goslings on our roof would have a safe landi...',
'uploader': 'Calgary Outdoor Centre-University of Calgary',
},
}],
'add_ie': ['Facebook'],
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
all_buckets = re.findall(
r'(?s)<div class="video-embed[^"]*"..*?rel:bf_bucket_data=\'([^\']+)\'',
webpage)
entries = []
for bd_json in all_buckets:
bd = json.loads(bd_json)
video = bd.get('video') or bd.get('progload_video')
if not video:
continue
entries.append(self.url_result(video['url']))
facebook_urls = FacebookIE._extract_urls(webpage)
entries.extend([
self.url_result(facebook_url)
for facebook_url in facebook_urls])
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'entries': entries,
}
| mit |
yamila-moreno/django | tests/generic_relations/tests.py | 62 | 26688 | from __future__ import unicode_literals
from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db.models import Q
from django.test import SimpleTestCase, TestCase
from django.utils import six
from .models import (
AllowsNullGFK, Animal, Comparison, ConcreteRelatedModel,
ForConcreteModelModel, ForProxyModelModel, Gecko, ManualPK, Mineral,
ProxyRelatedModel, Rock, TaggedItem, ValuableTaggedItem, Vegetable,
)
class GenericRelationsTests(TestCase):
def setUp(self):
self.lion = Animal.objects.create(
common_name="Lion", latin_name="Panthera leo")
self.platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus")
Vegetable.objects.create(name="Eggplant", is_yucky=True)
self.bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
self.quartz = Mineral.objects.create(name="Quartz", hardness=7)
# Tagging stuff.
self.bacon.tags.create(tag="fatty")
self.bacon.tags.create(tag="salty")
self.lion.tags.create(tag="yellow")
self.lion.tags.create(tag="hairy")
# Original list of tags:
self.comp_func = lambda obj: (
obj.tag, obj.content_type.model_class(), obj.object_id
)
def test_generic_update_or_create_when_created(self):
"""
Should be able to use update_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.update_or_create(tag='stinky')
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_update_or_create_when_updated(self):
"""
Should be able to use update_or_create from the generic related manager
to update a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag='stinky')
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.update_or_create(defaults={'tag': 'juicy'}, id=tag.id)
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
self.assertEqual(tag.tag, 'juicy')
def test_generic_get_or_create_when_created(self):
"""
Should be able to use get_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.get_or_create(tag='stinky')
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_get_or_create_when_exists(self):
"""
Should be able to use get_or_create from the generic related manager
to get a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag="stinky")
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.get_or_create(id=tag.id, defaults={'tag': 'juicy'})
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
        # shouldn't have changed the tag
self.assertEqual(tag.tag, 'stinky')
def test_generic_relations_m2m_mimic(self):
"""
Objects with declared GenericRelations can be tagged directly -- the
API mimics the many-to-many API.
"""
self.assertQuerysetEqual(self.lion.tags.all(), [
"<TaggedItem: hairy>",
"<TaggedItem: yellow>"
])
self.assertQuerysetEqual(self.bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>"
])
def test_access_content_object(self):
"""
Test accessing the content object like a foreign key.
"""
tagged_item = TaggedItem.objects.get(tag="salty")
self.assertEqual(tagged_item.content_object, self.bacon)
def test_query_content_object(self):
qs = TaggedItem.objects.filter(
animal__isnull=False).order_by('animal__common_name', 'tag')
self.assertQuerysetEqual(
qs, ["<TaggedItem: hairy>", "<TaggedItem: yellow>"]
)
mpk = ManualPK.objects.create(id=1)
mpk.tags.create(tag='mpk')
qs = TaggedItem.objects.filter(
Q(animal__isnull=False) | Q(manualpk__id=1)).order_by('tag')
self.assertQuerysetEqual(
qs, ["hairy", "mpk", "yellow"], lambda x: x.tag)
def test_exclude_generic_relations(self):
"""
Test lookups over an object without GenericRelations.
"""
# Recall that the Mineral class doesn't have an explicit GenericRelation
# defined. That's OK, because you can create TaggedItems explicitly.
# However, excluding GenericRelations means your lookups have to be a
# bit more explicit.
TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
ctype = ContentType.objects.get_for_model(self.quartz)
q = TaggedItem.objects.filter(
content_type__pk=ctype.id, object_id=self.quartz.id
)
self.assertQuerysetEqual(q, [
"<TaggedItem: clearish>",
"<TaggedItem: shiny>"
])
def test_access_via_content_type(self):
"""
Test lookups through content type.
"""
self.lion.delete()
self.platypus.tags.create(tag="fatty")
ctype = ContentType.objects.get_for_model(self.platypus)
self.assertQuerysetEqual(
Animal.objects.filter(tags__content_type=ctype),
["<Animal: Platypus>"])
def test_set_foreign_key(self):
"""
You can set a generic foreign key in the way you'd expect.
"""
tag1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
tag1.content_object = self.platypus
tag1.save()
self.assertQuerysetEqual(
self.platypus.tags.all(),
["<TaggedItem: shiny>"])
def test_queries_across_generic_relations(self):
"""
Queries across generic relations respect the content types. Even though
there are two TaggedItems with a tag of "fatty", this query only pulls
out the one with the content type related to Animals.
"""
self.assertQuerysetEqual(Animal.objects.order_by('common_name'), [
"<Animal: Lion>",
"<Animal: Platypus>"
])
def test_queries_content_type_restriction(self):
"""
Create another fatty tagged instance with different PK to ensure there
is a content type restriction in the generated queries below.
"""
mpk = ManualPK.objects.create(id=self.lion.pk)
mpk.tags.create(tag="fatty")
self.platypus.tags.create(tag="fatty")
self.assertQuerysetEqual(
Animal.objects.filter(tags__tag='fatty'), ["<Animal: Platypus>"])
self.assertQuerysetEqual(
Animal.objects.exclude(tags__tag='fatty'), ["<Animal: Lion>"])
def test_object_deletion_with_generic_relation(self):
"""
If you delete an object with an explicit Generic relation, the related
objects are deleted when the source object is deleted.
"""
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('fatty', Vegetable, self.bacon.pk),
('hairy', Animal, self.lion.pk),
('salty', Vegetable, self.bacon.pk),
('yellow', Animal, self.lion.pk)
],
self.comp_func
)
self.lion.delete()
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('fatty', Vegetable, self.bacon.pk),
('salty', Vegetable, self.bacon.pk),
],
self.comp_func
)
def test_object_deletion_without_generic_relation(self):
"""
If Generic Relation is not explicitly defined, any related objects
remain after deletion of the source object.
"""
TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
quartz_pk = self.quartz.pk
self.quartz.delete()
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz_pk),
('fatty', Vegetable, self.bacon.pk),
('hairy', Animal, self.lion.pk),
('salty', Vegetable, self.bacon.pk),
('yellow', Animal, self.lion.pk),
],
self.comp_func
)
def test_tag_deletion_related_objects_unaffected(self):
"""
If you delete a tag, the objects using the tag are unaffected (other
than losing a tag).
"""
ctype = ContentType.objects.get_for_model(self.lion)
tag = TaggedItem.objects.get(
content_type__pk=ctype.id, object_id=self.lion.id, tag="hairy")
tag.delete()
self.assertQuerysetEqual(self.lion.tags.all(), ["<TaggedItem: yellow>"])
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('fatty', Vegetable, self.bacon.pk),
('salty', Vegetable, self.bacon.pk),
('yellow', Animal, self.lion.pk)
],
self.comp_func
)
def test_set(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
fatty = bacon.tags.create(tag="fatty")
salty = bacon.tags.create(tag="salty")
bacon.tags.set([fatty, salty])
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>",
])
bacon.tags.set([fatty])
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
])
bacon.tags.set([])
self.assertQuerysetEqual(bacon.tags.all(), [])
bacon.tags.set([fatty, salty], clear=True)
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>",
])
bacon.tags.set([fatty], clear=True)
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
])
bacon.tags.set([], clear=True)
self.assertQuerysetEqual(bacon.tags.all(), [])
def test_assign(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
fatty = bacon.tags.create(tag="fatty")
salty = bacon.tags.create(tag="salty")
bacon.tags = [fatty, salty]
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>",
])
bacon.tags = [fatty]
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
])
bacon.tags = []
self.assertQuerysetEqual(bacon.tags.all(), [])
def test_assign_with_queryset(self):
# Ensure that querysets used in reverse GFK assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ManyRelatedObjectsDescriptor.__set__. Refs #19816.
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
bacon.tags.create(tag="fatty")
bacon.tags.create(tag="salty")
self.assertEqual(2, bacon.tags.count())
qs = bacon.tags.filter(tag="fatty")
bacon.tags = qs
self.assertEqual(1, bacon.tags.count())
self.assertEqual(1, qs.count())
def test_generic_relation_related_name_default(self):
# Test that GenericRelation by default isn't usable from
# the reverse side.
with self.assertRaises(FieldError):
TaggedItem.objects.filter(vegetable__isnull=True)
def test_multiple_gfk(self):
# Simple tests for multiple GenericForeignKeys
# only uses one model, since the above tests should be sufficient.
tiger = Animal.objects.create(common_name="tiger")
cheetah = Animal.objects.create(common_name="cheetah")
bear = Animal.objects.create(common_name="bear")
# Create directly
Comparison.objects.create(
first_obj=cheetah, other_obj=tiger, comparative="faster"
)
Comparison.objects.create(
first_obj=tiger, other_obj=cheetah, comparative="cooler"
)
# Create using GenericRelation
tiger.comparisons.create(other_obj=bear, comparative="cooler")
tiger.comparisons.create(other_obj=cheetah, comparative="stronger")
self.assertQuerysetEqual(cheetah.comparisons.all(), [
"<Comparison: cheetah is faster than tiger>"
])
# Filtering works
self.assertQuerysetEqual(tiger.comparisons.filter(comparative="cooler"), [
"<Comparison: tiger is cooler than cheetah>",
"<Comparison: tiger is cooler than bear>",
], ordered=False)
# Filtering and deleting works
subjective = ["cooler"]
tiger.comparisons.filter(comparative__in=subjective).delete()
self.assertQuerysetEqual(Comparison.objects.all(), [
"<Comparison: cheetah is faster than tiger>",
"<Comparison: tiger is stronger than cheetah>"
], ordered=False)
# If we delete cheetah, Comparisons with cheetah as 'first_obj' will be
# deleted since Animal has an explicit GenericRelation to Comparison
# through first_obj. Comparisons with cheetah as 'other_obj' will not
# be deleted.
cheetah.delete()
self.assertQuerysetEqual(Comparison.objects.all(), [
"<Comparison: tiger is stronger than None>"
])
def test_gfk_subclasses(self):
# GenericForeignKey should work with subclasses (see #8309)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
valuedtag = ValuableTaggedItem.objects.create(
content_object=quartz, tag="shiny", value=10
)
self.assertEqual(valuedtag.content_object, quartz)
def test_generic_inline_formsets(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet()
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>""")
formset = GenericFormSet(instance=Animal())
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>""")
platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus"
)
platypus.tags.create(tag="shiny")
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet(instance=platypus)
tagged_item_id = TaggedItem.objects.get(
tag='shiny', object_id=platypus.id
).id
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p><p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id" id="id_generic_relations-taggeditem-content_type-object_id-1-id" /></p>""" % tagged_item_id)
lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
formset = GenericFormSet(instance=lion, prefix='x')
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_x-0-tag">Tag:</label> <input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50" /></p>
<p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE" /><input type="hidden" name="x-0-id" id="id_x-0-id" /></p>""")
def test_gfk_manager(self):
# GenericForeignKey should not use the default manager (which may filter objects) #16048
tailless = Gecko.objects.create(has_tail=False)
tag = TaggedItem.objects.create(content_object=tailless, tag="lizard")
self.assertEqual(tag.content_object, tailless)
def test_subclasses_with_gen_rel(self):
"""
Test that concrete model subclasses with generic relations work
correctly (ticket 11263).
"""
granite = Rock.objects.create(name='granite', hardness=5)
TaggedItem.objects.create(content_object=granite, tag="countertop")
self.assertEqual(Rock.objects.filter(tags__tag="countertop").count(), 1)
def test_generic_inline_formsets_initial(self):
"""
Test for #17927 Initial values support for BaseGenericInlineFormSet.
"""
quartz = Mineral.objects.create(name="Quartz", hardness=7)
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
ctype = ContentType.objects.get_for_model(quartz)
initial_data = [{
'tag': 'lizard',
'content_type': ctype.pk,
'object_id': quartz.pk,
}]
formset = GenericFormSet(initial=initial_data)
self.assertEqual(formset.forms[0].initial, initial_data[0])
def test_get_or_create(self):
# get_or_create should work with virtual fields (content_object)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
tag, created = TaggedItem.objects.get_or_create(tag="shiny",
defaults={'content_object': quartz})
self.assertTrue(created)
self.assertEqual(tag.tag, "shiny")
self.assertEqual(tag.content_object.id, quartz.id)
def test_update_or_create_defaults(self):
# update_or_create should work with virtual fields (content_object)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
diamond = Mineral.objects.create(name="Diamond", hardness=7)
tag, created = TaggedItem.objects.update_or_create(tag="shiny",
defaults={'content_object': quartz})
self.assertTrue(created)
self.assertEqual(tag.content_object.id, quartz.id)
tag, created = TaggedItem.objects.update_or_create(tag="shiny",
defaults={'content_object': diamond})
self.assertFalse(created)
self.assertEqual(tag.content_object.id, diamond.id)
def test_query_content_type(self):
msg = "Field 'content_object' does not generate an automatic reverse relation"
with self.assertRaisesMessage(FieldError, msg):
TaggedItem.objects.get(content_object='')
class CustomWidget(forms.TextInput):
pass
class TaggedItemForm(forms.ModelForm):
class Meta:
model = TaggedItem
fields = '__all__'
widgets = {'tag': CustomWidget}
class GenericInlineFormsetTest(TestCase):
def test_generic_inlineformset_factory(self):
"""
Regression for #14572: Using base forms with widgets
defined in Meta should not raise errors.
"""
Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
form = Formset().forms[0]
self.assertIsInstance(form['tag'].field.widget, CustomWidget)
def test_save_new_uses_form_save(self):
"""
Regression for #16260: save_new should call form.save()
"""
class SaveTestForm(forms.ModelForm):
def save(self, *args, **kwargs):
self.instance.saved_by = "custom method"
return super(SaveTestForm, self).save(*args, **kwargs)
Formset = generic_inlineformset_factory(
ForProxyModelModel, fields='__all__', form=SaveTestForm)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj = formset.save()[0]
self.assertEqual(new_obj.saved_by, "custom method")
def test_save_new_for_proxy(self):
Formset = generic_inlineformset_factory(ForProxyModelModel,
fields='__all__', for_concrete_model=False)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertEqual(new_obj.obj, instance)
def test_save_new_for_concrete(self):
Formset = generic_inlineformset_factory(ForProxyModelModel,
fields='__all__', for_concrete_model=True)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
class ProxyRelatedModelTest(TestCase):
def test_default_behavior(self):
"""
The default for for_concrete_model should be True
"""
base = ForConcreteModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
base = ForConcreteModelModel.objects.get(pk=base.pk)
rel = ConcreteRelatedModel.objects.get(pk=rel.pk)
self.assertEqual(base.obj, rel)
def test_works_normally(self):
"""
When for_concrete_model is False, we should still be able to get
an instance of the concrete class.
"""
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertEqual(base.obj, rel)
def test_proxy_is_returned(self):
"""
Instances of the proxy should be returned when
for_concrete_model is False.
"""
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertIsInstance(base.obj, ProxyRelatedModel)
def test_query(self):
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id))
def test_query_proxy(self):
base = ForProxyModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id))
def test_generic_relation(self):
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
rel = ProxyRelatedModel.objects.get(pk=base.obj.pk)
self.assertEqual(base, rel.bases.get())
def test_generic_relation_set(self):
base = ForProxyModelModel()
base.obj = ConcreteRelatedModel.objects.create()
base.save()
newrel = ConcreteRelatedModel.objects.create()
newrel.bases = [base]
newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk)
self.assertEqual(base, newrel.bases.get())
class TestInitWithNoneArgument(SimpleTestCase):
def test_none_not_allowed(self):
# TaggedItem requires a content_type, initializing with None should
# raise a ValueError.
with six.assertRaisesRegex(self, ValueError,
'Cannot assign None: "TaggedItem.content_type" does not allow null values'):
TaggedItem(content_object=None)
def test_none_allowed(self):
# AllowsNullGFK doesn't require a content_type, so None argument should
# also be allowed.
AllowsNullGFK(content_object=None)
| bsd-3-clause |
vileopratama/vitech | src/addons/lunch/tests/test_lunch.py | 47 | 3259 | # -*- coding: utf-8 -*-
from openerp.tests import common
class Test_Lunch(common.TransactionCase):
def setUp(self):
"""*****setUp*****"""
super(Test_Lunch, self).setUp()
self.demo_user = self.env['res.users'].search([('name', '=', 'Demo User')])
self.product_bolognese_ref = self.env['ir.model.data'].get_object_reference('lunch', 'product_Bolognese')
self.product_Bolognese_id = self.product_bolognese_ref and self.product_bolognese_ref[1] or False
self.new_id_order = self.env['lunch.order'].create({
'user_id': self.demo_user.id,
'order_line_ids': '[]',
})
self.new_id_order_line = self.env['lunch.order.line'].create({
'order_id': self.new_id_order.id,
'product_id': self.product_Bolognese_id,
'note': '+Emmental',
'cashmove': [],
'price': self.env['lunch.product'].browse(self.product_Bolognese_id).price,
})
def test_00_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered'. Check that there are no cashmove linked to that order line"""
self.order_one = self.new_id_order_line
        #we check that our order_line is a 'new' one and that there is no cashmove linked to that order_line:
self.assertEqual(self.order_one.state, 'new')
self.assertEqual(list(self.order_one.cashmove), [])
#we order that orderline so it's state will be 'ordered'
self.order_one.order()
self.order_one = self.new_id_order_line
        #we check that our order_line is an 'ordered' one and that there is no cashmove linked to that order_line:
self.assertEqual(self.order_one.state, 'ordered')
self.assertEqual(list(self.order_one.cashmove), [])
def test_01_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered' then to 'confirmed'. Check that there is a cashmove linked to the order line"""
self.test_00_lunch_order()
        #We receive the order so we confirm the order line so its state will be 'confirmed'
#A cashmove will be created and we will test that the cashmove amount equals the order line price
self.order_one.confirm()
self.order_one = self.new_id_order_line
        #we check that our order_line is a 'confirmed' one and that there is a cashmove linked to that order_line with an amount equal to the order line price:
self.assertEqual(self.order_one.state, 'confirmed')
self.assertTrue(self.order_one.cashmove)
self.assertTrue(self.order_one.cashmove[0].amount == -self.order_one.price)
def test_02_lunch_order(self):
"""Change the state of an order line from 'confirmed' to 'cancelled' and check that the cashmove linked to that order line will be deleted"""
self.test_01_lunch_order()
#We have a confirmed order with its associate cashmove
#We execute the cancel function
self.order_one.cancel()
self.order_one = self.new_id_order_line
#We check that the state is cancelled and that the cashmove has been deleted
self.assertEqual(self.order_one.state, 'cancelled')
self.assertFalse(self.order_one.cashmove)
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py | 328 | 3365 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from . import base
from ..constants import namespaces, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class Filter(base.Filter):
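    """Sanity-check filter: passes the token stream through unchanged while
    asserting that every token is well formed -- text-typed names and values,
    void elements emitted as EmptyTag and, when require_matching_tags is set,
    properly nested start/end tags.
    """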
def __init__(self, source, require_matching_tags=True):
super(Filter, self).__init__(source)
self.require_matching_tags = require_matching_tags
def __iter__(self):
open_elements = []
for token in base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
namespace = token["namespace"]
name = token["name"]
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
assert isinstance(token["data"], dict)
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
assert type == "EmptyTag"
else:
assert type == "StartTag"
if type == "StartTag" and self.require_matching_tags:
open_elements.append((namespace, name))
for (namespace, name), value in token["data"].items():
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
assert isinstance(value, text_type)
elif type == "EndTag":
namespace = token["namespace"]
name = token["name"]
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name}
elif self.require_matching_tags:
start = open_elements.pop()
assert start == (namespace, name)
elif type == "Comment":
data = token["data"]
assert isinstance(data, text_type)
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
assert isinstance(data, text_type)
assert data != ""
if type == "SpaceCharacters":
assert data.strip(spaceCharacters) == ""
elif type == "Doctype":
name = token["name"]
assert name is None or isinstance(name, text_type)
assert token["publicId"] is None or isinstance(name, text_type)
assert token["systemId"] is None or isinstance(name, text_type)
elif type == "Entity":
assert isinstance(token["name"], text_type)
elif type == "SerializerError":
assert isinstance(token["data"], text_type)
else:
assert False, "Unknown token type: %(type)s" % {"type": type}
yield token
| mit |
catinred2/obfsproxy | obfsproxy/transports/scramblesuit/message.py | 16 | 7118 | """
This module provides code to handle ScrambleSuit protocol messages.
The exported classes and functions provide interfaces to handle protocol
messages, check message headers for validity and create protocol messages out
of application data.
"""
import obfsproxy.common.log as logging
import obfsproxy.common.serialize as pack
import obfsproxy.transports.base as base
import mycrypto
import const
log = logging.get_obfslogger()
def createProtocolMessages( data, flags=const.FLAG_PAYLOAD ):
"""
Create protocol messages out of the given payload.
The given `data' is turned into a list of protocol messages with the given
`flags' set. The list is then returned. If possible, all messages fill
the MTU.
"""
messages = []
while len(data) > const.MPU:
messages.append(ProtocolMessage(data[:const.MPU], flags=flags))
data = data[const.MPU:]
messages.append(ProtocolMessage(data, flags=flags))
log.debug("Created %d protocol messages." % len(messages))
return messages
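# Splitting example (a sketch, not executed here): a payload one byte longer
# than const.MPU yields a full-MPU message followed by a one-byte message.
#
#     msgs = createProtocolMessages("x" * (const.MPU + 1))
#     assert len(msgs) == 2
#     assert msgs[0].payloadLen == const.MPU and msgs[1].payloadLen == 1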
def getFlagNames( flags ):
"""
Return the flag name encoded in the integer `flags' as string.
This function is only useful for printing easy-to-read flag names in debug
log messages.
"""
if flags == 1:
return "PAYLOAD"
elif flags == 2:
return "NEW_TICKET"
elif flags == 4:
return "PRNG_SEED"
else:
return "Undefined"
def isSane( totalLen, payloadLen, flags ):
"""
Verifies whether the given header fields are sane.
The values of the fields `totalLen', `payloadLen' and `flags' are checked
for their sanity. If they are in the expected range, `True' is returned.
If any of these fields has an invalid value, `False' is returned.
"""
def isFine( length ):
"""
Check if the given length is fine.
"""
        return 0 <= length <= const.MPU
log.debug("Message header: totalLen=%d, payloadLen=%d, flags"
"=%s" % (totalLen, payloadLen, getFlagNames(flags)))
validFlags = [
const.FLAG_PAYLOAD,
const.FLAG_NEW_TICKET,
const.FLAG_PRNG_SEED,
]
return isFine(totalLen) and \
isFine(payloadLen) and \
totalLen >= payloadLen and \
(flags in validFlags)
class ProtocolMessage( object ):
"""
Represents a ScrambleSuit protocol message.
This class provides methods to deal with protocol messages. The methods
make it possible to add padding as well as to encrypt and authenticate
protocol messages.
"""
def __init__( self, payload="", paddingLen=0, flags=const.FLAG_PAYLOAD ):
"""
Initialises a ProtocolMessage object.
"""
payloadLen = len(payload)
if (payloadLen + paddingLen) > const.MPU:
raise base.PluggableTransportError("No overly long messages.")
self.totalLen = payloadLen + paddingLen
self.payloadLen = payloadLen
self.payload = payload
self.flags = flags
def encryptAndHMAC( self, crypter, hmacKey ):
"""
Encrypt and authenticate this protocol message.
This protocol message is encrypted using `crypter' and authenticated
using `hmacKey'. Finally, the encrypted message prepended by a
HMAC-SHA256-128 is returned and ready to be sent over the wire.
"""
encrypted = crypter.encrypt(pack.htons(self.totalLen) +
pack.htons(self.payloadLen) +
chr(self.flags) + self.payload +
(self.totalLen - self.payloadLen) * '\0')
hmac = mycrypto.HMAC_SHA256_128(hmacKey, encrypted)
return hmac + encrypted
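    # Wire layout produced above (and parsed back by MessageExtractor.extract):
    #   bytes  0-15 : HMAC-SHA256-128 over the encrypted remainder
    #   bytes 16-17 : total length   (encrypted, big-endian)
    #   bytes 18-19 : payload length (encrypted, big-endian)
    #   byte  20    : flags          (encrypted)
    #   remainder   : payload plus (totalLen - payloadLen) padding bytes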
def addPadding( self, paddingLen ):
"""
Add padding to this protocol message.
Padding is added to this protocol message. The exact amount is
specified by `paddingLen'.
"""
# The padding must not exceed the message size.
if (self.totalLen + paddingLen) > const.MPU:
raise base.PluggableTransportError("Can't pad more than the MTU.")
if paddingLen == 0:
return
log.debug("Adding %d bytes of padding to %d-byte message." %
(paddingLen, const.HDR_LENGTH + self.totalLen))
self.totalLen += paddingLen
def __len__( self ):
"""
Return the length of this protocol message.
"""
return const.HDR_LENGTH + self.totalLen
# Alias class name in order to provide a more intuitive API.
new = ProtocolMessage
class MessageExtractor( object ):
"""
Extracts ScrambleSuit protocol messages out of an encrypted stream.
"""
def __init__( self ):
"""
Initialise a new MessageExtractor object.
"""
self.recvBuf = ""
self.totalLen = None
self.payloadLen = None
self.flags = None
def extract( self, data, aes, hmacKey ):
"""
Extracts (i.e., decrypts and authenticates) protocol messages.
The raw `data' coming directly from the wire is decrypted using `aes'
and authenticated using `hmacKey'. The payload is then returned as
unencrypted protocol messages. In case of invalid headers or HMACs, an
exception is raised.
"""
self.recvBuf += data
msgs = []
# Keep trying to unpack as long as there is at least a header.
while len(self.recvBuf) >= const.HDR_LENGTH:
# If necessary, extract the header fields.
            if self.totalLen is None and self.payloadLen is None and self.flags is None:
self.totalLen = pack.ntohs(aes.decrypt(self.recvBuf[16:18]))
self.payloadLen = pack.ntohs(aes.decrypt(self.recvBuf[18:20]))
self.flags = ord(aes.decrypt(self.recvBuf[20]))
if not isSane(self.totalLen, self.payloadLen, self.flags):
raise base.PluggableTransportError("Invalid header.")
# Parts of the message are still on the wire; waiting.
if (len(self.recvBuf) - const.HDR_LENGTH) < self.totalLen:
break
rcvdHMAC = self.recvBuf[0:const.HMAC_SHA256_128_LENGTH]
vrfyHMAC = mycrypto.HMAC_SHA256_128(hmacKey,
self.recvBuf[const.HMAC_SHA256_128_LENGTH:
(self.totalLen + const.HDR_LENGTH)])
if rcvdHMAC != vrfyHMAC:
raise base.PluggableTransportError("Invalid message HMAC.")
# Decrypt the message and remove it from the input buffer.
extracted = aes.decrypt(self.recvBuf[const.HDR_LENGTH:
(self.totalLen + const.HDR_LENGTH)])[:self.payloadLen]
msgs.append(ProtocolMessage(payload=extracted, flags=self.flags))
self.recvBuf = self.recvBuf[const.HDR_LENGTH + self.totalLen:]
# Protocol message processed; now reset length fields.
self.totalLen = self.payloadLen = self.flags = None
return msgs
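# Round-trip sketch (hypothetical objects: `tx_aes` and `rx_aes` stand for a
# matching pair of AES crypters exposing encrypt()/decrypt(), and `hmac_key`
# for the shared HMAC key; none of these names exist in this module):
#
#     blurb = ProtocolMessage("hello").encryptAndHMAC(tx_aes, hmac_key)
#     msgs = MessageExtractor().extract(blurb, rx_aes, hmac_key)
#     assert msgs[0].payload == "hello"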
| bsd-3-clause |
bartosh/zipline | tests/pipeline/test_downsampling.py | 4 | 24457 | """
Tests for Downsampled Filters/Factors/Classifiers
"""
import pandas as pd
from pandas.util.testing import assert_frame_equal
from zipline.pipeline import (
Pipeline,
CustomFactor,
CustomFilter,
CustomClassifier,
)
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.factors import SimpleMovingAverage
from zipline.pipeline.filters.smoothing import All
from zipline.testing import ZiplineTestCase, parameter_space
from zipline.testing.fixtures import (
WithTradingSessions,
WithSeededRandomPipelineEngine,
)
from zipline.utils.input_validation import _qualified_name
from zipline.utils.numpy_utils import int64_dtype
class NDaysAgoFactor(CustomFactor):
inputs = [TestingDataSet.float_col]
def compute(self, today, assets, out, floats):
out[:] = floats[0]
class NDaysAgoFilter(CustomFilter):
inputs = [TestingDataSet.bool_col]
def compute(self, today, assets, out, bools):
out[:] = bools[0]
class NDaysAgoClassifier(CustomClassifier):
inputs = [TestingDataSet.categorical_col]
dtype = TestingDataSet.categorical_col.dtype
def compute(self, today, assets, out, cats):
out[:] = cats[0]
class ComputeExtraRowsTestcase(WithTradingSessions, ZiplineTestCase):
DATA_MIN_DAY = pd.Timestamp('2012-06', tz='UTC')
DATA_MAX_DAY = pd.Timestamp('2015', tz='UTC')
TRADING_CALENDAR_STRS = ('NYSE',)
# Test with different window_lengths to ensure that window length is not
    # used when calculating extra rows for the top-level term.
factor1 = TestingDataSet.float_col.latest
factor11 = NDaysAgoFactor(window_length=11)
factor91 = NDaysAgoFactor(window_length=91)
filter1 = TestingDataSet.bool_col.latest
filter11 = NDaysAgoFilter(window_length=11)
filter91 = NDaysAgoFilter(window_length=91)
classifier1 = TestingDataSet.categorical_col.latest
classifier11 = NDaysAgoClassifier(window_length=11)
classifier91 = NDaysAgoClassifier(window_length=91)
all_terms = [
factor1,
factor11,
factor91,
filter1,
filter11,
filter91,
classifier1,
classifier11,
classifier91,
]
@parameter_space(
calendar_name=TRADING_CALENDAR_STRS,
base_terms=[
(factor1, factor11, factor91),
(filter1, filter11, filter91),
(classifier1, classifier11, classifier91),
],
__fail_fast=True
)
def test_yearly(self, base_terms, calendar_name):
downsampled_terms = tuple(
t.downsample('year_start') for t in base_terms
)
all_terms = base_terms + downsampled_terms
all_sessions = self.trading_sessions[calendar_name]
end_session = all_sessions[-1]
years = all_sessions.year
sessions_in_2012 = all_sessions[years == 2012]
sessions_in_2013 = all_sessions[years == 2013]
sessions_in_2014 = all_sessions[years == 2014]
# Simulate requesting computation where the unaltered lookback would
# land exactly on the first date in 2014. We shouldn't request any
# additional rows for the regular terms or the downsampled terms.
for i in range(0, 30, 5):
start_session = sessions_in_2014[i]
self.check_extra_row_calculations(
all_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land on the second date in 2014. We should request one more extra
# row in the downsampled terms to push us back to the first date in
# 2014.
for i in range(0, 30, 5):
start_session = sessions_in_2014[i + 1]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i + 1,
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land on the last date of 2013. The downsampled terms should request
# enough extra rows to push us back to the start of 2013.
for i in range(0, 30, 5):
start_session = sessions_in_2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(sessions_in_2013),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
# Simulate requesting computation where the unaltered lookback would
# land on the last date of 2012. The downsampled terms should request
# enough extra rows to push us back to the first known date, which is
# in the middle of 2012
for i in range(0, 30, 5):
start_session = sessions_in_2013[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(sessions_in_2012),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
@parameter_space(
calendar_name=TRADING_CALENDAR_STRS,
base_terms=[
(factor1, factor11, factor91),
(filter1, filter11, filter91),
(classifier1, classifier11, classifier91),
],
__fail_fast=True
)
def test_quarterly(self, calendar_name, base_terms):
downsampled_terms = tuple(
t.downsample('quarter_start') for t in base_terms
)
all_terms = base_terms + downsampled_terms
# This region intersects with Q4 2013, Q1 2014, and Q2 2014.
tmp = self.trading_sessions[calendar_name]
all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-04-30')]
end_session = all_sessions[-1]
months = all_sessions.month
Q4_2013 = all_sessions[months == 12]
Q1_2014 = all_sessions[(months == 1) | (months == 2) | (months == 3)]
Q2_2014 = all_sessions[months == 4]
# Simulate requesting computation where the unaltered lookback would
# land exactly on the first date in Q2 2014. We shouldn't request any
# additional rows for the regular terms or the downsampled terms.
for i in range(0, 15, 5):
start_session = Q2_2014[i]
self.check_extra_row_calculations(
all_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the second date in Q2 2014.
# The downsampled terms should request one more extra row.
for i in range(0, 15, 5):
start_session = Q2_2014[i + 1]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i + 1,
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the last date in Q1 2014. The downsampled terms
# should request enough extra rows to push us back to the first date of
# Q1 2014.
for i in range(0, 15, 5):
start_session = Q2_2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(Q1_2014),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the last date in Q4 2013. The downsampled terms
# should request enough extra rows to push us back to the first known
        # date, which is in the middle of December 2013.
for i in range(0, 15, 5):
start_session = Q1_2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(Q4_2013),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
@parameter_space(
calendar_name=TRADING_CALENDAR_STRS,
base_terms=[
(factor1, factor11, factor91),
(filter1, filter11, filter91),
(classifier1, classifier11, classifier91),
],
__fail_fast=True
)
def test_monthly(self, calendar_name, base_terms):
downsampled_terms = tuple(
t.downsample('month_start') for t in base_terms
)
all_terms = base_terms + downsampled_terms
# This region intersects with Dec 2013, Jan 2014, and Feb 2014.
tmp = self.trading_sessions[calendar_name]
all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-02-28')]
end_session = all_sessions[-1]
months = all_sessions.month
dec2013 = all_sessions[months == 12]
jan2014 = all_sessions[months == 1]
feb2014 = all_sessions[months == 2]
# Simulate requesting computation where the unaltered lookback would
# land exactly on the first date in feb 2014. We shouldn't request any
# additional rows for the regular terms or the downsampled terms.
for i in range(0, 10, 2):
start_session = feb2014[i]
self.check_extra_row_calculations(
all_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land on the second date in feb 2014. We should request one more
# extra row in the downsampled terms to push us back to the first date
        # in feb 2014.
for i in range(0, 10, 2):
start_session = feb2014[i + 1]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i + 1,
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land on the last date of jan 2014. The downsampled terms should
# request enough extra rows to push us back to the start of jan 2014.
for i in range(0, 10, 2):
start_session = feb2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(jan2014),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
# Simulate requesting computation where the unaltered lookback would
# land on the last date of dec 2013. The downsampled terms should
# request enough extra rows to push us back to the first known date,
        # which is in the middle of December 2013.
for i in range(0, 10, 2):
start_session = jan2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(dec2013),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
@parameter_space(
calendar_name=TRADING_CALENDAR_STRS,
base_terms=[
(factor1, factor11, factor91),
(filter1, filter11, filter91),
(classifier1, classifier11, classifier91),
],
__fail_fast=True
)
def test_weekly(self, calendar_name, base_terms):
downsampled_terms = tuple(
t.downsample('week_start') for t in base_terms
)
all_terms = base_terms + downsampled_terms
# December 2013
# Mo Tu We Th Fr Sa Su
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30 31
# January 2014
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30 31
# This region intersects with the last full week of 2013, the week
# shared by 2013 and 2014, and the first full week of 2014.
tmp = self.trading_sessions[calendar_name]
all_sessions = tmp[tmp.slice_indexer('2013-12-27', '2014-01-12')]
end_session = all_sessions[-1]
week0 = all_sessions[
all_sessions.slice_indexer('2013-12-27', '2013-12-29')
]
week1 = all_sessions[
all_sessions.slice_indexer('2013-12-30', '2014-01-05')
]
week2 = all_sessions[
all_sessions.slice_indexer('2014-01-06', '2014-01-12')
]
# Simulate requesting computation where the unaltered lookback would
# land exactly on the first date in week 2. We shouldn't request any
# additional rows for the regular terms or the downsampled terms.
for i in range(3):
start_session = week2[i]
self.check_extra_row_calculations(
all_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the second date in week 2. The downsampled terms
# should request one more extra row.
for i in range(3):
start_session = week2[i + 1]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i + 1,
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the last date in week 1. The downsampled terms
# should request enough extra rows to push us back to the first date of
# week 1.
for i in range(3):
start_session = week2[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(week1),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
# Simulate requesting computation where the unaltered lookback would
        # land exactly on the last date in week 0. The downsampled terms
        # should request enough extra rows to push us back to the first known
        # date, which is near the end of December 2013.
for i in range(3):
start_session = week1[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(week0),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
def check_extra_row_calculations(self,
terms,
all_sessions,
start_session,
end_session,
min_extra_rows,
expected_extra_rows):
"""
Check that each term in ``terms`` computes an expected number of extra
rows for the given parameters.
"""
for term in terms:
result = term.compute_extra_rows(
all_sessions,
start_session,
end_session,
min_extra_rows,
)
self.assertEqual(
result,
expected_extra_rows,
"Expected {} extra_rows from {}, but got {}.".format(
expected_extra_rows,
term,
result,
)
)
class DownsampledPipelineTestCase(WithSeededRandomPipelineEngine,
ZiplineTestCase):
# Extend into the last few days of 2013 to test year/quarter boundaries.
START_DATE = pd.Timestamp('2013-12-15', tz='UTC')
# Extend into the first few days of 2015 to test year/quarter boundaries.
END_DATE = pd.Timestamp('2015-01-06', tz='UTC')
ASSET_FINDER_EQUITY_SIDS = tuple(range(10))
def check_downsampled_term(self, term):
# June 2014
# Mo Tu We Th Fr Sa Su
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
all_sessions = self.nyse_sessions
compute_dates = all_sessions[
all_sessions.slice_indexer('2014-06-05', '2015-01-06')
]
start_date, end_date = compute_dates[[0, -1]]
pipe = Pipeline({
'year': term.downsample(frequency='year_start'),
'quarter': term.downsample(frequency='quarter_start'),
'month': term.downsample(frequency='month_start'),
'week': term.downsample(frequency='week_start'),
})
# Raw values for term, computed each day from 2014 to the end of the
# target period.
raw_term_results = self.run_pipeline(
Pipeline({'term': term}),
start_date=pd.Timestamp('2014-01-02', tz='UTC'),
end_date=pd.Timestamp('2015-01-06', tz='UTC'),
)['term'].unstack()
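        # For each frequency, the expected frame keeps the first raw value in
        # every year/quarter/month/week bucket and forward-fills it across the
        # sessions on which the downsampled pipeline actually computes.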
expected_results = {
'year': (raw_term_results
.groupby(pd.TimeGrouper('AS'))
.first()
.reindex(compute_dates, method='ffill')),
'quarter': (raw_term_results
.groupby(pd.TimeGrouper('QS'))
.first()
.reindex(compute_dates, method='ffill')),
'month': (raw_term_results
.groupby(pd.TimeGrouper('MS'))
.first()
.reindex(compute_dates, method='ffill')),
'week': (raw_term_results
.groupby(pd.TimeGrouper('W', label='left'))
.first()
.reindex(compute_dates, method='ffill')),
}
results = self.run_pipeline(pipe, start_date, end_date)
for frequency in expected_results:
result = results[frequency].unstack()
expected = expected_results[frequency]
assert_frame_equal(result, expected)
def test_downsample_windowed_factor(self):
self.check_downsampled_term(
SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
)
def test_downsample_non_windowed_factor(self):
sma = SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
self.check_downsampled_term(((sma + sma) / 2).rank())
def test_downsample_windowed_filter(self):
sma = SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
self.check_downsampled_term(All(inputs=[sma.top(4)], window_length=5))
def test_downsample_nonwindowed_filter(self):
sma = SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
self.check_downsampled_term(sma > 5)
def test_downsample_windowed_classifier(self):
class IntSumClassifier(CustomClassifier):
inputs = [TestingDataSet.float_col]
window_length = 8
dtype = int64_dtype
missing_value = -1
def compute(self, today, assets, out, floats):
out[:] = floats.sum(axis=0).astype(int) % 4
self.check_downsampled_term(IntSumClassifier())
def test_downsample_nonwindowed_classifier(self):
sma = SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
self.check_downsampled_term(sma.quantiles(5))
def test_errors_on_bad_downsample_frequency(self):
f = NDaysAgoFactor(window_length=3)
with self.assertRaises(ValueError) as e:
f.downsample('bad')
expected = (
"{}() expected a value in "
"('month_start', 'quarter_start', 'week_start', 'year_start') "
"for argument 'frequency', but got 'bad' instead."
).format(_qualified_name(f.downsample))
self.assertEqual(str(e.exception), expected)
| apache-2.0 |
TsinghuaX/edx-platform | common/lib/calc/calc/preview.py | 17 | 12271 | """
Provide a `latex_preview` method similar in syntax to `evaluator`.
That is, given a math string, parse it and render each branch of the result,
always returning valid latex.
Because intermediate values of the render contain more data than simply the
string of latex, store it in a custom class `LatexRendered`.
"""
from calc import ParseAugmenter, DEFAULT_VARIABLES, DEFAULT_FUNCTIONS, SUFFIXES
class LatexRendered(object):
"""
Data structure to hold a typeset representation of some math.
Fields:
-`latex` is a generated, valid latex string (as if it were standalone).
-`sans_parens` is usually the same as `latex` except without the outermost
parens (if applicable).
-`tall` is a boolean representing if the latex has any elements extending
above or below a normal height, specifically things of the form 'a^b' and
'\frac{a}{b}'. This affects the height of wrapping parenthesis.
"""
def __init__(self, latex, parens=None, tall=False):
"""
Instantiate with the latex representing the math.
Optionally include parenthesis to wrap around it and the height.
`parens` must be one of '(', '[' or '{'.
`tall` is a boolean (see note above).
"""
self.latex = latex
self.sans_parens = latex
self.tall = tall
# Generate parens and overwrite `self.latex`.
if parens is not None:
left_parens = parens
if left_parens == '{':
left_parens = r'\{'
pairs = {'(': ')',
'[': ']',
r'\{': r'\}'}
if left_parens not in pairs:
raise Exception(
u"Unknown parenthesis '{}': coder error".format(left_parens)
)
right_parens = pairs[left_parens]
if self.tall:
left_parens = r"\left" + left_parens
right_parens = r"\right" + right_parens
self.latex = u"{left}{expr}{right}".format(
left=left_parens,
expr=latex,
right=right_parens
)
def __repr__(self): # pragma: no cover
"""
Give a sensible representation of the object.
If `sans_parens` is different, include both.
If `tall` then have '<[]>' around the code, otherwise '<>'.
"""
if self.latex == self.sans_parens:
latex_repr = u'"{}"'.format(self.latex)
else:
latex_repr = u'"{}" or "{}"'.format(self.latex, self.sans_parens)
if self.tall:
wrap = u'<[{}]>'
else:
wrap = u'<{}>'
return wrap.format(latex_repr)
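# Illustrative usage (a sketch based on the class above; these literals are
# assumptions, not values quoted from the original module):
#   LatexRendered('x^{2}', parens='(', tall=True).latex == r'\left(x^{2}\right)'
#   LatexRendered('x', parens='[').latex == '[x]'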
def render_number(children):
"""
Combine the elements forming the number, escaping the suffix if needed.
"""
children_latex = [k.latex for k in children]
suffix = ""
if children_latex[-1] in SUFFIXES:
suffix = children_latex.pop()
suffix = ur"\text{{{s}}}".format(s=suffix)
    # Exponential notation: the "E" splits the mantissa and exponent
if "E" in children_latex:
pos = children_latex.index("E")
mantissa = "".join(children_latex[:pos])
exponent = "".join(children_latex[pos + 1:])
latex = ur"{m}\!\times\!10^{{{e}}}{s}".format(
m=mantissa, e=exponent, s=suffix
)
return LatexRendered(latex, tall=True)
else:
easy_number = "".join(children_latex)
return LatexRendered(easy_number + suffix)
def enrich_varname(varname):
"""
Prepend a backslash if we're given a greek character.
"""
greek = ("alpha beta gamma delta epsilon varepsilon zeta eta theta "
"vartheta iota kappa lambda mu nu xi pi rho sigma tau upsilon "
"phi varphi chi psi omega").split()
# add capital greek letters
greek += [x.capitalize() for x in greek]
# add hbar for QM
greek.append('hbar')
if varname in greek:
return ur"\{letter}".format(letter=varname)
else:
return varname.replace("_", r"\_")
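# For example (derived from the mapping above):
#   enrich_varname('alpha')  -> r'\alpha'
#   enrich_varname('hbar')   -> r'\hbar'
#   enrich_varname('my_var') -> r'my\_var'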
def variable_closure(variables, casify):
"""
Wrap `render_variable` so it knows the variables allowed.
"""
def render_variable(children):
"""
Replace greek letters, otherwise escape the variable names.
"""
varname = children[0].latex
if casify(varname) not in variables:
pass # TODO turn unknown variable red or give some kind of error
first, _, second = varname.partition("_")
if second:
# Then 'a_b' must become 'a_{b}'
varname = ur"{a}_{{{b}}}".format(
a=enrich_varname(first),
b=enrich_varname(second)
)
else:
varname = enrich_varname(varname)
return LatexRendered(varname) # .replace("_", r"\_"))
return render_variable
def function_closure(functions, casify):
"""
Wrap `render_function` so it knows the functions allowed.
"""
def render_function(children):
"""
Escape function names and give proper formatting to exceptions.
The exceptions being 'sqrt', 'log2', and 'log10' as of now.
"""
fname = children[0].latex
if casify(fname) not in functions:
pass # TODO turn unknown function red or give some kind of error
# Wrap the input of the function with parens or braces.
inner = children[1].latex
if fname == "sqrt":
inner = u"{{{expr}}}".format(expr=inner)
else:
if children[1].tall:
inner = ur"\left({expr}\right)".format(expr=inner)
else:
inner = u"({expr})".format(expr=inner)
# Correctly format the name of the function.
if fname == "sqrt":
fname = ur"\sqrt"
elif fname == "log10":
fname = ur"\log_{10}"
elif fname == "log2":
fname = ur"\log_2"
else:
fname = ur"\text{{{fname}}}".format(fname=fname)
# Put it together.
latex = fname + inner
return LatexRendered(latex, tall=children[1].tall)
# Return the function within the closure.
return render_function
def render_power(children):
"""
Combine powers so that the latex is wrapped in curly braces correctly.
Also, if you have 'a^(b+c)' don't include that last set of parens:
'a^{b+c}' is correct, whereas 'a^{(b+c)}' is extraneous.
"""
if len(children) == 1:
return children[0]
children_latex = [k.latex for k in children if k.latex != "^"]
children_latex[-1] = children[-1].sans_parens
raise_power = lambda x, y: u"{}^{{{}}}".format(y, x)
latex = reduce(raise_power, reversed(children_latex))
return LatexRendered(latex, tall=True)
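# For instance (assuming the reduction above), children parsed from 'x^y^2'
# collapse right-to-left into 'x^{y^{2}}', while 'a^(b+c)' yields 'a^{b+c}'
# because the exponent's outer parens are dropped via ``sans_parens``.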
def render_parallel(children):
"""
Simply join the child nodes with a double vertical line.
"""
if len(children) == 1:
return children[0]
children_latex = [k.latex for k in children if k.latex != "||"]
latex = r"\|".join(children_latex)
tall = any(k.tall for k in children)
return LatexRendered(latex, tall=tall)
def render_frac(numerator, denominator):
r"""
Given a list of elements in the numerator and denominator, return a '\frac'
Avoid parens if they are unnecessary (i.e. the only thing in that part).
"""
if len(numerator) == 1:
num_latex = numerator[0].sans_parens
else:
num_latex = r"\cdot ".join(k.latex for k in numerator)
if len(denominator) == 1:
den_latex = denominator[0].sans_parens
else:
den_latex = r"\cdot ".join(k.latex for k in denominator)
latex = ur"\frac{{{num}}}{{{den}}}".format(num=num_latex, den=den_latex)
return latex
def render_product(children):
r"""
Format products and division nicely.
Group bunches of adjacent, equal operators. Every time it switches from
denominator to the next numerator, call `render_frac`. Join these groupings
together with '\cdot's, ending on a numerator if needed.
Examples: (`children` is formed indirectly by the string on the left)
'a*b' -> 'a\cdot b'
'a/b' -> '\frac{a}{b}'
'a*b/c/d' -> '\frac{a\cdot b}{c\cdot d}'
'a/b*c/d*e' -> '\frac{a}{b}\cdot \frac{c}{d}\cdot e'
"""
if len(children) == 1:
return children[0]
position = "numerator" # or denominator
fraction_mode_ever = False
numerator = []
denominator = []
latex = ""
for kid in children:
if position == "numerator":
if kid.latex == "*":
pass # Don't explicitly add the '\cdot' yet.
elif kid.latex == "/":
# Switch to denominator mode.
fraction_mode_ever = True
position = "denominator"
else:
numerator.append(kid)
else:
if kid.latex == "*":
# Switch back to numerator mode.
# First, render the current fraction and add it to the latex.
latex += render_frac(numerator, denominator) + r"\cdot "
# Reset back to beginning state
position = "numerator"
numerator = []
denominator = []
elif kid.latex == "/":
pass # Don't explicitly add a '\frac' yet.
else:
denominator.append(kid)
# Add the fraction/numerator that we ended on.
if position == "denominator":
latex += render_frac(numerator, denominator)
else:
# We ended on a numerator--act like normal multiplication.
num_latex = r"\cdot ".join(k.latex for k in numerator)
latex += num_latex
tall = fraction_mode_ever or any(k.tall for k in children)
return LatexRendered(latex, tall=tall)
def render_sum(children):
"""
Concatenate elements, including the operators.
"""
if len(children) == 1:
return children[0]
children_latex = [k.latex for k in children]
latex = "".join(children_latex)
tall = any(k.tall for k in children)
return LatexRendered(latex, tall=tall)
def render_atom(children):
"""
Properly handle parens, otherwise this is trivial.
"""
if len(children) == 3:
return LatexRendered(
children[1].latex,
parens=children[0].latex,
tall=children[1].tall
)
else:
return children[0]
def add_defaults(var, fun, case_sensitive=False):
"""
Create sets with both the default and user-defined variables.
Compare to calc.add_defaults
"""
var_items = set(DEFAULT_VARIABLES)
fun_items = set(DEFAULT_FUNCTIONS)
var_items.update(var)
fun_items.update(fun)
if not case_sensitive:
var_items = set(k.lower() for k in var_items)
fun_items = set(k.lower() for k in fun_items)
return var_items, fun_items
def latex_preview(math_expr, variables=(), functions=(), case_sensitive=False):
"""
Convert `math_expr` into latex, guaranteeing its parse-ability.
    Analogous to `evaluator`.
"""
# No need to go further
if math_expr.strip() == "":
return ""
# Parse tree
latex_interpreter = ParseAugmenter(math_expr, case_sensitive)
latex_interpreter.parse_algebra()
# Get our variables together.
variables, functions = add_defaults(variables, functions, case_sensitive)
# Create a recursion to evaluate the tree.
if case_sensitive:
casify = lambda x: x
else:
casify = lambda x: x.lower() # Lowercase for case insens.
render_actions = {
'number': render_number,
'variable': variable_closure(variables, casify),
'function': function_closure(functions, casify),
'atom': render_atom,
'power': render_power,
'parallel': render_parallel,
'product': render_product,
'sum': render_sum
}
backslash = "\\"
wrap_escaped_strings = lambda s: LatexRendered(
s.replace(backslash, backslash * 2)
)
output = latex_interpreter.reduce_tree(
render_actions,
terminal_converter=wrap_escaped_strings
)
return output.latex
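# Example outputs (a sketch assuming the default variables and functions; the
# exact strings are inferred from the renderers above, not quoted from tests):
#   latex_preview('sqrt(3)')  -> r'\sqrt{3}'
#   latex_preview('log10(x)') -> r'\log_{10}(x)'
#   latex_preview('')         -> ''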
| agpl-3.0 |
edoko/Air_Kernel-N5 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
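# Lazily-built nested counts (see raw_syscalls__sys_exit below):
#   syscalls[comm][pid][syscall id][error return value] -> count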
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
midma101/AndIWasJustGoingToBed | .venv/lib/python2.7/site-packages/werkzeug/testsuite/formparser.py | 63 | 16107 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.formparser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the form parsing facilities.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from StringIO import StringIO
from os.path import join, dirname
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import formparser
from werkzeug.test import create_environ, Client
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import RequestEntityTooLarge
@Request.application
def form_data_consumer(request):
result_object = request.args['object']
if result_object == 'text':
return Response(repr(request.form['text']))
f = request.files[result_object]
return Response('\n'.join((
repr(f.filename),
repr(f.name),
repr(f.content_type),
f.stream.read()
)))
def get_contents(filename):
f = file(filename, 'rb')
try:
return f.read()
finally:
f.close()
class FormParserTestCase(WerkzeugTestCase):
def test_limiting(self):
data = 'foo=Hello+World&bar=baz'
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_content_length = 400
self.assert_equal(req.form['foo'], 'Hello World')
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_form_memory_size = 7
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_form_memory_size = 400
self.assert_equal(req.form['foo'], 'Hello World')
data = ('--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\n'
'Hello World\r\n'
'--foo\r\nContent-Disposition: form-field; name=bar\r\n\r\n'
'bar=baz\r\n--foo--')
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_content_length = 4
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_content_length = 400
self.assert_equal(req.form['foo'], 'Hello World')
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_form_memory_size = 7
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_form_memory_size = 400
self.assert_equal(req.form['foo'], 'Hello World')
def test_parse_form_data_put_without_content(self):
# A PUT without a Content-Type header returns empty data
# Both rfc1945 and rfc2616 (1.0 and 1.1) say "Any HTTP/[1.0/1.1] message
# containing an entity-body SHOULD include a Content-Type header field
# defining the media type of that body." In the case where either
# headers are omitted, parse_form_data should still work.
env = create_environ('/foo', 'http://example.org/', method='PUT')
del env['CONTENT_TYPE']
del env['CONTENT_LENGTH']
stream, form, files = formparser.parse_form_data(env)
self.assert_equal(stream.read(), '')
self.assert_equal(len(form), 0)
self.assert_equal(len(files), 0)
def test_parse_form_data_get_without_content(self):
env = create_environ('/foo', 'http://example.org/', method='GET')
del env['CONTENT_TYPE']
del env['CONTENT_LENGTH']
stream, form, files = formparser.parse_form_data(env)
self.assert_equal(stream.read(), '')
self.assert_equal(len(form), 0)
self.assert_equal(len(files), 0)
def test_large_file(self):
data = 'x' * (1024 * 600)
req = Request.from_values(data={'foo': (StringIO(data), 'test.txt')},
method='POST')
# make sure we have a real file here, because we expect to be
# on the disk. > 1024 * 500
self.assert_(isinstance(req.files['foo'].stream, file))
class MultiPartTestCase(WerkzeugTestCase):
def test_basic(self):
resources = join(dirname(__file__), 'multipart')
client = Client(form_data_consumer, Response)
repository = [
('firefox3-2png1txt', '---------------------------186454651713519341951581030105', [
(u'anchor.png', 'file1', 'image/png', 'file1.png'),
(u'application_edit.png', 'file2', 'image/png', 'file2.png')
], u'example text'),
('firefox3-2pnglongtext', '---------------------------14904044739787191031754711748', [
(u'accept.png', 'file1', 'image/png', 'file1.png'),
(u'add.png', 'file2', 'image/png', 'file2.png')
], u'--long text\r\n--with boundary\r\n--lookalikes--'),
('opera8-2png1txt', '----------zEO9jQKmLc2Cq88c23Dx19', [
(u'arrow_branch.png', 'file1', 'image/png', 'file1.png'),
(u'award_star_bronze_1.png', 'file2', 'image/png', 'file2.png')
], u'blafasel öäü'),
('webkit3-2png1txt', '----WebKitFormBoundaryjdSFhcARk8fyGNy6', [
(u'gtk-apply.png', 'file1', 'image/png', 'file1.png'),
(u'gtk-no.png', 'file2', 'image/png', 'file2.png')
], u'this is another text with ümläüts'),
('ie6-2png1txt', '---------------------------7d91b03a20128', [
(u'file1.png', 'file1', 'image/x-png', 'file1.png'),
(u'file2.png', 'file2', 'image/x-png', 'file2.png')
], u'ie6 sucks :-/')
]
for name, boundary, files, text in repository:
folder = join(resources, name)
data = get_contents(join(folder, 'request.txt'))
for filename, field, content_type, fsname in files:
response = client.post('/?object=' + field, data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary,
content_length=len(data))
lines = response.data.split('\n', 3)
self.assert_equal(lines[0], repr(filename))
self.assert_equal(lines[1], repr(field))
self.assert_equal(lines[2], repr(content_type))
self.assert_equal(lines[3], get_contents(join(folder, fsname)))
response = client.post('/?object=text', data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary,
content_length=len(data))
self.assert_equal(response.data, repr(text))
def test_ie7_unc_path(self):
client = Client(form_data_consumer, Response)
data_file = join(dirname(__file__), 'multipart', 'ie7_full_path_request.txt')
data = get_contents(data_file)
boundary = '---------------------------7da36d1b4a0164'
response = client.post('/?object=cb_file_upload_multiple', data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary, content_length=len(data))
lines = response.data.split('\n', 3)
self.assert_equal(lines[0],
repr(u'Sellersburg Town Council Meeting 02-22-2010doc.doc'))
def test_end_of_file(self):
        # This test looks innocent but it was actually timing out in
# the Werkzeug 0.5 release version (#394)
data = (
'--foo\r\n'
'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
'Content-Type: text/plain\r\n\r\n'
'file contents and no end'
)
data = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_(not data.files)
self.assert_(not data.form)
def test_broken(self):
data = (
'--foo\r\n'
'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
'Content-Transfer-Encoding: base64\r\n'
'Content-Type: text/plain\r\n\r\n'
'broken base 64'
'--foo--'
)
_, form, files = formparser.parse_form_data(create_environ(data=data,
method='POST', content_type='multipart/form-data; boundary=foo'))
self.assert_(not files)
self.assert_(not form)
self.assert_raises(ValueError, formparser.parse_form_data,
create_environ(data=data, method='POST',
content_type='multipart/form-data; boundary=foo'),
silent=False)
def test_file_no_content_type(self):
data = (
'--foo\r\n'
'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n\r\n'
'file contents\r\n--foo--'
)
data = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_equal(data.files['test'].filename, 'test.txt')
self.assert_equal(data.files['test'].read(), 'file contents')
def test_extra_newline(self):
        # This test looks innocent but it was actually timing out in
# the Werkzeug 0.5 release version (#394)
data = (
'\r\n\r\n--foo\r\n'
'Content-Disposition: form-data; name="foo"\r\n\r\n'
'a string\r\n'
'--foo--'
)
data = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_(not data.files)
self.assert_equal(data.form['foo'], 'a string')
def test_headers(self):
data = ('--foo\r\n'
'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
'X-Custom-Header: blah\r\n'
'Content-Type: text/plain; charset=utf-8\r\n\r\n'
'file contents, just the contents\r\n'
'--foo--')
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
foo = req.files['foo']
self.assert_equal(foo.mimetype, 'text/plain')
self.assert_equal(foo.mimetype_params, {'charset': 'utf-8'})
self.assert_equal(foo.headers['content-type'], foo.content_type)
self.assert_equal(foo.content_type, 'text/plain; charset=utf-8')
self.assert_equal(foo.headers['x-custom-header'], 'blah')
def test_nonstandard_line_endings(self):
for nl in '\n', '\r', '\r\n':
data = nl.join((
'--foo',
'Content-Disposition: form-data; name=foo',
'',
'this is just bar',
'--foo',
'Content-Disposition: form-data; name=bar',
'',
'blafasel',
'--foo--'
))
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; '
'boundary=foo', method='POST')
self.assert_equal(req.form['foo'], 'this is just bar')
self.assert_equal(req.form['bar'], 'blafasel')
def test_failures(self):
def parse_multipart(stream, boundary, content_length):
parser = formparser.MultiPartParser(content_length)
return parser.parse(stream, boundary, content_length)
self.assert_raises(ValueError, parse_multipart, StringIO(''), '', 0)
self.assert_raises(ValueError, parse_multipart, StringIO(''), 'broken ', 0)
data = '--foo\r\n\r\nHello World\r\n--foo--'
self.assert_raises(ValueError, parse_multipart, StringIO(data), 'foo', len(data))
data = '--foo\r\nContent-Disposition: form-field; name=foo\r\n' \
'Content-Transfer-Encoding: base64\r\n\r\nHello World\r\n--foo--'
self.assert_raises(ValueError, parse_multipart, StringIO(data), 'foo', len(data))
data = '--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\nHello World\r\n'
self.assert_raises(ValueError, parse_multipart, StringIO(data), 'foo', len(data))
x = formparser.parse_multipart_headers(['foo: bar\r\n', ' x test\r\n'])
self.assert_equal(x['foo'], 'bar\n x test')
self.assert_raises(ValueError, formparser.parse_multipart_headers,
['foo: bar\r\n', ' x test'])
def test_bad_newline_bad_newline_assumption(self):
class ISORequest(Request):
charset = 'latin1'
contents = 'U2vlbmUgbORu'
data = '--foo\r\nContent-Disposition: form-data; name="test"\r\n' \
'Content-Transfer-Encoding: base64\r\n\r\n' + \
contents + '\r\n--foo--'
req = ISORequest.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_equal(req.form['test'], u'Sk\xe5ne l\xe4n')
class InternalFunctionsTestCase(WerkzeugTestCase):
    def test_line_parser(self):
assert formparser._line_parse('foo') == ('foo', False)
assert formparser._line_parse('foo\r\n') == ('foo', True)
assert formparser._line_parse('foo\r') == ('foo', True)
assert formparser._line_parse('foo\n') == ('foo', True)
def test_find_terminator(self):
lineiter = iter('\n\n\nfoo\nbar\nbaz'.splitlines(True))
find_terminator = formparser.MultiPartParser()._find_terminator
line = find_terminator(lineiter)
assert line == 'foo'
assert list(lineiter) == ['bar\n', 'baz']
assert find_terminator([]) == ''
assert find_terminator(['']) == ''
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FormParserTestCase))
suite.addTest(unittest.makeSuite(MultiPartTestCase))
suite.addTest(unittest.makeSuite(InternalFunctionsTestCase))
return suite
| mit |