repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
var_hash
int64
-9,223,186,179,200,150,000
9,223,291,175B
doc_hash
int64
-9,223,304,365,658,930,000
9,223,309,051B
line_mean
float64
3.5
99.8
line_max
int64
13
999
alpha_frac
float64
0.25
0.97
autogenerated
bool
1 class
pacoqueen/ginn
extra/patches/upgrade_db_etiquetas_norma13.py
1
8959
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Crea los campos y actualiza los productos para las nuevas etiquetas de la norma del 1 de julio de 2013. """ import os, sys sys.path.insert(0, (os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "..", "ginn"))) os.chdir(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "..", "ginn")) from framework import pclases def alter_tables(): cmd = """echo "ALTER TABLE producto_venta ADD COLUMN anno_certificacion INT DEFAULT NULL; ALTER TABLE producto_venta ADD COLUMN dni TEXT DEFAULT ''; ALTER TABLE producto_venta ADD COLUMN uso TEXT DEFAULT ''; UPDATE producto_venta SET anno_certificacion = NULL; UPDATE producto_venta SET dni = ''; UPDATE producto_venta SET uso = '';" | psql dev_ginn """ os.system(cmd) cmd = """echo "ALTER TABLE producto_venta ADD COLUMN anno_certificacion INT DEFAULT NULL; ALTER TABLE producto_venta ADD COLUMN dni TEXT DEFAULT ''; ALTER TABLE producto_venta ADD COLUMN uso TEXT DEFAULT ''; UPDATE producto_venta SET anno_certificacion = NULL; UPDATE producto_venta SET dni = ''; UPDATE producto_venta SET uso = '';" | psql ginn """ os.system(cmd) def update_values_producto(p): modificado = True if "GEOTESAN" in p.nombre.upper() and " 10 " in p.descripcion: p.annoCertificacion = 8 p.dni = "0001 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 11 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0002 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 12 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0003 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 13 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0004 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre and " 14 " in p.descripcion: p.annoCertificacion = 8 p.dni = "0005 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 120 " in p.descripcion: p.annoCertificacion = 13 p.dni = "0006 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 15 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0007 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 155 " in p.descripcion: p.annoCertificacion = 11 p.dni = "0008 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 17 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0009 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 175 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0010 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 18 " in p.descripcion: p.annoCertificacion = 8 p.dni = "0011 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 21 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0012 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 23 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0013 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" 
elif "GEOTESAN" in p.nombre.upper() and " 235 " in p.descripcion: p.annoCertificacion = 11 p.dni = "0014 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 25 " in p.descripcion: p.annoCertificacion = 8 p.dni = "0015 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 30 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0016 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 305 " in p.descripcion: p.annoCertificacion = 11 p.dni = "0017 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 35 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0018 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 40 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0019 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 46 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0020 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 58 " in p.descripcion: p.annoCertificacion = 4 p.dni = "0021 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 69 " in p.descripcion: p.annoCertificacion = 8 p.dni = "0022 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 70 " in p.descripcion: p.annoCertificacion = 8 p.dni = "0023 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 80 " in p.descripcion: p.annoCertificacion = 9 p.dni = "0024 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 85 " in p.descripcion: p.annoCertificacion = 13 p.dni = "0025 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOTESAN" in p.nombre.upper() and " 200 " in p.descripcion: p.annoCertificacion = 8 p.dni = "0026 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo y separación" elif "GEOTESAN" in p.nombre.upper() and " 90 " in p.descripcion: p.dni = "0027 - GEOTEXTIL - 20130701" p.uso = "Drenaje, filtración, refuerzo, separación y protección" elif "GEOCEM" in p.nombre.upper(): if p.es_caja(): p.uso = "Fibra de polipropileno virgen embolsada en papel hidrosoluble para su uso como aditivo del hormigón" if p.camposEspecificosBala.dtex == 6.7: p.nombre = "GEOCEM 31 - %d" % p.camposEspecificosBala.corte p.annoCertificacion = 9 if p.camposEspecificosBala.corte == 6: p.dni = "0001 – GEOCEM - 20130701" elif p.camposEspecificosBala.corte == 12: p.dni = "0002 – GEOCEM - 20130701" elif p.camposEspecificosBala.corte == 18: p.dni = "0003 – GEOCEM - 20130701" elif p.camposEspecificosBala.corte == 24: p.dni = "0004 – GEOCEM - 20130701" else: modificado = False elif p.camposEspecificosBala.dtex == 4.4: p.nombre = "GEOCEM 31 - 12" p.dni = "0005 – GEOCEM - 20130701" p.annoCertificacion = 13 else: modificado = False else: modificado = False print p.dni return modificado def set_values(): """ Establece los valores por defecto de acuerdo a la 
tabla de Jesús. """ no_modificados = [] for p in pclases.ProductoVenta.select(): print "\t", p.descripcion, "...", sys.stdout.flush() modificado = update_values_producto(p) if not modificado: no_modificados.append(p) print "-"*80 print print "Productos no modificados:" print for p in no_modificados: print p.nombre, "-", p.descripcion def main(): print "Altering tables..." alter_tables() print "Setting values..." set_values() print "Done!" if __name__ == "__main__": main()
gpl-2.0
313,869,562,984,849,600
3,439,253,084,425,634,000
46.989189
357
0.615116
false
code-for-india/sahana_shelter_worldbank
tests/unit_tests/modules/s3/s3gis/LayerFailures.py
43
3905
import unittest s3gis = local_import("s3.s3gis") test_utils = local_import("test_utils") s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis") class FailingMethod(object): def __init__(self, method_spec, method): self.LayerClass, self.method_name = method_spec self.method = method def __enter__(self): self.method_impl = getattr(self.LayerClass, self.method_name) setattr(self.LayerClass, self.method_name, self.method) def __exit__(self, type, value, traceback): setattr(self.LayerClass, self.method_name, self.method_impl) ExpectSessionWarning = s3gis_tests.ExpectSessionWarning def check_map_accepts_layer_failure(warning): # mock logging with ExpectSessionWarning(session, warning): test_gis = s3gis.GIS() test_gis.show_map( catalogue_layers = True ) def thrower(exception_message): def fail(*a, **kw): raise Exception(exception_message) return fail class LayerFailures(unittest.TestCase): def setUp(test): current.session.s3.debug = False def single_record_layer(test, LayerClass): layer_type_name = LayerClass.__name__ warning = "%s not shown due to error" % layer_type_name for method_name in ("__init__", "as_javascript"): with FailingMethod( (LayerClass, method_name), thrower( "Test %s.SubLayer %s failure exception" % ( layer_type_name, method_name ) ) ): check_map_accepts_layer_failure(warning) def multiple_record_layer(test, LayerClass, table, **data): layer_type_name = LayerClass.__name__ warning = "%s not shown due to error" % layer_type_name test.single_record_layer(LayerClass) with s3gis_tests.InsertedRecord( db, table, dict( data, name = "Test "+layer_type_name, enabled = True, created_on = datetime.datetime.now(), modified_on = datetime.datetime.now(), ) ): for method_name in ("__init__", "as_dict"): with FailingMethod( (LayerClass.SubLayer, method_name), thrower( "Test %s.SubLayer %s failure exception" % ( layer_type_name, method_name ) ) ): check_map_accepts_layer_failure(warning) def test_google_layer_failure(test): test.single_record_layer(s3gis.GoogleLayer) def test_yahoo_layer_failure(test): test.single_record_layer(s3gis.YahooLayer) def test_bing_layer_failure(test): test.single_record_layer(s3gis.BingLayer) def test_GPX_layer_failure(test): test.multiple_record_layer(s3gis.GPXLayer, db.gis_layer_gpx) def test_WMS_layer_failure(test): test.multiple_record_layer(s3gis.WMSLayer, db.gis_layer_wms) def test_geojson_layer_failure(test): test.multiple_record_layer(s3gis.GeoJSONLayer, db.gis_layer_geojson) def test_GeoRSS_layer_failure(test): test.multiple_record_layer(s3gis.GeoRSSLayer, db.gis_layer_georss) def test_KML_layer_failure(test): test.multiple_record_layer(s3gis.KMLLayer, db.gis_layer_kml) def test_TMS_layer_failure(test): test.multiple_record_layer(s3gis.TMSLayer, db.gis_layer_tms) def test_WFS_layer_failure(test): test.multiple_record_layer(s3gis.WFSLayer, db.gis_layer_wfs) def test_feature_layer_failure(test): test.multiple_record_layer(s3gis.FeatureLayer, db.gis_layer_feature, module = "default" )
mit
-8,403,469,092,514,872,000
-7,851,802,546,521,340,000
32.376068
76
0.582074
false
embeddedarm/android_external_chromium_org
chrome/common/extensions/docs/server2/caching_file_system.py
25
4645
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import posixpath import sys from file_system import FileSystem, StatInfo, FileNotFoundError from future import Future class _AsyncUncachedFuture(object): def __init__(self, uncached_read_futures, stats_for_uncached, current_results, file_system, object_store): self._uncached_read_futures = uncached_read_futures self._stats_for_uncached = stats_for_uncached self._current_results = current_results self._file_system = file_system self._object_store = object_store def Get(self): new_results = self._uncached_read_futures.Get() # Update the cached data in the object store. This is a path -> (read, # version) mapping. self._object_store.SetMulti(dict( (path, (new_result, self._stats_for_uncached[path].version)) for path, new_result in new_results.iteritems())) new_results.update(self._current_results) return new_results class CachingFileSystem(FileSystem): '''FileSystem which implements a caching layer on top of |file_system|. It's smart, using Stat() to decided whether to skip Read()ing from |file_system|, and only Stat()ing directories never files. ''' def __init__(self, file_system, object_store_creator): self._file_system = file_system def create_object_store(category, **optargs): return object_store_creator.Create( CachingFileSystem, category='%s/%s' % (file_system.GetIdentity(), category), **optargs) self._stat_object_store = create_object_store('stat') # The read caches can start populated (start_empty=False) because file # updates are picked up by the stat, so it doesn't need the force-refresh # which starting empty is designed for. Without this optimisation, cron # runs are extra slow. self._read_object_store = create_object_store('read', start_empty=False) def Refresh(self): return self._file_system.Refresh() def Stat(self, path): '''Stats the directory given, or if a file is given, stats the file's parent directory to get info about the file. ''' # Always stat the parent directory, since it will have the stat of the child # anyway, and this gives us an entire directory's stat info at once. dir_path, file_path = posixpath.split(path) if dir_path and not dir_path.endswith('/'): dir_path += '/' # ... and we only ever need to cache the dir stat, too. dir_stat = self._stat_object_store.Get(dir_path).Get() if dir_stat is None: dir_stat = self._file_system.Stat(dir_path) assert dir_stat is not None # should raise a FileNotFoundError self._stat_object_store.Set(dir_path, dir_stat) if path == dir_path: stat_info = dir_stat else: file_version = dir_stat.child_versions.get(file_path) if file_version is None: raise FileNotFoundError('No stat found for %s in %s (found %s)' % (path, dir_path, dir_stat.child_versions)) stat_info = StatInfo(file_version) return stat_info def Read(self, paths): '''Reads a list of files. If a file is in memcache and it is not out of date, it is returned. Otherwise, the file is retrieved from the file system. ''' read_values = self._read_object_store.GetMulti(paths).Get() stat_values = self._stat_object_store.GetMulti(paths).Get() results = {} # maps path to read value uncached = {} # maps path to stat value for path in paths: stat_value = stat_values.get(path) if stat_value is None: # TODO(cduvall): do a concurrent Stat with the missing stat values. 
try: stat_value = self.Stat(path) except: return Future(exc_info=sys.exc_info()) read_value = read_values.get(path) if read_value is None: uncached[path] = stat_value continue read_data, read_version = read_value if stat_value.version != read_version: uncached[path] = stat_value continue results[path] = read_data if not uncached: return Future(value=results) return Future(delegate=_AsyncUncachedFuture( self._file_system.Read(uncached.keys()), uncached, results, self, self._read_object_store)) def GetIdentity(self): return self._file_system.GetIdentity() def __repr__(self): return '%s of <%s>' % (type(self).__name__, repr(self._file_system))
bsd-3-clause
-2,319,461,039,113,249,300
-3,478,241,394,337,455,000
36.16
80
0.651238
false
saram-kon/beepmusic
midi_to_beep/midi_to_beep.py
1
8039
#! python # COPYRIGHT: BALTHASAR SCHLOTMANN 2017 # LICENSED UNDER THE GNU GENERAL PUBLIC LICENSE VERSION 3 import sys import notedict as nd filename = sys.argv[1] # from here: https://github.com/bspaans/python-mingus/blob/master/mingus/midi/midi_file_in.py def parse_varbyte_as_int( array, i): """Read a variable length byte from the file and return thecorresponding integer.""" result = 0 bytes_read = 0 r = 0x80 while r & 0x80: r = array[i + bytes_read] bytes_read += 1 if r & 0x80: result = (result << 7) + (r & 0x7F) else: result = (result << 7) + r return (result, bytes_read) def midi_event(HEXCODE, incr, i): if byteseq[i] >= HEXCODE : i += 1 # i to next delta return (HEXCODE, i + incr) f = open(filename,"rb") byteseq = f.read() f.close() HEAD = True i=0 tempo = list() note = list() tracknotes = list() current = dict() j = 0 ON = 0 # Check global header if byteseq[i:i+4] == b'MThd': if byteseq[i+9] == 3: print("asynchrnous midi files are not handled by me") exit() delta_timer = byteseq[i+12:i+14] i += 14 print("GLOBAL HEADER") else: print("NO GLOBAL HEADER; IS THIS A MIDI FILE??") exit() while i<len(byteseq): # Global header #print("NEW EVENT: ", hex(i), hex(ON)) if HEAD: ON = 0 # Begin of track: if byteseq[i:i+4] == b'MTrk': # i to next delta i += 8 HEAD = False varint, bytesread = parse_varbyte_as_int( byteseq, i ) delta = varint print("LOCAL HEADER - SET DELTA TO", byteseq[i], hex(i)) tracknotes.append(note) note = list() current = dict() # i to next midi event i += bytesread else: raise ValueError("EXPECTED HEADER: " + str( hex(i) ) ) # If not HEAD = True else: if byteseq[i:i+1] == b'\xff': ON = 0 #print("META-EVENT i:", hex(i)) # i to midi event type byte 2 i += 1 # if I want to detect more I have to use a dictonary # strange input without length if byteseq[i] > 240: print("ERROR UNKNOWN") exit() #tempo change elif byteseq[i:i+1] == b'\x51': j += 1 #print("TEMPO CHANGE Nr:",j, hex(i)) tempo.append([delta, int.from_bytes(byteseq[i+2:i+5], byteorder='big', signed=False)]) #end of track elif byteseq[i:i+1] == b'\x2F': HEAD = True NOTEON = False print(current) print("END OF TRACK") i += 2 continue # i to delta i += byteseq[i+1] +2 #print("DELTA position", hex(i), hex(byteseq[i])) # delta is of variable length varint, bytesread = parse_varbyte_as_int( byteseq, i ) #print("READ", bytesread) #print("VARINT", varint) delta += varint # i to new midi event i += bytesread #print("NEW I", hex(i)) else: if byteseq[i] >= 0xF0 or (0xF0 == ON and 0x80 > byteseq[i]): #if byteseq[i] >= 0xF0 : i += 1 ???? 
ON = 0xF0 #move to length i += 1 #move to next delta i += byteseq[i] + 1 # Pitch wheel change elif byteseq[i] >= 0xE0 or (0xE0 == ON and 0x80 > byteseq[i]): if byteseq[i] >= 0xE0 : i += 1 ON = 0xE0 i += 2 # Channel after touch elif byteseq[i] >= 0xD0 or (0xD0 == ON and 0x80 > byteseq[i]): if byteseq[i] >= 0xD0 : i += 1 ON = 0xD0 i += 1 # Program change elif byteseq[i] >= 0xC0 or (0xC0 == ON and 0x80 > byteseq[i]): if byteseq[i] >= 0xC0 : i += 1 ON = 0xC0 i += 1 # Control change elif byteseq[i] >= 0xB0 or (0xB0 == ON and 0x80 > byteseq[i]): if byteseq[i] >= 0xB0 : i += 1 ON = 0xB0 i += 2 # Key after-touch elif byteseq[i] >= 0xA0 or (0xA0 == ON and 0x80 > byteseq[i]): if byteseq[i] >= 0xA0 : i += 1 ON = 0xA0 i += 2 # Note ON elif byteseq[i] >= 0x90 or (0x90 == ON and 0x80 > byteseq[i]): if byteseq[i] >= 0x90 : i += 1 #print("NOTE ", hex(i), byteseq[i], hex(ON)) ON = 0x90 # i to note # check for pseudo OFF if byteseq[i+1] != 0: # The note is added to the note array note.append( [ byteseq[i], delta, delta ]) # the note is saved in a dict together with its position if byteseq[i] in current: print(" ERROR NOTE ALREADY IN DICT", hex(i), byteseq[i]) #exit() current[ byteseq[i] ] = len(note)-1 else: # set the endtime and remove the dict entry temp = current.pop( byteseq[i], 9999 ) if temp != 9999: note[temp][2] = delta else: print("PSEUDO OFF: WARNING NOTE NOT IN DICT", hex(i), byteseq[i]) # i to next delta i += 2 # Note OFF elif byteseq[i] >= 0x80 or ON == 0x80: if byteseq[i] >= 0x80 : i += 1 ON = 0x80 # i to note # set the endtime and remove the dict entry #print(hex(i)) temp = current.pop( byteseq[i], 9999 ) if temp != 9999: note[temp][2] = delta else: raise ValueError("OFF: WARNING NOTE NOT IN DICT " + str(hex(i)) + str( byteseq[i])) # i to next delta i += 2 else: print(hex(ON)) raise ValueError("BAD INPUT: NOT A VALID MIDI EVENT i: " +str( hex(i)) +str( hex(byteseq[i]) ) ) varint, bytesread = parse_varbyte_as_int( byteseq, i ) delta += varint # i to new midi event i += bytesread tracknotes.append(note) print("SUCCESS") import numpy as np newnotes = list() for i in range(len(tracknotes)): newnotes += tracknotes[i] #print(newnotes) npnot = np.asarray(newnotes) ind = np.lexsort((npnot[::-1,0],npnot[:,1])) npnot = npnot[ind] #print(list(npnot)) #npnot = np.asarray(tracknotes[2]) # quick and dirty may need change index = np.asarray([True] * npnot.shape[0]) for i in range(npnot.shape[0]-1): if npnot[i+1,1] >= npnot[i,1] and npnot[i+1,1] <= npnot[i,2]: npnot[i , 2] = npnot[i+1,1] #print("HERE", i) if npnot[i,2] - npnot[i,1] <= 0: index[i] = False #print(npnot) #print(index) npnot = npnot[index] #print(list(npnot)) if len(tempo) == 0: tempo.append([0,1]) tempo.append([9999999999,9999999999]) tempo = np.asarray(tempo) tempo[:,1] = tempo[:,1] / np.amin(tempo[:,1]) tempo[-1,1] = 0 j = 0 length = np.zeros( npnot.shape[0]) pause = np.zeros( npnot.shape[0]) for i in range(len(npnot[:,0])): while npnot[i,1] >= tempo[j,0]: z = 1 if npnot[i,1] < tempo[j+z,0]: if npnot[i,2] < tempo[j+z,0]: length[i] = tempo[j,1] * (npnot[i,2] - npnot[i,1]) #print(tempo[j,1]) #exit while loop break else: length[i] = tempo[j,1] * (tempo[j+z,0] - npnot[i,1]) while npnot[i,2] >= tempo[j+z,0]: # Maybe use temp variables? 
length[i] += tempo[j+z,1] * (min(npnot[i,2],tempo[j+z+1,0]) - max(npnot[i,1],tempo[j+z,0])) z += 1 #exit while loop break else: j += 1 j = 0 for i in range(len(npnot[:,0])-1): while npnot[i,2] >= tempo[j,0]: z = 1 if npnot[i,2] < tempo[j+z,0]: if npnot[i+1,1] < tempo[j+z,0]: pause[i] = tempo[j,1] * (npnot[i+1,1] - npnot[i,2]) #print(tempo[j,1]) #exit while loop break else: pause[i] = tempo[j,1] * (tempo[j+z,0] - npnot[i,2]) while npnot[i+1,1] >= tempo[j+z,0]: # Maybe use temp variables? pause[i] += tempo[j+z,1] * (min(npnot[i+1,1],tempo[j+z+1,0]) - tempo[j+z,0]) z += 1 #exit while loop break else: j += 1 index = length > 0 npnot = npnot[index] length = length[index] pause = pause[index] pause = pause.clip(min=0) final = "#! /bin/bash\nbeep" tfactor = 2 for i in range(npnot.shape[0]-1): final += " -f " + str(nd.notedict[str(npnot[i][0])]) + " -l " +str(abs(length[i])*tfactor) + " -D " + str( pause[i] * tfactor ) + " -n" i += 1 final += " -f " + str(nd.notedict[str(npnot[i][0])]) + " -l " +str(abs(length[i])*tfactor) + " -D " + str( pause[i] * tfactor ) #print(filename.replace(".mid",".sh")) f = open(filename.replace(".mid",".sh"),"w") f.write(final) f.close() print("Number of Tracks : ", len(tracknotes)) print("Number of Notes : ", len(npnot)) print("Characters written: ", len(final)) print("Output file : ", filename.replace(".mid",".sh"))
gpl-3.0
1,740,531,822,883,304,200
-4,514,476,577,650,236,400
19.989556
136
0.568603
false
abetkin/django-qfilter
qfilters/exotic_types.py
1
5664
# -*- coding: utf-8 -*- from itertools import groupby from functools import reduce from . import QuerySetFilter, ValuesDictFilter from .utils import CallablesList class QuerysetIterationHook(QuerySetFilter): def __init__(self, hook_function): self.hook_function = hook_function def __and__(self, other): if isinstance(other, QuerysetIterationHook): # now __and__ and __or__ are identical # TODO: add support for lazy operations return self.__class__(hook_function=CallablesList.from_callables( [self.hook_function, other.hook_function], None)) return super(QuerysetIterationHook, self).__and__(other) def __or__(self, other): if isinstance(other, QuerysetIterationHook): # now __and__ and __or__ are identical # TODO: add support for lazy operations return self.__class__(hook_function=CallablesList.from_callables( [self.hook_function, other.hook_function], None)) return super(QuerysetIterationHook, self).__or__(other) def __call__(self, queryset): class QuerysetWrapper(type(queryset)): def iterator(this): for obj in super(QuerysetWrapper, this).iterator(): self.hook_function(obj) #TODO: maybe let it throw exception yield obj queryset.__class__ = QuerysetWrapper return queryset class _Attribute(object): _empty = True # does not contain other attributes def __init__(self, name=None, values_dict=None): self.name = name self._dict = values_dict @classmethod def make_class_from_fields_list(cls, fields_list, parent_field=None): fields_list = list(filter(None, fields_list)) if not fields_list: return cls if parent_field: result = type('%s_%s' % (parent_field, cls.__name__), (cls,), {}) else: transform_dict = {field: field.transform for field in fields_list if hasattr(field, 'transform')} class Object(cls): def __getitem__(self, item): return self._dict[item] def __init__(self, name=None, values_dict=None): class look_for_transforms(dict): def __getitem__(this, item): rv = super(look_for_transforms, this).__getitem__(item) if item not in transform_dict: return rv transform = transform_dict[item] return transform(self, rv) values_dict = values_dict and look_for_transforms(values_dict) return super(Object, self).__init__(name, values_dict) result = Object head__tail = [field.partition('__') for field in fields_list] for head, head__tail in groupby(head__tail, key=lambda t: t[0]): if not parent_field: parent = head else: parent = '__'.join([parent_field, head]) attr_class = cls.make_class_from_fields_list( (parts[-1] for parts in head__tail), parent_field=parent) setattr(result, head, attr_class(parent)) result._empty = False return result def get_value(self): assert self._dict and self.name in self._dict, str(self._dict.items()) + str(self.name) return self._dict[self.name] def __get__(self, instance, owner): if not instance: return self self._dict = instance._dict return self if not self._empty else self.get_value() class PropertyBasedFilter(ValuesDictFilter): def __init__(self, filter_func, fields_list=None, properties=None): super(PropertyBasedFilter, self).__init__(filter_func, fields_list) if properties: self.properties = properties def __mod__(self, other): if not isinstance(other, ValuesDictFilter): return NotImplemented fields_list=self.fields_list + other.fields_list properties = set(self.properties) if isinstance(other, PropertyBasedFilter): properties |= set(other.properties) return self.__class__(None, fields_list, properties) __rmod__ = __mod__ def _fetch_objects(self, queryset): fields_list = ['pk'] + self.fields_list Object = _Attribute.make_class_from_fields_list(fields_list) def 
get_related_model(model, field_name): return getattr(model, field_name).field.related.parent_model for property_name in self.properties: split_name = property_name.split('.') model_class = reduce(lambda model, field: get_related_model(model, field), split_name[:-1], queryset.model) if not split_name[:-1]: attribute_class = Object else: get_attr = lambda cls, name: getattr(cls, name).__class__ attribute_class = reduce(get_attr, split_name[:-1], Object) prop = getattr(model_class, split_name[-1]) setattr(attribute_class, split_name[-1], property(prop.fget)) objects = queryset.values(*fields_list) return [Object(values_dict=dic) for dic in objects]
mit
4,964,152,541,470,790,000
-7,802,500,629,073,386,000
38.333333
95
0.549788
false
eloquence/unisubs
apps/videos/migrations/0148_set_video_title.py
5
31537
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models from django.core.exceptions import ObjectDoesNotExist class Migration(SchemaMigration): def forwards(self, orm): if not db.dry_run: for video in orm.Video.objects.all(): new_title = self.calc_title_for_video(video).strip() or video.title.strip() video.title = new_title video.save() def calc_title_for_video(self, video): qs = video.subtitlelanguage_set.filter(is_original=True)[:1] try: lang = qs.get() except ObjectDoesNotExist: return '' public_status_values = [ "not__under_moderation", "approved", ] try: latest_version = lang.subtitleversion_set.filter( moderation_status__in=public_status_values).order_by( '-version_no')[0] except (ObjectDoesNotExist, IndexError): return '' return latest_version.title.strip() def backwards(self, orm): # title was not really used or defined before this migration, so going # backwards can be a no-op pass models = { 'accountlinker.thirdpartyaccount': { 'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) }, 'auth.customuser': { 'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']}, 'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}), 'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}), 'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}), 'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}), 'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}), 'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'comments.comment': { 'Meta': {'object_name': 'Comment'}, 'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_pk': ('django.db.models.fields.TextField', [], {}), 'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}), 'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'teams.application': { 'Meta': {'unique_together': "(('team', 'user', 'status'),)", 'object_name': 'Application'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'history': ('django.db.models.fields.TextField', [], {'null': 
'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"}) }, 'teams.partner': { 'Meta': {'object_name': 'Partner'}, 'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'managed_partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.CustomUser']"}), 'can_request_paid_captions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}) }, 'teams.project': { 'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}), 'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}), 'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}) }, 'teams.team': { 'Meta': {'object_name': 'Team'}, 'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}), 'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'last_notification_time': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}), 'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}), 'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['teams.Partner']"}), 'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}), 'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}), 'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}), 'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}), 'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}), 'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}), 'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}), 'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}) }, 'teams.teammember': { 'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"}) }, 'teams.teamvideo': { 'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'}, 'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}), 'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}), 'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}), 'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'}) }, 'videos.action': { 'Meta': {'object_name': 'Action'}, 'action_type': ('django.db.models.fields.IntegerField', [], {}), 'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}), 'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamMember']", 'null': 'True', 'blank': 'True'}), 'new_video_title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}), 'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']", 'null': 'True', 'blank': 'True'}) }, 'videos.subtitle': { 'Meta': {'unique_together': "(('version', 'subtitle_id'),)", 'object_name': 'Subtitle'}, 'end_time': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'db_column': "'end_time_ms'"}), 'end_time_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'end_time'"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'start_of_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'start_time': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'db_column': "'start_time_ms'"}), 'start_time_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'start_time'"}), 'subtitle_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'subtitle_order': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'subtitle_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'}) }, 'videos.subtitlelanguage': { 'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}), 'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 
'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}), 'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}), 'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}), 'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, 'videos.subtitlemetadata': { 'Meta': {'object_name': 'SubtitleMetadata'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'data': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.PositiveIntegerField', [], {}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'subtitle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Subtitle']"}) }, 'videos.subtitleversion': { 'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'}, 'datetime_started': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'forked_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}), 'moderation_status': ('django.db.models.fields.CharField', [], {'default': "'not__under_moderation'", 'max_length': '32', 'db_index': 'True'}), 'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), 'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'result_of_rollback': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}), 'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'videos.subtitleversionmetadata': { 'Meta': 
{'unique_together': "(('key', 'subtitle_version'),)", 'object_name': 'SubtitleVersionMetadata'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.PositiveIntegerField', [], {}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['videos.SubtitleVersion']"}) }, 'videos.usertestresult': { 'Meta': {'object_name': 'UserTestResult'}, 'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'task1': ('django.db.models.fields.TextField', [], {}), 'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'videos.video': { 'Meta': {'object_name': 'Video'}, 'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}), 's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}), 'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}), 'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}), 'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}), 'video_id': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}), 'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}), 'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}), 'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, 'videos.videofeed': { 'Meta': {'object_name': 'VideoFeed'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}) }, 'videos.videometadata': { 'Meta': {'object_name': 'VideoMetadata'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'data': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.PositiveIntegerField', [], {}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}) }, 'videos.videourl': { 'Meta': {'object_name': 'VideoUrl'}, 'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'owner_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}), 'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}), 'videoid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}) } } complete_apps = ['videos']
agpl-3.0
-2,281,057,371,206,455,600
6,079,516,025,605,155,000
87.338936
226
0.554523
false
jnewland/home-assistant
homeassistant/components/bom/weather.py
8
3320
"""Support for Australian BOM (Bureau of Meteorology) weather service.""" import logging import voluptuous as vol from homeassistant.components.weather import PLATFORM_SCHEMA, WeatherEntity from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS) from homeassistant.helpers import config_validation as cv # Reuse data and API logic from the sensor implementation from .sensor import ( CONF_STATION, BOMCurrentData, closest_station, validate_station) _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_STATION): validate_station, }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the BOM weather platform.""" station = config.get(CONF_STATION) or closest_station( config.get(CONF_LATITUDE), config.get(CONF_LONGITUDE), hass.config.config_dir) if station is None: _LOGGER.error("Could not get BOM weather station from lat/lon") return False bom_data = BOMCurrentData(station) try: bom_data.update() except ValueError as err: _LOGGER.error("Received error from BOM_Current: %s", err) return False add_entities([BOMWeather(bom_data, config.get(CONF_NAME))], True) class BOMWeather(WeatherEntity): """Representation of a weather condition.""" def __init__(self, bom_data, stationname=None): """Initialise the platform with a data instance and station name.""" self.bom_data = bom_data self.stationname = stationname or self.bom_data.latest_data.get('name') def update(self): """Update current conditions.""" self.bom_data.update() @property def name(self): """Return the name of the sensor.""" return 'BOM {}'.format(self.stationname or '(unknown station)') @property def condition(self): """Return the current condition.""" return self.bom_data.get_reading('weather') # Now implement the WeatherEntity interface @property def temperature(self): """Return the platform temperature.""" return self.bom_data.get_reading('air_temp') @property def temperature_unit(self): """Return the unit of measurement.""" return TEMP_CELSIUS @property def pressure(self): """Return the mean sea-level pressure.""" return self.bom_data.get_reading('press_msl') @property def humidity(self): """Return the relative humidity.""" return self.bom_data.get_reading('rel_hum') @property def wind_speed(self): """Return the wind speed.""" return self.bom_data.get_reading('wind_spd_kmh') @property def wind_bearing(self): """Return the wind bearing.""" directions = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW'] wind = {name: idx * 360 / 16 for idx, name in enumerate(directions)} return wind.get(self.bom_data.get_reading('wind_dir')) @property def attribution(self): """Return the attribution.""" return "Data provided by the Australian Bureau of Meteorology"
apache-2.0
5,596,641,506,031,360,000
1,909,512,561,096,822,800
31.23301
79
0.637048
false
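A note on the weather.py record above: the only non-obvious logic is the wind_bearing property, which maps a 16-point compass name from the BOM observation feed onto a bearing in degrees. A minimal standalone sketch of that conversion (the direction list is copied from the record; the helper name is ours, not a Home Assistant API):

# Sketch: 16-point compass name -> bearing in degrees, as wind_bearing does.
DIRECTIONS = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
              'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']

def bearing_from_compass(direction):
    """Return the bearing in degrees, or None for an unknown reading."""
    table = {name: idx * 360 / 16 for idx, name in enumerate(DIRECTIONS)}
    return table.get(direction)

assert bearing_from_compass('E') == 90.0      # each step is 22.5 degrees
assert bearing_from_compass('SSW') == 202.5
assert bearing_from_compass('calm') is None   # unknown readings map to None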
theblacklion/diamond-framework
tools/hotshot2calltree.py
3
14127
#!/usr/bin/env python
# _*_ coding: latin1 _*_
#
# Copyright (c) 2003 by WEB.DE, Karlsruhe
# Author: Jörg Beyer <[email protected]>
#
# hotshot2cachegrind is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
#
# This script transforms the pstat output of the hotshot
# python profiler into the input of kcachegrind.
#
# example usage:
# modify your python script to run this code:
#
# import hotshot
# filename = "pythongrind.prof"
# prof = hotshot.Profile(filename, lineevents=1)
# prof.runcall(run) # assuming that "run" should be called.
# prof.close()
#
# it will run the "run"-method under profiling and write
# the results in a file, called "pythongrind.prof".
#
# then call this script:
# hotshot2cachegrind -o <output> <input>
# or here:
# hotshot2cachegrind cachegrind.out.0 pythongrind.prof
#
# then call kcachegrind:
# kcachegrind cachegrind.out.0
#
# TODO:
# * there are problems with recursive calls (direct and indirect) - the
#   reported costs are wrong in that case.
#
# * some functions are displayed with "?" as their name. These are
#   possibly just the C/C++ extensions.
#
# * function name mangling that takes the file name into account is
#   still missing; at the moment all __init__'s and all run's are hard
#   to tell apart :-(
#
version = "$Revision: 910524 $"
progname = "hotshot2cachegrind"

import os, sys
from hotshot import stats,log
import os.path

file_limit=0

what2text = {
    log.WHAT_ADD_INFO : "ADD_INFO",
    log.WHAT_DEFINE_FUNC : "DEFINE_FUNC",
    log.WHAT_DEFINE_FILE : "DEFINE_FILE",
    log.WHAT_LINENO : "LINENO",
    log.WHAT_EXIT : "EXIT",
    log.WHAT_ENTER : "ENTER"}

# a pseudo caller on the caller stack. This represents
# the Python interpreter that executes the given python
# code.
root_caller = ("PythonInterpreter",0,"execute")

class CallStack:
    """A tiny Stack implementation, based on python lists"""
    def __init__(self):
        self.stack = []
        self.recursion_counter = {}
    def push(self, elem):
        """put something on the stack"""
        self.stack.append(elem)
        rc = self.recursion_counter.get(elem, 0)
        self.recursion_counter[elem] = rc + 1

    def pop(self):
        """get the head element of the stack and remove it from the stack"""
        elem = self.stack[-1:][0]
        rc = self.recursion_counter.get(elem) - 1
        if rc>0:
            self.recursion_counter[elem] = rc
        else:
            del self.recursion_counter[elem]
        return self.stack.pop()

    def top(self):
        """get the head element of the stack, stack is unchanged."""
        return self.stack[-1:][0]
    def handleLineCost(self, tdelta):
        p, c = self.stack.pop()
        self.stack.append( (p,c + tdelta) )
    def size(self):
        """ return how many elements the stack has"""
        return len(self.stack)

    def __str__(self):
        return "[stack: %s]" % self.stack

    def recursion(self, pos):
        return self.recursion_counter.get(pos, 0)
        #return self.recursion_dict.has_key((entry[0][0], entry[0][2]))

def return_from_call(caller_stack, call_dict, cost_now):
    """return from a function call

       remove the function from the caller stack,
       add the costs to the calling function.
""" called, cost_at_enter = caller_stack.pop() caller, caller_cost = caller_stack.top() #print "return_from_call: %s ruft %s" % (caller, called,) per_file_dict = call_dict.get(called[0], {}) per_caller_dict = per_file_dict.get(called[2], {}) cost_so_far, call_counter = per_caller_dict.get(caller, (0, 0)) if caller_stack.recursion(called): per_caller_dict[caller] = (cost_so_far, call_counter + 1) else: per_caller_dict[caller] = (cost_so_far + cost_now - cost_at_enter, call_counter + 1) per_file_dict[called[2]] = per_caller_dict call_dict[called[0]] = per_file_dict def updateStatus(filecount): sys.stdout.write("reading File #%d \r" % filecount) sys.stdout.flush() def convertProfFiles(output, inputfilenames): """convert all the given input files into one kcachegrind input file. """ call_dict = {} cost_per_pos = {} cost_per_function = {} caller_stack = CallStack() caller_stack.push((root_caller, 0)) total_cost = 0 filecount = 1 number_of_files = len(inputfilenames) for inputfilename in inputfilenames: updateStatus(filecount) cost, filecount = convertHandleFilename(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount) total_cost += cost if (file_limit > 0) and (filecount > file_limit): break print print "total_cost: % d Ticks",total_cost dumpResults(output, call_dict, total_cost, cost_per_pos, cost_per_function) def convertHandleFilename(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount): updateStatus(filecount) if not ((file_limit > 0) and (filecount > file_limit)): if os.path.isdir(inputfilename): cost, filecount = convertProfDir(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount) elif os.path.isfile(inputfilename): cost = convertProfFile(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function) filecount += 1 else: sys.stderr.write("warn: ignoring '%s', is no file and no directory\n" % inputfilename) cost = 0 return (cost, filecount) def convertProfDir(start, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount): cost = 0 filenames = os.listdir(start) for f in filenames: if (file_limit > 0) and (filecount > file_limit): break full = os.path.join(start, f) c, filecount = convertHandleFilename(full, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount) cost += c; return (cost, filecount) def handleCostPerPos(cost_per_pos, pos, current_cost): """ the cost per source position are managed in a dict in a dict. the cost are handled per file and there per function. so, the per-file-dict contains some per-function-dicts which sum up the cost per line (in this function and in this file). """ filename = pos[0] lineno = pos[1] funcname = pos[2] file_dict = cost_per_pos.get(filename, {}) func_dict = file_dict.get(funcname, {}) func_dict.setdefault(lineno, 0) func_dict[lineno] += current_cost file_dict[funcname] = func_dict cost_per_pos[filename] = file_dict def convertProfFile(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function): """convert a single input file into one kcachegrind data. 
       this is the most expensive function in this python source :-)
    """
    total_cost = 0
    try:
        logreader = log.LogReader(inputfilename)
        current_cost = 0
        hc = handleCostPerPos # shortcut
        for item in logreader:
            what, pos, tdelta = item
            (file, lineno, func) = pos
            #line = "%s %s %d %s %d" % (what2text[what], file, lineno, func, tdelta)
            #print line
            # most common cases first
            if what == log.WHAT_LINENO:
                # add the current cost to the current function
                hc(cost_per_pos, pos, tdelta)
                total_cost += tdelta
            elif what == log.WHAT_ENTER:
                caller_stack.push((pos, total_cost))
                hc(cost_per_pos, pos, tdelta)
                total_cost += tdelta
            elif what == log.WHAT_EXIT:
                hc(cost_per_pos, pos, tdelta)
                total_cost += tdelta
                return_from_call(caller_stack, call_dict, total_cost)
            else:
                assert 0, "duh: %d" % what

        # I have no idea, why sometimes the stack is not empty - we
        # have to rewind the stack to get 100% for the root_caller
        while caller_stack.size() > 1:
            return_from_call(caller_stack, call_dict, total_cost)

    except IOError:
        print "could not open inputfile '%s', ignoring it." % inputfilename
    except EOFError, m:
        print "EOF: %s" % (m,)
    return total_cost

def pretty_name(file, function):
    #pfile = os.path.splitext(os.path.basename(file)) [0]
    #return "%s_[%s]" % (function, file)
    return "%s" % function
    #return "%s::%s" % (file, function)
    #return "%s_%s" % (pfile, function)

class TagWriter:
    def __init__(self, output):
        self.output = output
        self.last_values = {}

    def clearTag(self, tag):
        if self.last_values.has_key(tag):
            del self.last_values[ tag ]
    def clear(self):
        self.last_values = {}

    def write(self, tag, value):
        self.output.write("%s=%s\n" % (tag, value))
        #if (not self.last_values.has_key(tag)) or self.last_values[tag] != value:
        #    self.last_values[ tag ] = value
        #    self.output.write("%s=%s\n" % (tag, value))

def dumpResults(output, call_dict, total_cost, cost_per_pos, cost_per_function):
    """write the collected results in the format kcachegrind
       could read.
    """
    # the intro
    output.write("events: Tick\n")
    output.write("summary: %d\n" % total_cost)
    output.write("cmd: your python script\n")
    output.write("\n")
    tagwriter = TagWriter(output)

    # now the costs per line
    for file in cost_per_pos.keys():
        func_dict = cost_per_pos[file]
        for func in func_dict.keys():
            line_dict = func_dict[func]
            tagwriter.write("ob", file)
            tagwriter.write("fn", func)# pretty_name(file, func)) ; output.write("# ^--- 2\n")
            tagwriter.write("fl", file)
            for line in line_dict:
                output.write("%d %d\n" %( line, line_dict[line] ))
    output.write("\n\n")

    # now the function calls. For each caller all the called
    # functions and their costs are written.
    for file in call_dict.keys():
        per_file_dict = call_dict[file]
        #print "file %s -> %s" % (file, per_file_dict)
        for called_x in per_file_dict.keys():
            #print "called_x:",called_x
            per_caller_dict = per_file_dict[called_x]
            #print "called_x %s is called by: %s" % (called_x, per_caller_dict)
            for caller_x in per_caller_dict.keys():
                tagwriter.write("ob", caller_x[0])
                tagwriter.write("fn", caller_x[2])# pretty_name(caller_x[2], caller_x[0])) ; output.write("# ^--- 1\n")
                tagwriter.write("fl", caller_x[0])
                tagwriter.write("cob", file)
                tagwriter.write("cfn", called_x) #pretty_name(file, called_x))
                tagwriter.write("cfl", file)
                cost, count = per_caller_dict[caller_x]
                #print "called_x:",called_x
                output.write("calls=%d\n%d %d\n" % (count, caller_x[1], cost))
                tagwriter.clear()
                #tagwriter.clearTag("cob")
                # is it a bug in kcachegrind, that the "cob=xxx" line has
                # to be rewritten after a calls entry with a cost line?
#assert cost <= total_cost, "caller_x: %s, per_caller_dict: %s " % (caller_x, per_caller_dict, ) #output.write("calls=%d\n%d %d\n" % (count, caller_x[1], cost)) output.write("\n") def run_without_optparse(): """parse the options without optparse, use sys.argv""" if len(sys.argv) < 4 or sys.argv[1] != "-o" : print "usage: hotshot2cachegrind -o outputfile in1 [in2 [in3 [...]]]" return outputfilename = sys.argv[2] try: output = file(outputfilename, "w") args = sys.argv[3:] convertProfFiles(output, args) output.close() except IOError: print "could not open '%s' for writing." % outputfilename def run_with_optparse(): """parse the options with optparse""" global file_limit versiontext = "%s version: %s" % ( progname, version.split()[1], ) parser = OptionParser(version=versiontext) parser.add_option("-o", "--output", action="store", type="string", dest="outputfilename", help="write output into FILE") parser.add_option("--file-limit", action="store", dest="file_limit", default=0, help="stop after given number of input files") output = sys.stdout close_output = 0 (options, args) = parser.parse_args() file_limit = int(options.file_limit) try: if options.outputfilename and options.outputfilename != "-": output = file(options.outputfilename, "w") close_output = 1 except IOError: print "could not open '%s' for writing." % options.outputfilename if output: convertProfFiles(output, args) if close_output: output.close() def profile_myself(): import hotshot filename = "self.prof" if not os.path.exists(filename): prof = hotshot.Profile(filename, lineevents=1) prof.runcall(run) prof.close() else: print "not profiling myself, since '%s' exists, running normal" % filename run() # check if optparse is available. try: from optparse import OptionParser run = run_with_optparse except ImportError: run = run_without_optparse if __name__ == "__main__": try: run() #profile_myself() except KeyboardInterrupt: sys.exit(1)
mit
3,228,884,582,302,578,000
-1,456,420,424,759,977
34.850254
131
0.613168
false
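One detail of the converter in the record above that is easy to miss: CallStack is not just a stack, it also counts how many frames of each position are currently live, and return_from_call skips cost attribution whenever that counter indicates recursion, so recursive frames are not double-counted. A stripped-down sketch of that mechanism (the class and variable names are ours; positions are plain strings instead of the converter's (file, line, func) tuples):

# Sketch: a stack with a per-position live-frame counter, as used by the
# converter to avoid attributing the cost of recursive frames twice.
class RecursionAwareStack:
    def __init__(self):
        self._stack = []
        self._live = {}                      # position -> live frame count

    def push(self, pos):
        self._stack.append(pos)
        self._live[pos] = self._live.get(pos, 0) + 1

    def pop(self):
        pos = self._stack.pop()
        if self._live[pos] > 1:
            self._live[pos] -= 1
        else:
            del self._live[pos]
        return pos

    def recursion(self, pos):
        return self._live.get(pos, 0)

s = RecursionAwareStack()
s.push("fib")
s.push("fib")                                # fib calls itself
assert s.recursion("fib") == 2               # recursion live: skip the cost
s.pop()
assert s.recursion("fib") == 1               # back to a single live frame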
Andrey-Pavlov/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
118
37734
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import unittest2 as unittest

from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import *
from webkitpy.layout_tests.models.test_expectations import *

try:
    from collections import OrderedDict
except ImportError:
    # Needed for Python < 2.7
    from webkitpy.thirdparty.ordered_dict import OrderedDict


class Base(unittest.TestCase):
    # Note that all of these tests are written assuming the configuration
    # being tested is Windows XP, Release build.

    def __init__(self, testFunc):
        host = MockHost()
        self._port = host.port_factory.get('test-win-xp', None)
        self._exp = None
        unittest.TestCase.__init__(self, testFunc)

    def get_test(self, test_name):
        # FIXME: Remove this routine and just reference test names directly.
return test_name def get_basic_tests(self): return [self.get_test('failures/expected/text.html'), self.get_test('failures/expected/image_checksum.html'), self.get_test('failures/expected/crash.html'), self.get_test('failures/expected/missing_text.html'), self.get_test('failures/expected/image.html'), self.get_test('passes/text.html')] def get_basic_expectations(self): return """ Bug(test) failures/expected/text.html [ Failure ] Bug(test) failures/expected/crash.html [ WontFix ] Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ] Bug(test) failures/expected/image_checksum.html [ WontFix ] Bug(test) failures/expected/image.html [ WontFix Mac ] """ def parse_exp(self, expectations, overrides=None, is_lint_mode=False): expectations_dict = OrderedDict() expectations_dict['expectations'] = expectations if overrides: expectations_dict['overrides'] = overrides self._port.expectations_dict = lambda: expectations_dict expectations_to_lint = expectations_dict if is_lint_mode else None self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_to_lint=expectations_to_lint) def assert_exp(self, test, result): self.assertEqual(self._exp.get_expectations(self.get_test(test)), set([result])) def assert_bad_expectations(self, expectations, overrides=None): self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides) class BasicTests(Base): def test_basic(self): self.parse_exp(self.get_basic_expectations()) self.assert_exp('failures/expected/text.html', FAIL) self.assert_exp('failures/expected/image_checksum.html', PASS) self.assert_exp('passes/text.html', PASS) self.assert_exp('failures/expected/image.html', PASS) class MiscTests(Base): def test_multiple_results(self): self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]') self.assertEqual(self._exp.get_expectations( self.get_test('failures/expected/text.html')), set([FAIL, CRASH])) def test_result_was_expected(self): # test basics self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), True) self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False) # test handling of SKIPped tests and results self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=True), True) self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=False), False) # test handling of MISSING results and the REBASELINE modifier self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True, test_is_skipped=False), True) self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False) def test_remove_pixel_failures(self): self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL])) self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS])) self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS])) self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL])) self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH])) def test_suffixes_for_expectations(self): self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav'])) 
self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png'])) self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav'])) self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set()) def test_category_expectations(self): # This test checks unknown tests are not present in the # expectations and that known test part of a test category is # present in the expectations. exp_str = 'Bug(x) failures/expected [ WontFix ]' self.parse_exp(exp_str) test_name = 'failures/expected/unknown-test.html' unknown_test = self.get_test(test_name) self.assertRaises(KeyError, self._exp.get_expectations, unknown_test) self.assert_exp('failures/expected/crash.html', PASS) def test_get_modifiers(self): self.parse_exp(self.get_basic_expectations()) self.assertEqual(self._exp.get_modifiers( self.get_test('passes/text.html')), []) def test_get_expectations_string(self): self.parse_exp(self.get_basic_expectations()) self.assertEqual(self._exp.get_expectations_string( self.get_test('failures/expected/text.html')), 'FAIL') def test_expectation_to_string(self): # Normal cases are handled by other tests. self.parse_exp(self.get_basic_expectations()) self.assertRaises(ValueError, self._exp.expectation_to_string, -1) def test_get_test_set(self): # Handle some corner cases for this routine not covered by other tests. self.parse_exp(self.get_basic_expectations()) s = self._exp.get_test_set(WONTFIX) self.assertEqual(s, set([self.get_test('failures/expected/crash.html'), self.get_test('failures/expected/image_checksum.html')])) def test_parse_warning(self): try: filesystem = self._port.host.filesystem filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content') self.get_test('disabled-test.html-disabled'), self.parse_exp("[ FOO ] failures/expected/text.html [ Failure ]\n" "Bug(rniwa) non-existent-test.html [ Failure ]\n" "Bug(rniwa) disabled-test.html-disabled [ ImageOnlyFailure ]", is_lint_mode=True) self.assertFalse(True, "ParseError wasn't raised") except ParseError, e: warnings = ("expectations:1 Unrecognized modifier 'foo' failures/expected/text.html\n" "expectations:2 Path does not exist. non-existent-test.html") self.assertEqual(str(e), warnings) def test_parse_warnings_are_logged_if_not_in_lint_mode(self): oc = OutputCapture() try: oc.capture_output() self.parse_exp('-- this should be a syntax error', is_lint_mode=False) finally: _, _, logs = oc.restore_output() self.assertNotEquals(logs, '') def test_error_on_different_platform(self): # parse_exp uses a Windows port. Assert errors on Mac show up in lint mode. self.assertRaises(ParseError, self.parse_exp, 'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]', is_lint_mode=True) def test_error_on_different_build_type(self): # parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode. 
self.assertRaises(ParseError, self.parse_exp, 'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]', is_lint_mode=True) def test_overrides(self): self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]", "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]") self.assert_exp('failures/expected/text.html', IMAGE) def test_overrides__directory(self): self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]", "Bug(override) failures/expected [ Crash ]") self.assert_exp('failures/expected/text.html', CRASH) self.assert_exp('failures/expected/image.html', CRASH) def test_overrides__duplicate(self): self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]", "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n" "Bug(override) failures/expected/text.html [ Crash ]\n") def test_pixel_tests_flag(self): def match(test, result, pixel_tests_enabled): return self._exp.matches_an_expected_result( self.get_test(test), result, pixel_tests_enabled) self.parse_exp(self.get_basic_expectations()) self.assertTrue(match('failures/expected/text.html', FAIL, True)) self.assertTrue(match('failures/expected/text.html', FAIL, False)) self.assertFalse(match('failures/expected/text.html', CRASH, True)) self.assertFalse(match('failures/expected/text.html', CRASH, False)) self.assertTrue(match('failures/expected/image_checksum.html', PASS, True)) self.assertTrue(match('failures/expected/image_checksum.html', PASS, False)) self.assertTrue(match('failures/expected/crash.html', PASS, False)) self.assertTrue(match('passes/text.html', PASS, False)) def test_more_specific_override_resets_skip(self): self.parse_exp("Bug(x) failures/expected [ Skip ]\n" "Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n") self.assert_exp('failures/expected/text.html', IMAGE) self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(), 'failures/expected/text.html') in self._exp.get_tests_with_result_type(SKIP)) class SkippedTests(Base): def check(self, expectations, overrides, skips, lint=False): port = MockHost().port_factory.get('qt') port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo') expectations_dict = OrderedDict() expectations_dict['expectations'] = expectations if overrides: expectations_dict['overrides'] = overrides port.expectations_dict = lambda: expectations_dict port.skipped_layout_tests = lambda tests: set(skips) expectations_to_lint = expectations_dict if lint else None exp = TestExpectations(port, ['failures/expected/text.html'], expectations_to_lint=expectations_to_lint) # Check that the expectation is for BUG_DUMMY SKIP : ... 
[ Pass ] self.assertEqual(exp.get_modifiers('failures/expected/text.html'), [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]) self.assertEqual(exp.get_expectations('failures/expected/text.html'), set([PASS])) def test_skipped_tests_work(self): self.check(expectations='', overrides=None, skips=['failures/expected/text.html']) def test_duplicate_skipped_test_fails_lint(self): self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected/text.html'], lint=True) def test_skipped_file_overrides_expectations(self): self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected/text.html']) def test_skipped_dir_overrides_expectations(self): self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected']) def test_skipped_file_overrides_overrides(self): self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n', skips=['failures/expected/text.html']) def test_skipped_dir_overrides_overrides(self): self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n', skips=['failures/expected']) def test_skipped_entry_dont_exist(self): port = MockHost().port_factory.get('qt') expectations_dict = OrderedDict() expectations_dict['expectations'] = '' port.expectations_dict = lambda: expectations_dict port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html']) capture = OutputCapture() capture.capture_output() exp = TestExpectations(port) _, _, logs = capture.restore_output() self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs) class ExpectationSyntaxTests(Base): def test_unrecognized_expectation(self): self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]') def test_macro(self): exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]' self.parse_exp(exp_str) self.assert_exp('failures/expected/text.html', FAIL) def assert_tokenize_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'): bugs = bugs or [] modifiers = modifiers or [] expectations = expectations or [] warnings = warnings or [] filename = 'TestExpectations' line_number = 1 expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number) self.assertEqual(expectation_line.warnings, warnings) self.assertEqual(expectation_line.name, name) self.assertEqual(expectation_line.filename, filename) self.assertEqual(expectation_line.line_number, line_number) if not warnings: self.assertEqual(expectation_line.modifiers, modifiers) self.assertEqual(expectation_line.expectations, expectations) def test_bare_name(self): self.assert_tokenize_exp('foo.html', modifiers=['SKIP'], expectations=['PASS']) def test_bare_name_and_bugs(self): self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('webkit.org/b/12345 webkit.org/b/34567 foo.html', modifiers=['BUGWK12345', 'BUGWK34567', 'SKIP'], expectations=['PASS']) def test_comments(self): self.assert_tokenize_exp("# comment", name=None, comment="# comment") self.assert_tokenize_exp("foo.html # comment", comment="# comment", expectations=['PASS'], 
modifiers=['SKIP']) def test_config_modifiers(self): self.assert_tokenize_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL']) def test_unknown_config(self): self.assert_tokenize_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS']) def test_unknown_expectation(self): self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"']) def test_skip(self): self.assert_tokenize_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS']) def test_slow(self): self.assert_tokenize_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS']) def test_wontfix(self): self.assert_tokenize_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', modifiers=['WONTFIX'], expectations=['IMAGE']) self.assert_tokenize_exp('foo.html [ WontFix Pass Failure ]', modifiers=['WONTFIX'], expectations=['PASS', 'FAIL']) def test_blank_line(self): self.assert_tokenize_exp('', name=None) def test_warnings(self): self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None) self.assert_tokenize_exp('[ [', warnings=['unexpected "["'], name=None) self.assert_tokenize_exp('webkit.org/b/12345 ]', warnings=['unexpected "]"'], name=None) self.assert_tokenize_exp('foo.html webkit.org/b/12345 ]', warnings=['"webkit.org/b/12345" is not at the start of the line.']) class SemanticTests(Base): def test_bug_format(self): self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True) def test_bad_bugid(self): try: self.parse_exp('BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True) self.fail('should have raised an error about a bad bug identifier') except ParseError, exp: self.assertEqual(len(exp.warnings), 1) def test_missing_bugid(self): self.parse_exp('failures/expected/text.html [ Failure ]') self.assertFalse(self._exp.has_warnings()) self._port.warn_if_bug_missing_in_test_expectations = lambda: True self.parse_exp('failures/expected/text.html [ Failure ]') line = self._exp._model.get_expectation_line('failures/expected/text.html') self.assertFalse(line.is_invalid()) self.assertEqual(line.warnings, ['Test lacks BUG modifier.']) def test_skip_and_wontfix(self): # Skip is not allowed to have other expectations as well, because those # expectations won't be exercised and may become stale . self.parse_exp('failures/expected/text.html [ Failure Skip ]') self.assertTrue(self._exp.has_warnings()) self.parse_exp('failures/expected/text.html [ Crash WontFix ]') self.assertFalse(self._exp.has_warnings()) self.parse_exp('failures/expected/text.html [ Pass WontFix ]') self.assertFalse(self._exp.has_warnings()) def test_slow_and_timeout(self): # A test cannot be SLOW and expected to TIMEOUT. self.assertRaises(ParseError, self.parse_exp, 'Bug(test) failures/expected/timeout.html [ Slow Timeout ]', is_lint_mode=True) def test_rebaseline(self): # Can't lint a file w/ 'REBASELINE' in it. 
self.assertRaises(ParseError, self.parse_exp, 'Bug(test) failures/expected/text.html [ Failure Rebaseline ]', is_lint_mode=True) def test_duplicates(self): self.assertRaises(ParseError, self.parse_exp, """ Bug(exp) failures/expected/text.html [ Failure ] Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True) self.assertRaises(ParseError, self.parse_exp, self.get_basic_expectations(), overrides=""" Bug(override) failures/expected/text.html [ Failure ] Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True) def test_missing_file(self): self.parse_exp('Bug(test) missing_file.html [ Failure ]') self.assertTrue(self._exp.has_warnings(), 1) class PrecedenceTests(Base): def test_file_over_directory(self): # This tests handling precedence of specific lines over directories # and tests expectations covering entire directories. exp_str = """ Bug(x) failures/expected/text.html [ Failure ] Bug(y) failures/expected [ WontFix ] """ self.parse_exp(exp_str) self.assert_exp('failures/expected/text.html', FAIL) self.assert_exp('failures/expected/crash.html', PASS) exp_str = """ Bug(x) failures/expected [ WontFix ] Bug(y) failures/expected/text.html [ Failure ] """ self.parse_exp(exp_str) self.assert_exp('failures/expected/text.html', FAIL) self.assert_exp('failures/expected/crash.html', PASS) def test_ambiguous(self): self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n" "Bug(test) [ Win ] passes/text.html [ Failure ]\n") def test_more_modifiers(self): self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n" "Bug(test) [ Win Release ] passes/text.html [ Failure ]\n") def test_order_in_file(self): self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n" "Bug(test) [ Release ] : passes/text.html [ Pass ]\n") def test_macro_overrides(self): self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n" "Bug(test) [ XP ] passes/text.html [ Failure ]\n") class RemoveConfigurationsTest(Base): def test_remove(self): host = MockHost() test_port = host.port_factory.get('test-win-xp', None) test_port.test_exists = lambda test: True test_port.test_isfile = lambda test: True test_config = test_port.test_configuration() test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ] Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ] """} expectations = TestExpectations(test_port, self.get_basic_tests()) actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config) self.assertEqual("""Bug(x) [ Linux Vista Win7 Release ] failures/expected/foo.html [ Failure ] Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ] """, actual_expectations) def test_remove_line(self): host = MockHost() test_port = host.port_factory.get('test-win-xp', None) test_port.test_exists = lambda test: True test_port.test_isfile = lambda test: True test_config = test_port.test_configuration() test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ] Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ] """} expectations = TestExpectations(test_port) actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config) actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-vista', 
None).test_configuration()) actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration()) self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ] """, actual_expectations) class RebaseliningTest(Base): """Test rebaselining-specific functionality.""" def assertRemove(self, input_expectations, input_overrides, tests, expected_expectations, expected_overrides): self.parse_exp(input_expectations, is_lint_mode=False, overrides=input_overrides) actual_expectations = self._exp.remove_rebaselined_tests(tests, 'expectations') self.assertEqual(expected_expectations, actual_expectations) actual_overrides = self._exp.remove_rebaselined_tests(tests, 'overrides') self.assertEqual(expected_overrides, actual_overrides) def test_remove(self): self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n' 'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n' 'Bug(z) failures/expected/crash.html [ Crash ]\n', 'Bug(x0) failures/expected/image.html [ Crash ]\n', ['failures/expected/text.html'], 'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n' 'Bug(z) failures/expected/crash.html [ Crash ]\n', 'Bug(x0) failures/expected/image.html [ Crash ]\n') # Ensure that we don't modify unrelated lines, even if we could rewrite them. # i.e., the second line doesn't get rewritten to "Bug(y) failures/expected/skip.html" self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n' 'Bug(Y) failures/expected/image.html [ Skip ]\n' 'Bug(z) failures/expected/crash.html\n', '', ['failures/expected/text.html'], 'Bug(Y) failures/expected/image.html [ Skip ]\n' 'Bug(z) failures/expected/crash.html\n', '') def test_get_rebaselining_failures(self): # Make sure we find a test as needing a rebaseline even if it is not marked as a failure. self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n') self.assertEqual(len(self._exp.get_rebaselining_failures()), 1) self.parse_exp(self.get_basic_expectations()) self.assertEqual(len(self._exp.get_rebaselining_failures()), 0) class TestExpectationSerializationTests(unittest.TestCase): def __init__(self, testFunc): host = MockHost() test_port = host.port_factory.get('test-win-xp', None) self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros()) unittest.TestCase.__init__(self, testFunc) def _tokenize(self, line): return TestExpectationParser._tokenize_line('path', line, 0) def assert_round_trip(self, in_string, expected_string=None): expectation = self._tokenize(in_string) if expected_string is None: expected_string = in_string self.assertEqual(expected_string, expectation.to_string(self._converter)) def assert_list_round_trip(self, in_string, expected_string=None): host = MockHost() parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], allow_rebaseline_modifier=False) expectations = parser.parse('path', in_string) if expected_string is None: expected_string = in_string self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter)) def test_unparsed_to_string(self): expectation = TestExpectationLine() self.assertEqual(expectation.to_string(self._converter), '') expectation.comment = ' Qux.' 
self.assertEqual(expectation.to_string(self._converter), '# Qux.') expectation.name = 'bar' self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.') expectation.modifiers = ['foo'] # FIXME: case should be preserved here but we can't until we drop the old syntax. self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.') expectation.expectations = ['bAz'] self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.') expectation.expectations = ['bAz1', 'baZ2'] self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.') expectation.modifiers = ['foo1', 'foO2'] self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.') expectation.warnings.append('Oh the horror.') self.assertEqual(expectation.to_string(self._converter), '') expectation.original_string = 'Yes it is!' self.assertEqual(expectation.to_string(self._converter), 'Yes it is!') def test_unparsed_list_to_string(self): expectation = TestExpectationLine() expectation.comment = 'Qux.' expectation.name = 'bar' expectation.modifiers = ['foo'] expectation.expectations = ['bAz1', 'baZ2'] # FIXME: case should be preserved here but we can't until we drop the old syntax. self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.') def test_parsed_to_string(self): expectation_line = TestExpectationLine() expectation_line.parsed_bug_modifiers = ['BUGX'] expectation_line.name = 'test/name/for/realz.html' expectation_line.parsed_expectations = set([IMAGE]) self.assertEqual(expectation_line.to_string(self._converter), None) expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')]) self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]') expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]) self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]') def test_serialize_parsed_expectations(self): expectation_line = TestExpectationLine() expectation_line.parsed_expectations = set([]) parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()]) self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '') expectation_line.parsed_expectations = set([FAIL]) self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail') expectation_line.parsed_expectations = set([PASS, IMAGE]) self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass image') expectation_line.parsed_expectations = set([FAIL, PASS]) self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail') def test_serialize_parsed_modifier_string(self): expectation_line = TestExpectationLine() expectation_line.parsed_bug_modifiers = ['garden-o-matic'] expectation_line.parsed_modifiers = ['for', 'the'] self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), 'garden-o-matic for the') self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic for the win') expectation_line.parsed_bug_modifiers = [] expectation_line.parsed_modifiers = [] 
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), '') self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'win') expectation_line.parsed_bug_modifiers = ['garden-o-matic', 'total', 'is'] self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win') expectation_line.parsed_bug_modifiers = [] expectation_line.parsed_modifiers = ['garden-o-matic', 'total', 'is'] self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win') def test_format_line(self): self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment') self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]') def test_string_roundtrip(self): self.assert_round_trip('') self.assert_round_trip('FOO') self.assert_round_trip('[') self.assert_round_trip('FOO [') self.assert_round_trip('FOO ] bar') self.assert_round_trip(' FOO [') self.assert_round_trip(' [ FOO ] ') self.assert_round_trip('[ FOO ] bar [ BAZ ]') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ') self.assert_round_trip('[ FOO ] ] ] bar BAZ') self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]') self.assert_round_trip('FOO ] ] bar ==== BAZ') self.assert_round_trip('=') self.assert_round_trip('#') self.assert_round_trip('# ') self.assert_round_trip('# Foo') self.assert_round_trip('# Foo') self.assert_round_trip('# Foo :') self.assert_round_trip('# Foo : =') def test_list_roundtrip(self): self.assert_list_round_trip('') self.assert_list_round_trip('\n') self.assert_list_round_trip('\n\n') self.assert_list_round_trip('bar') self.assert_list_round_trip('bar\n# Qux.') self.assert_list_round_trip('bar\n# Qux.\n') def test_reconstitute_only_these(self): lines = [] reconstitute_only_these = [] def add_line(matching_configurations, reconstitute): expectation_line = TestExpectationLine() expectation_line.original_string = "Nay" expectation_line.parsed_bug_modifiers = ['BUGX'] expectation_line.name = 'Yay' expectation_line.parsed_expectations = set([IMAGE]) expectation_line.matching_configurations = matching_configurations lines.append(expectation_line) if reconstitute: reconstitute_only_these.append(expectation_line) add_line(set([TestConfiguration('xp', 'x86', 'release')]), True) add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False) serialized = TestExpectations.list_to_string(lines, self._converter) self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]") serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these) self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay") def disabled_test_string_whitespace_stripping(self): # FIXME: Re-enable this test once we rework the code to no longer support the old syntax. 
self.assert_round_trip('\n', '') self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]') self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
bsd-3-clause
4,251,523,650,415,249,400
-7,632,237,031,866,250,000
52.371994
180
0.653867
false
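For orientation, the tokenizer cases exercised in the record above boil down to a small grammar: optional bug modifiers, optional configuration specifiers in brackets, a test path, then expectations in brackets, with bare names defaulting to skipped-and-passing. A few input/output pairs restated from the record's assert_tokenize_exp calls (the CASES name is ours):

# Restating a few _tokenize_line cases from the tests above as data:
# expectation line -> (modifiers, expectations)
CASES = {
    'foo.html [ Skip ]': (['SKIP'], ['PASS']),
    'foo.html [ Slow ]': (['SLOW'], ['PASS']),
    '[ Mac ] foo.html [ Failure ] ': (['MAC'], ['FAIL']),
    'Bug(dpranke) foo.html': (['BUGDPRANKE', 'SKIP'], ['PASS']),
}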
schmidtc/pysal
pysal/weights/Wsets.py
7
18482
""" Set-like manipulation of weights matrices. """ __author__ = "Sergio J. Rey <[email protected]>, Charles Schmidt <[email protected]>, David Folch <[email protected]>, Dani Arribas-Bel <[email protected]>" import pysal import copy from scipy.sparse import isspmatrix_csr from numpy import ones __all__ = ['w_union', 'w_intersection', 'w_difference', 'w_symmetric_difference', 'w_subset', 'w_clip'] def w_union(w1, w2, silent_island_warning=False): """ Returns a binary weights object, w, that includes all neighbor pairs that exist in either w1 or w2. Parameters ---------- w1 : W object w2 : W object silent_island_warning : boolean Switch to turn off (default on) print statements for every observation with islands Returns ------- w : W object Notes ----- ID comparisons are performed using ==, therefore the integer ID 2 is equivalent to the float ID 2.0. Returns a matrix with all the unique IDs from w1 and w2. Examples -------- Construct rook weights matrices for two regions, one is 4x4 (16 areas) and the other is 6x4 (24 areas). A union of these two weights matrices results in the new weights matrix matching the larger one. >>> import pysal >>> w1 = pysal.lat2W(4,4) >>> w2 = pysal.lat2W(6,4) >>> w = pysal.weights.w_union(w1, w2) >>> w1[0] == w[0] True >>> w1.neighbors[15] [11, 14] >>> w2.neighbors[15] [11, 14, 19] >>> w.neighbors[15] [19, 11, 14] >>> """ neighbors = dict(w1.neighbors.items()) for i in w2.neighbors: if i in neighbors: add_neigh = set(neighbors[i]).union(set(w2.neighbors[i])) neighbors[i] = list(add_neigh) else: neighbors[i] = copy.copy(w2.neighbors[i]) return pysal.W(neighbors, silent_island_warning=silent_island_warning) def w_intersection(w1, w2, w_shape='w1', silent_island_warning=False): """ Returns a binary weights object, w, that includes only those neighbor pairs that exist in both w1 and w2. Parameters ---------- w1 : W object w2 : W object w_shape : string Defines the shape of the returned weights matrix. 'w1' returns a matrix with the same IDs as w1; 'all' returns a matrix with all the unique IDs from w1 and w2; and 'min' returns a matrix with only the IDs occurring in both w1 and w2. silent_island_warning : boolean Switch to turn off (default on) print statements for every observation with islands Returns ------- w : W object Notes ----- ID comparisons are performed using ==, therefore the integer ID 2 is equivalent to the float ID 2.0. Examples -------- Construct rook weights matrices for two regions, one is 4x4 (16 areas) and the other is 6x4 (24 areas). An intersection of these two weights matrices results in the new weights matrix matching the smaller one. 
>>> import pysal >>> w1 = pysal.lat2W(4,4) >>> w2 = pysal.lat2W(6,4) >>> w = pysal.weights.w_intersection(w1, w2) >>> w1[0] == w[0] True >>> w1.neighbors[15] [11, 14] >>> w2.neighbors[15] [11, 14, 19] >>> w.neighbors[15] [11, 14] >>> """ if w_shape == 'w1': neigh_keys = w1.neighbors.keys() elif w_shape == 'all': neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys())) elif w_shape == 'min': neigh_keys = set(w1.neighbors.keys( )).intersection(set(w2.neighbors.keys())) else: raise Exception("invalid string passed to w_shape") neighbors = {} for i in neigh_keys: if i in w1.neighbors and i in w2.neighbors: add_neigh = set(w1.neighbors[i]).intersection(set(w2.neighbors[i])) neighbors[i] = list(add_neigh) else: neighbors[i] = [] return pysal.W(neighbors, silent_island_warning=silent_island_warning) def w_difference(w1, w2, w_shape='w1', constrained=True, silent_island_warning=False): """ Returns a binary weights object, w, that includes only neighbor pairs in w1 that are not in w2. The w_shape and constrained parameters determine which pairs in w1 that are not in w2 are returned. Parameters ---------- w1 : W object w2 : W object w_shape : string Defines the shape of the returned weights matrix. 'w1' returns a matrix with the same IDs as w1; 'all' returns a matrix with all the unique IDs from w1 and w2; and 'min' returns a matrix with the IDs occurring in w1 and not in w2. constrained : boolean If False then the full set of neighbor pairs in w1 that are not in w2 are returned. If True then those pairs that would not be possible if w_shape='min' are dropped. Ignored if w_shape is set to 'min'. silent_island_warning : boolean Switch to turn off (default on) print statements for every observation with islands Returns ------- w : W object Notes ----- ID comparisons are performed using ==, therefore the integer ID 2 is equivalent to the float ID 2.0. Examples -------- Construct rook (w2) and queen (w1) weights matrices for two 4x4 regions (16 areas). A queen matrix has all the joins a rook matrix does plus joins between areas that share a corner. The new matrix formed by the difference of rook from queen contains only join at corners (typically called a bishop matrix). Note that the difference of queen from rook would result in a weights matrix with no joins. 
>>> import pysal >>> w1 = pysal.lat2W(4,4,rook=False) >>> w2 = pysal.lat2W(4,4,rook=True) >>> w = pysal.weights.w_difference(w1, w2, constrained=False) >>> w1[0] == w[0] False >>> w1.neighbors[15] [10, 11, 14] >>> w2.neighbors[15] [11, 14] >>> w.neighbors[15] [10] >>> """ if w_shape == 'w1': neigh_keys = w1.neighbors.keys() elif w_shape == 'all': neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys())) elif w_shape == 'min': neigh_keys = set( w1.neighbors.keys()).difference(set(w2.neighbors.keys())) if not neigh_keys: raise Exception("returned an empty weights matrix") else: raise Exception("invalid string passed to w_shape") neighbors = {} for i in neigh_keys: if i in w1.neighbors: if i in w2.neighbors: add_neigh = set(w1.neighbors[i] ).difference(set(w2.neighbors[i])) neighbors[i] = list(add_neigh) else: neighbors[i] = copy.copy(w1.neighbors[i]) else: neighbors[i] = [] if constrained or w_shape == 'min': constrained_keys = set( w1.neighbors.keys()).difference(set(w2.neighbors.keys())) island_keys = set(neighbors.keys()).difference(constrained_keys) for i in island_keys: neighbors[i] = [] for i in constrained_keys: neighbors[i] = list( set(neighbors[i]).intersection(constrained_keys)) return pysal.W(neighbors, silent_island_warning=silent_island_warning) def w_symmetric_difference(w1, w2, w_shape='all', constrained=True, silent_island_warning=False): """ Returns a binary weights object, w, that includes only neighbor pairs that are not shared by w1 and w2. The w_shape and constrained parameters determine which pairs that are not shared by w1 and w2 are returned. Parameters ---------- w1 : W object w2 : W object w_shape : string Defines the shape of the returned weights matrix. 'all' returns a matrix with all the unique IDs from w1 and w2; and 'min' returns a matrix with the IDs not shared by w1 and w2. constrained : boolean If False then the full set of neighbor pairs that are not shared by w1 and w2 are returned. If True then those pairs that would not be possible if w_shape='min' are dropped. Ignored if w_shape is set to 'min'. silent_island_warning : boolean Switch to turn off (default on) print statements for every observation with islands Returns ------- w : W object Notes ----- ID comparisons are performed using ==, therefore the integer ID 2 is equivalent to the float ID 2.0. Examples -------- Construct queen weights matrix for a 4x4 (16 areas) region (w1) and a rook matrix for a 6x4 (24 areas) region (w2). The symmetric difference of these two matrices (with w_shape set to 'all' and constrained set to False) contains the corner joins in the overlap area, all the joins in the non-overlap area. 
    >>> import pysal
    >>> w1 = pysal.lat2W(4,4,rook=False)
    >>> w2 = pysal.lat2W(6,4,rook=True)
    >>> w = pysal.weights.w_symmetric_difference(w1, w2, constrained=False)
    >>> w1[0] == w[0]
    False
    >>> w1.neighbors[15]
    [10, 11, 14]
    >>> w2.neighbors[15]
    [11, 14, 19]
    >>> w.neighbors[15]
    [10, 19]
    >>>

    """
    if w_shape == 'all':
        neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys()))
    elif w_shape == 'min':
        neigh_keys = set(w1.neighbors.keys(
        )).symmetric_difference(set(w2.neighbors.keys()))
    else:
        raise Exception("invalid string passed to w_shape")

    neighbors = {}
    for i in neigh_keys:
        if i in w1.neighbors:
            if i in w2.neighbors:
                add_neigh = set(w1.neighbors[i]).symmetric_difference(
                    set(w2.neighbors[i]))
                neighbors[i] = list(add_neigh)
            else:
                neighbors[i] = copy.copy(w1.neighbors[i])
        elif i in w2.neighbors:
            neighbors[i] = copy.copy(w2.neighbors[i])
        else:
            neighbors[i] = []

    if constrained or w_shape == 'min':
        constrained_keys = set(
            w1.neighbors.keys()).difference(set(w2.neighbors.keys()))
        island_keys = set(neighbors.keys()).difference(constrained_keys)
        for i in island_keys:
            neighbors[i] = []
        for i in constrained_keys:
            neighbors[i] = list(
                set(neighbors[i]).intersection(constrained_keys))

    return pysal.W(neighbors, silent_island_warning=silent_island_warning)


def w_subset(w1, ids, silent_island_warning=False):
    """
    Returns a binary weights object, w, that includes only those
    observations in ids.

    Parameters
    ----------
    w1                      : W object
    ids                     : list
                              A list containing the IDs to be included in the
                              returned weights object.
    silent_island_warning   : boolean
                              Switch to turn off (default on) print statements
                              for every observation with islands

    Returns
    -------
    w       : W object

    Examples
    --------

    Construct a rook weights matrix for a 6x4 region (24 areas). By default
    PySAL assigns integer IDs to the areas in a region. By passing in a list
    of integers from 0 to 15, the first 16 areas are extracted from the
    previous weights matrix, and only those joins relevant to the new region
    are retained.

    >>> import pysal
    >>> w1 = pysal.lat2W(6,4)
    >>> ids = range(16)
    >>> w = pysal.weights.w_subset(w1, ids)
    >>> w1[0] == w[0]
    True
    >>> w1.neighbors[15]
    [11, 14, 19]
    >>> w.neighbors[15]
    [11, 14]
    >>>

    """
    neighbors = {}
    ids_set = set(ids)
    for i in ids:
        if i in w1.neighbors:
            neigh_add = ids_set.intersection(set(w1.neighbors[i]))
            neighbors[i] = list(neigh_add)
        else:
            neighbors[i] = []

    return pysal.W(neighbors, id_order=ids, silent_island_warning=silent_island_warning)


def w_clip(w1, w2, outSP=True, silent_island_warning=False):
    '''
    Clip a continuous W object (w1) with a different W object (w2) so only
    cells where w2 has a non-zero value remain with non-zero values in w1.

    Checks on w1 and w2 are performed to make sure they conform to the
    appropriate format and, if not, they are converted.

    Parameters
    ----------
    w1                      : W
                              pysal.W, scipy.sparse.csr.csr_matrix
                              Potentially continuous weights matrix to be
                              clipped. The clipped matrix wc will have at most
                              the same elements as w1.
    w2                      : W
                              pysal.W, scipy.sparse.csr.csr_matrix
                              Weights matrix to use as shell to clip w1.
                              Automatically converted to binary format. Only
                              non-zero elements in w2 will be kept non-zero in
                              wc. NOTE: assumed to be of the same shape as w1
    outSP                   : boolean
                              If True (default) return sparse version of the
                              clipped W, if False, return pysal.W object of the
                              clipped matrix
    silent_island_warning   : boolean
                              Switch to turn off (default on) print statements
                              for every observation with islands

    Returns
    -------
    wc      : W
              pysal.W, scipy.sparse.csr.csr_matrix
              Clipped W object (sparse if outSP=True).
              It inherits ``id_order`` from w1.

    Examples
    --------
    >>> import pysal as ps

    First create a W object from a lattice using queen contiguity and
    row-standardize it (note that these weights will stay when we clip the
    object, but they will not necessarily represent a row-standardization
    anymore):

    >>> w1 = ps.lat2W(3, 2, rook=False)
    >>> w1.transform = 'R'

    We will clip that geography assuming observations 0, 2, 3 and 4 belong
    to one group and 1, 5 belong to another group and we don't want both
    groups to interact with each other in our weights (i.e. w_ij = 0 if i
    and j in different groups). For that, we use the following method:

    >>> w2 = ps.block_weights(['r1', 'r2', 'r1', 'r1', 'r1', 'r2'])

    To illustrate that w2 will only be considered as binary even when the
    object passed is not, we can row-standardize it

    >>> w2.transform = 'R'

    The clipped object ``wc`` will contain only the spatial queen
    relationships that occur within one group ('r1' or 'r2') but will have
    gotten rid of those that happen across groups

    >>> wcs = ps.weights.Wsets.w_clip(w1, w2, outSP=True)

    This will create a sparse object (recommended when n is large).

    >>> wcs.sparse.toarray()
    array([[ 0. , 0. , 0.33333333, 0.33333333, 0. , 0. ],
           [ 0. , 0. , 0. , 0. , 0. , 0. ],
           [ 0.2 , 0. , 0. , 0.2 , 0.2 , 0. ],
           [ 0.2 , 0. , 0.2 , 0. , 0.2 , 0. ],
           [ 0. , 0. , 0.33333333, 0.33333333, 0. , 0. ],
           [ 0. , 0. , 0. , 0. , 0. , 0. ]])

    If we wanted an original W object, we can control that with the
    argument ``outSP``:

    >>> wc = ps.weights.Wsets.w_clip(w1, w2, outSP=False)
    WARNING: there are 2 disconnected observations
    Island ids: [1, 5]
    >>> wc.full()[0]
    array([[ 0. , 0. , 0.33333333, 0.33333333, 0. , 0. ],
           [ 0. , 0. , 0. , 0. , 0. , 0. ],
           [ 0.2 , 0. , 0. , 0.2 , 0.2 , 0. ],
           [ 0.2 , 0. , 0.2 , 0. , 0.2 , 0. ],
           [ 0. , 0. , 0.33333333, 0.33333333, 0. , 0. ],
           [ 0. , 0. , 0. , 0. , 0. , 0. ]])

    You can check they are actually the same:

    >>> wcs.sparse.toarray() == wc.full()[0]
    array([[ True, True, True, True, True, True],
           [ True, True, True, True, True, True],
           [ True, True, True, True, True, True],
           [ True, True, True, True, True, True],
           [ True, True, True, True, True, True],
           [ True, True, True, True, True, True]], dtype=bool)

    '''
    if not w1.id_order:
        w1.id_order = None
    id_order = w1.id_order
    if not isspmatrix_csr(w1):
        w1 = w1.sparse
    if not isspmatrix_csr(w2):
        w2 = w2.sparse
    w2.data = ones(w2.data.shape)
    wc = w1.multiply(w2)
    wc = pysal.weights.WSP(wc, id_order=id_order)
    if not outSP:
        wc = pysal.weights.WSP2W(wc, silent_island_warning=silent_island_warning)
    return wc
bsd-3-clause
-5,622,330,164,592,579,000
-231,060,237,209,561,570
33.675422
153
0.522671
false
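As a quick orientation for the three set operations defined in this file, here is a minimal sketch (assuming the classic pre-2.0 PySAL namespace that the doctests above already use; the lattice sizes are arbitrary):

import pysal

w_full = pysal.lat2W(6, 4)                          # 6x4 rook lattice, 24 areas
w_part = pysal.weights.w_subset(w_full, range(16))  # keep only the first 16 areas
# Joins to dropped areas vanish, and the requested id_order is preserved.
assert w_part.n == 16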
deandunbar/html2bwml
venv/lib/python2.7/site-packages/django/core/files/move.py
103
3164
""" Move a file in the safest way possible:: >>> from django.core.files.move import file_move_safe >>> file_move_safe("/tmp/old_file", "/tmp/new_file") """ import os from django.core.files import locks try: from shutil import copystat except ImportError: import stat def copystat(src, dst): """Copy all stat info (mode bits, atime and mtime) from src to dst""" st = os.stat(src) mode = stat.S_IMODE(st.st_mode) if hasattr(os, 'utime'): os.utime(dst, (st.st_atime, st.st_mtime)) if hasattr(os, 'chmod'): os.chmod(dst, mode) __all__ = ['file_move_safe'] def _samefile(src, dst): # Macintosh, Unix. if hasattr(os.path, 'samefile'): try: return os.path.samefile(src, dst) except OSError: return False # All other platforms: check for same pathname. return (os.path.normcase(os.path.abspath(src)) == os.path.normcase(os.path.abspath(dst))) def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False): """ Moves a file from one location to another in the safest way possible. First, tries ``os.rename``, which is simple but will break across filesystems. If that fails, streams manually from one file to another in pure Python. If the destination file exists and ``allow_overwrite`` is ``False``, this function will throw an ``IOError``. """ # There's no reason to move if we don't have to. if _samefile(old_file_name, new_file_name): return try: # If the destination file exists and allow_overwrite is False then raise an IOError if not allow_overwrite and os.access(new_file_name, os.F_OK): raise IOError("Destination file %s exists and allow_overwrite is False" % new_file_name) os.rename(old_file_name, new_file_name) return except OSError: # This will happen with os.rename if moving to another filesystem # or when moving opened files on certain operating systems pass # first open the old file, so that it won't go away with open(old_file_name, 'rb') as old_file: # now open the new file, not forgetting allow_overwrite fd = os.open(new_file_name, (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) | (os.O_EXCL if not allow_overwrite else 0))) try: locks.lock(fd, locks.LOCK_EX) current_chunk = None while current_chunk != b'': current_chunk = old_file.read(chunk_size) os.write(fd, current_chunk) finally: locks.unlock(fd) os.close(fd) copystat(old_file_name, new_file_name) try: os.remove(old_file_name) except OSError as e: # Certain operating systems (Cygwin and Windows) # fail when deleting opened files, ignore it. (For the # systems where this happens, temporary files will be auto-deleted # on close anyway.) if getattr(e, 'winerror', 0) != 32 and getattr(e, 'errno', 0) != 13: raise
mit
2,276,792,061,459,437,800
-1,889,956,971,042,455,000
33.391304
100
0.608407
false
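A minimal usage sketch for file_move_safe above (the file names are hypothetical; the fallback from os.rename to a locked, chunked copy happens exactly as the docstring describes):

from django.core.files.move import file_move_safe

# Overwrites /tmp/report.new if it exists; with allow_overwrite=False an
# existing destination raises IOError instead.
file_move_safe('/tmp/report.old', '/tmp/report.new', allow_overwrite=True)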
haad/ansible
test/units/modules/network/f5/test_bigip_iapp_service.py
28
12006
# -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import sys from nose.plugins.skip import SkipTest if sys.version_info < (2, 7): raise SkipTest("F5 Ansible modules require Python >= 2.7") from ansible.compat.tests import unittest from ansible.compat.tests.mock import Mock from ansible.compat.tests.mock import patch from ansible.module_utils.basic import AnsibleModule try: from library.bigip_iapp_service import Parameters from library.bigip_iapp_service import ModuleManager from library.bigip_iapp_service import ArgumentSpec from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import iControlUnexpectedHTTPError from test.unit.modules.utils import set_module_args except ImportError: try: from ansible.modules.network.f5.bigip_iapp_service import Parameters from ansible.modules.network.f5.bigip_iapp_service import ModuleManager from ansible.modules.network.f5.bigip_iapp_service import ArgumentSpec from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError from units.modules.utils import set_module_args except ImportError: raise SkipTest("F5 Ansible modules require the f5-sdk Python library") fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestParameters(unittest.TestCase): def test_module_parameters_keys(self): args = load_fixture('create_iapp_service_parameters_f5_http.json') p = Parameters(params=args) # Assert the top-level keys assert p.name == 'http_example' assert p.partition == 'Common' assert p.template == '/Common/f5.http' assert p.deviceGroup == 'none' assert p.inheritedTrafficGroup == 'true' assert p.inheritedDevicegroup == 'true' assert p.traffic_group == '/Common/traffic-group-local-only' def test_module_parameters_lists(self): args = load_fixture('create_iapp_service_parameters_f5_http.json') p = Parameters(params=args) assert 'lists' in p._values assert p.lists[0]['name'] == 'irules__irules' assert p.lists[0]['encrypted'] == 'no' assert len(p.lists[0]['value']) == 1 assert p.lists[0]['value'][0] == '/Common/lgyft' assert p.lists[1]['name'] == 'net__client_vlan' assert p.lists[1]['encrypted'] == 'no' assert len(p.lists[1]['value']) == 1 assert p.lists[1]['value'][0] == '/Common/net2' def test_module_parameters_tables(self): args = load_fixture('create_iapp_service_parameters_f5_http.json') p = Parameters(params=args) assert 'tables' in p._values assert 'columnNames' in p.tables[0] assert len(p.tables[0]['columnNames']) == 1 assert p.tables[0]['columnNames'][0] == 'name' assert 'name' in p.tables[0] assert p.tables[0]['name'] == 'pool__hosts' assert 'rows' in p.tables[0] assert len(p.tables[0]['rows']) == 1 assert 'row' in p.tables[0]['rows'][0] assert len(p.tables[0]['rows'][0]['row']) == 1 assert p.tables[0]['rows'][0]['row'][0] == 'demo.example.com' assert len(p.tables[1]['rows']) == 2 assert 'row' in p.tables[0]['rows'][0] assert len(p.tables[1]['rows'][0]['row']) == 2 assert p.tables[1]['rows'][0]['row'][0] == '10.1.1.1' assert 
p.tables[1]['rows'][0]['row'][1] == '0' assert p.tables[1]['rows'][1]['row'][0] == '10.1.1.2' assert p.tables[1]['rows'][1]['row'][1] == '0' def test_module_parameters_variables(self): args = load_fixture('create_iapp_service_parameters_f5_http.json') p = Parameters(params=args) assert 'variables' in p._values assert len(p.variables) == 34 # Assert one configuration value assert 'name' in p.variables[0] assert 'value' in p.variables[0] assert p.variables[0]['name'] == 'afm__dos_security_profile' assert p.variables[0]['value'] == '/#do_not_use#' # Assert a second configuration value assert 'name' in p.variables[1] assert 'value' in p.variables[1] assert p.variables[1]['name'] == 'afm__policy' assert p.variables[1]['value'] == '/#do_not_use#' def test_module_strict_updates_from_top_level(self): # Assumes the user did not provide any parameters args = dict( strict_updates=True ) p = Parameters(params=args) assert p.strict_updates == 'enabled' args = dict( strict_updates=False ) p = Parameters(params=args) assert p.strict_updates == 'disabled' def test_module_strict_updates_override_from_top_level(self): args = dict( strict_updates=True, parameters=dict( strictUpdates='disabled' ) ) p = Parameters(params=args) assert p.strict_updates == 'enabled' args = dict( strict_updates=False, parameters=dict( strictUpdates='enabled' ) ) p = Parameters(params=args) assert p.strict_updates == 'disabled' def test_module_strict_updates_only_parameters(self): args = dict( parameters=dict( strictUpdates='disabled' ) ) p = Parameters(params=args) assert p.strict_updates == 'disabled' args = dict( parameters=dict( strictUpdates='enabled' ) ) p = Parameters(params=args) assert p.strict_updates == 'enabled' def test_api_strict_updates_from_top_level(self): args = dict( strictUpdates='enabled' ) p = Parameters(params=args) assert p.strict_updates == 'enabled' args = dict( strictUpdates='disabled' ) p = Parameters(params=args) assert p.strict_updates == 'disabled' def test_api_parameters_variables(self): args = dict( variables=[ dict( name="client__http_compression", encrypted="no", value="/#create_new#" ) ] ) p = Parameters(params=args) assert p.variables[0]['name'] == 'client__http_compression' def test_api_parameters_tables(self): args = dict( tables=[ { "name": "pool__members", "columnNames": [ "addr", "port", "connection_limit" ], "rows": [ { "row": [ "12.12.12.12", "80", "0" ] }, { "row": [ "13.13.13.13", "443", 10 ] } ] } ] ) p = Parameters(params=args) assert p.tables[0]['name'] == 'pool__members' assert p.tables[0]['columnNames'] == ['addr', 'port', 'connection_limit'] assert len(p.tables[0]['rows']) == 2 assert 'row' in p.tables[0]['rows'][0] assert 'row' in p.tables[0]['rows'][1] assert p.tables[0]['rows'][0]['row'] == ['12.12.12.12', '80', '0'] assert p.tables[0]['rows'][1]['row'] == ['13.13.13.13', '443', '10'] def test_api_parameters_device_group(self): args = dict( deviceGroup='none' ) p = Parameters(params=args) assert p.deviceGroup == 'none' def test_api_parameters_inherited_traffic_group(self): args = dict( inheritedTrafficGroup='true' ) p = Parameters(params=args) assert p.inheritedTrafficGroup == 'true' def test_api_parameters_inherited_devicegroup(self): args = dict( inheritedDevicegroup='true' ) p = Parameters(params=args) assert p.inheritedDevicegroup == 'true' def test_api_parameters_traffic_group(self): args = dict( trafficGroup='/Common/traffic-group-local-only' ) p = Parameters(params=args) assert p.traffic_group == '/Common/traffic-group-local-only' def 
test_module_template_same_partition(self): args = dict( template='foo', partition='bar' ) p = Parameters(params=args) assert p.template == '/bar/foo' def test_module_template_same_partition_full_path(self): args = dict( template='/bar/foo', partition='bar' ) p = Parameters(params=args) assert p.template == '/bar/foo' def test_module_template_different_partition_full_path(self): args = dict( template='/Common/foo', partition='bar' ) p = Parameters(params=args) assert p.template == '/Common/foo' class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_create_service(self, *args): parameters = load_fixture('create_iapp_service_parameters_f5_http.json') set_module_args(dict( name='foo', template='f5.http', parameters=parameters, state='present', password='passsword', server='localhost', user='admin' )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) mm = ModuleManager(module=module) # Override methods to force specific logic in the module to happen mm.exists = Mock(return_value=False) mm.create_on_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True def test_update_agent_status_traps(self, *args): parameters = load_fixture('update_iapp_service_parameters_f5_http.json') set_module_args(dict( name='foo', template='f5.http', parameters=parameters, state='present', password='passsword', server='localhost', user='admin' )) # Configure the parameters that would be returned by querying the # remote device parameters = load_fixture('create_iapp_service_parameters_f5_http.json') current = Parameters(parameters) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) mm = ModuleManager(module=module) # Override methods to force specific logic in the module to happen mm.exists = Mock(return_value=True) mm.update_on_device = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) results = mm.exec_module() assert results['changed'] is True
gpl-3.0
-4,476,439,972,223,437,000
-6,858,300,837,617,915,000
31.803279
91
0.556222
false
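The fixture-driven pattern repeated throughout the tests above boils down to a few lines (a sketch reusing the names imported in that file, with all device I/O mocked out; the credential values are arbitrary):

set_module_args(dict(name='foo', template='f5.http', state='present',
                     password='secret', server='localhost', user='admin'))
module = AnsibleModule(argument_spec=ArgumentSpec().argument_spec)
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=False)            # force the "create" path
mm.create_on_device = Mock(return_value=True)   # never touch a real BIG-IP
assert mm.exec_module()['changed'] is True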
valkjsaaa/sl4a
python/src/Lib/encodings/cp863.py
593
34508
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp863', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE 0x0086: 0x00b6, # PILCROW SIGN 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x008d: 0x2017, # DOUBLE LOW LINE 0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE 0x008f: 0x00a7, # SECTION SIGN 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE 0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE 0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE 0x0098: 0x00a4, # CURRENCY SIGN 0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x009b: 0x00a2, # CENT SIGN 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE 0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK 0x00a0: 0x00a6, # BROKEN BAR 0x00a1: 0x00b4, # ACUTE ACCENT 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00a4: 0x00a8, # DIAERESIS 0x00a5: 0x00b8, # CEDILLA 0x00a6: 0x00b3, # SUPERSCRIPT THREE 0x00a7: 0x00af, # MACRON 0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00a9: 0x2310, # REVERSED NOT SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA 0x00e3: 0x03c0, # GREEK SMALL LETTER PI 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA 0x00ec: 0x221e, # INFINITY 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON 0x00ef: 0x2229, # INTERSECTION 0x00f0: 0x2261, # IDENTICAL TO 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO 0x00f4: 0x2320, # TOP HALF INTEGRAL 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x2248, # ALMOST EQUAL TO 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x221a, # SQUARE ROOT 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # 
NO-BREAK SPACE }) ### Decoding Table decoding_table = ( u'\x00' # 0x0000 -> NULL u'\x01' # 0x0001 -> START OF HEADING u'\x02' # 0x0002 -> START OF TEXT u'\x03' # 0x0003 -> END OF TEXT u'\x04' # 0x0004 -> END OF TRANSMISSION u'\x05' # 0x0005 -> ENQUIRY u'\x06' # 0x0006 -> ACKNOWLEDGE u'\x07' # 0x0007 -> BELL u'\x08' # 0x0008 -> BACKSPACE u'\t' # 0x0009 -> HORIZONTAL TABULATION u'\n' # 0x000a -> LINE FEED u'\x0b' # 0x000b -> VERTICAL TABULATION u'\x0c' # 0x000c -> FORM FEED u'\r' # 0x000d -> CARRIAGE RETURN u'\x0e' # 0x000e -> SHIFT OUT u'\x0f' # 0x000f -> SHIFT IN u'\x10' # 0x0010 -> DATA LINK ESCAPE u'\x11' # 0x0011 -> DEVICE CONTROL ONE u'\x12' # 0x0012 -> DEVICE CONTROL TWO u'\x13' # 0x0013 -> DEVICE CONTROL THREE u'\x14' # 0x0014 -> DEVICE CONTROL FOUR u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x0016 -> SYNCHRONOUS IDLE u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK u'\x18' # 0x0018 -> CANCEL u'\x19' # 0x0019 -> END OF MEDIUM u'\x1a' # 0x001a -> SUBSTITUTE u'\x1b' # 0x001b -> ESCAPE u'\x1c' # 0x001c -> FILE SEPARATOR u'\x1d' # 0x001d -> GROUP SEPARATOR u'\x1e' # 0x001e -> RECORD SEPARATOR u'\x1f' # 0x001f -> UNIT SEPARATOR u' ' # 0x0020 -> SPACE u'!' # 0x0021 -> EXCLAMATION MARK u'"' # 0x0022 -> QUOTATION MARK u'#' # 0x0023 -> NUMBER SIGN u'$' # 0x0024 -> DOLLAR SIGN u'%' # 0x0025 -> PERCENT SIGN u'&' # 0x0026 -> AMPERSAND u"'" # 0x0027 -> APOSTROPHE u'(' # 0x0028 -> LEFT PARENTHESIS u')' # 0x0029 -> RIGHT PARENTHESIS u'*' # 0x002a -> ASTERISK u'+' # 0x002b -> PLUS SIGN u',' # 0x002c -> COMMA u'-' # 0x002d -> HYPHEN-MINUS u'.' # 0x002e -> FULL STOP u'/' # 0x002f -> SOLIDUS u'0' # 0x0030 -> DIGIT ZERO u'1' # 0x0031 -> DIGIT ONE u'2' # 0x0032 -> DIGIT TWO u'3' # 0x0033 -> DIGIT THREE u'4' # 0x0034 -> DIGIT FOUR u'5' # 0x0035 -> DIGIT FIVE u'6' # 0x0036 -> DIGIT SIX u'7' # 0x0037 -> DIGIT SEVEN u'8' # 0x0038 -> DIGIT EIGHT u'9' # 0x0039 -> DIGIT NINE u':' # 0x003a -> COLON u';' # 0x003b -> SEMICOLON u'<' # 0x003c -> LESS-THAN SIGN u'=' # 0x003d -> EQUALS SIGN u'>' # 0x003e -> GREATER-THAN SIGN u'?' 
# 0x003f -> QUESTION MARK u'@' # 0x0040 -> COMMERCIAL AT u'A' # 0x0041 -> LATIN CAPITAL LETTER A u'B' # 0x0042 -> LATIN CAPITAL LETTER B u'C' # 0x0043 -> LATIN CAPITAL LETTER C u'D' # 0x0044 -> LATIN CAPITAL LETTER D u'E' # 0x0045 -> LATIN CAPITAL LETTER E u'F' # 0x0046 -> LATIN CAPITAL LETTER F u'G' # 0x0047 -> LATIN CAPITAL LETTER G u'H' # 0x0048 -> LATIN CAPITAL LETTER H u'I' # 0x0049 -> LATIN CAPITAL LETTER I u'J' # 0x004a -> LATIN CAPITAL LETTER J u'K' # 0x004b -> LATIN CAPITAL LETTER K u'L' # 0x004c -> LATIN CAPITAL LETTER L u'M' # 0x004d -> LATIN CAPITAL LETTER M u'N' # 0x004e -> LATIN CAPITAL LETTER N u'O' # 0x004f -> LATIN CAPITAL LETTER O u'P' # 0x0050 -> LATIN CAPITAL LETTER P u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q u'R' # 0x0052 -> LATIN CAPITAL LETTER R u'S' # 0x0053 -> LATIN CAPITAL LETTER S u'T' # 0x0054 -> LATIN CAPITAL LETTER T u'U' # 0x0055 -> LATIN CAPITAL LETTER U u'V' # 0x0056 -> LATIN CAPITAL LETTER V u'W' # 0x0057 -> LATIN CAPITAL LETTER W u'X' # 0x0058 -> LATIN CAPITAL LETTER X u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y u'Z' # 0x005a -> LATIN CAPITAL LETTER Z u'[' # 0x005b -> LEFT SQUARE BRACKET u'\\' # 0x005c -> REVERSE SOLIDUS u']' # 0x005d -> RIGHT SQUARE BRACKET u'^' # 0x005e -> CIRCUMFLEX ACCENT u'_' # 0x005f -> LOW LINE u'`' # 0x0060 -> GRAVE ACCENT u'a' # 0x0061 -> LATIN SMALL LETTER A u'b' # 0x0062 -> LATIN SMALL LETTER B u'c' # 0x0063 -> LATIN SMALL LETTER C u'd' # 0x0064 -> LATIN SMALL LETTER D u'e' # 0x0065 -> LATIN SMALL LETTER E u'f' # 0x0066 -> LATIN SMALL LETTER F u'g' # 0x0067 -> LATIN SMALL LETTER G u'h' # 0x0068 -> LATIN SMALL LETTER H u'i' # 0x0069 -> LATIN SMALL LETTER I u'j' # 0x006a -> LATIN SMALL LETTER J u'k' # 0x006b -> LATIN SMALL LETTER K u'l' # 0x006c -> LATIN SMALL LETTER L u'm' # 0x006d -> LATIN SMALL LETTER M u'n' # 0x006e -> LATIN SMALL LETTER N u'o' # 0x006f -> LATIN SMALL LETTER O u'p' # 0x0070 -> LATIN SMALL LETTER P u'q' # 0x0071 -> LATIN SMALL LETTER Q u'r' # 0x0072 -> LATIN SMALL LETTER R u's' # 0x0073 -> LATIN SMALL LETTER S u't' # 0x0074 -> LATIN SMALL LETTER T u'u' # 0x0075 -> LATIN SMALL LETTER U u'v' # 0x0076 -> LATIN SMALL LETTER V u'w' # 0x0077 -> LATIN SMALL LETTER W u'x' # 0x0078 -> LATIN SMALL LETTER X u'y' # 0x0079 -> LATIN SMALL LETTER Y u'z' # 0x007a -> LATIN SMALL LETTER Z u'{' # 0x007b -> LEFT CURLY BRACKET u'|' # 0x007c -> VERTICAL LINE u'}' # 0x007d -> RIGHT CURLY BRACKET u'~' # 0x007e -> TILDE u'\x7f' # 0x007f -> DELETE u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE u'\xb6' # 0x0086 -> PILCROW SIGN u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\u2017' # 0x008d -> DOUBLE LOW LINE u'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE u'\xa7' # 0x008f -> SECTION SIGN u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xcf' # 
0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE u'\xa4' # 0x0098 -> CURRENCY SIGN u'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xa2' # 0x009b -> CENT SIGN u'\xa3' # 0x009c -> POUND SIGN u'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE u'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK u'\xa6' # 0x00a0 -> BROKEN BAR u'\xb4' # 0x00a1 -> ACUTE ACCENT u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE u'\xa8' # 0x00a4 -> DIAERESIS u'\xb8' # 0x00a5 -> CEDILLA u'\xb3' # 0x00a6 -> SUPERSCRIPT THREE u'\xaf' # 0x00a7 -> MACRON u'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\u2310' # 0x00a9 -> REVERSED NOT SIGN u'\xac' # 0x00aa -> NOT SIGN u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER u'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u2591' # 0x00b0 -> LIGHT SHADE u'\u2592' # 0x00b1 -> MEDIUM SHADE u'\u2593' # 0x00b2 -> DARK SHADE u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE u'\u256b' # 0x00d7 -> BOX DRAWINGS 
VERTICAL DOUBLE AND HORIZONTAL SINGLE u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT u'\u2588' # 0x00db -> FULL BLOCK u'\u2584' # 0x00dc -> LOWER HALF BLOCK u'\u258c' # 0x00dd -> LEFT HALF BLOCK u'\u2590' # 0x00de -> RIGHT HALF BLOCK u'\u2580' # 0x00df -> UPPER HALF BLOCK u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA u'\xb5' # 0x00e6 -> MICRO SIGN u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA u'\u221e' # 0x00ec -> INFINITY u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON u'\u2229' # 0x00ef -> INTERSECTION u'\u2261' # 0x00f0 -> IDENTICAL TO u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL u'\xf7' # 0x00f6 -> DIVISION SIGN u'\u2248' # 0x00f7 -> ALMOST EQUAL TO u'\xb0' # 0x00f8 -> DEGREE SIGN u'\u2219' # 0x00f9 -> BULLET OPERATOR u'\xb7' # 0x00fa -> MIDDLE DOT u'\u221a' # 0x00fb -> SQUARE ROOT u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N u'\xb2' # 0x00fd -> SUPERSCRIPT TWO u'\u25a0' # 0x00fe -> BLACK SQUARE u'\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 
0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a2: 0x009b, # CENT SIGN 0x00a3: 0x009c, # POUND SIGN 0x00a4: 0x0098, # CURRENCY SIGN 0x00a6: 0x00a0, # BROKEN BAR 0x00a7: 0x008f, # SECTION SIGN 0x00a8: 0x00a4, # DIAERESIS 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00af: 0x00a7, # MACRON 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b3: 0x00a6, # SUPERSCRIPT THREE 0x00b4: 0x00a1, # ACUTE ACCENT 0x00b5: 0x00e6, # MICRO SIGN 0x00b6: 0x0086, # PILCROW SIGN 0x00b7: 0x00fa, # MIDDLE DOT 0x00b8: 0x00a5, # CEDILLA 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF 0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS 0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE 0x00c2: 0x0084, # LATIN 
CAPITAL LETTER A WITH CIRCUMFLEX 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA 0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE 0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX 0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS 0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE 0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x00f7: 0x00f6, # DIVISION SIGN 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON 0x03c0: 0x00e3, # GREEK SMALL LETTER PI 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI 0x2017: 0x008d, # DOUBLE LOW LINE 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N 0x2219: 0x00f9, # BULLET OPERATOR 0x221a: 0x00fb, # SQUARE ROOT 0x221e: 0x00ec, # INFINITY 0x2229: 0x00ef, # INTERSECTION 0x2248: 0x00f7, # ALMOST EQUAL TO 0x2261: 0x00f0, # IDENTICAL TO 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO 0x2310: 0x00a9, # REVERSED NOT SIGN 0x2320: 0x00f4, # TOP HALF INTEGRAL 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND 
LEFT 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
apache-2.0
869,486,293,418,034,400
4,805,383,353,781,165,000
48.438395
97
0.605686
false
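Since this module registers in the standard library under the name 'cp863', the tables above can be sanity-checked with an ordinary round trip (Python 2 semantics, matching the u'' literals in the file):

s = u'\xe9'                          # LATIN SMALL LETTER E WITH ACUTE
assert s.encode('cp863') == '\x82'   # encoding_map: 0x00e9 -> 0x82
assert '\x82'.decode('cp863') == s   # decoding_table: 0x82 -> U+00E9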
dlew/joda-time-android
utils/resource_extractor.py
9
2399
#!/usr/bin/env python

"""
Extracts selected resources from resources directories.

Useful for grabbing translations from AOSP. Point towards base files in the
library (in /res/values) and it will find all the alternate versions in
other directories.
"""

import os
import shutil
from optparse import OptionParser
import xml.etree.ElementTree as ET

ET.register_namespace('android', "http://schemas.android.com/apk/res/android")
ET.register_namespace('xliff', "urn:oasis:names:tc:xliff:document:1.2")

ANDROID_XML_DECLARATION = '<?xml version="1.0" encoding="UTF-8"?>'

def extract_names(path):
    names = {}
    tree = ET.parse(path)
    for child in tree.getroot().iter():
        if 'name' not in child.attrib:
            continue
        names[child.attrib['name']] = None
    return names

def extract(names, res_dir, out_dir):
    # Clear the current output directory
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    for root, dirs, files in os.walk(res_dir):
        for file in files:
            path = os.path.join(root, file)
            fileName, fileExtension = os.path.splitext(path)
            if fileExtension == ".xml":
                xml = extract_file(names, path)
                if xml is not None:
                    val_dir = os.path.join(out_dir, os.path.relpath(root, res_dir))
                    if not os.path.exists(val_dir):
                        os.makedirs(val_dir)
                    xml.write(os.path.join(val_dir, file),
                              encoding='utf-8',
                              xml_declaration=ANDROID_XML_DECLARATION,
                              method='xml')

def extract_file(names, path):
    tree = ET.parse(path)
    root = tree.getroot()
    if root.tag != "resources":
        return

    to_remove = []
    found = False
    for child in tree.iter():
        # Only look at second-level nodes
        if child not in root:
            continue
        if 'name' not in child.attrib or child.attrib['name'] not in names:
            to_remove.append(child)
        else:
            found = True

    for child in to_remove:
        root.remove(child)

    if found:
        return tree

if __name__ == "__main__":
    usage = "usage: %prog [options] baseResFile1 baseResFile2 ..."
    parser = OptionParser(usage=usage)
    parser.add_option('-r', '--res', action="store",
                      help="Resources directory location (/res/)", default="res/")
    parser.add_option('-o', '--out', action="store",
                      help="Output directory", default="out/")
    options, args = parser.parse_args()

    names = {}
    for arg in args:
        names = dict(names.items() + extract_names(arg).items())

    extract(names, options.res, options.out)
apache-2.0
5,293,644,797,696,031,000
6,731,735,557,259,412,000
25.076087
110
0.679033
false
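Programmatically, the two helpers in this script compose like this (a sketch; the import path and the file locations are assumptions):

from resource_extractor import extract_names, extract  # hypothetical import

names = extract_names('library/res/values/strings.xml')  # names worth keeping
extract(names, 'aosp/res/', 'out/')  # mirrors every values-* dir that matches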
frankvdp/django
tests/filtered_relation/tests.py
43
16742
from django.db import connection, transaction from django.db.models import Case, Count, F, FilteredRelation, Q, When from django.test import TestCase from django.test.testcases import skipUnlessDBFeature from .models import Author, Book, Borrower, Editor, RentalSession, Reservation class FilteredRelationTests(TestCase): @classmethod def setUpTestData(cls): cls.author1 = Author.objects.create(name='Alice') cls.author2 = Author.objects.create(name='Jane') cls.editor_a = Editor.objects.create(name='a') cls.editor_b = Editor.objects.create(name='b') cls.book1 = Book.objects.create( title='Poem by Alice', editor=cls.editor_a, author=cls.author1, ) cls.book1.generic_author.set([cls.author2]) cls.book2 = Book.objects.create( title='The book by Jane A', editor=cls.editor_b, author=cls.author2, ) cls.book3 = Book.objects.create( title='The book by Jane B', editor=cls.editor_b, author=cls.author2, ) cls.book4 = Book.objects.create( title='The book by Alice', editor=cls.editor_a, author=cls.author1, ) cls.author1.favorite_books.add(cls.book2) cls.author1.favorite_books.add(cls.book3) def test_select_related(self): qs = Author.objects.annotate( book_join=FilteredRelation('book'), ).select_related('book_join__editor').order_by('pk', 'book_join__pk') with self.assertNumQueries(1): self.assertQuerysetEqual(qs, [ (self.author1, self.book1, self.editor_a, self.author1), (self.author1, self.book4, self.editor_a, self.author1), (self.author2, self.book2, self.editor_b, self.author2), (self.author2, self.book3, self.editor_b, self.author2), ], lambda x: (x, x.book_join, x.book_join.editor, x.book_join.author)) def test_select_related_foreign_key(self): qs = Book.objects.annotate( author_join=FilteredRelation('author'), ).select_related('author_join').order_by('pk') with self.assertNumQueries(1): self.assertQuerysetEqual(qs, [ (self.book1, self.author1), (self.book2, self.author2), (self.book3, self.author2), (self.book4, self.author1), ], lambda x: (x, x.author_join)) @skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of') def test_select_related_foreign_key_for_update_of(self): with transaction.atomic(): qs = Book.objects.annotate( author_join=FilteredRelation('author'), ).select_related('author_join').select_for_update(of=('self',)).order_by('pk') with self.assertNumQueries(1): self.assertQuerysetEqual(qs, [ (self.book1, self.author1), (self.book2, self.author2), (self.book3, self.author2), (self.book4, self.author1), ], lambda x: (x, x.author_join)) def test_without_join(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ), [self.author1, self.author2] ) def test_with_join(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False), [self.author1] ) def test_with_join_and_complex_condition(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation( 'book', condition=Q( Q(book__title__iexact='poem by alice') | Q(book__state=Book.RENTED) ), ), ).filter(book_alice__isnull=False), [self.author1] ) def test_internal_queryset_alias_mapping(self): queryset = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) self.assertIn( 'INNER JOIN {} book_alice ON'.format(connection.ops.quote_name('filtered_relation_book')), str(queryset.query) ) def 
test_with_multiple_filter(self): self.assertSequenceEqual( Author.objects.annotate( book_editor_a=FilteredRelation( 'book', condition=Q(book__title__icontains='book', book__editor_id=self.editor_a.pk), ), ).filter(book_editor_a__isnull=False), [self.author1] ) def test_multiple_times(self): self.assertSequenceEqual( Author.objects.annotate( book_title_alice=FilteredRelation('book', condition=Q(book__title__icontains='alice')), ).filter(book_title_alice__isnull=False).filter(book_title_alice__isnull=False).distinct(), [self.author1] ) def test_exclude_relation_with_join(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=~Q(book__title__icontains='alice')), ).filter(book_alice__isnull=False).distinct(), [self.author2] ) def test_with_m2m(self): qs = Author.objects.annotate( favorite_books_written_by_jane=FilteredRelation( 'favorite_books', condition=Q(favorite_books__in=[self.book2]), ), ).filter(favorite_books_written_by_jane__isnull=False) self.assertSequenceEqual(qs, [self.author1]) def test_with_m2m_deep(self): qs = Author.objects.annotate( favorite_books_written_by_jane=FilteredRelation( 'favorite_books', condition=Q(favorite_books__author=self.author2), ), ).filter(favorite_books_written_by_jane__title='The book by Jane B') self.assertSequenceEqual(qs, [self.author1]) def test_with_m2m_multijoin(self): qs = Author.objects.annotate( favorite_books_written_by_jane=FilteredRelation( 'favorite_books', condition=Q(favorite_books__author=self.author2), ) ).filter(favorite_books_written_by_jane__editor__name='b').distinct() self.assertSequenceEqual(qs, [self.author1]) def test_values_list(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).values_list('book_alice__title', flat=True), ['Poem by Alice'] ) def test_values(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).values(), [{'id': self.author1.pk, 'name': 'Alice', 'content_type_id': None, 'object_id': None}] ) def test_extra(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).extra(where=['1 = 1']), [self.author1] ) @skipUnlessDBFeature('supports_select_union') def test_union(self): qs1 = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) qs2 = Author.objects.annotate( book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')), ).filter(book_jane__isnull=False) self.assertSequenceEqual(qs1.union(qs2), [self.author1, self.author2]) @skipUnlessDBFeature('supports_select_intersection') def test_intersection(self): qs1 = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) qs2 = Author.objects.annotate( book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')), ).filter(book_jane__isnull=False) self.assertSequenceEqual(qs1.intersection(qs2), []) @skipUnlessDBFeature('supports_select_difference') def test_difference(self): qs1 = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), 
).filter(book_alice__isnull=False) qs2 = Author.objects.annotate( book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')), ).filter(book_jane__isnull=False) self.assertSequenceEqual(qs1.difference(qs2), [self.author1]) def test_select_for_update(self): self.assertSequenceEqual( Author.objects.annotate( book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')), ).filter(book_jane__isnull=False).select_for_update(), [self.author2] ) def test_defer(self): # One query for the list and one query for the deferred title. with self.assertNumQueries(2): self.assertQuerysetEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).select_related('book_alice').defer('book_alice__title'), ['Poem by Alice'], lambda author: author.book_alice.title ) def test_only_not_supported(self): msg = 'only() is not supported with FilteredRelation.' with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).select_related('book_alice').only('book_alice__state') def test_as_subquery(self): inner_qs = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) qs = Author.objects.filter(id__in=inner_qs) self.assertSequenceEqual(qs, [self.author1]) def test_with_foreign_key_error(self): msg = ( "FilteredRelation's condition doesn't support nested relations " "(got 'author__favorite_books__author')." ) with self.assertRaisesMessage(ValueError, msg): list(Book.objects.annotate( alice_favorite_books=FilteredRelation( 'author__favorite_books', condition=Q(author__favorite_books__author=self.author1), ) )) def test_with_foreign_key_on_condition_error(self): msg = ( "FilteredRelation's condition doesn't support nested relations " "(got 'book__editor__name__icontains')." ) with self.assertRaisesMessage(ValueError, msg): list(Author.objects.annotate( book_edited_by_b=FilteredRelation('book', condition=Q(book__editor__name__icontains='b')), )) def test_with_empty_relation_name_error(self): with self.assertRaisesMessage(ValueError, 'relation_name cannot be empty.'): FilteredRelation('', condition=Q(blank='')) def test_with_condition_as_expression_error(self): msg = 'condition argument must be a Q() instance.' expression = Case( When(book__title__iexact='poem by alice', then=True), default=False, ) with self.assertRaisesMessage(ValueError, msg): FilteredRelation('book', condition=expression) def test_with_prefetch_related(self): msg = 'prefetch_related() is not supported with FilteredRelation.' 
qs = Author.objects.annotate( book_title_contains_b=FilteredRelation('book', condition=Q(book__title__icontains='b')), ).filter( book_title_contains_b__isnull=False, ) with self.assertRaisesMessage(ValueError, msg): qs.prefetch_related('book_title_contains_b') with self.assertRaisesMessage(ValueError, msg): qs.prefetch_related('book_title_contains_b__editor') def test_with_generic_foreign_key(self): self.assertSequenceEqual( Book.objects.annotate( generic_authored_book=FilteredRelation( 'generic_author', condition=Q(generic_author__isnull=False) ), ).filter(generic_authored_book__isnull=False), [self.book1] ) class FilteredRelationAggregationTests(TestCase): @classmethod def setUpTestData(cls): cls.author1 = Author.objects.create(name='Alice') cls.editor_a = Editor.objects.create(name='a') cls.book1 = Book.objects.create( title='Poem by Alice', editor=cls.editor_a, author=cls.author1, ) cls.borrower1 = Borrower.objects.create(name='Jenny') cls.borrower2 = Borrower.objects.create(name='Kevin') # borrower 1 reserves, rents, and returns book1. Reservation.objects.create( borrower=cls.borrower1, book=cls.book1, state=Reservation.STOPPED, ) RentalSession.objects.create( borrower=cls.borrower1, book=cls.book1, state=RentalSession.STOPPED, ) # borrower2 reserves, rents, and returns book1. Reservation.objects.create( borrower=cls.borrower2, book=cls.book1, state=Reservation.STOPPED, ) RentalSession.objects.create( borrower=cls.borrower2, book=cls.book1, state=RentalSession.STOPPED, ) def test_aggregate(self): """ filtered_relation() not only improves performance but also creates correct results when aggregating with multiple LEFT JOINs. Books can be reserved then rented by a borrower. Each reservation and rental session are recorded with Reservation and RentalSession models. Every time a reservation or a rental session is over, their state is changed to 'stopped'. Goal: Count number of books that are either currently reserved or rented by borrower1 or available. """ qs = Book.objects.annotate( is_reserved_or_rented_by=Case( When(reservation__state=Reservation.NEW, then=F('reservation__borrower__pk')), When(rental_session__state=RentalSession.NEW, then=F('rental_session__borrower__pk')), default=None, ) ).filter( Q(is_reserved_or_rented_by=self.borrower1.pk) | Q(state=Book.AVAILABLE) ).distinct() self.assertEqual(qs.count(), 1) # If count is equal to 1, the same aggregation should return in the # same result but it returns 4. self.assertSequenceEqual(qs.annotate(total=Count('pk')).values('total'), [{'total': 4}]) # With FilteredRelation, the result is as expected (1). qs = Book.objects.annotate( active_reservations=FilteredRelation( 'reservation', condition=Q( reservation__state=Reservation.NEW, reservation__borrower=self.borrower1, ) ), ).annotate( active_rental_sessions=FilteredRelation( 'rental_session', condition=Q( rental_session__state=RentalSession.NEW, rental_session__borrower=self.borrower1, ) ), ).filter( (Q(active_reservations__isnull=False) | Q(active_rental_sessions__isnull=False)) | Q(state=Book.AVAILABLE) ).distinct() self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs.annotate(total=Count('pk')).values('total'), [{'total': 1}])
bsd-3-clause
4,202,658,977,029,068,000
-8,126,986,911,291,492,000
41.38481
107
0.594254
false
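Distilled from the tests above, the pattern under test is a conditional join that then behaves like an ordinary relation (a sketch using the same fixture models; not part of the suite):

from django.db.models import FilteredRelation, Q

alice_books = Author.objects.annotate(
    book_alice=FilteredRelation(
        'book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False)  # INNER JOIN restricted to the condition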
anbangr/trusted-nova
nova/api/openstack/compute/contrib/flavormanage.py
8
3129
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License

import webob

from nova.api.openstack.compute import flavors as flavors_api
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import instance_types
from nova import exception
from nova import log as logging

LOG = logging.getLogger(__name__)

authorize = extensions.extension_authorizer('compute', 'flavormanage')


class FlavorManageController(wsgi.Controller):
    """
    The Flavor Lifecycle API controller for the OpenStack API.
    """
    _view_builder_class = flavors_view.ViewBuilder

    def __init__(self):
        super(FlavorManageController, self).__init__()

    @wsgi.action("delete")
    def _delete(self, req, id):
        context = req.environ['nova.context']
        authorize(context)

        try:
            flavor = instance_types.get_instance_type_by_flavor_id(id)
        except exception.NotFound, e:
            raise webob.exc.HTTPNotFound(explanation=str(e))

        instance_types.destroy(flavor['name'])

        return webob.Response(status_int=202)

    @wsgi.action("create")
    @wsgi.serializers(xml=flavors_api.FlavorTemplate)
    def _create(self, req, body):
        context = req.environ['nova.context']
        authorize(context)

        vals = body['flavor']
        name = vals['name']
        flavorid = vals['id']
        memory_mb = vals.get('ram')
        vcpus = vals.get('vcpus')
        root_gb = vals.get('disk')
        ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral')
        swap = vals.get('swap')
        rxtx_factor = vals.get('rxtx_factor')

        try:
            flavor = instance_types.create(name, memory_mb, vcpus,
                                           root_gb, ephemeral_gb, flavorid,
                                           swap, rxtx_factor)
        except exception.InstanceTypeExists as err:
            raise webob.exc.HTTPConflict(explanation=str(err))

        return self._view_builder.show(req, flavor)


class Flavormanage(extensions.ExtensionDescriptor):
    """
    Flavor create/delete API support
    """

    name = "FlavorManage"
    alias = "os-flavor-manage"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_manage/api/v1.1")
    updated = "2012-01-19T00:00:00+00:00"

    def get_controller_extensions(self):
        controller = FlavorManageController()
        extension = extensions.ControllerExtension(self, 'flavors',
                                                   controller)
        return [extension]
apache-2.0
2,950,919,788,346,241,000
-5,161,719,480,242,385,000
32.645161
79
0.654203
false
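The _create() action above pulls a fixed set of keys out of body['flavor']. A hedged sketch of a request body that satisfies it — the values are invented for illustration, only the key names come from the handler:

    body = {
        "flavor": {
            "name": "m1.custom",             # flavor display name
            "id": "10",                      # flavorid chosen by the caller
            "ram": 512,                      # becomes memory_mb
            "vcpus": 1,
            "disk": 1,                       # becomes root_gb
            "OS-FLV-EXT-DATA:ephemeral": 0,  # becomes ephemeral_gb
            "swap": 0,
            "rxtx_factor": 1.0,
        }
    }

Optional keys that are omitted simply arrive as None via dict.get(), and a duplicate name surfaces as HTTP 409 through the InstanceTypeExists branch.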
andaag/scikit-learn
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
286
2378
""" ===================================================================== Decision boundary of label propagation versus SVM on the Iris dataset ===================================================================== Comparison for decision boundary generated on iris dataset between Label Propagation and SVM. This demonstrates Label Propagation learning a good boundary even with a small amount of labeled data. """ print(__doc__) # Authors: Clay Woolam <[email protected]> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn import svm from sklearn.semi_supervised import label_propagation rng = np.random.RandomState(0) iris = datasets.load_iris() X = iris.data[:, :2] y = iris.target # step size in the mesh h = .02 y_30 = np.copy(y) y_30[rng.rand(len(y)) < 0.3] = -1 y_50 = np.copy(y) y_50[rng.rand(len(y)) < 0.5] = -1 # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30) ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50) ls100 = (label_propagation.LabelSpreading().fit(X, y), y) rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['Label Spreading 30% data', 'Label Spreading 50% data', 'Label Spreading 100% data', 'SVC with rbf kernel'] color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)} for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points colors = [color_map[y] for y in y_train] plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired) plt.title(titles[i]) plt.text(.90, 0, "Unlabeled points are colored white") plt.show()
bsd-3-clause
7,359,004,561,837,943,000
1,773,022,494,855,933,200
29.101266
74
0.598402
false
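The iris script above relies on scikit-learn's convention that a label of -1 marks an unlabeled sample. The same convention on synthetic data, as a self-contained sketch (import style mirrors the example; the dataset and masking rate are arbitrary):

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.semi_supervised import label_propagation

    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=200, random_state=0)

    # Hide roughly 30% of the labels, as the y_30 variant does.
    y_partial = np.copy(y)
    y_partial[rng.rand(len(y)) < 0.3] = -1

    model = label_propagation.LabelSpreading().fit(X, y_partial)
    print(model.score(X, y))  # accuracy against the full ground truth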
yanchen036/tensorflow
tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py
15
15481
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Wishart.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from scipy import linalg from tensorflow.contrib import distributions as distributions_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test distributions = distributions_lib def make_pd(start, n): """Deterministically create a positive definite matrix.""" x = np.tril(linalg.circulant(np.arange(start, start + n))) return np.dot(x, x.T) def chol(x): """Compute Cholesky factorization.""" return linalg.cholesky(x).T def wishart_var(df, x): """Compute Wishart variance for numpy scale matrix.""" x = np.sqrt(df) * np.asarray(x) d = np.expand_dims(np.diag(x), -1) return x**2 + np.dot(d, d.T) class WishartCholeskyTest(test.TestCase): def testEntropy(self): with self.test_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) # sp.stats.wishart(df=4, scale=make_pd(1., 2)).entropy() self.assertAllClose(6.301387092430769, w.entropy().eval()) w = distributions.WishartCholesky(df=1, scale=[[1.]]) # sp.stats.wishart(df=1,scale=1).entropy() self.assertAllClose(0.78375711047393404, w.entropy().eval()) def testMeanLogDetAndLogNormalizingConstant(self): with self.test_session(): def entropy_alt(w): return ( w.log_normalization() - 0.5 * (w.df - w.dimension - 1.) * w.mean_log_det() + 0.5 * w.df * w.dimension).eval() w = distributions.WishartCholesky(df=4, scale=chol(make_pd(1., 2))) self.assertAllClose(w.entropy().eval(), entropy_alt(w)) w = distributions.WishartCholesky(df=5, scale=[[1.]]) self.assertAllClose(w.entropy().eval(), entropy_alt(w)) def testMean(self): with self.test_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) self.assertAllEqual(df * scale, w.mean().eval()) def testMode(self): with self.test_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) self.assertAllEqual((df - 2. - 1.) 
* scale, w.mode().eval()) def testStd(self): with self.test_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) self.assertAllEqual(chol(wishart_var(df, scale)), w.stddev().eval()) def testVariance(self): with self.test_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) self.assertAllEqual(wishart_var(df, scale), w.variance().eval()) def testSample(self): with self.test_session(): scale = make_pd(1., 2) df = 4 chol_w = distributions.WishartCholesky( df, chol(scale), cholesky_input_output_matrices=False) x = chol_w.sample(1, seed=42).eval() chol_x = [chol(x[0])] full_w = distributions.WishartFull( df, scale, cholesky_input_output_matrices=False) self.assertAllClose(x, full_w.sample(1, seed=42).eval()) chol_w_chol = distributions.WishartCholesky( df, chol(scale), cholesky_input_output_matrices=True) self.assertAllClose(chol_x, chol_w_chol.sample(1, seed=42).eval()) eigen_values = array_ops.matrix_diag_part( chol_w_chol.sample( 1000, seed=42)) np.testing.assert_array_less(0., eigen_values.eval()) full_w_chol = distributions.WishartFull( df, scale, cholesky_input_output_matrices=True) self.assertAllClose(chol_x, full_w_chol.sample(1, seed=42).eval()) eigen_values = array_ops.matrix_diag_part( full_w_chol.sample( 1000, seed=42)) np.testing.assert_array_less(0., eigen_values.eval()) # Check first and second moments. df = 4. chol_w = distributions.WishartCholesky( df=df, scale=chol(make_pd(1., 3)), cholesky_input_output_matrices=False) x = chol_w.sample(10000, seed=42) self.assertAllEqual((10000, 3, 3), x.get_shape()) moment1_estimate = math_ops.reduce_mean(x, reduction_indices=[0]).eval() self.assertAllClose(chol_w.mean().eval(), moment1_estimate, rtol=0.05) # The Variance estimate uses the squares rather than outer-products # because Wishart.Variance is the diagonal of the Wishart covariance # matrix. variance_estimate = (math_ops.reduce_mean( math_ops.square(x), reduction_indices=[0]) - math_ops.square(moment1_estimate)).eval() self.assertAllClose( chol_w.variance().eval(), variance_estimate, rtol=0.05) # Test that sampling with the same seed twice gives the same results. def testSampleMultipleTimes(self): with self.test_session(): df = 4. n_val = 100 random_seed.set_random_seed(654321) chol_w1 = distributions.WishartCholesky( df=df, scale=chol(make_pd(1., 3)), cholesky_input_output_matrices=False, name="wishart1") samples1 = chol_w1.sample(n_val, seed=123456).eval() random_seed.set_random_seed(654321) chol_w2 = distributions.WishartCholesky( df=df, scale=chol(make_pd(1., 3)), cholesky_input_output_matrices=False, name="wishart2") samples2 = chol_w2.sample(n_val, seed=123456).eval() self.assertAllClose(samples1, samples2) def testProb(self): with self.test_session(): # Generate some positive definite (pd) matrices and their Cholesky # factorizations. x = np.array( [make_pd(1., 2), make_pd(2., 2), make_pd(3., 2), make_pd(4., 2)]) chol_x = np.array([chol(x[0]), chol(x[1]), chol(x[2]), chol(x[3])]) # Since Wishart wasn"t added to SciPy until 0.16, we'll spot check some # pdfs with hard-coded results from upstream SciPy. log_prob_df_seq = np.array([ # math.log(stats.wishart.pdf(x[0], df=2+0, scale=x[0])) -3.5310242469692907, # math.log(stats.wishart.pdf(x[1], df=2+1, scale=x[1])) -7.689907330328961, # math.log(stats.wishart.pdf(x[2], df=2+2, scale=x[2])) -10.815845159537895, # math.log(stats.wishart.pdf(x[3], df=2+3, scale=x[3])) -13.640549882916691, ]) # This test checks that batches don't interfere with correctness. 
w = distributions.WishartCholesky( df=[2, 3, 4, 5], scale=chol_x, cholesky_input_output_matrices=True) self.assertAllClose(log_prob_df_seq, w.log_prob(chol_x).eval()) # Now we test various constructions of Wishart with different sample # shape. log_prob = np.array([ # math.log(stats.wishart.pdf(x[0], df=4, scale=x[0])) -4.224171427529236, # math.log(stats.wishart.pdf(x[1], df=4, scale=x[0])) -6.3378770664093453, # math.log(stats.wishart.pdf(x[2], df=4, scale=x[0])) -12.026946850193017, # math.log(stats.wishart.pdf(x[3], df=4, scale=x[0])) -20.951582705289454, ]) for w in ( distributions.WishartCholesky( df=4, scale=chol_x[0], cholesky_input_output_matrices=False), distributions.WishartFull( df=4, scale=x[0], cholesky_input_output_matrices=False)): self.assertAllEqual((2, 2), w.event_shape_tensor().eval()) self.assertEqual(2, w.dimension.eval()) self.assertAllClose(log_prob[0], w.log_prob(x[0]).eval()) self.assertAllClose(log_prob[0:2], w.log_prob(x[0:2]).eval()) self.assertAllClose( np.reshape(log_prob, (2, 2)), w.log_prob(np.reshape(x, (2, 2, 2, 2))).eval()) self.assertAllClose( np.reshape(np.exp(log_prob), (2, 2)), w.prob(np.reshape(x, (2, 2, 2, 2))).eval()) self.assertAllEqual((2, 2), w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape()) for w in ( distributions.WishartCholesky( df=4, scale=chol_x[0], cholesky_input_output_matrices=True), distributions.WishartFull( df=4, scale=x[0], cholesky_input_output_matrices=True)): self.assertAllEqual((2, 2), w.event_shape_tensor().eval()) self.assertEqual(2, w.dimension.eval()) self.assertAllClose(log_prob[0], w.log_prob(chol_x[0]).eval()) self.assertAllClose(log_prob[0:2], w.log_prob(chol_x[0:2]).eval()) self.assertAllClose( np.reshape(log_prob, (2, 2)), w.log_prob(np.reshape(chol_x, (2, 2, 2, 2))).eval()) self.assertAllClose( np.reshape(np.exp(log_prob), (2, 2)), w.prob(np.reshape(chol_x, (2, 2, 2, 2))).eval()) self.assertAllEqual((2, 2), w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape()) def testBatchShape(self): with self.test_session() as sess: scale = make_pd(1., 2) chol_scale = chol(scale) w = distributions.WishartCholesky(df=4, scale=chol_scale) self.assertAllEqual([], w.batch_shape) self.assertAllEqual([], w.batch_shape_tensor().eval()) w = distributions.WishartCholesky( df=[4., 4], scale=np.array([chol_scale, chol_scale])) self.assertAllEqual([2], w.batch_shape) self.assertAllEqual([2], w.batch_shape_tensor().eval()) scale_deferred = array_ops.placeholder(dtypes.float32) w = distributions.WishartCholesky(df=4, scale=scale_deferred) self.assertAllEqual( [], sess.run(w.batch_shape_tensor(), feed_dict={scale_deferred: chol_scale})) self.assertAllEqual( [2], sess.run(w.batch_shape_tensor(), feed_dict={scale_deferred: [chol_scale, chol_scale]})) def testEventShape(self): with self.test_session() as sess: scale = make_pd(1., 2) chol_scale = chol(scale) w = distributions.WishartCholesky(df=4, scale=chol_scale) self.assertAllEqual([2, 2], w.event_shape) self.assertAllEqual([2, 2], w.event_shape_tensor().eval()) w = distributions.WishartCholesky( df=[4., 4], scale=np.array([chol_scale, chol_scale])) self.assertAllEqual([2, 2], w.event_shape) self.assertAllEqual([2, 2], w.event_shape_tensor().eval()) scale_deferred = array_ops.placeholder(dtypes.float32) w = distributions.WishartCholesky(df=4, scale=scale_deferred) self.assertAllEqual( [2, 2], sess.run(w.event_shape_tensor(), feed_dict={scale_deferred: chol_scale})) self.assertAllEqual( [2, 2], sess.run(w.event_shape_tensor(), feed_dict={scale_deferred: [chol_scale, chol_scale]})) def 
testValidateArgs(self): with self.test_session() as sess: df_deferred = array_ops.placeholder(dtypes.float32) chol_scale_deferred = array_ops.placeholder(dtypes.float32) x = make_pd(1., 3) chol_scale = chol(x) # Check expensive, deferred assertions. with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "cannot be less than"): chol_w = distributions.WishartCholesky( df=df_deferred, scale=chol_scale_deferred, validate_args=True) sess.run(chol_w.log_prob(np.asarray( x, dtype=np.float32)), feed_dict={df_deferred: 2., chol_scale_deferred: chol_scale}) with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "Cholesky decomposition was not successful"): chol_w = distributions.WishartFull( df=df_deferred, scale=chol_scale_deferred) # np.ones((3, 3)) is not positive, definite. sess.run(chol_w.log_prob(np.asarray( x, dtype=np.float32)), feed_dict={ df_deferred: 4., chol_scale_deferred: np.ones( (3, 3), dtype=np.float32) }) with self.assertRaisesOpError("scale must be square"): chol_w = distributions.WishartCholesky( df=4., scale=np.array([[2., 3., 4.], [1., 2., 3.]], dtype=np.float32), validate_args=True) sess.run(chol_w.scale().eval()) # Ensure no assertions. chol_w = distributions.WishartCholesky( df=df_deferred, scale=chol_scale_deferred, validate_args=False) sess.run(chol_w.log_prob(np.asarray( x, dtype=np.float32)), feed_dict={df_deferred: 4, chol_scale_deferred: chol_scale}) # Bogus log_prob, but since we have no checks running... c"est la vie. sess.run(chol_w.log_prob(np.asarray( x, dtype=np.float32)), feed_dict={df_deferred: 4, chol_scale_deferred: np.ones((3, 3))}) def testStaticAsserts(self): with self.test_session(): x = make_pd(1., 3) chol_scale = chol(x) # Still has these assertions because they're resolveable at graph # construction with self.assertRaisesRegexp(ValueError, "cannot be less than"): distributions.WishartCholesky( df=2, scale=chol_scale, validate_args=False) with self.assertRaisesRegexp(TypeError, "Argument tril must have dtype"): distributions.WishartCholesky( df=4., scale=np.asarray( chol_scale, dtype=np.int32), validate_args=False) def testSampleBroadcasts(self): dims = 2 batch_shape = [2, 3] sample_shape = [2, 1] scale = np.float32([ [[1., 0.5], [0.5, 1.]], [[0.5, 0.25], [0.25, 0.75]], ]) scale = np.reshape(np.concatenate([scale, scale, scale], axis=0), batch_shape + [dims, dims]) wishart = distributions.WishartFull(df=5, scale=scale) x = wishart.sample(sample_shape, seed=42) with self.test_session() as sess: x_ = sess.run(x) expected_shape = sample_shape + batch_shape + [dims, dims] self.assertAllEqual(expected_shape, x.shape) self.assertAllEqual(expected_shape, x_.shape) if __name__ == "__main__": test.main()
apache-2.0
4,333,918,919,267,966,500
-8,561,906,485,361,614,000
36.303614
80
0.594018
false
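wishart_var() in the test file encodes the closed-form elementwise variance of a Wishart(df, S) draw: Var[W_ij] = df * (S_ij**2 + S_ii * S_jj). A small pure-numpy restatement of the identities the tests rely on (no TensorFlow required; make_pd is copied from the file above):

    import numpy as np
    from scipy import linalg

    def make_pd(start, n):
        # Deterministic positive-definite matrix, as in the test file.
        x = np.tril(linalg.circulant(np.arange(start, start + n)))
        return np.dot(x, x.T)

    df, scale = 4.0, make_pd(1., 2)

    mean = df * scale  # E[W] = df * S, the property checked by testMean

    d = np.diag(scale)
    var = df * (scale ** 2 + np.outer(d, d))  # same value wishart_var returns
    print(mean)
    print(var)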
levkar/odoo-addons
stock_remit/wizard/stock_print_remit.py
1
2063
# -*- coding: utf-8 -*- from openerp.osv import fields, osv class stock_print_remit(osv.osv_memory): _name = 'stock.print_remit' _description = "Print Remit" _columns = { 'remit_number': fields.char('Remit Number'), } def default_get(self, cr, uid, fields, context=None): res = super(stock_print_remit, self).default_get( cr, uid, fields, context=context) if 'active_id' not in context: return res picking_obj = self.pool.get('stock.picking') picking_id = context['active_id'] picking = picking_obj.browse(cr, uid, picking_id, context=context) if isinstance(picking, list): picking = picking[0] if not picking.remit_number: picking_obj.set_remit_number( cr, uid, picking_id, context=context) picking = picking_obj.browse(cr, uid, picking_id, context=context) res['remit_number'] = picking.remit_number return res def recompute_sequence_number(self, cr, uid, ids, context=None): if 'active_id' not in context: return False picking_obj = self.pool.get('stock.picking') picking_id = context['active_id'] picking = picking_obj.browse(cr, uid, picking_id, context=context) if isinstance(picking, list): picking = picking[0] picking_obj.set_remit_number( cr, uid, picking_id, context=context) picking = picking_obj.browse(cr, uid, picking_id, context=context) vals = {'remit_number': picking.remit_number} return {'value': vals} def print_stock_picking(self, cr, uid, ids, context=None): if context is None: context = {} picking_obj = self.pool['stock.picking'] if 'active_id' not in context: return False picking_id = context['active_id'] context['from_wizard'] = True return picking_obj.do_print_picking( cr, uid, picking_id, context=context)
agpl-3.0
-996,019,002,310,436,700
4,388,109,843,141,010,000
31.746032
78
0.593311
false
arupiot/deskcontrol
deskcontrol/config.py
1
4175
import os NAME_AUTHORITY = os.environ.get("NAME_AUTHORITY", "eightfitzroy.arupiot.com") DEVICE_NAME = os.environ.get("DEVICE_NAME", "TST-1") HOST = os.environ.get("BRICKD_HOST", "localhost") PORT = int(os.environ.get("BRICKD_PORT", "4223")) SHORT_IDENT = os.environ.get("SHORT_IDENT", "test") MODULES = [] MENU_MODULES = [] MQTT_CONFIG = {} ZMQ_CONFIG = {} GCLOUD_CONFIG = {} INFLUX_AUTH = {} ENVAR_MODULES = { "ENABLE_MODULE_MENU": ("MenuModule", "navigation", "Navigation"), "ENABLE_MODULE_INPUT": ("InputModule", "inputs", "Inputs"), "ENABLE_MODULE_SLEEP": ("SleepModule", "sleep", "Sleep"), "ENABLE_MODULE_RFID": ("RFIDModule", "rfid", "RFID"), "ENABLE_MODULE_INFLUX": ("InfluxModule", "influx", "InfluxDB"), "ENABLE_MODULE_HTTP_PUSH": ("HttpPushModule", "httppush", "HTTPpush"), "ENABLE_MODULE_TF_SCREEN": ("TFScreen", "tfscreen", "TF Screen"), "ENABLE_MODULE_KIVY_SCREEN": ("KivyScreen", "kivyscreen", "Kivy Screen"), "ENABLE_MODULE_GOOGLE_IOT": ("GoogleIoTModule", "googleiot", "GoogleIoT"), "ENABLE_MODULE_MQTT": ("MQTTModule", "mqtt_module", "MQTT"), "ENABLE_MODULE_ZMQ": ("ZMQModule", "zmq_module", "ZMQ"), "ENABLE_MODULE_KILN": ("KilnModule", "kiln", "Kiln"), "ENABLE_MODULE_PICKLE": ("PickleModule", "pickle", "Local Storage"), } for envar in ENVAR_MODULES: if os.environ.get(envar): MODULES.append(ENVAR_MODULES[envar]) ENVAR_MENU_MODULES = { "ENABLE_MENU_SENSOR": ("SensorModule", "sensors", "Sensors"), "ENABLE_MENU_LIGHTING": ("LightingModule", "lighting", "Lighting"), "ENABLE_MENU_DC_POWER": ("DCPowerModule", "dcpower", "Power"), "ENABLE_MENU_AC_POWER": ("ACPowerModule", "acpower", "Power"), # Works on RPi only "ENABLE_MENU_NETWORK": ("NetworkModule", "network", "Network") } for envar in ENVAR_MENU_MODULES: if os.environ.get(envar): MODULES.append(ENVAR_MENU_MODULES[envar]) if os.environ.get("ENABLE_MODULE_INFLUX"): INFLUX_AUTH = { "host": os.environ.get("INFLUXDB_HOST", "127.0.0.1"), "port": int(os.environ.get("INFLUXDB_PORT", "8086")), "user": os.environ.get("INFLUXDB_USER", "admin"), "pass": os.environ.get("INFLUXDB_PASS", "admin"), "db": os.environ.get("INFLUXDB_DB", "iotdesks"), "ssl": bool(os.environ.get("INFLUXDB_HOST")) } if os.environ.get("ENABLE_MODULE_GOOGLE_IOT"): GCLOUD_CONFIG = { "project_id": os.environ.get("GCLOUD_PROJECT_ID", "digital-building-0000000000000"), "cloud_region": os.environ.get("GCLOUD_REGION", "europe-west1"), "registry_id": os.environ.get("GCLOUD_REGISTRY_ID", "iotdesks"), "device_id": os.environ.get("GCLOUD_DEVICE_ID", "XXXX"), "private_key_file": os.environ.get("GCLOUD_PRIVATE_KEY_FILE", "keys/rsa_private.pem"), "algorithm": os.environ.get("GCLOUD_ALGORITHM", "RS256"), "ca_certs": os.environ.get("GCLOUD_CA_CERTS", "keys/google.pem"), "mqtt_bridge_hostname": os.environ.get("GCLOUD_MQTT_HOST", "mqtt.googleapis.com"), "mqtt_bridge_port": int(os.environ.get("GCLOUD_MQTT_PORT", "8883")), } if os.environ.get("ENABLE_MODULE_MQTT"): MQTT_CONFIG = { "mqtt_username": os.environ.get("MQTT_USERNAME"), "mqtt_password": os.environ.get("MQTT_PASSWORD"), "mqtt_client_id": os.environ.get("MQTT_CLIENT_ID", "test"), "mqtt_broker_host": os.environ.get("MQTT_BROKER_HOST"), "mqtt_broker_port": int(os.environ.get("MQTT_BROKER_PORT", "8883")), "mqtt_publish_topic": os.environ.get("MQTT_PUBLISH_TOPIC", "/ishiki/test/events"), "mqtt_subscribe_topic": os.environ.get("MQTT_SUBSCRIBE_TOPIC", "/ishiki/test/commands"), } if os.environ.get("ENABLE_MODULE_ZMQ"): ZMQ_CONFIG = { "zmq_port": os.environ.get("ZMQ_PORT"), "zmq_topic": os.environ.get("ZMQ_TOPIC"), } SCHEMA_POST_URL = "" PICKLEDB = 
"deskcontrol.db" try: config_module = __import__('config_local', globals(), locals()) for setting in dir(config_module): if setting == setting.upper(): locals()[setting] = getattr(config_module, setting) except Exception: pass
mit
6,533,300,591,001,430,000
2,005,064,604,490,507,800
40.336634
96
0.626826
false
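config.py above decides everything at import time: a module is registered only if its ENABLE_* variable exists in the environment. A usage sketch, assuming the deskcontrol package is importable (variable values are illustrative):

    import os

    # Must be set before config is imported; the checks run at import time.
    os.environ["ENABLE_MODULE_MENU"] = "1"
    os.environ["ENABLE_MODULE_INFLUX"] = "1"
    os.environ["INFLUXDB_HOST"] = "influx.example.com"  # hypothetical host

    from deskcontrol import config

    print(config.MODULES)      # includes the MenuModule and InfluxModule tuples
    print(config.INFLUX_AUTH)  # populated because ENABLE_MODULE_INFLUX is set

Note that any non-empty string enables a module, since os.environ.get() is only tested for truthiness.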
Robpol86/coveralls-multi-ci
tests/test_git_stats_detached.py
1
2466
import subprocess from coveralls_multi_ci import git_stats def test_master(repo_dir, hashes): hex_sha = hashes['master'] assert 0 == subprocess.check_call(['git', 'checkout', '-qf', hex_sha], cwd=repo_dir) actual = git_stats(repo_dir) expected = dict( branch='master', remotes=[dict(name='origin', url='http://localhost/git.git'), ], head=dict( id=hex_sha, author_name='MrsAuthor', author_email='[email protected]', committer_name='MrCommit', committer_email='[email protected]', message='Committing empty file.' ) ) assert expected == actual def test_feature_branch(repo_dir, hashes): hex_sha = hashes['feature'] assert 0 == subprocess.check_call(['git', 'checkout', '-qf', hex_sha], cwd=repo_dir) actual = git_stats(repo_dir) expected = dict( branch='feature', remotes=[dict(name='origin', url='http://localhost/git.git'), ], head=dict( id=hex_sha, author_name='MrCommit', author_email='[email protected]', committer_name='MrCommit', committer_email='[email protected]', message='Wrote to file.' ) ) assert expected == actual def test_tag_annotated(repo_dir, hashes): hex_sha = hashes['tag_annotated'] assert 0 == subprocess.check_call(['git', 'checkout', '-qf', hex_sha], cwd=repo_dir) actual = git_stats(repo_dir) expected = dict( branch='v1.0', remotes=[dict(name='origin', url='http://localhost/git.git'), ], head=dict( id=hex_sha, author_name='MrCommit', author_email='[email protected]', committer_name='MrCommit', committer_email='[email protected]', message='Wrote to file2.' ) ) assert expected == actual def test_tag_light(repo_dir, hashes): hex_sha = hashes['tag_light'] assert 0 == subprocess.check_call(['git', 'checkout', '-qf', hex_sha], cwd=repo_dir) actual = git_stats(repo_dir) expected = dict( branch='v1.0l', remotes=[dict(name='origin', url='http://localhost/git.git'), ], head=dict( id=hex_sha, author_name='MrCommit', author_email='[email protected]', committer_name='MrCommit', committer_email='[email protected]', message='Wrote to file3.' ) ) assert expected == actual
mit
-5,836,870,726,430,459,000
-3,120,046,556,121,978,000
28.710843
88
0.553528
false
BT-ojossen/l10n-switzerland
l10n_ch_bank/migrations/8.0.9.0.0/pre-migration.py
10
1402
# -*- coding: utf-8 -*- ############################################################################## # # Author: Guewen Baconnier # Copyright 2014 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ The banks have been created in the l10n_ch module because they used the wrong namespace (i.e. ``l10n_ch.bank_0``). Now, the records are created in the correct module but we have to correct the existing records. """ def migrate(cr, version): if not version: return query = ("UPDATE ir_model_data " "SET module = 'l10n_ch_bank' " "WHERE module = 'l10n_ch' " "AND model = 'res.bank' ") cr.execute(query)
agpl-3.0
5,513,364,718,970,052,000
4,624,620,271,081,392,000
36.891892
78
0.60271
false
romankagan/DDBWorkbench
plugins/hg4idea/testData/bin/hgext/largefiles/basestore.py
92
7659
# Copyright 2009-2010 Gregory P. Ward # Copyright 2009-2010 Intelerad Medical Systems Incorporated # Copyright 2010-2011 Fog Creek Software # Copyright 2010-2011 Unity Technologies # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. '''base class for store implementations and store-related utility code''' import re from mercurial import util, node, hg from mercurial.i18n import _ import lfutil class StoreError(Exception): '''Raised when there is a problem getting files from or putting files to a central store.''' def __init__(self, filename, hash, url, detail): self.filename = filename self.hash = hash self.url = url self.detail = detail def longmessage(self): return (_("error getting id %s from url %s for file %s: %s\n") % (self.hash, self.url, self.filename, self.detail)) def __str__(self): return "%s: %s" % (self.url, self.detail) class basestore(object): def __init__(self, ui, repo, url): self.ui = ui self.repo = repo self.url = url def put(self, source, hash): '''Put source file into the store so it can be retrieved by hash.''' raise NotImplementedError('abstract method') def exists(self, hashes): '''Check to see if the store contains the given hashes. Given an iterable of hashes it returns a mapping from hash to bool.''' raise NotImplementedError('abstract method') def get(self, files): '''Get the specified largefiles from the store and write to local files under repo.root. files is a list of (filename, hash) tuples. Return (success, missing), lists of files successfully downloaded and those not found in the store. success is a list of (filename, hash) tuples; missing is a list of filenames that we could not get. (The detailed error message will already have been presented to the user, so missing is just supplied as a summary.)''' success = [] missing = [] ui = self.ui util.makedirs(lfutil.storepath(self.repo, '')) at = 0 available = self.exists(set(hash for (_filename, hash) in files)) for filename, hash in files: ui.progress(_('getting largefiles'), at, unit='lfile', total=len(files)) at += 1 ui.note(_('getting %s:%s\n') % (filename, hash)) if not available.get(hash): ui.warn(_('%s: largefile %s not available from %s\n') % (filename, hash, self.url)) missing.append(filename) continue storefilename = lfutil.storepath(self.repo, hash) tmpfile = util.atomictempfile(storefilename + '.tmp', createmode=self.repo.store.createmode) try: hhash = self._getfile(tmpfile, filename, hash) except StoreError, err: ui.warn(err.longmessage()) hhash = "" tmpfile.close() if hhash != hash: if hhash != "": ui.warn(_('%s: data corruption (expected %s, got %s)\n') % (filename, hash, hhash)) util.unlink(storefilename + '.tmp') missing.append(filename) continue util.rename(storefilename + '.tmp', storefilename) lfutil.linktousercache(self.repo, hash) success.append((filename, hhash)) ui.progress(_('getting largefiles'), None) return (success, missing) def verify(self, revs, contents=False): '''Verify the existence (and, optionally, contents) of every big file revision referenced by every changeset in revs. 
Return 0 if all is well, non-zero on any errors.''' failed = False self.ui.status(_('searching %d changesets for largefiles\n') % len(revs)) verified = set() # set of (filename, filenode) tuples for rev in revs: cctx = self.repo[rev] cset = "%d:%s" % (cctx.rev(), node.short(cctx.node())) for standin in cctx: if self._verifyfile(cctx, cset, contents, standin, verified): failed = True numrevs = len(verified) numlfiles = len(set([fname for (fname, fnode) in verified])) if contents: self.ui.status( _('verified contents of %d revisions of %d largefiles\n') % (numrevs, numlfiles)) else: self.ui.status( _('verified existence of %d revisions of %d largefiles\n') % (numrevs, numlfiles)) return int(failed) def _getfile(self, tmpfile, filename, hash): '''Fetch one revision of one file from the store and write it to tmpfile. Compute the hash of the file on-the-fly as it downloads and return the hash. Close tmpfile. Raise StoreError if unable to download the file (e.g. it does not exist in the store).''' raise NotImplementedError('abstract method') def _verifyfile(self, cctx, cset, contents, standin, verified): '''Perform the actual verification of a file in the store. 'cset' is only used in warnings. 'contents' controls verification of content hash. 'standin' is the standin path of the largefile to verify. 'verified' is maintained as a set of already verified files. Returns _true_ if it is a standin and any problems are found! ''' raise NotImplementedError('abstract method') import localstore, wirestore _storeprovider = { 'file': [localstore.localstore], 'http': [wirestore.wirestore], 'https': [wirestore.wirestore], 'ssh': [wirestore.wirestore], } _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') # During clone this function is passed the src's ui object # but it needs the dest's ui object so it can read out of # the config file. Use repo.ui instead. def _openstore(repo, remote=None, put=False): ui = repo.ui if not remote: lfpullsource = getattr(repo, 'lfpullsource', None) if lfpullsource: path = ui.expandpath(lfpullsource) else: path = ui.expandpath('default-push', 'default') # ui.expandpath() leaves 'default-push' and 'default' alone if # they cannot be expanded: fallback to the empty string, # meaning the current directory. if path == 'default-push' or path == 'default': path = '' remote = repo else: path, _branches = hg.parseurl(path) remote = hg.peer(repo, {}, path) # The path could be a scheme so use Mercurial's normal functionality # to resolve the scheme to a repository and use its path path = util.safehasattr(remote, 'url') and remote.url() or remote.path match = _scheme_re.match(path) if not match: # regular filesystem path scheme = 'file' else: scheme = match.group(1) try: storeproviders = _storeprovider[scheme] except KeyError: raise util.Abort(_('unsupported URL scheme %r') % scheme) for classobj in storeproviders: try: return classobj(ui, repo, remote) except lfutil.storeprotonotcapable: pass raise util.Abort(_('%s does not appear to be a largefile store') % path)
apache-2.0
5,204,586,008,958,776,000
-9,121,078,576,154,504,000
36
80
0.592114
false
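_openstore() at the bottom of basestore.py is a scheme-to-class dispatch: _scheme_re peels the scheme off the URL, plain filesystem paths fall back to 'file', and the first capable class registered in _storeprovider wins. The pattern isolated as a runnable sketch (the store classes here are stand-ins, not the real largefiles stores):

    import re

    _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')  # same regex as basestore

    class FileStore(object):
        def __init__(self, path):  # stand-in for localstore.localstore
            self.path = path

    class WireStore(object):
        def __init__(self, path):  # stand-in for wirestore.wirestore
            self.path = path

    _storeprovider = {
        'file': [FileStore],
        'http': [WireStore],
        'https': [WireStore],
        'ssh': [WireStore],
    }

    def open_store(path):
        m = _scheme_re.match(path)
        scheme = m.group(1) if m else 'file'  # bare paths mean 'file'
        for cls in _storeprovider[scheme]:
            return cls(path)

    print(type(open_store('https://example.com/repo')).__name__)  # WireStore
    print(type(open_store('/srv/largefiles')).__name__)           # FileStore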
zenodo/invenio
invenio/modules/deposit/types/simplerecord.py
1
3085
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. from flask_login import current_user from flask import render_template from invenio.modules.deposit.models import DepositionType, Deposition from invenio.modules.formatter import format_record from invenio.modules.deposit.tasks import render_form, \ create_recid, \ prepare_sip, \ finalize_record_sip, \ upload_record_sip, \ prefill_draft, \ process_sip_metadata, \ hold_for_approval class SimpleRecordDeposition(DepositionType): """Simple record submission - no support for editing nor REST API.""" workflow = [ # Pre-fill draft with values passed in from request prefill_draft(draft_id='default'), # Render form and wait for user to submit render_form(draft_id='default'), # Create the submission information package by merging form data # from all drafts (in this case only one draft exists). prepare_sip(), # Process metadata to match your JSONAlchemy record model. This will # call process_sip_metadata() on your subclass. process_sip_metadata(), # Reserve a new record id, so that we can provide proper feedback to # user before the record has been uploaded. create_recid(), # Generate MARC based on metadata dictionary. finalize_record_sip(is_dump=False), # Hold the deposition for admin approval hold_for_approval(), # Seal the SIP and write MARCXML file and call bibupload on it upload_record_sip(), ] hold_for_upload = False @classmethod def render_completed(cls, d): """Page to render when deposition was successfully completed.""" ctx = dict( deposition=d, deposition_type=( None if d.type.is_default() else d.type.get_identifier() ), uuid=d.id, my_depositions=list(Deposition.get_depositions( current_user, type=d.type )), sip=d.get_latest_sip(), format_record=format_record, ) return render_template('deposit/completed.html', **ctx) @classmethod def process_sip_metadata(cls, deposition, metadata): """Implement this method in your subclass to process metadata prior to MARC generation.""" pass
gpl-2.0
-7,493,524,908,069,554,000
2,889,514,700,468,897,000
35.72619
98
0.667099
false
takeflight/cookiecutter
tests/replay/test_load.py
8
1855
# -*- coding: utf-8 -*- """ test_load ----------- """ import json import os import pytest from cookiecutter import replay @pytest.fixture def template_name(): """Fixture to return a valid template_name.""" return 'cookiedozer_load' @pytest.fixture def replay_file(replay_test_dir, template_name): """Fixture to return an actual file name of the dump.""" file_name = '{}.json'.format(template_name) return os.path.join(replay_test_dir, file_name) def test_type_error_if_no_template_name(): """Test that replay.load raises if the template_name is not a valid str.""" with pytest.raises(TypeError): replay.load(None) def test_value_error_if_key_missing_in_context(mocker): """Test that replay.load raises if the loaded context does not contain 'cookiecutter'. """ with pytest.raises(ValueError): replay.load('invalid_replay') def test_io_error_if_no_replay_file(mocker, mock_user_config): """Test that replay.load raises if it cannot find a replay file.""" with pytest.raises(IOError): replay.load('no_replay') def test_run_json_load(mocker, mock_user_config, template_name, context, replay_test_dir, replay_file): """Test that replay.load runs json.load under the hood and that the context is correctly loaded from the file in replay_dir. """ spy_get_replay_file = mocker.spy(replay, 'get_file_name') mock_json_load = mocker.patch('json.load', side_effect=json.load) loaded_context = replay.load(template_name) assert mock_user_config.call_count == 1 spy_get_replay_file.assert_called_once_with(replay_test_dir, template_name) assert mock_json_load.call_count == 1 (infile_handler,), kwargs = mock_json_load.call_args assert infile_handler.name == replay_file assert loaded_context == context
bsd-3-clause
5,209,934,959,585,989,000
-7,587,250,276,679,832,000
27.538462
79
0.679245
false
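test_value_error_if_key_missing_in_context pins down the on-disk contract: a replay file is plain JSON whose top level must contain a 'cookiecutter' key. A hand-rolled sketch of that file shape (the fields inside the context are invented):

    import json

    context = {
        "cookiecutter": {                   # mandatory top-level key
            "project_name": "cookiedozer",  # hypothetical template fields
            "version": "0.1.0",
        }
    }

    with open("cookiedozer_load.json", "w") as f:
        json.dump(context, f)

    with open("cookiedozer_load.json") as f:
        loaded = json.load(f)

    assert "cookiecutter" in loaded  # the invariant replay.load enforces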
gonboy/python-for-android
src/buildlib/jinja2.egg/jinja2/exceptions.py
17
4424
# -*- coding: utf-8 -*- """ jinja2.exceptions ~~~~~~~~~~~~~~~~~ Jinja exceptions. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ from jinja2._compat import imap, text_type, PY2, implements_to_string class TemplateError(Exception): """Baseclass for all template errors.""" if PY2: def __init__(self, message=None): if message is not None: message = text_type(message).encode('utf-8') Exception.__init__(self, message) @property def message(self): if self.args: message = self.args[0] if message is not None: return message.decode('utf-8', 'replace') def __unicode__(self): return self.message or '' else: def __init__(self, message=None): Exception.__init__(self, message) @property def message(self): if self.args: message = self.args[0] if message is not None: return message @implements_to_string class TemplateNotFound(IOError, LookupError, TemplateError): """Raised if a template does not exist.""" # looks weird, but removes the warning descriptor that just # bogusly warns us about message being deprecated message = None def __init__(self, name, message=None): IOError.__init__(self) if message is None: message = name self.message = message self.name = name self.templates = [name] def __str__(self): return self.message class TemplatesNotFound(TemplateNotFound): """Like :class:`TemplateNotFound` but raised if multiple templates are selected. This is a subclass of :class:`TemplateNotFound` exception, so just catching the base exception will catch both. .. versionadded:: 2.2 """ def __init__(self, names=(), message=None): if message is None: message = 'none of the templates given were found: ' + \ ', '.join(imap(text_type, names)) TemplateNotFound.__init__(self, names and names[-1] or None, message) self.templates = list(names) @implements_to_string class TemplateSyntaxError(TemplateError): """Raised to tell the user that there is a problem with the template.""" def __init__(self, message, lineno, name=None, filename=None): TemplateError.__init__(self, message) self.lineno = lineno self.name = name self.filename = filename self.source = None # this is set to True if the debug.translate_syntax_error # function translated the syntax error into a new traceback self.translated = False def __str__(self): # for translated errors we only return the message if self.translated: return self.message # otherwise attach some stuff location = 'line %d' % self.lineno name = self.filename or self.name if name: location = 'File "%s", %s' % (name, location) lines = [self.message, ' ' + location] # if the source is set, add the line to the output if self.source is not None: try: line = self.source.splitlines()[self.lineno - 1] except IndexError: line = None if line: lines.append(' ' + line.strip()) return '\n'.join(lines) class TemplateAssertionError(TemplateSyntaxError): """Like a template syntax error, but covers cases where something in the template caused an error at compile time that wasn't necessarily caused by a syntax error. However it's a direct subclass of :exc:`TemplateSyntaxError` and has the same attributes. """ class TemplateRuntimeError(TemplateError): """A generic runtime error in the template engine. Under some situations Jinja may raise this exception. """ class UndefinedError(TemplateRuntimeError): """Raised if a template tries to operate on :class:`Undefined`.""" class SecurityError(TemplateRuntimeError): """Raised if a template tries to do something insecure if the sandbox is enabled. 
""" class FilterArgumentError(TemplateRuntimeError): """This error is raised if a filter was called with inappropriate arguments """
mit
2,734,954,461,413,000,700
-8,355,333,525,805,452,000
29.30137
77
0.607595
false
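Because TemplatesNotFound subclasses TemplateNotFound (which itself mixes in IOError and LookupError), one except clause covers both the single- and multi-template lookup failures. A short illustration, assuming a template directory that lacks both files:

    from jinja2 import Environment, FileSystemLoader, TemplateNotFound

    env = Environment(loader=FileSystemLoader("templates"))  # hypothetical dir

    try:
        # select_template() raises TemplatesNotFound when every candidate
        # is missing; get_template() raises plain TemplateNotFound.
        env.select_template(["missing_a.html", "missing_b.html"])
    except TemplateNotFound as exc:
        # Catches both, since TemplatesNotFound is a subclass.
        print(exc.templates)  # ['missing_a.html', 'missing_b.html']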
uclouvain/osis
ddd/logic/application/use_case/read/get_attributions_about_to_expire_service.py
1
3140
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## from typing import List from ddd.logic.application.commands import GetAttributionsAboutToExpireCommand from ddd.logic.application.domain.builder.applicant_identity_builder import ApplicantIdentityBuilder from ddd.logic.application.domain.service.attribution_about_to_expire_renew import AttributionAboutToExpireRenew from ddd.logic.application.domain.service.i_learning_unit_service import ILearningUnitService from ddd.logic.application.dtos import AttributionAboutToExpireDTO from ddd.logic.application.repository.i_applicant_respository import IApplicantRepository from ddd.logic.application.repository.i_application_calendar_repository import IApplicationCalendarRepository from ddd.logic.application.repository.i_application_repository import IApplicationRepository from ddd.logic.application.repository.i_vacant_course_repository import IVacantCourseRepository def get_attributions_about_to_expire( cmd: GetAttributionsAboutToExpireCommand, application_repository: IApplicationRepository, application_calendar_repository: IApplicationCalendarRepository, applicant_repository: IApplicantRepository, vacant_course_repository: IVacantCourseRepository, learning_unit_service: ILearningUnitService, ) -> List[AttributionAboutToExpireDTO]: # Given application_calendar = application_calendar_repository.get_current_application_calendar() applicant_id = ApplicantIdentityBuilder.build_from_global_id(global_id=cmd.global_id) applicant = applicant_repository.get(applicant_id) all_existing_applications = application_repository.search(global_id=cmd.global_id) return AttributionAboutToExpireRenew.get_list_with_renewal_availability( application_calendar, applicant, all_existing_applications, vacant_course_repository, learning_unit_service )
agpl-3.0
150,751,103,776,135,580
2,411,707,828,162,534,000
52.20339
112
0.745142
false
sebrandon1/nova
nova/tests/unit/api/openstack/compute/test_flavorextradata.py
1
3374
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from nova import test from nova.tests.unit.api.openstack import fakes class FlavorExtraDataTestV21(test.NoDBTestCase): base_url = '/v2/fake/flavors' def setUp(self): super(FlavorExtraDataTestV21, self).setUp() fakes.stub_out_flavor_get_all(self) fakes.stub_out_flavor_get_by_flavor_id(self) @property def app(self): return fakes.wsgi_app_v21(init_only=('flavors')) def _verify_flavor_response(self, flavor, expected): for key in expected: self.assertEqual(flavor[key], expected[key]) def test_show(self): expected = { 'flavor': { 'id': fakes.FLAVORS['1'].flavorid, 'name': fakes.FLAVORS['1'].name, 'ram': fakes.FLAVORS['1'].memory_mb, 'vcpus': fakes.FLAVORS['1'].vcpus, 'disk': fakes.FLAVORS['1'].root_gb, 'OS-FLV-EXT-DATA:ephemeral': fakes.FLAVORS['1'].ephemeral_gb, } } url = self.base_url + '/1' req = fakes.HTTPRequest.blank(url) req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) body = jsonutils.loads(res.body) self._verify_flavor_response(body['flavor'], expected['flavor']) def test_detail(self): expected = [ { 'id': fakes.FLAVORS['1'].flavorid, 'name': fakes.FLAVORS['1'].name, 'ram': fakes.FLAVORS['1'].memory_mb, 'vcpus': fakes.FLAVORS['1'].vcpus, 'disk': fakes.FLAVORS['1'].root_gb, 'OS-FLV-EXT-DATA:ephemeral': fakes.FLAVORS['1'].ephemeral_gb, 'rxtx_factor': fakes.FLAVORS['1'].rxtx_factor or u'', 'os-flavor-access:is_public': fakes.FLAVORS['1'].is_public, }, { 'id': fakes.FLAVORS['2'].flavorid, 'name': fakes.FLAVORS['2'].name, 'ram': fakes.FLAVORS['2'].memory_mb, 'vcpus': fakes.FLAVORS['2'].vcpus, 'disk': fakes.FLAVORS['2'].root_gb, 'OS-FLV-EXT-DATA:ephemeral': fakes.FLAVORS['2'].ephemeral_gb, 'rxtx_factor': fakes.FLAVORS['2'].rxtx_factor or u'', 'os-flavor-access:is_public': fakes.FLAVORS['2'].is_public, }, ] url = self.base_url + '/detail' req = fakes.HTTPRequest.blank(url) req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) body = jsonutils.loads(res.body) for i, flavor in enumerate(body['flavors']): self._verify_flavor_response(flavor, expected[i])
apache-2.0
5,426,089,876,057,535,000
956,698,406,237,218,800
37.781609
78
0.576763
false
jashank/rust
src/etc/unicode.py
15
24287
#!/usr/bin/env python # # Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution and at # http://rust-lang.org/COPYRIGHT. # # Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or # http://www.apache.org/licenses/LICENSE-2.0> or the MIT license # <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your # option. This file may not be copied, modified, or distributed # except according to those terms. # This script uses the following Unicode tables: # - DerivedCoreProperties.txt # - DerivedNormalizationProps.txt # - EastAsianWidth.txt # - auxiliary/GraphemeBreakProperty.txt # - PropList.txt # - ReadMe.txt # - Scripts.txt # - UnicodeData.txt # # Since this should not require frequent updates, we just store this # out-of-line and check the unicode.rs file into git. import fileinput, re, os, sys, operator preamble = '''// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly #![allow(missing_docs, non_upper_case_globals, non_snake_case)] ''' # Mapping taken from Table 12 from: # http://www.unicode.org/reports/tr44/#General_Category_Values expanded_categories = { 'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'], 'Lm': ['L'], 'Lo': ['L'], 'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'], 'Nd': ['N'], 'Nl': ['N'], 'No': ['No'], 'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'], 'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'], 'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'], 'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'], 'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'], } # these are the surrogate codepoints, which are not valid rust characters surrogate_codepoints = (0xd800, 0xdfff) def fetch(f): if not os.path.exists(os.path.basename(f)): os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s" % f) if not os.path.exists(os.path.basename(f)): sys.stderr.write("cannot load %s" % f) exit(1) def is_surrogate(n): return surrogate_codepoints[0] <= n <= surrogate_codepoints[1] def load_unicode_data(f): fetch(f) gencats = {} to_lower = {} to_upper = {} to_title = {} combines = {} canon_decomp = {} compat_decomp = {} udict = {}; range_start = -1; for line in fileinput.input(f): data = line.split(';'); if len(data) != 15: continue cp = int(data[0], 16); if is_surrogate(cp): continue if range_start >= 0: for i in xrange(range_start, cp): udict[i] = data; range_start = -1; if data[1].endswith(", First>"): range_start = cp; continue; udict[cp] = data; for code in udict: [code_org, name, gencat, combine, bidi, decomp, deci, digit, num, mirror, old, iso, upcase, lowcase, titlecase ] = udict[code]; # generate char to char direct common and simple conversions # uppercase to lowercase if lowcase != "" and code_org != lowcase: to_lower[code] = (int(lowcase, 16), 0, 0) # lowercase to uppercase if upcase != "" and code_org != upcase: to_upper[code] = (int(upcase, 16), 0, 0) # title case if titlecase.strip() != "" and code_org != titlecase: to_title[code] = (int(titlecase, 16), 0, 0) # store decomposition, if given if decomp != "": if 
decomp.startswith('<'): seq = [] for i in decomp.split()[1:]: seq.append(int(i, 16)) compat_decomp[code] = seq else: seq = [] for i in decomp.split(): seq.append(int(i, 16)) canon_decomp[code] = seq # place letter in categories as appropriate for cat in [gencat, "Assigned"] + expanded_categories.get(gencat, []): if cat not in gencats: gencats[cat] = [] gencats[cat].append(code) # record combining class, if any if combine != "0": if combine not in combines: combines[combine] = [] combines[combine].append(code) # generate Not_Assigned from Assigned gencats["Cn"] = gen_unassigned(gencats["Assigned"]) # Assigned is not a real category del(gencats["Assigned"]) # Other contains Not_Assigned gencats["C"].extend(gencats["Cn"]) gencats = group_cats(gencats) combines = to_combines(group_cats(combines)) return (canon_decomp, compat_decomp, gencats, combines, to_upper, to_lower, to_title) def load_special_casing(f, to_upper, to_lower, to_title): fetch(f) for line in fileinput.input(f): data = line.split('#')[0].split(';') if len(data) == 5: code, lower, title, upper, _comment = data elif len(data) == 6: code, lower, title, upper, condition, _comment = data if condition.strip(): # Only keep unconditional mappins continue else: continue code = code.strip() lower = lower.strip() title = title.strip() upper = upper.strip() key = int(code, 16) for (map_, values) in [(to_lower, lower), (to_upper, upper), (to_title, title)]: if values != code: values = [int(i, 16) for i in values.split()] for _ in range(len(values), 3): values.append(0) assert len(values) == 3 map_[key] = values def group_cats(cats): cats_out = {} for cat in cats: cats_out[cat] = group_cat(cats[cat]) return cats_out def group_cat(cat): cat_out = [] letters = sorted(set(cat)) cur_start = letters.pop(0) cur_end = cur_start for letter in letters: assert letter > cur_end, \ "cur_end: %s, letter: %s" % (hex(cur_end), hex(letter)) if letter == cur_end + 1: cur_end = letter else: cat_out.append((cur_start, cur_end)) cur_start = cur_end = letter cat_out.append((cur_start, cur_end)) return cat_out def ungroup_cat(cat): cat_out = [] for (lo, hi) in cat: while lo <= hi: cat_out.append(lo) lo += 1 return cat_out def gen_unassigned(assigned): assigned = set(assigned) return ([i for i in range(0, 0xd800) if i not in assigned] + [i for i in range(0xe000, 0x110000) if i not in assigned]) def to_combines(combs): combs_out = [] for comb in combs: for (lo, hi) in combs[comb]: combs_out.append((lo, hi, comb)) combs_out.sort(key=lambda comb: comb[0]) return combs_out def format_table_content(f, content, indent): line = " "*indent first = True for chunk in content.split(","): if len(line) + len(chunk) < 98: if first: line += chunk else: line += ", " + chunk first = False else: f.write(line + ",\n") line = " "*indent + chunk f.write(line) def load_properties(f, interestingprops): fetch(f) props = {} re1 = re.compile("^ *([0-9A-F]+) *; *(\w+)") re2 = re.compile("^ *([0-9A-F]+)\.\.([0-9A-F]+) *; *(\w+)") for line in fileinput.input(os.path.basename(f)): prop = None d_lo = 0 d_hi = 0 m = re1.match(line) if m: d_lo = m.group(1) d_hi = m.group(1) prop = m.group(2) else: m = re2.match(line) if m: d_lo = m.group(1) d_hi = m.group(2) prop = m.group(3) else: continue if interestingprops and prop not in interestingprops: continue d_lo = int(d_lo, 16) d_hi = int(d_hi, 16) if prop not in props: props[prop] = [] props[prop].append((d_lo, d_hi)) # optimize if possible for prop in props: props[prop] = group_cat(ungroup_cat(props[prop])) return props # load all widths of 
want_widths, except those in except_cats def load_east_asian_width(want_widths, except_cats): f = "EastAsianWidth.txt" fetch(f) widths = {} re1 = re.compile("^([0-9A-F]+);(\w+) +# (\w+)") re2 = re.compile("^([0-9A-F]+)\.\.([0-9A-F]+);(\w+) +# (\w+)") for line in fileinput.input(f): width = None d_lo = 0 d_hi = 0 cat = None m = re1.match(line) if m: d_lo = m.group(1) d_hi = m.group(1) width = m.group(2) cat = m.group(3) else: m = re2.match(line) if m: d_lo = m.group(1) d_hi = m.group(2) width = m.group(3) cat = m.group(4) else: continue if cat in except_cats or width not in want_widths: continue d_lo = int(d_lo, 16) d_hi = int(d_hi, 16) if width not in widths: widths[width] = [] widths[width].append((d_lo, d_hi)) return widths def escape_char(c): return "'\\u{%x}'" % c if c != 0 else "'\\0'" def emit_bsearch_range_table(f): f.write(""" fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool { use core::cmp::Ordering::{Equal, Less, Greater}; use core::slice::SliceExt; r.binary_search_by(|&(lo,hi)| { if lo <= c && c <= hi { Equal } else if hi < c { Less } else { Greater } }).is_ok() }\n """) def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True, pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1]))): pub_string = "" if is_pub: pub_string = "pub " f.write(" %sconst %s: %s = &[\n" % (pub_string, name, t_type)) data = "" first = True for dat in t_data: if not first: data += "," first = False data += pfun(dat) format_table_content(f, data, 8) f.write("\n ];\n\n") def emit_property_module(f, mod, tbl, emit): f.write("pub mod %s {\n" % mod) for cat in sorted(emit): emit_table(f, "%s_table" % cat, tbl[cat]) f.write(" pub fn %s(c: char) -> bool {\n" % cat) f.write(" super::bsearch_range_table(c, %s_table)\n" % cat) f.write(" }\n\n") f.write("}\n\n") def emit_conversions_module(f, to_upper, to_lower, to_title): f.write("pub mod conversions {") f.write(""" use core::cmp::Ordering::{Equal, Less, Greater}; use core::slice::SliceExt; use core::option::Option; use core::option::Option::{Some, None}; use core::result::Result::{Ok, Err}; pub fn to_lower(c: char) -> [char; 3] { match bsearch_case_table(c, to_lowercase_table) { None => [c, '\\0', '\\0'], Some(index) => to_lowercase_table[index].1 } } pub fn to_upper(c: char) -> [char; 3] { match bsearch_case_table(c, to_uppercase_table) { None => [c, '\\0', '\\0'], Some(index) => to_uppercase_table[index].1 } } fn bsearch_case_table(c: char, table: &'static [(char, [char; 3])]) -> Option<usize> { match table.binary_search_by(|&(key, _)| { if c == key { Equal } else if key < c { Less } else { Greater } }) { Ok(i) => Some(i), Err(_) => None, } } """) t_type = "&'static [(char, [char; 3])]" pfun = lambda x: "(%s,[%s,%s,%s])" % ( escape_char(x[0]), escape_char(x[1][0]), escape_char(x[1][1]), escape_char(x[1][2])) emit_table(f, "to_lowercase_table", sorted(to_lower.iteritems(), key=operator.itemgetter(0)), is_pub=False, t_type = t_type, pfun=pfun) emit_table(f, "to_uppercase_table", sorted(to_upper.iteritems(), key=operator.itemgetter(0)), is_pub=False, t_type = t_type, pfun=pfun) f.write("}\n\n") def emit_grapheme_module(f, grapheme_table, grapheme_cats): f.write("""pub mod grapheme { use core::slice::SliceExt; pub use self::GraphemeCat::*; use core::result::Result::{Ok, Err}; #[allow(non_camel_case_types)] #[derive(Clone, Copy)] pub enum GraphemeCat { """) for cat in grapheme_cats + ["Any"]: f.write(" GC_" + cat + ",\n") f.write(""" } fn bsearch_range_value_table(c: char, r: &'static [(char, char, GraphemeCat)]) -> 
GraphemeCat { use core::cmp::Ordering::{Equal, Less, Greater}; match r.binary_search_by(|&(lo, hi, _)| { if lo <= c && c <= hi { Equal } else if hi < c { Less } else { Greater } }) { Ok(idx) => { let (_, _, cat) = r[idx]; cat } Err(_) => GC_Any } } pub fn grapheme_category(c: char) -> GraphemeCat { bsearch_range_value_table(c, grapheme_cat_table) } """) emit_table(f, "grapheme_cat_table", grapheme_table, "&'static [(char, char, GraphemeCat)]", pfun=lambda x: "(%s,%s,GC_%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]), is_pub=False) f.write("}\n") def emit_charwidth_module(f, width_table): f.write("pub mod charwidth {\n") f.write(" use core::option::Option;\n") f.write(" use core::option::Option::{Some, None};\n") f.write(" use core::slice::SliceExt;\n") f.write(" use core::result::Result::{Ok, Err};\n") f.write(""" fn bsearch_range_value_table(c: char, is_cjk: bool, r: &'static [(char, char, u8, u8)]) -> u8 { use core::cmp::Ordering::{Equal, Less, Greater}; match r.binary_search_by(|&(lo, hi, _, _)| { if lo <= c && c <= hi { Equal } else if hi < c { Less } else { Greater } }) { Ok(idx) => { let (_, _, r_ncjk, r_cjk) = r[idx]; if is_cjk { r_cjk } else { r_ncjk } } Err(_) => 1 } } """) f.write(""" pub fn width(c: char, is_cjk: bool) -> Option<usize> { match c as usize { _c @ 0 => Some(0), // null is zero width cu if cu < 0x20 => None, // control sequences have no width cu if cu < 0x7F => Some(1), // ASCII cu if cu < 0xA0 => None, // more control sequences _ => Some(bsearch_range_value_table(c, is_cjk, charwidth_table) as usize) } } """) f.write(" // character width table. Based on Markus Kuhn's free wcwidth() implementation,\n") f.write(" // http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c\n") emit_table(f, "charwidth_table", width_table, "&'static [(char, char, u8, u8)]", is_pub=False, pfun=lambda x: "(%s,%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2], x[3])) f.write("}\n\n") def emit_norm_module(f, canon, compat, combine, norm_props): canon_keys = canon.keys() canon_keys.sort() compat_keys = compat.keys() compat_keys.sort() canon_comp = {} comp_exclusions = norm_props["Full_Composition_Exclusion"] for char in canon_keys: if True in map(lambda (lo, hi): lo <= char <= hi, comp_exclusions): continue decomp = canon[char] if len(decomp) == 2: if not canon_comp.has_key(decomp[0]): canon_comp[decomp[0]] = [] canon_comp[decomp[0]].append( (decomp[1], char) ) canon_comp_keys = canon_comp.keys() canon_comp_keys.sort() f.write("pub mod normalization {\n") def mkdata_fun(table): def f(char): data = "(%s,&[" % escape_char(char) first = True for d in table[char]: if not first: data += "," first = False data += escape_char(d) data += "])" return data return f f.write(" // Canonical decompositions\n") emit_table(f, "canonical_table", canon_keys, "&'static [(char, &'static [char])]", pfun=mkdata_fun(canon)) f.write(" // Compatibility decompositions\n") emit_table(f, "compatibility_table", compat_keys, "&'static [(char, &'static [char])]", pfun=mkdata_fun(compat)) def comp_pfun(char): data = "(%s,&[" % escape_char(char) canon_comp[char].sort(lambda x, y: x[0] - y[0]) first = True for pair in canon_comp[char]: if not first: data += "," first = False data += "(%s,%s)" % (escape_char(pair[0]), escape_char(pair[1])) data += "])" return data f.write(" // Canonical compositions\n") emit_table(f, "composition_table", canon_comp_keys, "&'static [(char, &'static [(char, char)])]", pfun=comp_pfun) f.write(""" fn bsearch_range_value_table(c: char, r: &'static [(char, char, u8)]) -> u8 { use 
core::cmp::Ordering::{Equal, Less, Greater}; use core::slice::SliceExt; use core::result::Result::{Ok, Err}; match r.binary_search_by(|&(lo, hi, _)| { if lo <= c && c <= hi { Equal } else if hi < c { Less } else { Greater } }) { Ok(idx) => { let (_, _, result) = r[idx]; result } Err(_) => 0 } }\n """) emit_table(f, "combining_class_table", combine, "&'static [(char, char, u8)]", is_pub=False, pfun=lambda x: "(%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2])) f.write(""" #[deprecated(reason = "use the crates.io `unicode-normalization` lib instead", since = "1.0.0")] #[unstable(feature = "unicode", reason = "this functionality will be moved to crates.io")] pub fn canonical_combining_class(c: char) -> u8 { bsearch_range_value_table(c, combining_class_table) } } """) def remove_from_wtable(wtable, val): wtable_out = [] while wtable: if wtable[0][1] < val: wtable_out.append(wtable.pop(0)) elif wtable[0][0] > val: break else: (wt_lo, wt_hi, width, width_cjk) = wtable.pop(0) if wt_lo == wt_hi == val: continue elif wt_lo == val: wtable_out.append((wt_lo+1, wt_hi, width, width_cjk)) elif wt_hi == val: wtable_out.append((wt_lo, wt_hi-1, width, width_cjk)) else: wtable_out.append((wt_lo, val-1, width, width_cjk)) wtable_out.append((val+1, wt_hi, width, width_cjk)) if wtable: wtable_out.extend(wtable) return wtable_out def optimize_width_table(wtable): wtable_out = [] w_this = wtable.pop(0) while wtable: if w_this[1] == wtable[0][0] - 1 and w_this[2:3] == wtable[0][2:3]: w_tmp = wtable.pop(0) w_this = (w_this[0], w_tmp[1], w_tmp[2], w_tmp[3]) else: wtable_out.append(w_this) w_this = wtable.pop(0) wtable_out.append(w_this) return wtable_out if __name__ == "__main__": r = "tables.rs" if os.path.exists(r): os.remove(r) with open(r, "w") as rf: # write the file's preamble rf.write(preamble) # download and parse all the data fetch("ReadMe.txt") with open("ReadMe.txt") as readme: pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode" unicode_version = re.search(pattern, readme.read()).groups() rf.write(""" /// The version of [Unicode](http://www.unicode.org/) /// that the unicode parts of `CharExt` and `UnicodeStrPrelude` traits are based on. 
pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s); """ % unicode_version) (canon_decomp, compat_decomp, gencats, combines, to_upper, to_lower, to_title) = load_unicode_data("UnicodeData.txt") load_special_casing("SpecialCasing.txt", to_upper, to_lower, to_title) want_derived = ["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase", "Cased", "Case_Ignorable"] derived = load_properties("DerivedCoreProperties.txt", want_derived) scripts = load_properties("Scripts.txt", []) props = load_properties("PropList.txt", ["White_Space", "Join_Control", "Noncharacter_Code_Point"]) norm_props = load_properties("DerivedNormalizationProps.txt", ["Full_Composition_Exclusion"]) # bsearch_range_table is used in all the property modules below emit_bsearch_range_table(rf) # category tables for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \ ("derived_property", derived, want_derived), \ ("property", props, ["White_Space"]): emit_property_module(rf, name, cat, pfuns) # normalizations and conversions module emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props) emit_conversions_module(rf, to_upper, to_lower, to_title) ### character width module width_table = [] for zwcat in ["Me", "Mn", "Cf"]: width_table.extend(map(lambda (lo, hi): (lo, hi, 0, 0), gencats[zwcat])) width_table.append((4448, 4607, 0, 0)) # get widths, except those that are explicitly marked zero-width above ea_widths = load_east_asian_width(["W", "F", "A"], ["Me", "Mn", "Cf"]) # these are doublewidth for dwcat in ["W", "F"]: width_table.extend(map(lambda (lo, hi): (lo, hi, 2, 2), ea_widths[dwcat])) width_table.extend(map(lambda (lo, hi): (lo, hi, 1, 2), ea_widths["A"])) width_table.sort(key=lambda w: w[0]) # soft hyphen is not zero width in preformatted text; it's used to indicate # a hyphen inserted to facilitate a linebreak. width_table = remove_from_wtable(width_table, 173) # optimize the width table by collapsing adjacent entities when possible width_table = optimize_width_table(width_table) emit_charwidth_module(rf, width_table) ### grapheme cluster module # from http://www.unicode.org/reports/tr29/#Grapheme_Cluster_Break_Property_Values grapheme_cats = load_properties("auxiliary/GraphemeBreakProperty.txt", []) # Control # Note 1: # This category also includes Cs (surrogate codepoints), but Rust's `char`s are # Unicode Scalar Values only, and surrogates are thus invalid `char`s. # Thus, we have to remove Cs from the Control category # Note 2: # 0x0a and 0x0d (CR and LF) are not in the Control category for Graphemes. # However, the Graphemes iterator treats these as a special case, so they # should be included in grapheme_cats["Control"] for our implementation. grapheme_cats["Control"] = group_cat(list( (set(ungroup_cat(grapheme_cats["Control"])) | set(ungroup_cat(grapheme_cats["CR"])) | set(ungroup_cat(grapheme_cats["LF"]))) - set(ungroup_cat([surrogate_codepoints])))) del(grapheme_cats["CR"]) del(grapheme_cats["LF"]) grapheme_table = [] for cat in grapheme_cats: grapheme_table.extend([(x, y, cat) for (x, y) in grapheme_cats[cat]]) grapheme_table.sort(key=lambda w: w[0]) emit_grapheme_module(rf, grapheme_table, grapheme_cats.keys())
apache-2.0
-8,474,609,331,180,762,000
-3,841,783,937,910,036,000
33.64622
100
0.530366
false
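The script in the record above builds Rust lookup tables that are queried with a binary search over sorted, non-overlapping (lo, hi) codepoint ranges. A minimal Python sketch of that lookup idea follows; the sample table is illustrative only, not data emitted by the script:

import bisect

# Sorted, non-overlapping (lo, hi) codepoint ranges, as the generator emits.
WHITE_SPACE_TABLE = [(0x09, 0x0D), (0x20, 0x20), (0x85, 0x85), (0xA0, 0xA0)]

def in_range_table(c, table):
    """Return True if ord(c) falls inside one of the sorted ranges."""
    cp = ord(c)
    # Find the rightmost range whose lower bound is <= cp.
    i = bisect.bisect_right(table, (cp, 0x10FFFF)) - 1
    return i >= 0 and table[i][0] <= cp <= table[i][1]

assert in_range_table(' ', WHITE_SPACE_TABLE)
assert not in_range_table('x', WHITE_SPACE_TABLE)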
riolet/rioauth
provider/pages/login_github.py
1
4700
import web import oauthlib.oauth2.rfc6749 import constants import common import base import logging import pprint from models import oauth_consumer class Login(base.Page): def __init__(self): base.Page.__init__(self, "Riolet Login") self.redirect_uri = unicode(constants.config.get('github', 'redirect_uri')) self.scope = unicode(constants.config.get('github', 'request_scope')) self.oauth = oauth_consumer.Authorization( session=common.session, authorization_url=constants.config.get('github', 'authorization_url'), token_url=constants.config.get('github', 'token_url'), client_id=constants.config.get('github', 'client_id'), client_secret=constants.config.get('github', 'client_secret'), default_redirect_uri=constants.config.get('github', 'redirect_uri'), default_scope_requested=constants.config.get('github', 'request_scope')) def get_token(self): authorization_response = self.uri try: # redirect_uri must match between get_auth_code and get_token. # scope must match between get_auth_code and get_token token = self.oauth.fetch_token(authorization_response, redirect_uri=self.redirect_uri, scope=self.scope) except oauthlib.oauth2.rfc6749.errors.AccessDeniedError: print("Access was denied. Reason unknown.") return False except oauthlib.oauth2.rfc6749.errors.InvalidGrantError: print("Access was denied. Error: Invalid Grant.") return False print("\n\nToken acquired!") pprint.pprint(token) print("") return True def get_auth_code(self): print("redirect_uri is {0}".format(self.redirect_uri)) # redirect_uri must match between get_auth_code and get_token. # scope must match between get_auth_code and get_token authorization_url = self.oauth.get_auth_url(redirect_uri=self.redirect_uri, scope=self.scope) print("redirecting to {0}".format(authorization_url)) self.redirect(authorization_url) def login(self): public_emails = self.oauth.request(constants.config.get('github', 'resource_url')) # Public emails should retrieve a list of dicts of emails addresses: # [{u'email': u'[email protected]', # u'primary': True, # u'verified': True, # u'visibility': u'public'}] if len(public_emails) == 0: return False email = public_emails[0]['email'] for em in public_emails: if em['primary'] is True: email = em['email'] break user = common.users.get_by_email(email) if user is None: # create user for that email! # random password. Nobody should know it, ever. Login is done through GitHub. # If user wants to choose password, they will reset it anyway. user_id = common.users.add(email, common.generate_salt(32), email) user = common.users.get_by_id(user_id) self.user = user return True def GET(self): if 'state' in self.data and 'code' in self.data: print("state and code found. Assuming to be at fetch_token step.") if self.get_token(): print("get_token returned True. setting logged_in to True") success = self.login() if not success: print("should render page with errors: {}".format(self.errors)) self.redirect('/login') common.session['logged_in'] = True common.session['user_id'] = self.user['id'] destination = '/' if 'login_redirect' in common.session: destination = common.session['login_redirect'] self.redirect(destination, absolute=True) else: print("get_token returned False. 
setting logged_in to False") common.session['logged_in'] = False self.redirect('/login') elif 'error' in self.data: print("Error response.\n\t{0}".format(self.data['error'])) if 'error_description' in self.data: print("\t{0}".format(self.data['error_description'])) return common.render.message(error=['Error logging in via GitHub.', 'Error: {}'.format(self.data['error_description'])], buttons=[('Login page', '/logout')]) else: print("begin authentication process.") self.get_auth_code() # this code should be unreachable. self.redirect('/login')
gpl-3.0
5,946,776,637,659,113,000
7,932,335,189,478,631,000
42.119266
169
0.59766
false
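The handler in the record above drives a standard OAuth2 authorization-code flow (get_auth_code redirects out, get_token exchanges the callback for a token). A hedged sketch of the same flow using the third-party requests-oauthlib package; every URL, credential, and scope below is a placeholder, not a value from the record:

from requests_oauthlib import OAuth2Session

client_id = 'my-client-id'            # placeholder
client_secret = 'my-client-secret'    # placeholder
redirect_uri = 'https://example.com/callback'

oauth = OAuth2Session(client_id, redirect_uri=redirect_uri,
                      scope=['user:email'])

# Step 1: send the user to the provider's authorization endpoint.
authorization_url, state = oauth.authorization_url(
    'https://github.com/login/oauth/authorize')

# Step 2: after the redirect back, exchange the code for a token.
# `authorization_response` is the full callback URL the browser hit.
token = oauth.fetch_token(
    'https://github.com/login/oauth/access_token',
    client_secret=client_secret,
    authorization_response='https://example.com/callback?code=...&state=...')

As in the record, the redirect_uri and scope must be identical in both steps or the token exchange is rejected.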
sagark123/coala
tests/results/result_actions/ApplyPatchActionTest.py
16
6917
import unittest import os from os.path import isfile from coala_utils.ContextManagers import make_temp from coalib.results.Diff import Diff from coalib.results.Result import Result from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction from coalib.settings.Section import Section class ApplyPatchActionTest(unittest.TestCase): def test_apply(self): uut = ApplyPatchAction() with make_temp() as f_a, make_temp() as f_b, make_temp() as f_c: file_dict = { f_a: ['1\n', '2\n', '3\n'], f_b: ['1\n', '2\n', '3\n'], f_c: ['1\n', '2\n', '3\n'] } expected_file_dict = { f_a: ['1\n', '3_changed\n'], f_b: ['1\n', '2\n', '3_changed\n'], f_c: ['1\n', '2\n', '3\n'] } file_diff_dict = {} diff = Diff(file_dict[f_a]) diff.delete_line(2) uut.apply_from_section(Result('origin', 'msg', diffs={f_a: diff}), file_dict, file_diff_dict, Section('t')) diff = Diff(file_dict[f_a]) diff.change_line(3, '3\n', '3_changed\n') uut.apply_from_section(Result('origin', 'msg', diffs={f_a: diff}), file_dict, file_diff_dict, Section('t')) diff = Diff(file_dict[f_b]) diff.change_line(3, '3\n', '3_changed\n') uut.apply(Result('origin', 'msg', diffs={f_b: diff}), file_dict, file_diff_dict) for filename in file_diff_dict: file_dict[filename] = file_diff_dict[filename].modified self.assertEqual(file_dict, expected_file_dict) with open(f_a) as fa: self.assertEqual(file_dict[f_a], fa.readlines()) with open(f_b) as fb: self.assertEqual(file_dict[f_b], fb.readlines()) with open(f_c) as fc: # File c is unchanged and should be untouched self.assertEqual([], fc.readlines()) def test_apply_orig_option(self): uut = ApplyPatchAction() with make_temp() as f_a, make_temp() as f_b: file_dict = { f_a: ['1\n', '2\n', '3\n'], f_b: ['1\n', '2\n', '3\n'] } expected_file_dict = { f_a: ['1\n', '2\n', '3_changed\n'], f_b: ['1\n', '2\n', '3_changed\n'] } file_diff_dict = {} diff = Diff(file_dict[f_a]) diff.change_line(3, '3\n', '3_changed\n') uut.apply(Result('origin', 'msg', diffs={f_a: diff}), file_dict, file_diff_dict, no_orig=True) diff = Diff(file_dict[f_b]) diff.change_line(3, '3\n', '3_changed\n') uut.apply(Result('origin', 'msg', diffs={f_b: diff}), file_dict, file_diff_dict, no_orig=False) self.assertFalse(isfile(f_a+'.orig')) self.assertTrue(isfile(f_b+'.orig')) for filename in file_diff_dict: file_dict[filename] = file_diff_dict[filename].modified self.assertEqual(file_dict, expected_file_dict) def test_apply_rename(self): uut = ApplyPatchAction() with make_temp() as f_a: file_dict = {f_a: ['1\n', '2\n', '3\n']} expected_file_dict = {f_a+'.renamed': ['1\n', '2_changed\n', '3_changed\n']} file_diff_dict = {} diff = Diff(file_dict[f_a], rename=f_a+'.renamed') diff.change_line(3, '3\n', '3_changed\n') uut.apply(Result('origin', 'msg', diffs={f_a: diff}), file_dict, file_diff_dict) self.assertTrue(isfile(f_a+'.orig')) self.assertTrue(isfile(f_a+'.renamed')) self.assertFalse(isfile(f_a)) diff = Diff(file_dict[f_a]) diff.change_line(2, '2\n', '2_changed\n') uut.apply(Result('origin', 'msg', diffs={f_a: diff}), file_dict, file_diff_dict) self.assertFalse(isfile(f_a+'.renamed.orig')) file_dict = {f_a+'.renamed': open(f_a+'.renamed').readlines()} self.assertEqual(file_dict, expected_file_dict) # Recreate file so that context manager make_temp() can delete it open(f_a, 'w').close() def test_apply_delete(self): uut = ApplyPatchAction() with make_temp() as f_a: file_dict = {f_a: ['1\n', '2\n', '3\n']} file_diff_dict = {} diff = Diff(file_dict[f_a], delete=True) uut.apply(Result('origin', 'msg', diffs={f_a: diff}), file_dict, file_diff_dict) 
            self.assertFalse(isfile(f_a))
            self.assertTrue(isfile(f_a+'.orig'))
            os.remove(f_a+'.orig')

            diff = Diff(file_dict[f_a])
            diff.change_line(3, '3\n', '3_changed\n')
            uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
                      file_dict,
                      file_diff_dict)
            self.assertFalse(isfile(f_a+'.orig'))
            # Recreate file so that context manager make_temp() can delete it
            open(f_a, 'w').close()

    def test_is_applicable(self):
        diff = Diff(['1\n', '2\n', '3\n'])
        diff.delete_line(2)
        patch_result = Result('', '', diffs={'f': diff})
        self.assertTrue(
            ApplyPatchAction.is_applicable(patch_result, {}, {}))

    def test_is_applicable_conflict(self):
        diff = Diff(['1\n', '2\n', '3\n'])
        diff.add_lines(2, ['a line'])
        conflict_result = Result('', '', diffs={'f': diff})
        # Applying the same diff twice will result in a conflict
        self.assertIn(
            'Two or more patches conflict with each other: ',
            ApplyPatchAction.is_applicable(conflict_result, {}, {'f': diff})
        )

    def test_is_applicable_empty_patch(self):
        diff = Diff([], rename='new_name')
        result = Result('', '', diffs={'f': diff})
        # Two renames do not result in any change
        self.assertEqual(
            ApplyPatchAction.is_applicable(result, {}, {'f': diff}),
            'The given patches do not change anything anymore.'
        )

    def test_is_applicable_without_patch(self):
        result = Result('', '')
        self.assertEqual(
            ApplyPatchAction.is_applicable(result, {}, {}),
            'This result has no patch attached.'
        )
agpl-3.0
2,898,024,168,381,594,600
1,521,443,304,503,431,400
37.642458
78
0.479109
false
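The tests in the record above exercise coala's Diff type by deleting and changing lines, then comparing the modified file contents. The core apply step can be sketched with a toy change map; this is far simpler than coala's Diff, which also supports added lines and renames:

def apply_line_changes(lines, changes):
    """Apply {line_number: new_text_or_None} to a list of lines.

    Line numbers are 1-based; a value of None deletes the line.
    (Toy model only, not coala's actual Diff.)
    """
    result = []
    for nr, line in enumerate(lines, start=1):
        if nr in changes:
            if changes[nr] is not None:
                result.append(changes[nr])
        else:
            result.append(line)
    return result

assert apply_line_changes(['1\n', '2\n', '3\n'],
                          {2: None, 3: '3_changed\n'}) == ['1\n', '3_changed\n']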
selam/retopy
run.py
1
4143
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2015 Timu EREN # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from retopy.server import RetopyServer from retopy.server import Application from retopy.command import CommandHandler from retopy.command import parameter, authenticated from retopy.command import CommandError from tornado.ioloop import IOLoop class MyCounterHandler(CommandHandler): """ Counter methods """ _COUNTERS = {} @staticmethod def _check_and_create_counter(key): if not key in MyCounterHandler._COUNTERS: MyCounterHandler._COUNTERS[key] = 0 @parameter(name="key") def increment(self): """ Increments the number stored at key by one. If the key does not exist, it is set to 0 before performing the operation. """ key = self.get_argument("key") MyCounterHandler._check_and_create_counter(key) MyCounterHandler._COUNTERS[key] += 1 self.write(MyCounterHandler._COUNTERS[key]) @parameter(name="key") def decrement(self): """ Decrements the number stored at key by one. If the key does not exist, it is set to 0 before performing the operation. """ key = self.get_argument("key") MyCounterHandler._check_and_create_counter(key) MyCounterHandler._COUNTERS[key] -= 1 self.write(MyCounterHandler._COUNTERS[key]) @parameter("key") def get(self): """ Get the value of key. If the key does not exist error is returned """ key = self.get_argument("key") if key not in MyCounterHandler._COUNTERS: raise CommandError("%s Not found" % (key,)) self.write(MyCounterHandler._COUNTERS.get(key)) @parameter(name="key") @parameter(name="value", type=int) def set(self): """ Set key to hold the integer value. If key already holds a value, it is overwritten. """ key = self.get_argument("key") MyCounterHandler._check_and_create_counter(key) MyCounterHandler._COUNTERS[key] = self.get_argument("value") self.write("OK") @parameter(name="key") def rem(self): """ Removes the specified keys. A key is ignored if it does not exist. """ key = self.get_argument("key") try: del MyCounterHandler._COUNTERS[key] except KeyError, error: pass self.write("+OK") class MyPingHandler(CommandHandler): @parameter() def ping(self): """Returns PONG. This command is often used to test if a connection is still alive, or to measure latency.""" self.write("+PONG") class MyLoginHandler(CommandHandler): @parameter() @authenticated def auth_test(self): self.write("authorized to run this command") @parameter(name="username") @parameter(name="password") def auth(self): username = self.get_argument("username") password = self.get_argument("password") if not username == u"myusername" and not password == u"mypass": raise CommandError("Wrong username or password") self.command.user = username self.write("+OK") class MyApplication(Application): def __init__(self): handlers = [ (MyCounterHandler,), (MyPingHandler,), (MyLoginHandler,) ] settings = { "default_handlers": True } Application.__init__(self, handlers, **settings) s = RetopyServer(MyApplication()) s.listen(8000) IOLoop.instance().start()
apache-2.0
2,278,790,385,558,525,400
-4,522,351,963,144,450,600
28.805755
117
0.633599
false
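MyCounterHandler in the record above declares its arguments with stacked @parameter decorators. A sketch of how such a decorator can attach argument metadata to a handler method; the attribute name _params is invented for illustration and is not retopy's actual internal:

def parameter(name=None, type=str):
    def decorator(func):
        # Accumulate (name, type) pairs on the function object itself.
        params = getattr(func, '_params', [])
        if name is not None:
            params.append((name, type))
        func._params = params
        return func
    return decorator

class Handler(object):
    @parameter(name='key')
    @parameter(name='value', type=int)
    def set(self):
        pass

# Decorators apply bottom-up, so 'value' is registered before 'key'.
assert [p[0] for p in Handler.set._params] == ['value', 'key']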
Jeebeevee/DouweBot_JJ15
plugins_org/twitter.py
10
3194
import random import re from time import strptime, strftime from urllib import quote from util import hook, http @hook.api_key('twitter') @hook.command def twitter(inp, api_key=None): ".twitter <user>/<user> <n>/<id>/#<search>/#<search> <n> -- " \ "get <user>'s last/<n>th tweet/get tweet <id>/do <search>/get <n>th <search> result" if not isinstance(api_key, dict) or any(key not in api_key for key in ('consumer', 'consumer_secret', 'access', 'access_secret')): return "error: api keys not set" getting_id = False doing_search = False index_specified = False if re.match(r'^\d+$', inp): getting_id = True request_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s" % inp else: try: inp, index = re.split('\s+', inp, 1) index = int(index) index_specified = True except ValueError: index = 0 if index < 0: index = 0 if index >= 20: return 'error: only supports up to the 20th tweet' if re.match(r'^#', inp): doing_search = True request_url = "https://api.twitter.com/1.1/search/tweets.json?q=%s" % quote(inp) else: request_url = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s" % inp try: tweet = http.get_json(request_url, oauth=True, oauth_keys=api_key) except http.HTTPError, e: errors = {400: 'bad request (ratelimited?)', 401: 'unauthorized', 403: 'forbidden', 404: 'invalid user/id', 500: 'twitter is broken', 502: 'twitter is down ("getting upgraded")', 503: 'twitter is overloaded (lol, RoR)', 410: 'twitter shut off api v1.'} if e.code == 404: return 'error: invalid ' + ['username', 'tweet id'][getting_id] if e.code in errors: return 'error: ' + errors[e.code] return 'error: unknown %s' % e.code if doing_search: try: tweet = tweet["statuses"] if not index_specified: index = random.randint(0, len(tweet) - 1) except KeyError: return 'error: no results' if not getting_id: try: tweet = tweet[index] except IndexError: return 'error: not that many tweets found' if 'retweeted_status' in tweet: rt = tweet["retweeted_status"] rt_text = http.unescape(rt["text"]).replace('\n', ' ') text = "RT @%s %s" % (rt["user"]["screen_name"], rt_text) else: text = http.unescape(tweet["text"]).replace('\n', ' ') screen_name = tweet["user"]["screen_name"] time = tweet["created_at"] time = strftime('%Y-%m-%d %H:%M:%S', strptime(time, '%a %b %d %H:%M:%S +0000 %Y')) return "%s \x02%s\x02: %s" % (time, screen_name, text) @hook.api_key('twitter') @hook.regex(r'https?://twitter.com/(#!/)?([_0-9a-zA-Z]+)/status/(\d+)') def show_tweet(match, api_key=None): return twitter(match.group(3), api_key)
unlicense
9,086,104,205,015,216,000
8,910,176,313,806,278,000
33.344086
104
0.533813
false
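The plugin in the record above translates Twitter HTTP failures through a dict of status codes with a fallback for anything unrecognized. That mapping pattern in isolation, with a few codes copied from the record:

ERRORS = {
    400: 'bad request (ratelimited?)',
    401: 'unauthorized',
    404: 'invalid user/id',
    503: 'twitter is overloaded',
}

def describe_http_error(code):
    # dict.get with a default gives the unknown-code fallback in one step.
    return 'error: ' + ERRORS.get(code, 'unknown %s' % code)

assert describe_http_error(401) == 'error: unauthorized'
assert describe_http_error(418) == 'error: unknown 418'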
EqAfrica/machinekit
src/hal/user_comps/hal_storage.py
8
3146
#!/usr/bin/python # encoding: utf-8 """ Storage.py Created by Alexander Rössler on 2015-01-03. """ import time import sys import os import argparse import ConfigParser import hal class Pin: def __init__(self): self.halPin = 0 self.halName = '' self.section = '' self.name = '' self.lastValue = 0.0 def savePins(cfg, filename, pins): for pin in pins: cfg.set(pin.section, pin.name, str(pin.halPin.value)) with open(filename, 'w') as f: cfg.write(f) f.close() def readPins(cfg, filename, pins): cfg.read(filename) for pin in pins: pin.lastValue = float(cfg.get(pin.section, pin.name)) pin.halPin.value = pin.lastValue parser = argparse.ArgumentParser(description='HAL component to store and load values') parser.add_argument('-n', '--name', help='HAL component name', required=True) parser.add_argument('-f', '--file', help='Filename to store values', required=True) parser.add_argument('-x', '--on_exit', help='Save on exit', action='store_true') parser.add_argument('-a', '--autosave', help='Automatically save on value change', action='store_true') parser.add_argument('-l', '--autoload', help='Automatically load the file values', action='store_true') parser.add_argument('-i', '--interval', help='Update interval', default=1.00) args = parser.parse_args() updateInterval = float(args.interval) autosave = args.autosave autoload = args.autoload saveOnExit = args.on_exit filename = args.file loaded = False # Create pins pins = [] if not os.path.isfile(filename): sys.stderr.write('Error: File does not exist.\n'); sys.exit(1) cfg = ConfigParser.ConfigParser() cfg.read(filename) h = hal.component(args.name) for section in cfg.sections(): for item in cfg.items(section): pin = Pin() pin.section = section pin.name = item[0] pin.halName = section.lower() + '.' + item[0].lower() pin.halPin = h.newpin(pin.halName, hal.HAL_FLOAT, hal.HAL_IO) pins.append(pin) halReadTriggerPin = h.newpin("read-trigger", hal.HAL_BIT, hal.HAL_IN) halWriteTriggerPin = h.newpin("write-trigger", hal.HAL_BIT, hal.HAL_IN) h.ready() if autoload: readPins(cfg, filename, pins) loaded = True lastReadTrigger = 0 lastWriteTrigger = 0 try: while (True): if lastReadTrigger ^ halReadTriggerPin.value: lastReadTrigger = halReadTriggerPin.value readPins(cfg, filename, pins) loaded = True if lastWriteTrigger ^ halWriteTriggerPin.value: lastWriteTrigger = halWriteTriggerPin.value savePins(cfg, filename, pins) if autosave and loaded: for pin in pins: if pin.halPin.value != pin.lastValue: pin.lastValue = pin.halPin.value savePins(cfg, filename, pins) time.sleep(updateInterval) except KeyboardInterrupt: if saveOnExit: savePins(cfg, filename, pins) print(("exiting HAL component " + args.name)) h.exit()
lgpl-2.1
-1,038,653,461,772,454,500
-2,362,277,509,517,908,000
27.333333
103
0.628617
false
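The main loop in the record above detects read/write triggers by XOR-ing the last seen pin value with the current one, so either edge of a toggled HAL bit fires exactly once. The edge detector on its own, as a small standalone sketch:

class Trigger(object):
    def __init__(self):
        self.last = 0

    def fired(self, current):
        """Return True exactly once per change of `current` (either edge)."""
        if self.last ^ current:
            self.last = current
            return True
        return False

t = Trigger()
assert not t.fired(0)   # steady state
assert t.fired(1)       # rising edge
assert not t.fired(1)   # steady state
assert t.fired(0)       # falling edge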
rockyzhang/zhangyanhit-python-for-android-mips
python3-alpha/python3-src/Lib/encodings/euc_kr.py
816
1027
# # euc_kr.py: Python Unicode Codec for EUC_KR # # Written by Hye-Shik Chang <[email protected]> # import _codecs_kr, codecs import _multibytecodec as mbc codec = _codecs_kr.getcodec('euc_kr') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='euc_kr', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
apache-2.0
-1,092,890,444,733,620,200
965,353,117,854,477,800
25.333333
74
0.696203
false
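Codec modules like the one in the record above are found through search functions registered with the codecs machinery, each returning a CodecInfo. A sketch of that registration mechanism using a toy codec that wraps rot_13; the codec name my_rot13 is made up for the example:

import codecs

def search(name):
    # Search functions receive the normalized codec name and must return
    # a CodecInfo, or None to let other registered search functions try.
    if name == 'my_rot13':
        return codecs.CodecInfo(
            name='my_rot13',
            encode=lambda s, errors='strict': (codecs.encode(s, 'rot_13'), len(s)),
            decode=lambda s, errors='strict': (codecs.decode(s, 'rot_13'), len(s)),
        )
    return None

codecs.register(search)
assert codecs.encode('hello', 'my_rot13') == 'uryyb'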
ujenmr/ansible
lib/ansible/modules/cloud/vmware/vmware_host_ssl_facts.py
56
4683
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = r''' --- module: vmware_host_ssl_facts short_description: Gather facts of ESXi host system about SSL description: - This module can be used to gather facts of the SSL thumbprint information for a host. version_added: 2.7 author: - Abhijeet Kasurde (@Akasurde) notes: - Tested on vSphere 6.5 requirements: - python >= 2.6 - PyVmomi options: cluster_name: description: - Name of the cluster. - SSL thumbprint information about all ESXi host system in the given cluster will be reported. - If C(esxi_hostname) is not given, this parameter is required. esxi_hostname: description: - ESXi hostname. - SSL thumbprint information of this ESXi host system will be reported. - If C(cluster_name) is not given, this parameter is required. extends_documentation_fragment: vmware.documentation ''' EXAMPLES = r''' - name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster vmware_host_ssl_facts: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' cluster_name: '{{ cluster_name }}' delegate_to: localhost register: all_host_ssl_facts - name: Get SSL Thumbprint info about "{{ esxi_hostname }}" vmware_host_ssl_facts: hostname: "{{ vcenter_server }}" username: "{{ vcenter_user }}" password: "{{ vcenter_pass }}" esxi_hostname: '{{ esxi_hostname }}' register: ssl_facts - set_fact: ssl_thumbprint: "{{ ssl_facts['host_ssl_facts'][esxi_hostname]['ssl_thumbprints'][0] }}" - debug: msg: "{{ ssl_thumbprint }}" - name: Add ESXi Host to vCenter vmware_host: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter_name: '{{ datacenter_name }}' cluster_name: '{{ cluster_name }}' esxi_hostname: '{{ esxi_hostname }}' esxi_username: '{{ esxi_username }}' esxi_password: '{{ esxi_password }}' esxi_ssl_thumbprint: '{{ ssl_thumbprint }}' state: present ''' RETURN = r''' host_ssl_facts: description: - dict with hostname as key and dict with SSL thumbprint related facts returned: facts type: dict sample: { "10.76.33.215": { "owner_tag": "", "principal": "vpxuser", "ssl_thumbprints": [ "E3:E8:A9:20:8D:32:AE:59:C6:8D:A5:91:B0:20:EF:00:A2:7C:27:EE", "F1:AC:DA:6E:D8:1E:37:36:4A:5C:07:E5:04:0B:87:C8:75:FB:42:01" ] } } ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi class VMwareHostSslManager(PyVmomi): def __init__(self, module): super(VMwareHostSslManager, self).__init__(module) cluster_name = self.params.get('cluster_name', None) esxi_host_name = self.params.get('esxi_hostname', None) self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) self.hosts_facts = {} def gather_ssl_facts(self): for host in self.hosts: self.hosts_facts[host.name] = dict(principal='', owner_tag='', ssl_thumbprints=[]) host_ssl_info_mgr = host.config.sslThumbprintInfo if host_ssl_info_mgr: self.hosts_facts[host.name]['principal'] = host_ssl_info_mgr.principal self.hosts_facts[host.name]['owner_tag'] = host_ssl_info_mgr.ownerTag self.hosts_facts[host.name]['ssl_thumbprints'] = [i for i in host_ssl_info_mgr.sslThumbprints] 
self.module.exit_json(changed=False, host_ssl_facts=self.hosts_facts) def main(): argument_spec = vmware_argument_spec() argument_spec.update( cluster_name=dict(type='str'), esxi_hostname=dict(type='str'), ) module = AnsibleModule( argument_spec=argument_spec, required_one_of=[ ['cluster_name', 'esxi_hostname'], ], supports_check_mode=True, ) vmware_host_accept_config = VMwareHostSslManager(module) vmware_host_accept_config.gather_ssl_facts() if __name__ == "__main__": main()
gpl-3.0
6,401,492,415,284,545,000
4,238,119,893,183,620,000
31.075342
110
0.620542
false
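The module in the record above follows the usual Ansible pattern: build an argument spec, instantiate AnsibleModule, gather data without changing anything, and report through exit_json. A bare-bones skeleton of that pattern; the option name and returned fact are illustrative, not part of the record:

from ansible.module_utils.basic import AnsibleModule

def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
        ),
        supports_check_mode=True,  # safe: this module only reads
    )
    # Gather "facts" without touching the target system.
    facts = {'greeting': 'hello %s' % module.params['name']}
    module.exit_json(changed=False, ansible_facts=facts)

if __name__ == '__main__':
    main()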
Sonicbids/django
django/contrib/gis/geos/linestring.py
9
5808
from django.contrib.gis.geos.base import numpy from django.contrib.gis.geos.coordseq import GEOSCoordSeq from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.geometry import GEOSGeometry from django.contrib.gis.geos.point import Point from django.contrib.gis.geos import prototypes as capi from django.utils.six.moves import range class LineString(GEOSGeometry): _init_func = capi.create_linestring _minlength = 2 #### Python 'magic' routines #### def __init__(self, *args, **kwargs): """ Initializes on the given sequence -- may take lists, tuples, NumPy arrays of X,Y pairs, or Point objects. If Point objects are used, ownership is _not_ transferred to the LineString object. Examples: ls = LineString((1, 1), (2, 2)) ls = LineString([(1, 1), (2, 2)]) ls = LineString(array([(1, 1), (2, 2)])) ls = LineString(Point(1, 1), Point(2, 2)) """ # If only one argument provided, set the coords array appropriately if len(args) == 1: coords = args[0] else: coords = args if isinstance(coords, (tuple, list)): # Getting the number of coords and the number of dimensions -- which # must stay the same, e.g., no LineString((1, 2), (1, 2, 3)). ncoords = len(coords) if coords: ndim = len(coords[0]) else: raise TypeError('Cannot initialize on empty sequence.') self._checkdim(ndim) # Incrementing through each of the coordinates and verifying for i in range(1, ncoords): if not isinstance(coords[i], (tuple, list, Point)): raise TypeError('each coordinate should be a sequence (list or tuple)') if len(coords[i]) != ndim: raise TypeError('Dimension mismatch.') numpy_coords = False elif numpy and isinstance(coords, numpy.ndarray): shape = coords.shape # Using numpy's shape. if len(shape) != 2: raise TypeError('Too many dimensions.') self._checkdim(shape[1]) ncoords = shape[0] ndim = shape[1] numpy_coords = True else: raise TypeError('Invalid initialization input for LineStrings.') # Creating a coordinate sequence object because it is easier to # set the points using GEOSCoordSeq.__setitem__(). cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3)) for i in range(ncoords): if numpy_coords: cs[i] = coords[i, :] elif isinstance(coords[i], Point): cs[i] = coords[i].tuple else: cs[i] = coords[i] # If SRID was passed in with the keyword arguments srid = kwargs.get('srid', None) # Calling the base geometry initialization with the returned pointer # from the function. super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid) def __iter__(self): "Allows iteration over this LineString." for i in range(len(self)): yield self[i] def __len__(self): "Returns the number of points in this LineString." return len(self._cs) def _get_single_external(self, index): return self._cs[index] _get_single_internal = _get_single_external def _set_list(self, length, items): ndim = self._cs.dims hasz = self._cs.hasz # I don't understand why these are different # create a new coordinate sequence and populate accordingly cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz) for i, c in enumerate(items): cs[i] = c ptr = self._init_func(cs.ptr) if ptr: capi.destroy_geom(self.ptr) self.ptr = ptr self._post_init(self.srid) else: # can this happen? 
            raise GEOSException('Geometry resulting from slice deletion was invalid.')

    def _set_single(self, index, value):
        self._checkindex(index)
        self._cs[index] = value

    def _checkdim(self, dim):
        if dim not in (2, 3):
            raise TypeError('Dimension mismatch.')

    #### Sequence Properties ####
    @property
    def tuple(self):
        "Returns a tuple version of the geometry from the coordinate sequence."
        return self._cs.tuple
    coords = tuple

    def _listarr(self, func):
        """
        Internal routine that returns a sequence (list) corresponding with
        the given function. Will return a numpy array if possible.
        """
        lst = [func(i) for i in range(len(self))]
        if numpy:
            return numpy.array(lst)  # ARRRR!
        else:
            return lst

    @property
    def array(self):
        "Returns a numpy array for the LineString."
        return self._listarr(self._cs.__getitem__)

    @property
    def merged(self):
        "Returns the line merge of this LineString."
        return self._topology(capi.geos_linemerge(self.ptr))

    @property
    def x(self):
        "Returns a list or numpy array of the X variable."
        return self._listarr(self._cs.getX)

    @property
    def y(self):
        "Returns a list or numpy array of the Y variable."
        return self._listarr(self._cs.getY)

    @property
    def z(self):
        "Returns a list or numpy array of the Z variable."
        if not self.hasz:
            return None
        else:
            return self._listarr(self._cs.getZ)


# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
    _minlength = 4  # a closed ring needs at least four points (first == last)
    _init_func = capi.create_linearring
bsd-3-clause
-4,250,029,842,655,085,000
578,179,760,785,023,500
33.366864
91
0.584022
false
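LineString in the record above exposes its GEOS coordinate sequence through Python's sequence protocol (__len__, __getitem__, __iter__). The delegation pattern on its own, with a toy in-memory container standing in for the GEOS pointer:

class CoordSeq(object):
    def __init__(self, coords):
        self._coords = list(coords)

    def __len__(self):
        return len(self._coords)

    def __getitem__(self, index):
        return self._coords[index]

    def __iter__(self):
        # Same shape as LineString.__iter__: index-based delegation.
        for i in range(len(self)):
            yield self[i]

line = CoordSeq([(1, 1), (2, 2), (3, 3)])
assert len(line) == 3
assert list(line) == [(1, 1), (2, 2), (3, 3)]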
ToonTownInfiniteRepo/ToontownInfinite
toontown/hood/GZHood.py
2
2614
from pandac.PandaModules import * import ToonHood from toontown.safezone import GZSafeZoneLoader from toontown.toonbase.ToontownGlobals import * from toontown.racing import DistributedVehicle import SkyUtil class GZHood(ToonHood.ToonHood): def __init__(self, parentFSM, doneEvent, dnaStore, hoodId): ToonHood.ToonHood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId) self.id = GolfZone self.safeZoneLoaderClass = GZSafeZoneLoader.GZSafeZoneLoader self.storageDNAFile = 'phase_6/dna/storage_GZ.dna' self.holidayStorageDNADict = {HALLOWEEN_PROPS: ['phase_6/dna/halloween_props_storage_GZ.dna'], SPOOKY_PROPS: ['phase_6/dna/halloween_props_storage_GZ.dna']} self.skyFile = 'phase_3.5/models/props/TT_sky' self.spookySkyFile = 'phase_3.5/models/props/BR_sky' self.titleColor = (1.0, 0.5, 0.4, 1.0) def load(self): ToonHood.ToonHood.load(self) self.parentFSM.getStateNamed('GZHood').addChild(self.fsm) def unload(self): self.parentFSM.getStateNamed('GZHood').removeChild(self.fsm) ToonHood.ToonHood.unload(self) def enter(self, *args): ToonHood.ToonHood.enter(self, *args) base.localAvatar.chatMgr.chatInputSpeedChat.addGolfMenu() base.camLens.setNearFar(SpeedwayCameraNear, SpeedwayCameraFar) def exit(self): base.camLens.setNearFar(DefaultCameraNear, DefaultCameraFar) base.localAvatar.chatMgr.chatInputSpeedChat.removeGolfMenu() ToonHood.ToonHood.exit(self) def skyTrack(self, task): return SkyUtil.cloudSkyTrack(task) def startSky(self): if not self.sky.getTag('sky') == 'Regular': self.endSpookySky() SkyUtil.startCloudSky(self) def startSpookySky(self): if hasattr(self, 'sky') and self.sky: self.stopSky() self.sky = loader.loadModel(self.spookySkyFile) self.sky.setTag('sky', 'Halloween') self.sky.setScale(1.0) self.sky.setDepthTest(0) self.sky.setDepthWrite(0) self.sky.setColor(0.5, 0.5, 0.5, 1) self.sky.setBin('background', 100) self.sky.setFogOff() self.sky.reparentTo(camera) self.sky.setTransparency(TransparencyAttrib.MDual, 1) fadeIn = self.sky.colorScaleInterval(1.5, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 0.25), blendType='easeInOut') fadeIn.start() self.sky.setZ(0.0) self.sky.setHpr(0.0, 0.0, 0.0) ce = CompassEffect.make(NodePath(), CompassEffect.PRot | CompassEffect.PZ) self.sky.node().setEffect(ce)
mit
-1,111,711,479,356,713,500
7,609,534,304,791,120,000
39.215385
127
0.66909
false
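GZHood in the record above pairs every change made in enter() (camera near/far planes, the speed-chat golf menu) with an inverse in exit(). A toy sketch of that save-and-restore symmetry; the class and attribute names here are invented for illustration:

class Scene(object):
    def __init__(self):
        self.near_far = (1.0, 1000.0)        # invented default clip planes

    def enter(self):
        self._saved_near_far = self.near_far  # remember the previous state
        self.near_far = (0.5, 8000.0)         # scene-specific override

    def exit(self):
        self.near_far = self._saved_near_far  # restore on the way out

s = Scene()
s.enter()
s.exit()
assert s.near_far == (1.0, 1000.0)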
Pythonify/awesome
venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.py
339
80176
# Copyright 2007 Google Inc. # Licensed to PSF under a Contributor Agreement. """A fast, lightweight IPv4/IPv6 manipulation library in Python. This library is used to create/poke/manipulate IPv4 and IPv6 addresses and networks. """ from __future__ import unicode_literals import itertools import struct __version__ = '1.0.17' # Compatibility functions _compat_int_types = (int,) try: _compat_int_types = (int, long) except NameError: pass try: _compat_str = unicode except NameError: _compat_str = str assert bytes != str if b'\0'[0] == 0: # Python 3 semantics def _compat_bytes_to_byte_vals(byt): return byt else: def _compat_bytes_to_byte_vals(byt): return [struct.unpack(b'!B', b)[0] for b in byt] try: _compat_int_from_byte_vals = int.from_bytes except AttributeError: def _compat_int_from_byte_vals(bytvals, endianess): assert endianess == 'big' res = 0 for bv in bytvals: assert isinstance(bv, _compat_int_types) res = (res << 8) + bv return res def _compat_to_bytes(intval, length, endianess): assert isinstance(intval, _compat_int_types) assert endianess == 'big' if length == 4: if intval < 0 or intval >= 2 ** 32: raise struct.error("integer out of range for 'I' format code") return struct.pack(b'!I', intval) elif length == 16: if intval < 0 or intval >= 2 ** 128: raise struct.error("integer out of range for 'QQ' format code") return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff) else: raise NotImplementedError() if hasattr(int, 'bit_length'): # Not int.bit_length , since that won't work in 2.7 where long exists def _compat_bit_length(i): return i.bit_length() else: def _compat_bit_length(i): for res in itertools.count(): if i >> res == 0: return res def _compat_range(start, end, step=1): assert step > 0 i = start while i < end: yield i i += step class _TotalOrderingMixin(object): __slots__ = () # Helper that derives the other comparison operations from # __lt__ and __eq__ # We avoid functools.total_ordering because it doesn't handle # NotImplemented correctly yet (http://bugs.python.org/issue10042) def __eq__(self, other): raise NotImplementedError def __ne__(self, other): equal = self.__eq__(other) if equal is NotImplemented: return NotImplemented return not equal def __lt__(self, other): raise NotImplementedError def __le__(self, other): less = self.__lt__(other) if less is NotImplemented or not less: return self.__eq__(other) return less def __gt__(self, other): less = self.__lt__(other) if less is NotImplemented: return NotImplemented equal = self.__eq__(other) if equal is NotImplemented: return NotImplemented return not (less or equal) def __ge__(self, other): less = self.__lt__(other) if less is NotImplemented: return NotImplemented return not less IPV4LENGTH = 32 IPV6LENGTH = 128 class AddressValueError(ValueError): """A Value Error related to the address.""" class NetmaskValueError(ValueError): """A Value Error related to the netmask.""" def ip_address(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Address or IPv6Address object. 
Raises: ValueError: if the *address* passed isn't either a v4 or a v6 address """ try: return IPv4Address(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Address(address) except (AddressValueError, NetmaskValueError): pass if isinstance(address, bytes): raise AddressValueError( '%r does not appear to be an IPv4 or IPv6 address. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?' % address) raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % address) def ip_network(address, strict=True): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP network. Either IPv4 or IPv6 networks may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Network or IPv6Network object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. Or if the network has host bits set. """ try: return IPv4Network(address, strict) except (AddressValueError, NetmaskValueError): pass try: return IPv6Network(address, strict) except (AddressValueError, NetmaskValueError): pass if isinstance(address, bytes): raise AddressValueError( '%r does not appear to be an IPv4 or IPv6 network. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?' % address) raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % address) def ip_interface(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Interface or IPv6Interface object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. Notes: The IPv?Interface classes describe an Address on a particular Network, so they're basically a combination of both the Address and Network classes. """ try: return IPv4Interface(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Interface(address) except (AddressValueError, NetmaskValueError): pass raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' % address) def v4_int_to_packed(address): """Represent an address as 4 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv4 IP address. Returns: The integer address packed as 4 bytes in network (big-endian) order. Raises: ValueError: If the integer is negative or too large to be an IPv4 IP address. """ try: return _compat_to_bytes(address, 4, 'big') except (struct.error, OverflowError): raise ValueError("Address negative or too large for IPv4") def v6_int_to_packed(address): """Represent an address as 16 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv6 IP address. Returns: The integer address packed as 16 bytes in network (big-endian) order. """ try: return _compat_to_bytes(address, 16, 'big') except (struct.error, OverflowError): raise ValueError("Address negative or too large for IPv6") def _split_optional_netmask(address): """Helper to split the netmask and raise AddressValueError if needed""" addr = _compat_str(address).split('/') if len(addr) > 2: raise AddressValueError("Only one '/' permitted in %r" % address) return addr def _find_address_range(addresses): """Find a sequence of sorted deduplicated IPv#Address. Args: addresses: a list of IPv#Address objects. 
Yields: A tuple containing the first and last IP addresses in the sequence. """ it = iter(addresses) first = last = next(it) for ip in it: if ip._ip != last._ip + 1: yield first, last first = ip last = ip yield first, last def _count_righthand_zero_bits(number, bits): """Count the number of zero bits on the right hand side. Args: number: an integer. bits: maximum number of bits to count. Returns: The number of zero bits on the right hand side of the number. """ if number == 0: return bits return min(bits, _compat_bit_length(~number & (number - 1))) def summarize_address_range(first, last): """Summarize a network range given the first and last IP addresses. Example: >>> list(summarize_address_range(IPv4Address('192.0.2.0'), ... IPv4Address('192.0.2.130'))) ... #doctest: +NORMALIZE_WHITESPACE [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), IPv4Network('192.0.2.130/32')] Args: first: the first IPv4Address or IPv6Address in the range. last: the last IPv4Address or IPv6Address in the range. Returns: An iterator of the summarized IPv(4|6) network objects. Raise: TypeError: If the first and last objects are not IP addresses. If the first and last objects are not the same version. ValueError: If the last object is not greater than the first. If the version of the first address is not 4 or 6. """ if (not (isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress))): raise TypeError('first and last must be IP addresses, not networks') if first.version != last.version: raise TypeError("%s and %s are not of the same version" % ( first, last)) if first > last: raise ValueError('last IP address must be greater than first') if first.version == 4: ip = IPv4Network elif first.version == 6: ip = IPv6Network else: raise ValueError('unknown IP version') ip_bits = first._max_prefixlen first_int = first._ip last_int = last._ip while first_int <= last_int: nbits = min(_count_righthand_zero_bits(first_int, ip_bits), _compat_bit_length(last_int - first_int + 1) - 1) net = ip((first_int, ip_bits - nbits)) yield net first_int += 1 << nbits if first_int - 1 == ip._ALL_ONES: break def _collapse_addresses_internal(addresses): """Loops through the addresses, collapsing concurrent netblocks. Example: ip1 = IPv4Network('192.0.2.0/26') ip2 = IPv4Network('192.0.2.64/26') ip3 = IPv4Network('192.0.2.128/26') ip4 = IPv4Network('192.0.2.192/26') _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> [IPv4Network('192.0.2.0/24')] This shouldn't be called directly; it is called via collapse_addresses([]). Args: addresses: A list of IPv4Network's or IPv6Network's Returns: A list of IPv4Network's or IPv6Network's depending on what we were passed. """ # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are sorted, # last.network_address <= net.network_address is a given. if last.broadcast_address >= net.broadcast_address: continue yield net last = net def collapse_addresses(addresses): """Collapse a list of IP objects. Example: collapse_addresses([IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]) -> [IPv4Network('192.0.2.0/24')] Args: addresses: An iterator of IPv4Network or IPv6Network objects. 
Returns: An iterator of the collapsed IPv(4|6)Network objects. Raises: TypeError: If passed a list of mixed version objects. """ addrs = [] ips = [] nets = [] # split IP addresses and networks for ip in addresses: if isinstance(ip, _BaseAddress): if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) ips.append(ip) elif ip._prefixlen == ip._max_prefixlen: if ips and ips[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, ips[-1])) try: ips.append(ip.ip) except AttributeError: ips.append(ip.network_address) else: if nets and nets[-1]._version != ip._version: raise TypeError("%s and %s are not of the same version" % ( ip, nets[-1])) nets.append(ip) # sort and dedup ips = sorted(set(ips)) # find consecutive address ranges in the sorted sequence and summarize them if ips: for first, last in _find_address_range(ips): addrs.extend(summarize_address_range(first, last)) return _collapse_addresses_internal(addrs + nets) def get_mixed_type_key(obj): """Return a key suitable for sorting between networks and addresses. Address and Network objects are not sortable by default; they're fundamentally different so the expression IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') doesn't make any sense. There are some times however, where you may wish to have ipaddress sort these for you anyway. If you need to do this, you can use this function as the key= argument to sorted(). Args: obj: either a Network or Address object. Returns: appropriate key. """ if isinstance(obj, _BaseNetwork): return obj._get_networks_key() elif isinstance(obj, _BaseAddress): return obj._get_address_key() return NotImplemented class _IPAddressBase(_TotalOrderingMixin): """The mother class.""" __slots__ = () @property def exploded(self): """Return the longhand version of the IP address as a string.""" return self._explode_shorthand_ip_string() @property def compressed(self): """Return the shorthand version of the IP address as a string.""" return _compat_str(self) @property def reverse_pointer(self): """The name of the reverse DNS pointer for the IP address, e.g.: >>> ipaddress.ip_address("127.0.0.1").reverse_pointer '1.0.0.127.in-addr.arpa' >>> ipaddress.ip_address("2001:db8::1").reverse_pointer '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' """ return self._reverse_pointer() @property def version(self): msg = '%200s has no version specified' % (type(self),) raise NotImplementedError(msg) def _check_int_address(self, address): if address < 0: msg = "%d (< 0) is not permitted as an IPv%d address" raise AddressValueError(msg % (address, self._version)) if address > self._ALL_ONES: msg = "%d (>= 2**%d) is not permitted as an IPv%d address" raise AddressValueError(msg % (address, self._max_prefixlen, self._version)) def _check_packed_address(self, address, expected_len): address_len = len(address) if address_len != expected_len: msg = ( '%r (len %d != %d) is not permitted as an IPv%d address. ' 'Did you pass in a bytes (str in Python 2) instead of' ' a unicode object?' ) raise AddressValueError(msg % (address, address_len, expected_len, self._version)) @classmethod def _ip_int_from_prefix(cls, prefixlen): """Turn the prefix length into a bitwise netmask Args: prefixlen: An integer, the prefix length. Returns: An integer. """ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) @classmethod def _prefix_from_ip_int(cls, ip_int): """Return prefix length from the bitwise netmask. 
Args: ip_int: An integer, the netmask in expanded bitwise format Returns: An integer, the prefix length. Raises: ValueError: If the input intermingles zeroes & ones """ trailing_zeroes = _count_righthand_zero_bits(ip_int, cls._max_prefixlen) prefixlen = cls._max_prefixlen - trailing_zeroes leading_ones = ip_int >> trailing_zeroes all_ones = (1 << prefixlen) - 1 if leading_ones != all_ones: byteslen = cls._max_prefixlen // 8 details = _compat_to_bytes(ip_int, byteslen, 'big') msg = 'Netmask pattern %r mixes zeroes & ones' raise ValueError(msg % details) return prefixlen @classmethod def _report_invalid_netmask(cls, netmask_str): msg = '%r is not a valid netmask' % netmask_str raise NetmaskValueError(msg) @classmethod def _prefix_from_prefix_string(cls, prefixlen_str): """Return prefix length from a numeric string Args: prefixlen_str: The string to be converted Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask """ # int allows a leading +/- as well as surrounding whitespace, # so we ensure that isn't the case if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): cls._report_invalid_netmask(prefixlen_str) try: prefixlen = int(prefixlen_str) except ValueError: cls._report_invalid_netmask(prefixlen_str) if not (0 <= prefixlen <= cls._max_prefixlen): cls._report_invalid_netmask(prefixlen_str) return prefixlen @classmethod def _prefix_from_ip_string(cls, ip_str): """Turn a netmask/hostmask string into a prefix length Args: ip_str: The netmask/hostmask to be converted Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask/hostmask """ # Parse the netmask/hostmask like an IP address. try: ip_int = cls._ip_int_from_string(ip_str) except AddressValueError: cls._report_invalid_netmask(ip_str) # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). # Note that the two ambiguous cases (all-ones and all-zeroes) are # treated as netmasks. try: return cls._prefix_from_ip_int(ip_int) except ValueError: pass # Invert the bits, and try matching a /0+1+/ hostmask instead. ip_int ^= cls._ALL_ONES try: return cls._prefix_from_ip_int(ip_int) except ValueError: cls._report_invalid_netmask(ip_str) def __reduce__(self): return self.__class__, (_compat_str(self),) class _BaseAddress(_IPAddressBase): """A generic IP object. This IP class contains the version independent methods which are used by single IP addresses. """ __slots__ = () def __int__(self): return self._ip def __eq__(self, other): try: return (self._ip == other._ip and self._version == other._version) except AttributeError: return NotImplemented def __lt__(self, other): if not isinstance(other, _IPAddressBase): return NotImplemented if not isinstance(other, _BaseAddress): raise TypeError('%s and %s are not of the same type' % ( self, other)) if self._version != other._version: raise TypeError('%s and %s are not of the same version' % ( self, other)) if self._ip != other._ip: return self._ip < other._ip return False # Shorthand for Integer addition and subtraction. This is not # meant to ever support addition/subtraction of addresses. 
def __add__(self, other): if not isinstance(other, _compat_int_types): return NotImplemented return self.__class__(int(self) + other) def __sub__(self, other): if not isinstance(other, _compat_int_types): return NotImplemented return self.__class__(int(self) - other) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) def __str__(self): return _compat_str(self._string_from_ip_int(self._ip)) def __hash__(self): return hash(hex(int(self._ip))) def _get_address_key(self): return (self._version, self) def __reduce__(self): return self.__class__, (self._ip,) class _BaseNetwork(_IPAddressBase): """A generic IP network object. This IP class contains the version independent methods which are used by networks. """ def __init__(self, address): self._cache = {} def __repr__(self): return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) def __str__(self): return '%s/%d' % (self.network_address, self.prefixlen) def hosts(self): """Generate Iterator over usable hosts in a network. This is like __iter__ except it doesn't return the network or broadcast addresses. """ network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network + 1, broadcast): yield self._address_class(x) def __iter__(self): network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network, broadcast + 1): yield self._address_class(x) def __getitem__(self, n): network = int(self.network_address) broadcast = int(self.broadcast_address) if n >= 0: if network + n > broadcast: raise IndexError('address out of range') return self._address_class(network + n) else: n += 1 if broadcast + n < network: raise IndexError('address out of range') return self._address_class(broadcast + n) def __lt__(self, other): if not isinstance(other, _IPAddressBase): return NotImplemented if not isinstance(other, _BaseNetwork): raise TypeError('%s and %s are not of the same type' % ( self, other)) if self._version != other._version: raise TypeError('%s and %s are not of the same version' % ( self, other)) if self.network_address != other.network_address: return self.network_address < other.network_address if self.netmask != other.netmask: return self.netmask < other.netmask return False def __eq__(self, other): try: return (self._version == other._version and self.network_address == other.network_address and int(self.netmask) == int(other.netmask)) except AttributeError: return NotImplemented def __hash__(self): return hash(int(self.network_address) ^ int(self.netmask)) def __contains__(self, other): # always false if one is v4 and the other is v6. if self._version != other._version: return False # dealing with another network. 
if isinstance(other, _BaseNetwork): return False # dealing with another address else: # address return (int(self.network_address) <= int(other._ip) <= int(self.broadcast_address)) def overlaps(self, other): """Tell if self is partly contained in other.""" return self.network_address in other or ( self.broadcast_address in other or ( other.network_address in self or ( other.broadcast_address in self))) @property def broadcast_address(self): x = self._cache.get('broadcast_address') if x is None: x = self._address_class(int(self.network_address) | int(self.hostmask)) self._cache['broadcast_address'] = x return x @property def hostmask(self): x = self._cache.get('hostmask') if x is None: x = self._address_class(int(self.netmask) ^ self._ALL_ONES) self._cache['hostmask'] = x return x @property def with_prefixlen(self): return '%s/%d' % (self.network_address, self._prefixlen) @property def with_netmask(self): return '%s/%s' % (self.network_address, self.netmask) @property def with_hostmask(self): return '%s/%s' % (self.network_address, self.hostmask) @property def num_addresses(self): """Number of hosts in the current subnet.""" return int(self.broadcast_address) - int(self.network_address) + 1 @property def _address_class(self): # Returning bare address objects (rather than interfaces) allows for # more consistent behaviour across the network address, broadcast # address and individual host addresses. msg = '%200s has no associated address class' % (type(self),) raise NotImplementedError(msg) @property def prefixlen(self): return self._prefixlen def address_exclude(self, other): """Remove an address from a larger block. For example: addr1 = ip_network('192.0.2.0/28') addr2 = ip_network('192.0.2.1/32') list(addr1.address_exclude(addr2)) = [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] or IPv6: addr1 = ip_network('2001:db8::1/32') addr2 = ip_network('2001:db8::1/128') list(addr1.address_exclude(addr2)) = [ip_network('2001:db8::1/128'), ip_network('2001:db8::2/127'), ip_network('2001:db8::4/126'), ip_network('2001:db8::8/125'), ... ip_network('2001:db8:8000::/33')] Args: other: An IPv4Network or IPv6Network object of the same type. Returns: An iterator of the IPv(4|6)Network objects which is self minus other. Raises: TypeError: If self and other are of differing address versions, or if other is not a network object. ValueError: If other is not completely contained by self. """ if not self._version == other._version: raise TypeError("%s and %s are not of the same version" % ( self, other)) if not isinstance(other, _BaseNetwork): raise TypeError("%s is not a network object" % other) if not other.subnet_of(self): raise ValueError('%s not contained in %s' % (other, self)) if other == self: return # Make sure we're comparing the network of other. other = other.__class__('%s/%s' % (other.network_address, other.prefixlen)) s1, s2 = self.subnets() while s1 != other and s2 != other: if other.subnet_of(s1): yield s2 s1, s2 = s1.subnets() elif other.subnet_of(s2): yield s1 s1, s2 = s2.subnets() else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) if s1 == other: yield s2 elif s2 == other: yield s1 else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) def compare_networks(self, other): """Compare two IP objects. 
This is only concerned about the comparison of the integer representation of the network addresses. This means that the host bits aren't considered at all in this method. If you want to compare host bits, you can easily enough do a 'HostA._ip < HostB._ip' Args: other: An IP object. Returns: If the IP versions of self and other are the same, returns: -1 if self < other: eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') IPv6Network('2001:db8::1000/124') < IPv6Network('2001:db8::2000/124') 0 if self == other eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') IPv6Network('2001:db8::1000/124') == IPv6Network('2001:db8::1000/124') 1 if self > other eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') IPv6Network('2001:db8::2000/124') > IPv6Network('2001:db8::1000/124') Raises: TypeError if the IP versions are different. """ # does this need to raise a ValueError? if self._version != other._version: raise TypeError('%s and %s are not of the same type' % ( self, other)) # self._version == other._version below here: if self.network_address < other.network_address: return -1 if self.network_address > other.network_address: return 1 # self.network_address == other.network_address below here: if self.netmask < other.netmask: return -1 if self.netmask > other.netmask: return 1 return 0 def _get_networks_key(self): """Network-only key function. Returns an object that identifies this address' network and netmask. This function is a suitable "key" argument for sorted() and list.sort(). """ return (self._version, self.network_address, self.netmask) def subnets(self, prefixlen_diff=1, new_prefix=None): """The subnets which join to make the current subnet. In the case that self contains only one IP (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 for IPv6), yield an iterator with just ourself. Args: prefixlen_diff: An integer, the amount the prefix length should be increased by. This should not be set if new_prefix is also set. new_prefix: The desired new prefix length. This must be a larger number (smaller prefix) than the existing prefix. This should not be set if prefixlen_diff is also set. Returns: An iterator of IPv(4|6) objects. Raises: ValueError: The prefixlen_diff is too small or too large. OR prefixlen_diff and new_prefix are both set or new_prefix is a smaller number than the current prefix (smaller number means a larger network) """ if self._prefixlen == self._max_prefixlen: yield self return if new_prefix is not None: if new_prefix < self._prefixlen: raise ValueError('new prefix must be longer') if prefixlen_diff != 1: raise ValueError('cannot set prefixlen_diff and new_prefix') prefixlen_diff = new_prefix - self._prefixlen if prefixlen_diff < 0: raise ValueError('prefix length diff must be > 0') new_prefixlen = self._prefixlen + prefixlen_diff if new_prefixlen > self._max_prefixlen: raise ValueError( 'prefix length diff %d is invalid for netblock %s' % ( new_prefixlen, self)) start = int(self.network_address) end = int(self.broadcast_address) + 1 step = (int(self.hostmask) + 1) >> prefixlen_diff for new_addr in _compat_range(start, end, step): current = self.__class__((new_addr, new_prefixlen)) yield current def supernet(self, prefixlen_diff=1, new_prefix=None): """The supernet containing the current network. Args: prefixlen_diff: An integer, the amount the prefix length of the network should be decreased by. For example, given a /24 network and a prefixlen_diff of 3, a supernet with a /21 netmask is returned. Returns: An IPv4 network object. 
Raises: ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a negative prefix length. OR If prefixlen_diff and new_prefix are both set or new_prefix is a larger number than the current prefix (larger number means a smaller network) """ if self._prefixlen == 0: return self if new_prefix is not None: if new_prefix > self._prefixlen: raise ValueError('new prefix must be shorter') if prefixlen_diff != 1: raise ValueError('cannot set prefixlen_diff and new_prefix') prefixlen_diff = self._prefixlen - new_prefix new_prefixlen = self.prefixlen - prefixlen_diff if new_prefixlen < 0: raise ValueError( 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (self.prefixlen, prefixlen_diff)) return self.__class__(( int(self.network_address) & (int(self.netmask) << prefixlen_diff), new_prefixlen )) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. """ return (self.network_address.is_multicast and self.broadcast_address.is_multicast) def subnet_of(self, other): # always false if one is v4 and the other is v6. if self._version != other._version: return False # dealing with another network. if (hasattr(other, 'network_address') and hasattr(other, 'broadcast_address')): return (other.network_address <= self.network_address and other.broadcast_address >= self.broadcast_address) # dealing with another address else: raise TypeError('Unable to test subnet containment with element ' 'of type %s' % type(other)) def supernet_of(self, other): # always false if one is v4 and the other is v6. if self._version != other._version: return False # dealing with another network. if (hasattr(other, 'network_address') and hasattr(other, 'broadcast_address')): return (other.network_address >= self.network_address and other.broadcast_address <= self.broadcast_address) # dealing with another address else: raise TypeError('Unable to test subnet containment with element ' 'of type %s' % type(other)) @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. """ return (self.network_address.is_reserved and self.broadcast_address.is_reserved) @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. """ return (self.network_address.is_link_local and self.broadcast_address.is_link_local) @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry or iana-ipv6-special-registry. """ return (self.network_address.is_private and self.broadcast_address.is_private) @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, True if the address is not reserved per iana-ipv4-special-registry or iana-ipv6-special-registry. """ return not self.is_private @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. """ return (self.network_address.is_unspecified and self.broadcast_address.is_unspecified) @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. 
""" return (self.network_address.is_loopback and self.broadcast_address.is_loopback) class _BaseV4(object): """Base IPv4 object. The following methods are used by IPv4 objects in both single IP addresses and networks. """ __slots__ = () _version = 4 # Equivalent to 255.255.255.255 or 32 bits of 1's. _ALL_ONES = (2 ** IPV4LENGTH) - 1 _DECIMAL_DIGITS = frozenset('0123456789') # the valid octets for host and netmasks. only useful for IPv4. _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) _max_prefixlen = IPV4LENGTH # There are only a handful of valid v4 netmasks, so we cache them all # when constructed (see _make_netmask()). _netmask_cache = {} def _explode_shorthand_ip_string(self): return _compat_str(self) @classmethod def _make_netmask(cls, arg): """Make a (netmask, prefix_len) tuple from the given argument. Argument can be: - an integer (the prefix length) - a string representing the prefix length (e.g. "24") - a string representing the prefix netmask (e.g. "255.255.255.0") """ if arg not in cls._netmask_cache: if isinstance(arg, _compat_int_types): prefixlen = arg else: try: # Check for a netmask in prefix length form prefixlen = cls._prefix_from_prefix_string(arg) except NetmaskValueError: # Check for a netmask or hostmask in dotted-quad form. # This may raise NetmaskValueError. prefixlen = cls._prefix_from_ip_string(arg) netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) cls._netmask_cache[arg] = netmask, prefixlen return cls._netmask_cache[arg] @classmethod def _ip_int_from_string(cls, ip_str): """Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address. """ if not ip_str: raise AddressValueError('Address cannot be empty') octets = ip_str.split('.') if len(octets) != 4: raise AddressValueError("Expected 4 octets in %r" % ip_str) try: return _compat_int_from_byte_vals( map(cls._parse_octet, octets), 'big') except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) @classmethod def _parse_octet(cls, octet_str): """Convert a decimal octet into an integer. Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255]. """ if not octet_str: raise ValueError("Empty octet not permitted") # Whitelist the characters, since int() allows a lot of bizarre stuff. if not cls._DECIMAL_DIGITS.issuperset(octet_str): msg = "Only decimal digits permitted in %r" raise ValueError(msg % octet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(octet_str) > 3: msg = "At most 3 characters permitted in %r" raise ValueError(msg % octet_str) # Convert to integer (we know digits are legal) octet_int = int(octet_str, 10) # Any octets that look like they *might* be written in octal, # and which don't look exactly the same in both octal and # decimal are rejected as ambiguous if octet_int > 7 and octet_str[0] == '0': msg = "Ambiguous (octal/decimal) value in %r not permitted" raise ValueError(msg % octet_str) if octet_int > 255: raise ValueError("Octet %d (> 255) not permitted" % octet_int) return octet_int @classmethod def _string_from_ip_int(cls, ip_int): """Turns a 32-bit integer into dotted decimal notation. Args: ip_int: An integer, the IP address. Returns: The IP address as a string in dotted decimal notation. 
""" return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] if isinstance(b, bytes) else b) for b in _compat_to_bytes(ip_int, 4, 'big')) def _is_hostmask(self, ip_str): """Test if the IP string is a hostmask (rather than a netmask). Args: ip_str: A string, the potential hostmask. Returns: A boolean, True if the IP string is a hostmask. """ bits = ip_str.split('.') try: parts = [x for x in map(int, bits) if x in self._valid_mask_octets] except ValueError: return False if len(parts) != len(bits): return False if parts[0] < parts[-1]: return True return False def _reverse_pointer(self): """Return the reverse DNS pointer name for the IPv4 address. This implements the method described in RFC1035 3.5. """ reverse_octets = _compat_str(self).split('.')[::-1] return '.'.join(reverse_octets) + '.in-addr.arpa' @property def max_prefixlen(self): return self._max_prefixlen @property def version(self): return self._version class IPv4Address(_BaseV4, _BaseAddress): """Represent and manipulate single IPv4 Addresses.""" __slots__ = ('_ip', '__weakref__') def __init__(self, address): """ Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv4Address('192.0.2.1') == IPv4Address(3221225985). or, more generally IPv4Address(int(IPv4Address('192.0.2.1'))) == IPv4Address('192.0.2.1') Raises: AddressValueError: If ipaddress isn't a valid IPv4 address. """ # Efficient constructor from integer. if isinstance(address, _compat_int_types): self._check_int_address(address) self._ip = address return # Constructing from a packed address if isinstance(address, bytes): self._check_packed_address(address, 4) bvs = _compat_bytes_to_byte_vals(address) self._ip = _compat_int_from_byte_vals(bvs, 'big') return # Assume input argument to be string or any object representation # which converts into a formatted IP string. addr_str = _compat_str(address) if '/' in addr_str: raise AddressValueError("Unexpected '/' in %r" % address) self._ip = self._ip_int_from_string(addr_str) @property def packed(self): """The binary representation of this address.""" return v4_int_to_packed(self._ip) @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within the reserved IPv4 Network range. """ return self in self._constants._reserved_network @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry. """ return any(self in net for net in self._constants._private_networks) @property def is_global(self): return ( self not in self._constants._public_network and not self.is_private) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is multicast. See RFC 3171 for details. """ return self in self._constants._multicast_network @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 5735 3. """ return self == self._constants._unspecified_address @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback per RFC 3330. """ return self in self._constants._loopback_network @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is link-local per RFC 3927. 
""" return self in self._constants._linklocal_network class IPv4Interface(IPv4Address): def __init__(self, address): if isinstance(address, (bytes, _compat_int_types)): IPv4Address.__init__(self, address) self.network = IPv4Network(self._ip) self._prefixlen = self._max_prefixlen return if isinstance(address, tuple): IPv4Address.__init__(self, address[0]) if len(address) > 1: self._prefixlen = int(address[1]) else: self._prefixlen = self._max_prefixlen self.network = IPv4Network(address, strict=False) self.netmask = self.network.netmask self.hostmask = self.network.hostmask return addr = _split_optional_netmask(address) IPv4Address.__init__(self, addr[0]) self.network = IPv4Network(address, strict=False) self._prefixlen = self.network._prefixlen self.netmask = self.network.netmask self.hostmask = self.network.hostmask def __str__(self): return '%s/%d' % (self._string_from_ip_int(self._ip), self.network.prefixlen) def __eq__(self, other): address_equal = IPv4Address.__eq__(self, other) if not address_equal or address_equal is NotImplemented: return address_equal try: return self.network == other.network except AttributeError: # An interface with an associated network is NOT the # same as an unassociated address. That's why the hash # takes the extra info into account. return False def __lt__(self, other): address_less = IPv4Address.__lt__(self, other) if address_less is NotImplemented: return NotImplemented try: return self.network < other.network except AttributeError: # We *do* allow addresses and interfaces to be sorted. The # unassociated address is considered less than all interfaces. return False def __hash__(self): return self._ip ^ self._prefixlen ^ int(self.network.network_address) __reduce__ = _IPAddressBase.__reduce__ @property def ip(self): return IPv4Address(self._ip) @property def with_prefixlen(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self._prefixlen) @property def with_netmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.netmask) @property def with_hostmask(self): return '%s/%s' % (self._string_from_ip_int(self._ip), self.hostmask) class IPv4Network(_BaseV4, _BaseNetwork): """This class represents and manipulates 32-bit IPv4 network + addresses.. Attributes: [examples for IPv4Network('192.0.2.0/27')] .network_address: IPv4Address('192.0.2.0') .hostmask: IPv4Address('0.0.0.31') .broadcast_address: IPv4Address('192.0.2.32') .netmask: IPv4Address('255.255.255.224') .prefixlen: 27 """ # Class to use when creating address objects _address_class = IPv4Address def __init__(self, address, strict=True): """Instantiate a new IPv4 network object. Args: address: A string or integer representing the IP [& network]. '192.0.2.0/24' '192.0.2.0/255.255.255.0' '192.0.0.2/0.0.0.255' are all functionally the same in IPv4. Similarly, '192.0.2.1' '192.0.2.1/255.255.255.255' '192.0.2.1/32' are also functionally equivalent. That is to say, failing to provide a subnetmask will create an object with a mask of /32. If the mask (portion after the / in the argument) is given in dotted quad form, it is treated as a netmask if it starts with a non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it starts with a zero field (e.g. 0.255.255.255 == /8), with the single exception of an all-zero mask which is treated as a netmask == /0. If no mask is given, a default of /32 is used. 
Additionally, an integer can be passed, so IPv4Network('192.0.2.1') == IPv4Network(3221225985) or, more generally IPv4Interface(int(IPv4Interface('192.0.2.1'))) == IPv4Interface('192.0.2.1') Raises: AddressValueError: If ipaddress isn't a valid IPv4 address. NetmaskValueError: If the netmask isn't valid for an IPv4 address. ValueError: If strict is True and a network address is not supplied. """ _BaseNetwork.__init__(self, address) # Constructing from a packed address or integer if isinstance(address, (_compat_int_types, bytes)): self.network_address = IPv4Address(address) self.netmask, self._prefixlen = self._make_netmask( self._max_prefixlen) # fixme: address/network test here. return if isinstance(address, tuple): if len(address) > 1: arg = address[1] else: # We weren't given an address[1] arg = self._max_prefixlen self.network_address = IPv4Address(address[0]) self.netmask, self._prefixlen = self._make_netmask(arg) packed = int(self.network_address) if packed & int(self.netmask) != packed: if strict: raise ValueError('%s has host bits set' % self) else: self.network_address = IPv4Address(packed & int(self.netmask)) return # Assume input argument to be string or any object representation # which converts into a formatted IP prefix string. addr = _split_optional_netmask(address) self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) if len(addr) == 2: arg = addr[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) if strict: if (IPv4Address(int(self.network_address) & int(self.netmask)) != self.network_address): raise ValueError('%s has host bits set' % self) self.network_address = IPv4Address(int(self.network_address) & int(self.netmask)) if self._prefixlen == (self._max_prefixlen - 1): self.hosts = self.__iter__ @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, True if the address is not reserved per iana-ipv4-special-registry. """ return (not (self.network_address in IPv4Network('100.64.0.0/10') and self.broadcast_address in IPv4Network('100.64.0.0/10')) and not self.is_private) class _IPv4Constants(object): _linklocal_network = IPv4Network('169.254.0.0/16') _loopback_network = IPv4Network('127.0.0.0/8') _multicast_network = IPv4Network('224.0.0.0/4') _public_network = IPv4Network('100.64.0.0/10') _private_networks = [ IPv4Network('0.0.0.0/8'), IPv4Network('10.0.0.0/8'), IPv4Network('127.0.0.0/8'), IPv4Network('169.254.0.0/16'), IPv4Network('172.16.0.0/12'), IPv4Network('192.0.0.0/29'), IPv4Network('192.0.0.170/31'), IPv4Network('192.0.2.0/24'), IPv4Network('192.168.0.0/16'), IPv4Network('198.18.0.0/15'), IPv4Network('198.51.100.0/24'), IPv4Network('203.0.113.0/24'), IPv4Network('240.0.0.0/4'), IPv4Network('255.255.255.255/32'), ] _reserved_network = IPv4Network('240.0.0.0/4') _unspecified_address = IPv4Address('0.0.0.0') IPv4Address._constants = _IPv4Constants class _BaseV6(object): """Base IPv6 object. The following methods are used by IPv6 objects in both single IP addresses and networks. """ __slots__ = () _version = 6 _ALL_ONES = (2 ** IPV6LENGTH) - 1 _HEXTET_COUNT = 8 _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') _max_prefixlen = IPV6LENGTH # There are only a bunch of valid v6 netmasks, so we cache them all # when constructed (see _make_netmask()). _netmask_cache = {} @classmethod def _make_netmask(cls, arg): """Make a (netmask, prefix_len) tuple from the given argument. 
Argument can be: - an integer (the prefix length) - a string representing the prefix length (e.g. "24") - a string representing the prefix netmask (e.g. "255.255.255.0") """ if arg not in cls._netmask_cache: if isinstance(arg, _compat_int_types): prefixlen = arg else: prefixlen = cls._prefix_from_prefix_string(arg) netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) cls._netmask_cache[arg] = netmask, prefixlen return cls._netmask_cache[arg] @classmethod def _ip_int_from_string(cls, ip_str): """Turn an IPv6 ip_str into an integer. Args: ip_str: A string, the IPv6 ip_str. Returns: An int, the IPv6 address Raises: AddressValueError: if ip_str isn't a valid IPv6 Address. """ if not ip_str: raise AddressValueError('Address cannot be empty') parts = ip_str.split(':') # An IPv6 address needs at least 2 colons (3 parts). _min_parts = 3 if len(parts) < _min_parts: msg = "At least %d parts expected in %r" % (_min_parts, ip_str) raise AddressValueError(msg) # If the address has an IPv4-style suffix, convert it to hexadecimal. if '.' in parts[-1]: try: ipv4_int = IPv4Address(parts.pop())._ip except AddressValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) parts.append('%x' % (ipv4_int & 0xFFFF)) # An IPv6 address can't have more than 8 colons (9 parts). # The extra colon comes from using the "::" notation for a single # leading or trailing zero part. _max_parts = cls._HEXTET_COUNT + 1 if len(parts) > _max_parts: msg = "At most %d colons permitted in %r" % ( _max_parts - 1, ip_str) raise AddressValueError(msg) # Disregarding the endpoints, find '::' with nothing in between. # This indicates that a run of zeroes has been skipped. skip_index = None for i in _compat_range(1, len(parts) - 1): if not parts[i]: if skip_index is not None: # Can't have more than one '::' msg = "At most one '::' permitted in %r" % ip_str raise AddressValueError(msg) skip_index = i # parts_hi is the number of parts to copy from above/before the '::' # parts_lo is the number of parts to copy from below/after the '::' if skip_index is not None: # If we found a '::', then check if it also covers the endpoints. parts_hi = skip_index parts_lo = len(parts) - skip_index - 1 if not parts[0]: parts_hi -= 1 if parts_hi: msg = "Leading ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # ^: requires ^:: if not parts[-1]: parts_lo -= 1 if parts_lo: msg = "Trailing ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # :$ requires ::$ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) if parts_skipped < 1: msg = "Expected at most %d other parts with '::' in %r" raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) else: # Otherwise, allocate the entire address to parts_hi. The # endpoints could still be empty, but _parse_hextet() will check # for that. if len(parts) != cls._HEXTET_COUNT: msg = "Exactly %d parts expected without '::' in %r" raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) if not parts[0]: msg = "Leading ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # ^: requires ^:: if not parts[-1]: msg = "Trailing ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # :$ requires ::$ parts_hi = len(parts) parts_lo = 0 parts_skipped = 0 try: # Now, parse the hextets into a 128-bit integer. 
ip_int = 0 for i in range(parts_hi): ip_int <<= 16 ip_int |= cls._parse_hextet(parts[i]) ip_int <<= 16 * parts_skipped for i in range(-parts_lo, 0): ip_int <<= 16 ip_int |= cls._parse_hextet(parts[i]) return ip_int except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) @classmethod def _parse_hextet(cls, hextet_str): """Convert an IPv6 hextet string into an integer. Args: hextet_str: A string, the number to parse. Returns: The hextet as an integer. Raises: ValueError: if the input isn't strictly a hex number from [0..FFFF]. """ # Whitelist the characters, since int() allows a lot of bizarre stuff. if not cls._HEX_DIGITS.issuperset(hextet_str): raise ValueError("Only hex digits permitted in %r" % hextet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(hextet_str) > 4: msg = "At most 4 characters permitted in %r" raise ValueError(msg % hextet_str) # Length check means we can skip checking the integer value return int(hextet_str, 16) @classmethod def _compress_hextets(cls, hextets): """Compresses a list of hextets. Compresses a list of strings, replacing the longest continuous sequence of "0" in the list with "" and adding empty strings at the beginning or at the end of the string such that subsequently calling ":".join(hextets) will produce the compressed version of the IPv6 address. Args: hextets: A list of strings, the hextets to compress. Returns: A list of strings. """ best_doublecolon_start = -1 best_doublecolon_len = 0 doublecolon_start = -1 doublecolon_len = 0 for index, hextet in enumerate(hextets): if hextet == '0': doublecolon_len += 1 if doublecolon_start == -1: # Start of a sequence of zeros. doublecolon_start = index if doublecolon_len > best_doublecolon_len: # This is the longest sequence of zeros so far. best_doublecolon_len = doublecolon_len best_doublecolon_start = doublecolon_start else: doublecolon_len = 0 doublecolon_start = -1 if best_doublecolon_len > 1: best_doublecolon_end = (best_doublecolon_start + best_doublecolon_len) # For zeros at the end of the address. if best_doublecolon_end == len(hextets): hextets += [''] hextets[best_doublecolon_start:best_doublecolon_end] = [''] # For zeros at the beginning of the address. if best_doublecolon_start == 0: hextets = [''] + hextets return hextets @classmethod def _string_from_ip_int(cls, ip_int=None): """Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones. """ if ip_int is None: ip_int = int(cls._ip) if ip_int > cls._ALL_ONES: raise ValueError('IPv6 address is too large') hex_str = '%032x' % ip_int hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] hextets = cls._compress_hextets(hextets) return ':'.join(hextets) def _explode_shorthand_ip_string(self): """Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address. 
""" if isinstance(self, IPv6Network): ip_str = _compat_str(self.network_address) elif isinstance(self, IPv6Interface): ip_str = _compat_str(self.ip) else: ip_str = _compat_str(self) ip_int = self._ip_int_from_string(ip_str) hex_str = '%032x' % ip_int parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] if isinstance(self, (_BaseNetwork, IPv6Interface)): return '%s/%d' % (':'.join(parts), self._prefixlen) return ':'.join(parts) def _reverse_pointer(self): """Return the reverse DNS pointer name for the IPv6 address. This implements the method described in RFC3596 2.5. """ reverse_chars = self.exploded[::-1].replace(':', '') return '.'.join(reverse_chars) + '.ip6.arpa' @property def max_prefixlen(self): return self._max_prefixlen @property def version(self): return self._version class IPv6Address(_BaseV6, _BaseAddress): """Represent and manipulate single IPv6 Addresses.""" __slots__ = ('_ip', '__weakref__') def __init__(self, address): """Instantiate a new IPv6 address object. Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560) or, more generally IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::') Raises: AddressValueError: If address isn't a valid IPv6 address. """ # Efficient constructor from integer. if isinstance(address, _compat_int_types): self._check_int_address(address) self._ip = address return # Constructing from a packed address if isinstance(address, bytes): self._check_packed_address(address, 16) bvs = _compat_bytes_to_byte_vals(address) self._ip = _compat_int_from_byte_vals(bvs, 'big') return # Assume input argument to be string or any object representation # which converts into a formatted IP string. addr_str = _compat_str(address) if '/' in addr_str: raise AddressValueError("Unexpected '/' in %r" % address) self._ip = self._ip_int_from_string(addr_str) @property def packed(self): """The binary representation of this address.""" return v6_int_to_packed(self._ip) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. """ return self in self._constants._multicast_network @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. """ return any(self in x for x in self._constants._reserved_networks) @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. """ return self in self._constants._linklocal_network @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. """ return self in self._constants._sitelocal_network @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv6-special-registry. """ return any(self in net for net in self._constants._private_networks) @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, true if the address is not reserved per iana-ipv6-special-registry. 
""" return not self.is_private @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. """ return self._ip == 0 @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. """ return self._ip == 1 @property def ipv4_mapped(self): """Return the IPv4 mapped address. Returns: If the IPv6 address is a v4 mapped address, return the IPv4 mapped address. Return None otherwise. """ if (self._ip >> 32) != 0xFFFF: return None return IPv4Address(self._ip & 0xFFFFFFFF) @property def teredo(self): """Tuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32) """ if (self._ip >> 96) != 0x20010000: return None return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), IPv4Address(~self._ip & 0xFFFFFFFF)) @property def sixtofour(self): """Return the IPv4 6to4 embedded address. Returns: The IPv4 6to4-embedded address if present or None if the address doesn't appear to contain a 6to4 embedded address. """ if (self._ip >> 112) != 0x2002: return None return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) class IPv6Interface(IPv6Address): def __init__(self, address): if isinstance(address, (bytes, _compat_int_types)): IPv6Address.__init__(self, address) self.network = IPv6Network(self._ip) self._prefixlen = self._max_prefixlen return if isinstance(address, tuple): IPv6Address.__init__(self, address[0]) if len(address) > 1: self._prefixlen = int(address[1]) else: self._prefixlen = self._max_prefixlen self.network = IPv6Network(address, strict=False) self.netmask = self.network.netmask self.hostmask = self.network.hostmask return addr = _split_optional_netmask(address) IPv6Address.__init__(self, addr[0]) self.network = IPv6Network(address, strict=False) self.netmask = self.network.netmask self._prefixlen = self.network._prefixlen self.hostmask = self.network.hostmask def __str__(self): return '%s/%d' % (self._string_from_ip_int(self._ip), self.network.prefixlen) def __eq__(self, other): address_equal = IPv6Address.__eq__(self, other) if not address_equal or address_equal is NotImplemented: return address_equal try: return self.network == other.network except AttributeError: # An interface with an associated network is NOT the # same as an unassociated address. That's why the hash # takes the extra info into account. return False def __lt__(self, other): address_less = IPv6Address.__lt__(self, other) if address_less is NotImplemented: return NotImplemented try: return self.network < other.network except AttributeError: # We *do* allow addresses and interfaces to be sorted. The # unassociated address is considered less than all interfaces. 
            return False

    def __hash__(self):
        return self._ip ^ self._prefixlen ^ int(self.network.network_address)

    __reduce__ = _IPAddressBase.__reduce__

    @property
    def ip(self):
        return IPv6Address(self._ip)

    @property
    def with_prefixlen(self):
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self._prefixlen)

    @property
    def with_netmask(self):
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.netmask)

    @property
    def with_hostmask(self):
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.hostmask)

    @property
    def is_unspecified(self):
        return self._ip == 0 and self.network.is_unspecified

    @property
    def is_loopback(self):
        return self._ip == 1 and self.network.is_loopback


class IPv6Network(_BaseV6, _BaseNetwork):

    """This class represents and manipulates 128-bit IPv6 networks.

    Attributes: [examples for IPv6Network('2001:db8::1000/124')]
        .network_address: IPv6Address('2001:db8::1000')
        .hostmask: IPv6Address('::f')
        .broadcast_address: IPv6Address('2001:db8::100f')
        .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
        .prefixlen: 124

    """

    # Class to use when creating address objects
    _address_class = IPv6Address

    def __init__(self, address, strict=True):
        """Instantiate a new IPv6 Network object.

        Args:
            address: A string or integer representing the IPv6 network or the
              IP and prefix/netmask.
              '2001:db8::/128'
              '2001:db8:0000:0000:0000:0000:0000:0000/128'
              '2001:db8::'
              are all functionally the same in IPv6.  That is to say,
              failing to provide a subnetmask will create an object with
              a mask of /128.

              Additionally, an integer can be passed, so
              IPv6Network('2001:db8::') ==
                IPv6Network(42540766411282592856903984951653826560)
              or, more generally
              IPv6Network(int(IPv6Network('2001:db8::'))) ==
                IPv6Network('2001:db8::')

            strict: A boolean. If true, ensure that we have been passed
              a true network address, eg, 2001:db8::1000/124 and not an
              IP address on a network, eg, 2001:db8::1/124.

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv6 address.
            ValueError: If strict was True and a network address was not
              supplied.

        """
        _BaseNetwork.__init__(self, address)

        # Efficient constructor from integer or packed address
        if isinstance(address, (bytes, _compat_int_types)):
            self.network_address = IPv6Address(address)
            self.netmask, self._prefixlen = self._make_netmask(
                self._max_prefixlen)
            return

        if isinstance(address, tuple):
            if len(address) > 1:
                arg = address[1]
            else:
                arg = self._max_prefixlen
            self.netmask, self._prefixlen = self._make_netmask(arg)
            self.network_address = IPv6Address(address[0])
            packed = int(self.network_address)
            if packed & int(self.netmask) != packed:
                if strict:
                    raise ValueError('%s has host bits set' % self)
                else:
                    self.network_address = IPv6Address(packed &
                                                       int(self.netmask))
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = _split_optional_netmask(address)

        self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))

        if len(addr) == 2:
            arg = addr[1]
        else:
            arg = self._max_prefixlen
        self.netmask, self._prefixlen = self._make_netmask(arg)

        if strict:
            if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
                    self.network_address):
                raise ValueError('%s has host bits set' % self)
        self.network_address = IPv6Address(int(self.network_address) &
                                           int(self.netmask))

        if self._prefixlen == (self._max_prefixlen - 1):
            self.hosts = self.__iter__

    def hosts(self):
        """Generate an iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the Subnet-Router anycast address. """ network = int(self.network_address) broadcast = int(self.broadcast_address) for x in _compat_range(network + 1, broadcast + 1): yield self._address_class(x) @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. """ return (self.network_address.is_site_local and self.broadcast_address.is_site_local) class _IPv6Constants(object): _linklocal_network = IPv6Network('fe80::/10') _multicast_network = IPv6Network('ff00::/8') _private_networks = [ IPv6Network('::1/128'), IPv6Network('::/128'), IPv6Network('::ffff:0:0/96'), IPv6Network('100::/64'), IPv6Network('2001::/23'), IPv6Network('2001:2::/48'), IPv6Network('2001:db8::/32'), IPv6Network('2001:10::/28'), IPv6Network('fc00::/7'), IPv6Network('fe80::/10'), ] _reserved_networks = [ IPv6Network('::/8'), IPv6Network('100::/8'), IPv6Network('200::/7'), IPv6Network('400::/6'), IPv6Network('800::/5'), IPv6Network('1000::/4'), IPv6Network('4000::/3'), IPv6Network('6000::/3'), IPv6Network('8000::/3'), IPv6Network('A000::/3'), IPv6Network('C000::/3'), IPv6Network('E000::/4'), IPv6Network('F000::/5'), IPv6Network('F800::/6'), IPv6Network('FE00::/9'), ] _sitelocal_network = IPv6Network('fec0::/10') IPv6Address._constants = _IPv6Constants
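
# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch of the network arithmetic
# defined above (subnets/supernet/address_exclude). It is not part of the
# original backport; the values below were chosen purely for illustration.
if __name__ == '__main__':
    demo_net = IPv4Network('192.0.2.0/28')
    # subnets() splits the /28 into two /29 halves by default.
    assert [str(s) for s in demo_net.subnets()] == ['192.0.2.0/29',
                                                    '192.0.2.8/29']
    # supernet() is the inverse operation: the prefix gets one bit shorter.
    assert str(demo_net.supernet()) == '192.0.2.0/27'
    # address_exclude() yields self minus other as a minimal set of subnets.
    remaining = sorted(demo_net.address_exclude(IPv4Network('192.0.2.1/32')))
    assert [str(n) for n in remaining] == ['192.0.2.0/32', '192.0.2.2/31',
                                           '192.0.2.4/30', '192.0.2.8/29']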
gpl-3.0
-2,762,518,490,100,699,600
-2,723,789,994,851,766,300
32.062268
86
0.568387
false
jianghuaw/nova
nova/tests/unit/notifications/objects/test_flavor.py
2
7601
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

import mock

from nova import context
from nova.notifications.objects import flavor as flavor_notification
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests.unit.objects.test_flavor import fake_flavor

PROJECTS_SENTINEL = object()


class TestFlavorNotification(test.TestCase):
    def setUp(self):
        self.ctxt = context.get_admin_context()
        super(TestFlavorNotification, self).setUp()

    @mock.patch('nova.notifications.objects.flavor.FlavorNotification')
    def _verify_notification(self, flavor_obj, flavor, action,
                             mock_notification, project_id=None,
                             expected_projects=PROJECTS_SENTINEL):
        notification = mock_notification
        if action == "CREATE":
            flavor_obj.create()
        elif action == "DELETE":
            flavor_obj.destroy()
        elif action == "ADD_ACCESS":
            action = "UPDATE"
            flavor_obj.add_access(project_id)
        elif action == "REMOVE_ACCESS":
            action = "UPDATE"
            flavor_obj.remove_access(project_id)
        else:
            flavor_obj.save()

        self.assertTrue(notification.called)

        event_type = notification.call_args[1]['event_type']
        priority = notification.call_args[1]['priority']
        publisher = notification.call_args[1]['publisher']
        payload = notification.call_args[1]['payload']

        self.assertEqual("fake-mini", publisher.host)
        self.assertEqual("nova-api", publisher.binary)
        self.assertEqual(fields.NotificationPriority.INFO, priority)
        self.assertEqual('flavor', event_type.object)
        self.assertEqual(getattr(fields.NotificationAction, action),
                         event_type.action)

        notification.return_value.emit.assert_called_once_with(self.ctxt)

        schema = flavor_notification.FlavorPayload.SCHEMA
        for field in schema:
            if field == 'projects' and expected_projects != PROJECTS_SENTINEL:
                self.assertEqual(expected_projects, getattr(payload, field))
            elif field in flavor_obj:
                self.assertEqual(flavor_obj[field], getattr(payload, field))
            else:
                self.fail('Missing check for field %s in flavor_obj.'
                          % field)

    @mock.patch('nova.objects.Flavor._flavor_create')
    def test_flavor_create_with_notification(self, mock_create):
        flavor = copy.deepcopy(fake_flavor)
        flavor_obj = objects.Flavor(context=self.ctxt)
        flavor_obj.extra_specs = flavor['extra_specs']
        flavorid = '1'
        flavor['flavorid'] = flavorid
        flavor['id'] = flavorid
        mock_create.return_value = flavor
        self._verify_notification(flavor_obj, flavor, 'CREATE')

    @mock.patch('nova.objects.Flavor._flavor_extra_specs_del')
    def test_flavor_update_with_notification(self, mock_delete):
        flavor = copy.deepcopy(fake_flavor)
        flavorid = '1'
        flavor['flavorid'] = flavorid
        flavor['id'] = flavorid
        flavor_obj = objects.Flavor(context=self.ctxt, **flavor)
        flavor_obj.obj_reset_changes()

        del flavor_obj.extra_specs['foo']
        del flavor['extra_specs']['foo']
        self._verify_notification(flavor_obj, flavor, "UPDATE")

        projects = ['project-1', 'project-2']
        flavor_obj.projects = projects
        flavor['projects'] = projects
        self._verify_notification(flavor_obj, flavor, "UPDATE")

    @mock.patch('nova.objects.Flavor._add_access')
    @mock.patch('nova.objects.Flavor._remove_access')
    def test_flavor_access_with_notification(self, mock_remove_access,
                                             mock_add_access):
        flavor = copy.deepcopy(fake_flavor)
        flavorid = '1'
        flavor['flavorid'] = flavorid
        flavor['id'] = flavorid
        flavor_obj = objects.Flavor(context=self.ctxt, **flavor)
        flavor_obj.obj_reset_changes()
        self._verify_notification(flavor_obj, flavor, "ADD_ACCESS",
                                  project_id="project1")
        self._verify_notification(flavor_obj, flavor, "REMOVE_ACCESS",
                                  project_id="project1")

    @mock.patch('nova.objects.Flavor._flavor_destroy')
    def test_flavor_destroy_with_notification(self, mock_destroy):
        flavor = copy.deepcopy(fake_flavor)
        flavorid = '1'
        flavor['flavorid'] = flavorid
        flavor['id'] = flavorid
        mock_destroy.return_value = flavor
        flavor_obj = objects.Flavor(context=self.ctxt, **flavor)
        flavor_obj.obj_reset_changes()
        self.assertNotIn('projects', flavor_obj)
        # We specifically expect there to not be any projects as we don't want
        # to try and lazy-load them from the main database and end up with [].
        self._verify_notification(flavor_obj, flavor, "DELETE",
                                  expected_projects=None)

    @mock.patch('nova.objects.Flavor._flavor_destroy')
    def test_flavor_destroy_with_notification_and_projects(self,
                                                           mock_destroy):
        """Tests the flavor-delete notification with flavor.projects
        loaded.
        """
        flavor = copy.deepcopy(fake_flavor)
        flavorid = '1'
        flavor['flavorid'] = flavorid
        flavor['id'] = flavorid
        mock_destroy.return_value = flavor
        flavor_obj = objects.Flavor(
            context=self.ctxt, projects=['foo'], **flavor)
        flavor_obj.obj_reset_changes()
        self.assertIn('projects', flavor_obj)
        self.assertEqual(['foo'], flavor_obj.projects)
        # Since projects is loaded we shouldn't try to lazy-load it.
        self._verify_notification(flavor_obj, flavor, "DELETE")

    def test_obj_make_compatible(self):
        flavor = copy.deepcopy(fake_flavor)
        flavorid = '1'
        flavor['flavorid'] = flavorid
        flavor['id'] = flavorid
        flavor_obj = objects.Flavor(context=self.ctxt, **flavor)
        flavor_payload = flavor_notification.FlavorPayload(flavor_obj)
        primitive = flavor_payload.obj_to_primitive()
        self.assertIn('name', primitive['nova_object.data'])
        self.assertIn('swap', primitive['nova_object.data'])
        self.assertIn('rxtx_factor', primitive['nova_object.data'])
        self.assertIn('vcpu_weight', primitive['nova_object.data'])
        self.assertIn('disabled', primitive['nova_object.data'])
        self.assertIn('is_public', primitive['nova_object.data'])
        flavor_payload.obj_make_compatible(primitive['nova_object.data'],
                                           '1.0')
        self.assertNotIn('name', primitive['nova_object.data'])
        self.assertNotIn('swap', primitive['nova_object.data'])
        self.assertNotIn('rxtx_factor', primitive['nova_object.data'])
        self.assertNotIn('vcpu_weight', primitive['nova_object.data'])
        self.assertNotIn('disabled', primitive['nova_object.data'])
        self.assertNotIn('is_public', primitive['nova_object.data'])
apache-2.0
-108,089,883,150,801,000
3,000,773,622,308,832,000
43.711765
79
0.635443
false
serviceagility/boto
boto/sdb/domain.py
153
14351
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from __future__ import print_function

"""
Represents an SDB Domain
"""
from boto.sdb.queryresultset import SelectResultSet
from boto.compat import six


class Domain(object):
    def __init__(self, connection=None, name=None):
        self.connection = connection
        self.name = name
        self._metadata = None

    def __repr__(self):
        return 'Domain:%s' % self.name

    def __iter__(self):
        return iter(self.select("SELECT * FROM `%s`" % self.name))

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'DomainName':
            self.name = value
        else:
            setattr(self, name, value)

    def get_metadata(self):
        if not self._metadata:
            self._metadata = self.connection.domain_metadata(self)
        return self._metadata

    def put_attributes(self, item_name, attributes,
                       replace=True, expected_value=None):
        """
        Store attributes for a given item.

        :type item_name: string
        :param item_name: The name of the item whose attributes are being
                          stored.

        :type attributes: dict or dict-like object
        :param attributes: The name/value pairs to store as attributes

        :type expected_value: list
        :param expected_value: If supplied, this is a list or tuple consisting
            of a single attribute name and expected value. The list can be
            of the form:

                * ['name', 'value']

            In which case the call will first verify that the attribute
            "name" of this item has a value of "value".  If it does, the put
            will proceed, otherwise a ConditionalCheckFailed error will be
            returned. The list can also be of the form:

                * ['name', True|False]

            which will simply check for the existence (True) or
            non-existence (False) of the attribute.

        :type replace: bool
        :param replace: Whether the attribute values passed in will replace
                        existing values or will be added as additional values.
                        Defaults to True.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.put_attributes(self, item_name, attributes,
                                              replace, expected_value)

    def batch_put_attributes(self, items, replace=True):
        """
        Store attributes for multiple items.

        :type items: dict or dict-like object
        :param items: A dictionary-like object.  The keys of the dictionary
                      are the item names and the values are themselves
                      dictionaries of attribute names/values, exactly the
                      same as the attributes parameter of the scalar
                      put_attributes call.

        :type replace: bool
        :param replace: Whether the attribute values passed in will replace
                        existing values or will be added as additional values.
                        Defaults to True.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.batch_put_attributes(self, items, replace)

    def get_attributes(self, item_name, attribute_name=None,
                       consistent_read=False, item=None):
        """
        Retrieve attributes for a given item.

        :type item_name: string
        :param item_name: The name of the item whose attributes are being
                          retrieved.

        :type attribute_name: string or list of strings
        :param attribute_name: An attribute name or list of attribute names.
            This parameter is optional.  If not supplied, all attributes
            will be retrieved for the item.

        :rtype: :class:`boto.sdb.item.Item`
        :return: An Item mapping type containing the requested attribute
                 name/values
        """
        return self.connection.get_attributes(self, item_name, attribute_name,
                                              consistent_read, item)

    def delete_attributes(self, item_name, attributes=None,
                          expected_values=None):
        """
        Delete attributes from a given item.

        :type item_name: string
        :param item_name: The name of the item whose attributes are being
                          deleted.

        :type attributes: dict, list or :class:`boto.sdb.item.Item`
        :param attributes: Either a list containing attribute names which
            will cause all values associated with that attribute name to
            be deleted or a dict or Item containing the attribute names
            and keys and list of values to delete as the value. If no
            value is supplied, all attribute name/values for the item
            will be deleted.

        :type expected_values: list
        :param expected_values: If supplied, this is a list or tuple
            consisting of a single attribute name and expected value.
            The list can be of the form:

                * ['name', 'value']

            In which case the call will first verify that the attribute
            "name" of this item has a value of "value".  If it does, the
            delete will proceed, otherwise a ConditionalCheckFailed error
            will be returned. The list can also be of the form:

                * ['name', True|False]

            which will simply check for the existence (True) or
            non-existence (False) of the attribute.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.delete_attributes(self, item_name, attributes,
                                                 expected_values)

    def batch_delete_attributes(self, items):
        """
        Delete multiple items in this domain.

        :type items: dict or dict-like object
        :param items: A dictionary-like object.  The keys of the dictionary
            are the item names and the values are either:

                * dictionaries of attribute names/values, exactly the
                  same as the attributes parameter of the scalar
                  put_attributes call.  The attribute name/value pairs
                  will only be deleted if they match the name/value
                  pairs passed in.
                * None which means that all attributes associated
                  with the item should be deleted.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.batch_delete_attributes(self, items)

    def select(self, query='', next_token=None, consistent_read=False,
               max_items=None):
        """
        Returns a set of Attributes for item names within domain_name that
        match the query. The query must be expressed using the SELECT style
        syntax rather than the original SimpleDB query language.

        :type query: string
        :param query: The SimpleDB query to be performed.

        :rtype: iter
        :return: An iterator containing the results.  This is actually a
                 generator function that will iterate across all search
                 results, not just the first page.
        """
        return SelectResultSet(self, query, max_items=max_items,
                               next_token=next_token,
                               consistent_read=consistent_read)

    def get_item(self, item_name, consistent_read=False):
        """
        Retrieves an item from the domain, along with all of its attributes.

        :param string item_name: The name of the item to retrieve.
        :rtype: :class:`boto.sdb.item.Item` or ``None``
        :keyword bool consistent_read: When set to true, ensures that the most
            recent data is returned.
        :return: The requested item, or ``None`` if there was no match found
        """
        item = self.get_attributes(item_name, consistent_read=consistent_read)
        if item:
            item.domain = self
            return item
        else:
            return None

    def new_item(self, item_name):
        return self.connection.item_cls(self, item_name)

    def delete_item(self, item):
        self.delete_attributes(item.name)

    def to_xml(self, f=None):
        """Get this domain as an XML DOM Document

        :param f: Optional File to dump directly to
        :type f: File or Stream

        :return: File object where the XML has been dumped to
        :rtype: file
        """
        if not f:
            from tempfile import TemporaryFile
            f = TemporaryFile()
        print('<?xml version="1.0" encoding="UTF-8"?>', file=f)
        print('<Domain id="%s">' % self.name, file=f)
        for item in self:
            print('\t<Item id="%s">' % item.name, file=f)
            for k in item:
                print('\t\t<attribute id="%s">' % k, file=f)
                values = item[k]
                if not isinstance(values, list):
                    values = [values]
                for value in values:
                    print('\t\t\t<value><![CDATA[', end=' ', file=f)
                    if isinstance(value, six.text_type):
                        value = value.encode('utf-8', 'replace')
                    else:
                        value = six.text_type(value, errors='replace').encode('utf-8', 'replace')
                    f.write(value)
                    print(']]></value>', file=f)
                print('\t\t</attribute>', file=f)
            print('\t</Item>', file=f)
        print('</Domain>', file=f)
        f.flush()
        f.seek(0)
        return f

    def from_xml(self, doc):
        """Load this domain based on an XML document"""
        import xml.sax
        handler = DomainDumpParser(self)
        xml.sax.parse(doc, handler)
        return handler

    def delete(self):
        """
        Delete this domain, and all items under it
        """
        return self.connection.delete_domain(self)


class DomainMetaData(object):
    def __init__(self, domain=None):
        self.domain = domain
        self.item_count = None
        self.item_names_size = None
        self.attr_name_count = None
        self.attr_names_size = None
        self.attr_value_count = None
        self.attr_values_size = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'ItemCount':
            self.item_count = int(value)
        elif name == 'ItemNamesSizeBytes':
            self.item_names_size = int(value)
        elif name == 'AttributeNameCount':
            self.attr_name_count = int(value)
        elif name == 'AttributeNamesSizeBytes':
            self.attr_names_size = int(value)
        elif name == 'AttributeValueCount':
            self.attr_value_count = int(value)
        elif name == 'AttributeValuesSizeBytes':
            self.attr_values_size = int(value)
        elif name == 'Timestamp':
            self.timestamp = value
        else:
            setattr(self, name, value)

import sys
from xml.sax.handler import ContentHandler


class DomainDumpParser(ContentHandler):
    """
    SAX parser for a domain that has been dumped
    """
    def __init__(self, domain):
        self.uploader = UploaderThread(domain)
        self.item_id = None
        self.attrs = {}
        self.attribute = None
        self.value = ""
        self.domain = domain

    def startElement(self, name, attrs):
        if name == "Item":
            self.item_id = attrs['id']
            self.attrs = {}
        elif name == "attribute":
            self.attribute = attrs['id']
        elif name == "value":
            self.value = ""

    def characters(self, ch):
        self.value += ch

    def endElement(self, name):
        if name == "value":
            if self.value and self.attribute:
                value = self.value.strip()
                attr_name = self.attribute.strip()
                if attr_name in self.attrs:
                    self.attrs[attr_name].append(value)
                else:
                    self.attrs[attr_name] = [value]
        elif name == "Item":
            self.uploader.items[self.item_id] = self.attrs
            # Every 20 items we spawn off the uploader
            if len(self.uploader.items) >= 20:
                self.uploader.start()
                self.uploader = UploaderThread(self.domain)
        elif name == "Domain":
            # If we're done, spawn off our last Uploader Thread
            self.uploader.start()

from threading import Thread


class UploaderThread(Thread):
    """Uploader Thread"""
    def __init__(self, domain):
        self.db = domain
        self.items = {}
        super(UploaderThread, self).__init__()

    def run(self):
        try:
            self.db.batch_put_attributes(self.items)
        except:
            print("Exception using batch put, trying regular put instead")
            for item_name in self.items:
                self.db.put_attributes(item_name, self.items[item_name])
        print(".", end=' ')
        sys.stdout.flush()
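
# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch, not part of the original module. The
# region name and domain name below are illustrative placeholders:
#
#     import boto.sdb
#     conn = boto.sdb.connect_to_region('us-east-1')
#     domain = conn.create_domain('my_domain')      # returns a Domain object
#     domain.put_attributes('item1', {'color': 'red', 'size': 'small'})
#     item = domain.get_item('item1', consistent_read=True)
#     for result in domain.select("SELECT * FROM `my_domain`"):
#         print(result)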
mit
2,454,436,343,189,231,600
2,773,712,995,023,855,600
36.765789
97
0.586788
false
bplancher/odoo
openerp/service/report.py
56
5118
# -*- coding: utf-8 -*-
import base64
import logging
import sys
import threading

import openerp
import openerp.report
from openerp import tools
from openerp.exceptions import UserError

import security

_logger = logging.getLogger(__name__)

# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state:
#     False -> True

self_reports = {}
self_id = 0
self_id_protect = threading.Semaphore()


def dispatch(method, params):
    (db, uid, passwd) = params[0:3]
    threading.current_thread().uid = uid
    params = params[3:]
    if method not in ['report', 'report_get', 'render_report']:
        raise KeyError("Method not supported %s" % method)
    security.check(db, uid, passwd)
    openerp.modules.registry.RegistryManager.check_registry_signaling(db)
    fn = globals()['exp_' + method]
    res = fn(db, uid, *params)
    openerp.modules.registry.RegistryManager.signal_caches_change(db)
    return res


def exp_render_report(db, uid, object, ids, datas=None, context=None):
    if not datas:
        datas = {}
    if not context:
        context = {}

    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    cr = openerp.registry(db).cursor()
    try:
        result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
        if not result:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
        self_reports[id]['result'] = result
        self_reports[id]['format'] = format
        self_reports[id]['state'] = True
    except Exception, exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
        self_reports[id]['state'] = True
    cr.commit()
    cr.close()

    return _check_report(id)


def exp_report(db, uid, object, ids, datas=None, context=None):
    if not datas:
        datas = {}
    if not context:
        context = {}

    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    def go(id, uid, ids, datas, context):
        with openerp.api.Environment.manage():
            cr = openerp.registry(db).cursor()
            try:
                result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
                if not result:
                    tb = sys.exc_info()
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
                self_reports[id]['result'] = result
                self_reports[id]['format'] = format
                self_reports[id]['state'] = True
            except Exception, exception:
                _logger.exception('Exception: %s\n', exception)
                if hasattr(exception, 'name') and hasattr(exception, 'value'):
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
                else:
                    tb = sys.exc_info()
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
                self_reports[id]['state'] = True
            cr.commit()
            cr.close()
        return True

    threading.Thread(target=go, args=(id, uid, ids, datas, context)).start()
    return id


def _check_report(report_id):
    result = self_reports[report_id]
    exc = result['exception']
    if exc:
        raise UserError('%s: %s' % (exc.message, exc.traceback))
    res = {'state': result['state']}
    if res['state']:
        if tools.config['reportgz']:
            import zlib
            res2 = zlib.compress(result['result'])
            res['code'] = 'zlib'
        else:
            # CHECKME: why is this needed???
            if isinstance(result['result'], unicode):
                res2 = result['result'].encode('latin1', 'replace')
            else:
                res2 = result['result']
        if res2:
            res['result'] = base64.encodestring(res2)
        res['format'] = result['format']
        del self_reports[report_id]
    return res


def exp_report_get(db, uid, report_id):
    if report_id in self_reports:
        if self_reports[report_id]['uid'] == uid:
            return _check_report(report_id)
        else:
            raise Exception, 'AccessDenied'
    else:
        raise Exception, 'ReportNotFound'
agpl-3.0
4,059,518,195,687,956,000
-1,253,499,960,210,526,500
34.054795
167
0.601993
false
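The module above registers background report jobs in a shared dict keyed by a semaphore-protected counter, then lets clients poll for completion. Below is a minimal, self-contained sketch of that submit-then-poll pattern with a dummy worker standing in for openerp.report.render_report; the names submit/poll are illustrative, not part of OpenERP.

import threading
import time

_reports = {}                       # job id -> {'result': ..., 'state': ...}
_next_id = 0
_id_lock = threading.Semaphore()    # same primitive as self_id_protect above

def submit(payload):
    """Start a background job and return its id, like exp_report()."""
    global _next_id
    _id_lock.acquire()
    _next_id += 1
    job_id = _next_id
    _id_lock.release()
    _reports[job_id] = {'result': None, 'state': False}

    def go(job_id, payload):
        # stand-in for openerp.report.render_report()
        _reports[job_id]['result'] = payload.upper()
        _reports[job_id]['state'] = True

    threading.Thread(target=go, args=(job_id, payload)).start()
    return job_id

def poll(job_id):
    """Return the job state/result, like exp_report_get() + _check_report()."""
    entry = _reports[job_id]
    if entry['state']:
        return {'state': True, 'result': entry['result']}
    return {'state': False}

job = submit('invoice')
while not poll(job)['state']:
    time.sleep(0.05)
print(poll(job)['result'])   # INVOICE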
Lilykos/invenio
invenio/legacy/bibrank/adminlib.py
13
43633
# This file is part of Invenio. # Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # Youshould have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Invenio BibRank Administrator Interface.""" __revision__ = "$Id$" import os import ConfigParser from invenio.config import \ CFG_SITE_LANG, \ CFG_SITE_URL from invenio.base.helpers import utf8ifier import invenio.modules.access.engine as acce from invenio.base.i18n import language_list_long from invenio.legacy.dbquery import run_sql, wash_table_column_name from invenio.modules.ranker.registry import configuration def getnavtrail(previous=''): navtrail = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % ( CFG_SITE_URL,) navtrail = navtrail + previous return navtrail def check_user(req, role, adminarea=2, authorized=0): (auth_code, auth_message) = is_adminuser(req, role) if not authorized and auth_code != 0: return ("false", auth_message) return ("", auth_message) def is_adminuser(req, role): """check if user is a registered administrator. """ return acce.acc_authorize_action(req, role) def perform_index(ln=CFG_SITE_LANG): """create the bibrank main area menu page.""" header = ['Code', 'Translations', 'Collections', 'Rank method'] rnk_list = get_def_name('', "rnkMETHOD") actions = [] for (rnkID, name) in rnk_list: actions.append([name]) for col in [(('Modify', 'modifytranslations'),), (('Modify', 'modifycollection'),), (('Show Details', 'showrankdetails'), ('Modify', 'modifyrank'), ('Delete', 'deleterank'))]: actions[-1].append('<a href="%s/admin/bibrank/bibrankadmin.py/%s?rnkID=%s&amp;ln=%s">%s</a>' % (CFG_SITE_URL, col[0][1], rnkID, ln, col[0][0])) for (str, function) in col[1:]: actions[-1][-1] += ' / <a href="%s/admin/bibrank/bibrankadmin.py/%s?rnkID=%s&amp;ln=%s">%s</a>' % ( CFG_SITE_URL, function, rnkID, ln, str) output = """ <a href="%s/admin/bibrank/bibrankadmin.py/addrankarea?ln=%s">Add new rank method</a><br /><br /> """ % (CFG_SITE_URL, ln) output += tupletotable(header=header, tuple=actions) return addadminbox("""Overview of rank methods&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mi">?</a>]</small>""" % CFG_SITE_URL, datalist=[output, '']) def perform_modifycollection(rnkID='', ln=CFG_SITE_LANG, func='', colID='', confirm=0): """Modify which collections the rank method is visible to""" output = "" subtitle = "" if rnkID: rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1] if func in ["0", 0] and confirm in ["1", 1]: finresult = attach_col_rnk(rnkID, colID) elif func in ["1", 1] and confirm in ["1", 1]: finresult = detach_col_rnk(rnkID, colID) if colID: colNAME = get_def_name(colID, "collection")[0][1] subtitle = """Step 1 - Select collection to enable/disable rank method '%s' for""" % rnkNAME output = """ <dl> <dt>The rank method is currently enabled for these collections:</dt> <dd> """ col_list = get_rnk_col(rnkID, ln) if not 
col_list: output += """No collections""" else: for (id, name) in col_list: output += """%s, """ % name output += """</dd> </dl> """ col_list = get_def_name('', "collection") col_rnk = dict(get_rnk_col(rnkID)) col_list = filter(lambda x: x[0] not in col_rnk, col_list) if col_list: text = """ <span class="adminlabel">Enable for:</span> <select name="colID" class="admin_w200"> <option value="">- select collection -</option> """ for (id, name) in col_list: text += """<option value="%s" %s>%s</option>""" % (id, (func in ["0", 0] and confirm in [ "0", 0] and colID and int(colID) == int(id)) and 'selected="selected"' or '', name) text += """</select>""" output += createhiddenform(action="modifycollection", text=text, button="Enable", rnkID=rnkID, ln=ln, func=0, confirm=1) if confirm in ["0", 0] and func in ["0", 0] and colID: subtitle = "Step 2 - Confirm to enable rank method for the chosen collection" text = "<b><p>Please confirm to enable rank method '%s' for the collection '%s'</p></b>" % ( rnkNAME, colNAME) output += createhiddenform(action="modifycollection", text=text, button="Confirm", rnkID=rnkID, ln=ln, colID=colID, func=0, confirm=1) elif confirm in ["1", 1] and func in ["0", 0] and colID: subtitle = "Step 3 - Result" output += write_outcome(finresult) elif confirm not in ["0", 0] and func in ["0", 0]: output += """<b><span class="info">Please select a collection.</span></b>""" col_list = get_rnk_col(rnkID, ln) if col_list: text = """ <span class="adminlabel">Disable for:</span> <select name="colID" class="admin_w200"> <option value="">- select collection -</option> """ for (id, name) in col_list: text += """<option value="%s" %s>%s</option>""" % (id, (func in ["1", 1] and confirm in [ "0", 0] and colID and int(colID) == int(id)) and 'selected="selected"' or '', name) text += """</select>""" output += createhiddenform(action="modifycollection", text=text, button="Disable", rnkID=rnkID, ln=ln, func=1, confirm=1) if confirm in ["1", 1] and func in ["1", 1] and colID: subtitle = "Step 3 - Result" output += write_outcome(finresult) elif confirm not in ["0", 0] and func in ["1", 1]: output += """<b><span class="info">Please select a collection.</span></b>""" body = [output] return addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mc">?</a>]</small>""" % CFG_SITE_URL, body) def perform_modifytranslations(rnkID, ln, sel_type, trans, confirm, callback='yes'): """Modify the translations of a rank method""" output = '' subtitle = '' langs = get_languages() langs.sort() if confirm in ["2", 2] and rnkID: finresult = modify_translations( rnkID, langs, sel_type, trans, "rnkMETHOD") rnk_name = get_def_name(rnkID, "rnkMETHOD")[0][1] rnk_dict = dict( get_i8n_name('', ln, get_rnk_nametypes()[0][0], "rnkMETHOD")) if rnkID and int(rnkID) in rnk_dict: rnkID = int(rnkID) subtitle = """<a name="3">3. 
Modify translations for rank method '%s'</a>""" % rnk_name if type(trans) is str: trans = [trans] if sel_type == '': sel_type = get_rnk_nametypes()[0][0] header = ['Language', 'Translation'] actions = [] text = """ <span class="adminlabel">Name type</span> <select name="sel_type" class="admin_w200"> """ types = get_rnk_nametypes() if len(types) > 1: for (key, value) in types: text += """<option value="%s" %s>%s""" % ( key, key == sel_type and 'selected="selected"' or '', value) trans_names = get_name(rnkID, ln, key, "rnkMETHOD") if trans_names and trans_names[0][0]: text += ": %s" % trans_names[0][0] text += "</option>" text += """</select>""" output += createhiddenform(action="modifytranslations", text=text, button="Select", rnkID=rnkID, ln=ln, confirm=0) if confirm in [-1, "-1", 0, "0"]: trans = [] for key, value in langs: try: trans_names = get_name(rnkID, key, sel_type, "rnkMETHOD") trans.append(trans_names[0][0]) except StandardError as e: trans.append('') for nr in range(0, len(langs)): actions.append(["%s" % (langs[nr][1],)]) actions[-1].append( '<input type="text" name="trans" size="30" value="%s"/>' % trans[nr]) text = tupletotable(header=header, tuple=actions) output += createhiddenform(action="modifytranslations", text=text, button="Modify", rnkID=rnkID, sel_type=sel_type, ln=ln, confirm=2) if sel_type and len(trans) and confirm in ["2", 2]: output += write_outcome(finresult) body = [output] return addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mt">?</a>]</small>""" % CFG_SITE_URL, body) def perform_addrankarea(rnkcode='', ln=CFG_SITE_LANG, template='', confirm=-1): """form to add a new rank method with these values:""" subtitle = 'Step 1 - Create new rank method' output = """ <dl> <dt>BibRank code:</dt> <dd>A unique code that identifies a rank method, is used when running the bibrank daemon and used to name the configuration file for the method. <br />The template files includes the necessary parameters for the chosen rank method, and only needs to be edited with the correct tags and paths. 
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section about adding a rank method</dd> </dl> """ % CFG_SITE_URL text = """ <span class="adminlabel">BibRank code</span> <input class="admin_wvar" type="text" name="rnkcode" value="%s" /> """ % (rnkcode) text += """<br /> <span class="adminlabel">Cfg template</span> <select name="template" class="admin_w200"> <option value="">No template</option> """ templates = get_templates() for templ in templates: text += """<option value="%s" %s>%s</option>""" % ( templ, template == templ and 'selected="selected"' or '', templ[9:len(templ) - 4]) text += """</select>""" output += createhiddenform(action="addrankarea", text=text, button="Add rank method", ln=ln, confirm=1) if rnkcode: if confirm in ["0", 0]: subtitle = 'Step 2 - Confirm addition of rank method' text = """<b>Add rank method with BibRank code: '%s'.</b>""" % ( rnkcode) if template: text += """<br /><b>Using configuration template: '%s'.</b>""" % ( template) else: text += """<br /><b>Create empty configuration file.</b>""" output += createhiddenform(action="addrankarea", text=text, rnkcode=rnkcode, button="Confirm", template=template, confirm=1) elif confirm in ["1", 1]: rnkID = add_rnk(rnkcode) subtitle = "Step 3 - Result" if rnkID[0] == 1: rnkID = rnkID[1] text = """<b><span class="info">Added new rank method with BibRank code '%s'</span></b>""" % rnkcode try: if template: infile = open(configuration.get(template, ''), 'r') indata = infile.readlines() infile.close() else: indata = () file = open( configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), 'w') for line in indata: file.write(line) file.close() if template: text += """<b><span class="info"><br />Configuration file created using '%s' as template.</span></b>""" % template else: text += """<b><span class="info"><br />Empty configuration file created.</span></b>""" except StandardError as e: text += """<b><span class="info"><br />Sorry, could not create configuration file: '%s.cfg', either because it already exists, or not enough rights to create file. <br />Please create the file in the path given.</span></b>""" % ( configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), ) else: text = """<b><span class="info">Sorry, could not add rank method, rank method with the same BibRank code probably exists.</span></b>""" output += text elif not rnkcode and confirm not in [-1, "-1"]: output += """<b><span class="info">Sorry, could not add rank method, not enough data submitted.</span></b>""" body = [output] return addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#ar">?</a>]</small>""" % CFG_SITE_URL, body) def perform_modifyrank(rnkID, rnkcode='', ln=CFG_SITE_LANG, template='', cfgfile='', confirm=0): """form to modify a rank method rnkID - id of the rank method """ if not rnkID: return "No ranking method selected." if not get_rnk_code(rnkID): return "Ranking method %s does not seem to exist." % str(rnkID) subtitle = 'Step 1 - Please modify the wanted values below' if not rnkcode: oldcode = get_rnk_code(rnkID)[0] else: oldcode = rnkcode output = """ <dl> <dd>When changing the BibRank code of a rank method, you must also change any scheduled tasks using the old value. 
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section about modifying a rank method's BibRank code.</dd> </dl> """ % CFG_SITE_URL text = """ <span class="adminlabel">BibRank code</span> <input class="admin_wvar" type="text" name="rnkcode" value="%s" /> <br /> """ % (oldcode) try: text += """<span class="adminlabel">Cfg file</span>""" textarea = "" if cfgfile: textarea += cfgfile else: file = open( configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', '')) for line in file.readlines(): textarea += line text += """<textarea class="admin_wvar" name="cfgfile" rows="15" cols="70">""" + \ textarea + """</textarea>""" except StandardError as e: text += """<b><span class="info">Cannot load file, either it does not exist, or not enough rights to read it: '%s.cfg'<br />Please create the file in the path given.</span></b>""" % ( configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), ) output += createhiddenform(action="modifyrank", text=text, rnkID=rnkID, button="Modify", confirm=1) if rnkcode and confirm in ["1", 1] and get_rnk_code(rnkID)[0][0] != rnkcode: oldcode = get_rnk_code(rnkID)[0][0] result = modify_rnk(rnkID, rnkcode) subtitle = "Step 3 - Result" if result: text = """<b><span class="info">Rank method modified.</span></b>""" try: file = open(configuration.get(oldcode + '.cfg', ''), 'r') file2 = open(configuration.get(rnkcode + '.cfg', ''), 'w') lines = file.readlines() for line in lines: file2.write(line) file.close() file2.close() os.remove(configuration.get(oldcode + '.cfg', '')) except StandardError as e: text = """<b><span class="info">Sorry, could not change name of cfg file, must be done manually: '%s.cfg'</span></b>""" % ( configuration.get(oldcode + '.cfg', ''), ) else: text = """<b><span class="info">Sorry, could not modify rank method.</span></b>""" output += text if cfgfile and confirm in ["1", 1]: try: file = open( configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), 'w') file.write(cfgfile) file.close() text = """<b><span class="info"><br />Configuration file modified: '%s/bibrank/%s.cfg'</span></b>""" % ( configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), ) except StandardError as e: text = """<b><span class="info"><br />Sorry, could not modify configuration file, please check for rights to do so: '%s.cfg'<br />Please modify the file manually.</span></b>""" % ( configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), ) output += text finoutput = addadminbox( subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mr">?</a>]</small>""" % CFG_SITE_URL, [output]) output = "" text = """ <span class="adminlabel">Select</span> <select name="template" class="admin_w200"> <option value="">- select template -</option> """ templates = get_templates() for templ in templates: text += """<option value="%s" %s>%s</option>""" % ( templ, template == templ and 'selected="selected"' or '', templ[9:len(templ) - 4]) text += """</select><br />""" output += createhiddenform(action="modifyrank", text=text, rnkID=rnkID, button="Show template", confirm=0) try: if template: textarea = "" text = """<span class="adminlabel">Content:</span>""" file = open(configuration.get(template, ''), 'r') lines = file.readlines() for line in lines: textarea += line file.close() text += """<textarea class="admin_wvar" readonly="true" rows="15" cols="70">""" + \ textarea + """</textarea>""" output += text except StandardError as e: output += """Cannot load file, 
either it does not exist, or not enough rights to read it: '%s'""" % ( configuration.get(template, ''), ) finoutput += addadminbox("View templates", [output]) return finoutput def perform_deleterank(rnkID, ln=CFG_SITE_LANG, confirm=0): """form to delete a rank method """ subtitle = '' output = """ <span class="warning"> <dl> <dt><strong>WARNING:</strong></dt> <dd><strong>When deleting a rank method, you also deletes all data related to the rank method, like translations, which collections it was attached to and the data necessary to rank the searchresults. Any scheduled tasks using the deleted rank method will also stop working. <br /><br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section regarding deleting a rank method.</strong></dd> </dl> </span> """ % CFG_SITE_URL if rnkID: if confirm in ["0", 0]: rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1] subtitle = 'Step 1 - Confirm deletion' text = """Delete rank method '%s'.""" % (rnkNAME) output += createhiddenform(action="deleterank", text=text, button="Confirm", rnkID=rnkID, confirm=1) elif confirm in ["1", 1]: try: rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1] rnkcode = get_rnk_code(rnkID)[0][0] table = "" try: config = ConfigParser.ConfigParser() config.readfp( open(configuration.get(rnkcode + ".cfg"), 'r')) table = config.get( config.get('rank_method', "function"), "table") except Exception: pass result = delete_rnk(rnkID, table) subtitle = "Step 2 - Result" if result: text = """<b><span class="info">Rank method deleted</span></b>""" try: os.remove(configuration.get(rnkcode + ".cfg")) text += """<br /><b><span class="info">Configuration file deleted: '%s.cfg'.</span></b>""" % ( configuration.get(rnkcode + ".cfg"), ) except StandardError as e: text += """<br /><b><span class="info">Sorry, could not delete configuration file: '%s/bibrank/%s.cfg'.</span><br />Please delete the file manually.</span></b>""" % ( configuration.get(rnkcode + ".cfg"), ) else: text = """<b><span class="info">Sorry, could not delete rank method</span></b>""" except StandardError as e: text = """<b><span class="info">Sorry, could not delete rank method, most likely already deleted</span></b>""" output = text body = [output] return addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#dr">?</a>]</small>""" % CFG_SITE_URL, body) def perform_showrankdetails(rnkID, ln=CFG_SITE_LANG): """Returns details about the rank method given by rnkID""" if not rnkID: return "No ranking method selected." if not get_rnk_code(rnkID): return "Ranking method %s does not seem to exist." 
% str(rnkID) subtitle = """Overview <a href="%s/admin/bibrank/bibrankadmin.py/modifyrank?rnkID=%s&amp;ln=%s">[Modify]</a>""" % ( CFG_SITE_URL, rnkID, ln) text = """ BibRank code: %s<br /> Last updated by BibRank: """ % (get_rnk_code(rnkID)[0][0]) if get_rnk(rnkID)[0][2]: text += "%s<br />" % get_rnk(rnkID)[0][2] else: text += "Not yet run.<br />" output = addadminbox(subtitle, [text]) subtitle = """Rank method statistics""" text = "" try: text = "Not yet implemented" except StandardError as e: text = "BibRank not yet run, cannot show statistics for method" output += addadminbox(subtitle, [text]) subtitle = """Attached to collections <a href="%s/admin/bibrank/bibrankadmin.py/modifycollection?rnkID=%s&amp;ln=%s">[Modify]</a>""" % ( CFG_SITE_URL, rnkID, ln) text = "" col = get_rnk_col(rnkID, ln) for key, value in col: text += "%s<br />" % value if not col: text += "No collections" output += addadminbox(subtitle, [text]) subtitle = """Translations <a href="%s/admin/bibrank/bibrankadmin.py/modifytranslations?rnkID=%s&amp;ln=%s">[Modify]</a>""" % ( CFG_SITE_URL, rnkID, ln) prev_lang = '' trans = get_translations(rnkID) types = get_rnk_nametypes() types = dict(map(lambda x: (x[0], x[1]), types)) text = "" languages = dict(get_languages()) if trans: for lang, type, name in trans: if lang and lang in languages and type and name: if prev_lang != lang: prev_lang = lang text += """%s: <br />""" % (languages[lang]) if type in types: text += """<span style="margin-left: 10px">'%s'</span><span class="note">(%s)</span><br />""" % ( name, types[type]) else: text = """No translations exists""" output += addadminbox(subtitle, [text]) subtitle = """Configuration file: '%s/bibrank/%s.cfg' <a href="%s/admin/bibrank/bibrankadmin.py/modifyrank?rnkID=%s&amp;ln=%s">[Modify]</a>""" % ( CFG_ETCDIR, get_rnk_code(rnkID)[0][0], CFG_SITE_URL, rnkID, ln) text = "" try: file = open(configuration.get(get_rnk_code(rnkID)[0][0] + ".cfg", '')) text += """<pre>""" for line in file.readlines(): text += line text += """</pre>""" except StandardError as e: text = """Cannot load file, either it does not exist, or not enough rights to read it.""" output += addadminbox(subtitle, [text]) return output def compare_on_val(second, first): return cmp(second[1], first[1]) def get_rnk_code(rnkID): """Returns the name from rnkMETHOD based on argument rnkID - id from rnkMETHOD""" try: res = run_sql("SELECT name FROM rnkMETHOD where id=%s" % (rnkID)) return res except StandardError as e: return () def get_rnk(rnkID=''): """Return one or all rank methods rnkID - return the rank method given, or all if not given""" try: if rnkID: res = run_sql( "SELECT id,name,DATE_FORMAT(last_updated, '%%Y-%%m-%%d %%H:%%i:%%s') from rnkMETHOD WHERE id=%s" % rnkID) else: res = run_sql( "SELECT id,name,DATE_FORMAT(last_updated, '%%Y-%%m-%%d %%H:%%i:%%s') from rnkMETHOD") return res except StandardError as e: return () def get_translations(rnkID): """Returns the translations in rnkMETHODNAME for a rankmethod rnkID - the id of the rankmethod from rnkMETHOD """ try: res = run_sql( "SELECT ln, type, value FROM rnkMETHODNAME where id_rnkMETHOD=%s ORDER BY ln,type" % (rnkID)) return res except StandardError as e: return () def get_rnk_nametypes(): """Return a list of the various translationnames for the rank methods""" type = [] type.append(('ln', 'Long name')) #type.append(('sn', 'Short name')) return type def get_col_nametypes(): """Return a list of the various translationnames for the rank methods""" type = [] type.append(('ln', 'Long name')) return type def 
get_rnk_col(rnkID, ln=CFG_SITE_LANG): """ Returns a list of the collections the given rank method is attached to rnkID - id from rnkMETHOD""" try: res1 = dict(run_sql( "SELECT id_collection, '' FROM collection_rnkMETHOD WHERE id_rnkMETHOD=%s" % rnkID)) res2 = get_def_name('', "collection") result = filter(lambda x: x[0] in res1, res2) return result except StandardError as e: return () def get_templates(): """Read CFG_ETCDIR/bibrank and returns a list of all files with 'template' """ templates = [] files = configuration.itervalues() for file in files: if str.find(file, "template_") != -1: templates.append(file) return templates def attach_col_rnk(rnkID, colID): """attach rank method to collection rnkID - id from rnkMETHOD table colID - id of collection, as in collection table """ try: res = run_sql( "INSERT INTO collection_rnkMETHOD(id_collection, id_rnkMETHOD) values (%s,%s)" % (colID, rnkID)) return (1, "") except StandardError as e: return (0, e) def detach_col_rnk(rnkID, colID): """detach rank method from collection rnkID - id from rnkMETHOD table colID - id of collection, as in collection table """ try: res = run_sql( "DELETE FROM collection_rnkMETHOD WHERE id_collection=%s AND id_rnkMETHOD=%s" % (colID, rnkID)) return (1, "") except StandardError as e: return (0, e) def delete_rnk(rnkID, table=""): """Deletes all data for the given rank method rnkID - delete all data in the tables associated with ranking and this id """ try: res = run_sql("DELETE FROM rnkMETHOD WHERE id=%s" % rnkID) res = run_sql( "DELETE FROM rnkMETHODNAME WHERE id_rnkMETHOD=%s" % rnkID) res = run_sql( "DELETE FROM collection_rnkMETHOD WHERE id_rnkMETHOD=%s" % rnkID) res = run_sql( "DELETE FROM rnkMETHODDATA WHERE id_rnkMETHOD=%s" % rnkID) if table: res = run_sql("truncate %s" % table) res = run_sql("truncate %sR" % table[:-1]) return (1, "") except StandardError as e: return (0, e) def modify_rnk(rnkID, rnkcode): """change the code for the rank method given rnkID - change in rnkMETHOD where id is like this rnkcode - new value for field 'name' in rnkMETHOD """ try: res = run_sql( "UPDATE rnkMETHOD set name=%s WHERE id=%s", (rnkcode, rnkID)) return (1, "") except StandardError as e: return (0, e) def add_rnk(rnkcode): """Adds a new rank method to rnkMETHOD rnkcode - the "code" for the rank method, to be used by bibrank daemon """ try: res = run_sql("INSERT INTO rnkMETHOD (name) VALUES (%s)", (rnkcode,)) res = run_sql("SELECT id FROM rnkMETHOD WHERE name=%s", (rnkcode,)) if res: return (1, res[0][0]) else: raise StandardError except StandardError as e: return (0, e) def addadminbox(header='', datalist=[], cls="admin_wvar"): """used to create table around main data on a page, row based. 
header - header on top of the table datalist - list of the data to be added row by row cls - possible to select wich css-class to format the look of the table.""" if len(datalist) == 1: per = '100' else: per = '75' output = '<table class="%s" ' % (cls, ) + 'width="95%">\n' output += """ <thead> <tr> <th class="adminheaderleft" colspan="%s">%s</th> </tr> </thead> <tbody> """ % (len(datalist), header) output += ' <tr>\n' output += """ <td style="vertical-align: top; margin-top: 5px; width: %s;"> %s </td> """ % (per + '%', datalist[0]) if len(datalist) > 1: output += """ <td style="vertical-align: top; margin-top: 5px; width: %s;"> %s </td> """ % ('25%', datalist[1]) output += ' </tr>\n' output += """ </tbody> </table> """ return output def tupletotable(header=[], tuple=[], start='', end='', extracolumn='', highlight_rows_p=False, alternate_row_colors_p=False): """create html table for a tuple. header - optional header for the columns tuple - create table of this start - text to be added in the beginning, most likely beginning of a form end - text to be added in the end, mot likely end of a form. extracolumn - mainly used to put in a button. highlight_rows_p - if the cursor hovering a row should highlight the full row or not alternate_row_colors_p - if alternate background colours should be used for the rows """ # study first row in tuple for alignment align = [] try: firstrow = tuple[0] if type(firstrow) in [int, long]: align = ['admintdright'] elif type(firstrow) in [str, dict]: align = ['admintdleft'] else: for item in firstrow: if type(item) is int: align.append('admintdright') else: align.append('admintdleft') except IndexError: firstrow = [] tblstr = '' for h in header + ['']: tblstr += ' <th class="adminheader">%s</th>\n' % (h, ) if tblstr: tblstr = ' <tr>\n%s\n </tr>\n' % (tblstr, ) tblstr = start + '<table class="admin_wvar_nomargin">\n' + tblstr # extra column try: extra = '<tr class="%s">' % ( highlight_rows_p and 'admin_row_highlight' or '') if type(firstrow) not in [int, long, str, dict]: # for data in firstrow: extra += '<td class="%s">%s</td>\n' % ('admintd', data) for i in range(len(firstrow)): extra += '<td class="{0}">{1}</td>\n'.format( align[i], firstrow[i]) else: extra += ' <td class="%s">%s</td>\n' % (align[0], firstrow) extra += '<td class="extracolumn" rowspan="%s" style="vertical-align: top;">\n%s\n</td>\n</tr>\n' % ( len(tuple), extracolumn) except IndexError: extra = '' tblstr += extra # for i in range(1, len(tuple)): j = 0 for row in tuple[1:]: j += 1 tblstr += ' <tr class="%s %s">\n' % (highlight_rows_p and 'admin_row_highlight' or '', (j % 2 and alternate_row_colors_p) and 'admin_row_color' or '') # row = tuple[i] if type(row) not in [int, long, str, dict]: # for data in row: tblstr += '<td class="admintd">%s</td>\n' % (data,) for i in range(len(row)): tblstr += '<td class="{0}">{1}</td>\n'.format(align[i], utf8ifier(row[i])) else: tblstr += ' <td class="%s">%s</td>\n' % (align[0], row) tblstr += ' </tr> \n' tblstr += '</table> \n ' tblstr += end return tblstr def tupletotable_onlyselected(header=[], tuple=[], selected=[], start='', end='', extracolumn=''): """create html table for a tuple. 
header - optional header for the columns tuple - create table of this selected - indexes of selected rows in the tuple start - put this in the beginning end - put this in the beginning extracolumn - mainly used to put in a button""" tuple2 = [] for index in selected: tuple2.append(tuple[int(index) - 1]) return tupletotable(header=header, tuple=tuple2, start=start, end=end, extracolumn=extracolumn) def addcheckboxes(datalist=[], name='authids', startindex=1, checked=[]): """adds checkboxes in front of the listdata. datalist - add checkboxes in front of this list name - name of all the checkboxes, values will be associated with this name startindex - usually 1 because of the header checked - values of checkboxes to be pre-checked """ if not type(checked) is list: checked = [checked] for row in datalist: # always box, check another place if 1 or row[0] not in [-1, "-1", 0, "0"]: chkstr = str(startindex) in checked and 'checked="checked"' or '' row.insert( 0, '<input type="checkbox" name="%s" value="%s" %s />' % (name, startindex, chkstr)) else: row.insert(0, '') startindex += 1 return datalist def createhiddenform(action="", text="", button="confirm", cnfrm='', **hidden): """create select with hidden values and submit button action - name of the action to perform on submit text - additional text, can also be used to add non hidden input button - value/caption on the submit button cnfrm - if given, must check checkbox to confirm **hidden - dictionary with name=value pairs for hidden input """ output = '<form action="%s" method="post">\n' % (action, ) output += '<table>\n<tr><td style="vertical-align: top">' # output += text.decode('utf-8') output += text if cnfrm: output += ' <input type="checkbox" name="confirm" value="1"/>' for key in hidden.keys(): if type(hidden[key]) is list: for value in hidden[key]: output += ' <input type="hidden" name="%s" value="%s"/>\n' % ( key, value) else: output += ' <input type="hidden" name="%s" value="%s"/>\n' % ( key, hidden[key]) output += '</td><td style="vertical-align: bottom">' output += ' <input class="btn btn-default" type="submit" value="%s"/>\n' % ( button, ) output += '</td></tr></table>' output += '</form>\n' return output def get_languages(): languages = [] for (lang, lang_namelong) in language_list_long(): languages.append((lang, lang_namelong)) languages.sort() return languages def get_def_name(ID, table): """Returns a list of the names, either with the name in the current language, the default language, or just the name from the given table ln - a language supported by Invenio type - the type of value wanted, like 'ln', 'sn'""" name = "name" if table[-1:].isupper(): name = "NAME" try: if ID: res = run_sql("SELECT id,name FROM %s where id=%s" % (table, ID)) else: res = run_sql("SELECT id,name FROM %s" % table) res = list(res) res.sort(compare_on_val) return res except StandardError as e: return [] def get_i8n_name(ID, ln, rtype, table): """Returns a list of the names, either with the name in the current language, the default language, or just the name from the given table ln - a language supported by Invenio type - the type of value wanted, like 'ln', 'sn'""" name = "name" if table[-1:].isupper(): name = "NAME" try: res = "" if ID: res = run_sql("SELECT id_%s,value FROM %s%s where type='%s' and ln='%s' and id_%s=%s" % ( table, table, name, rtype, ln, table, ID)) else: res = run_sql("SELECT id_%s,value FROM %s%s where type='%s' and ln='%s'" % ( table, table, name, rtype, ln)) if ln != CFG_SITE_LANG: if ID: res1 = run_sql("SELECT id_%s,value FROM 
%s%s WHERE ln='%s' and type='%s' and id_%s=%s" % ( table, table, name, CFG_SITE_LANG, rtype, table, ID)) else: res1 = run_sql("SELECT id_%s,value FROM %s%s WHERE ln='%s' and type='%s'" % ( table, table, name, CFG_SITE_LANG, rtype)) res2 = dict(res) result = filter(lambda x: x[0] not in res2, res1) res = res + result if ID: res1 = run_sql("SELECT id,name FROM %s where id=%s" % (table, ID)) else: res1 = run_sql("SELECT id,name FROM %s" % table) res2 = dict(res) result = filter(lambda x: x[0] not in res2, res1) res = res + result res = list(res) res.sort(compare_on_val) return res except StandardError as e: raise StandardError def get_name(ID, ln, rtype, table, id_column=None): """Returns the value from the table name based on arguments ID - id ln - a language supported by Invenio type - the type of value wanted, like 'ln', 'sn' table - tablename id_column - name of the column with identifier. If None, expect column to be named 'id_%s' % table """ name = "name" if table[-1:].isupper(): name = "NAME" if id_column: id_column = wash_table_column_name(id_column) try: res = run_sql("SELECT value FROM %s%s WHERE type='%s' and ln='%s' and %s=%s" % ( table, name, rtype, ln, (id_column or 'id_%s' % wash_table_column_name(table)), ID)) return res except StandardError as e: return () def modify_translations(ID, langs, sel_type, trans, table, id_column=None): """add or modify translations in tables given by table frmID - the id of the format from the format table sel_type - the name type langs - the languages trans - the translations, in same order as in langs table - the table id_column - name of the column with identifier. If None, expect column to be named 'id_%s' % table """ name = "name" if table[-1:].isupper(): name = "NAME" id_column = id_column or 'id_%s' % table if id_column: id_column = wash_table_column_name(id_column) try: for nr in range(0, len(langs)): res = run_sql("SELECT value FROM %s%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column), (ID, sel_type, langs[nr][0])) if res: if trans[nr]: res = run_sql("UPDATE %s%s SET value=%%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column), (trans[nr], ID, sel_type, langs[nr][0])) else: res = run_sql("DELETE FROM %s%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column), (ID, sel_type, langs[nr][0])) else: if trans[nr]: res = run_sql("INSERT INTO %s%s (%s, type, ln, value) VALUES (%%s,%%s,%%s,%%s)" % (table, name, id_column), (ID, sel_type, langs[nr][0], trans[nr])) return (1, "") except StandardError as e: return (0, e) def write_outcome(res): """ Write the outcome of an update of some settings. Parameter 'res' is a tuple (int, str), where 'int' is 0 when there is an error to display, and 1 when everything went fine. 'str' is a message displayed when there is an error. """ if res and res[0] == 1: return """<b><span class="info">Operation successfully completed.</span></b>""" elif res: return """<b><span class="info">Operation failed. Reason:</span></b><br />%s""" % res[1]
gpl-2.0
7,261,501,722,540,108,000
837,040,681,650,516,200
37.241017
249
0.528476
false
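For orientation, a hypothetical call to the tupletotable() helper defined in the record above, assuming an Invenio environment where the module is importable; the output shape is inferred from the function body, not verified against a running instance.

from invenio.legacy.bibrank.adminlib import tupletotable

html = tupletotable(header=['Code', 'Rank method'],
                    tuple=[(1, 'wrd'), (2, 'citation')])
# html is a '<table class="admin_wvar_nomargin">' string with a header row
# built from 'header' and one <tr> per row of 'tuple'.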
abdullah2891/remo
remo/profiles/helpers.py
3
2078
import urlparse

from datetime import timedelta

from django.utils import timezone

from django.conf import settings
from funfactory.helpers import urlparams
from jingo import register
from libravatar import libravatar_url

from remo.profiles.models import FunctionalArea, UserAvatar
from remo.reports.utils import get_last_report

INACTIVE_HIGH = timedelta(weeks=8)
INACTIVE_LOW = timedelta(weeks=4)


@register.filter
def get_avatar_url(user, size=50):
    """Get a url pointing to user's avatar.

    The libravatar network is used for avatars. Return cached value if
    its last update was less than 24 hours before. Optional argument
    size can be provided to set the avatar size.
    """
    if not user:
        return None

    default_img_url = reduce(lambda u, x: urlparse.urljoin(u, x),
                             [settings.SITE_URL,
                              settings.STATIC_URL,
                              'base/img/remo/remo_avatar.png'])

    user_avatar, created = UserAvatar.objects.get_or_create(user=user)
    now = timezone.now()

    if (user_avatar.last_update < now - timedelta(days=7)) or created:
        user_avatar.avatar_url = libravatar_url(email=user.email, https=True)
        user_avatar.save()

    avatar_url = urlparams(user_avatar.avatar_url, default=default_img_url)
    if size != -1:
        avatar_url = urlparams(avatar_url, size=size)

    return avatar_url


@register.filter
def get_functional_area(name):
    """Return the Functional Area object given the name."""
    try:
        return FunctionalArea.objects.get(name=name)
    except FunctionalArea.DoesNotExist:
        return None


@register.filter
def get_activity_level(user):
    """Return user's inactivity level."""
    last_report = get_last_report(user)
    if not last_report:
        return ''

    today = timezone.now().date()
    inactivity_period = today - last_report.report_date
    if inactivity_period > INACTIVE_LOW:
        if inactivity_period > INACTIVE_HIGH:
            return 'inactive-high'
        return 'inactive-low'
    return ''
bsd-3-clause
-2,103,764,005,154,446,300
6,167,720,536,372,204,000
26.706667
77
0.671319
false
TridevGuha/django
tests/utils_tests/test_regex_helper.py
448
1784
from __future__ import unicode_literals

import unittest

from django.utils import regex_helper


class NormalizeTests(unittest.TestCase):
    def test_empty(self):
        pattern = r""
        expected = [('', [])]
        result = regex_helper.normalize(pattern)
        self.assertEqual(result, expected)

    def test_escape(self):
        pattern = r"\\\^\$\.\|\?\*\+\(\)\["
        expected = [('\\^$.|?*+()[', [])]
        result = regex_helper.normalize(pattern)
        self.assertEqual(result, expected)

    def test_group_positional(self):
        pattern = r"(.*)-(.+)"
        expected = [('%(_0)s-%(_1)s', ['_0', '_1'])]
        result = regex_helper.normalize(pattern)
        self.assertEqual(result, expected)

    def test_group_ignored(self):
        pattern = r"(?i)(?L)(?m)(?s)(?u)(?#)"
        expected = [('', [])]
        result = regex_helper.normalize(pattern)
        self.assertEqual(result, expected)

    def test_group_noncapturing(self):
        pattern = r"(?:non-capturing)"
        expected = [('non-capturing', [])]
        result = regex_helper.normalize(pattern)
        self.assertEqual(result, expected)

    def test_group_named(self):
        pattern = r"(?P<first_group_name>.*)-(?P<second_group_name>.*)"
        expected = [('%(first_group_name)s-%(second_group_name)s',
                     ['first_group_name', 'second_group_name'])]
        result = regex_helper.normalize(pattern)
        self.assertEqual(result, expected)

    def test_group_backreference(self):
        pattern = r"(?P<first_group_name>.*)-(?P=first_group_name)"
        expected = [('%(first_group_name)s-%(first_group_name)s',
                     ['first_group_name'])]
        result = regex_helper.normalize(pattern)
        self.assertEqual(result, expected)
bsd-3-clause
466,398,305,479,188,500
1,739,822,239,708,223,700
33.980392
71
0.570628
false
DevinDewitt/pyqt5
examples/webkit/fancybrowser/fancybrowser.py
3
7927
#!/usr/bin/env python


#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited
## Copyright (C) 2010 Hans-Peter Jansen <[email protected]>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
##   * Redistributions of source code must retain the above copyright
##     notice, this list of conditions and the following disclaimer.
##   * Redistributions in binary form must reproduce the above copyright
##     notice, this list of conditions and the following disclaimer in
##     the documentation and/or other materials provided with the
##     distribution.
##   * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
##     the names of its contributors may be used to endorse or promote
##     products derived from this software without specific prior written
##     permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################


from PyQt5.QtCore import QFile, QIODevice, Qt, QTextStream, QUrl
from PyQt5.QtWidgets import (QAction, QApplication, QLineEdit, QMainWindow,
        QSizePolicy, QStyle, QTextEdit)
from PyQt5.QtNetwork import QNetworkProxyFactory, QNetworkRequest
from PyQt5.QtWebKitWidgets import QWebPage, QWebView

import jquery_rc


class MainWindow(QMainWindow):
    def __init__(self, url):
        super(MainWindow, self).__init__()

        self.progress = 0

        fd = QFile(":/jquery.min.js")

        if fd.open(QIODevice.ReadOnly | QFile.Text):
            self.jQuery = QTextStream(fd).readAll()
            fd.close()
        else:
            self.jQuery = ''

        QNetworkProxyFactory.setUseSystemConfiguration(True)

        self.view = QWebView(self)
        self.view.load(url)
        self.view.loadFinished.connect(self.adjustLocation)
        self.view.titleChanged.connect(self.adjustTitle)
        self.view.loadProgress.connect(self.setProgress)
        self.view.loadFinished.connect(self.finishLoading)

        self.locationEdit = QLineEdit(self)
        self.locationEdit.setSizePolicy(QSizePolicy.Expanding,
                self.locationEdit.sizePolicy().verticalPolicy())
        self.locationEdit.returnPressed.connect(self.changeLocation)

        toolBar = self.addToolBar("Navigation")
        toolBar.addAction(self.view.pageAction(QWebPage.Back))
        toolBar.addAction(self.view.pageAction(QWebPage.Forward))
        toolBar.addAction(self.view.pageAction(QWebPage.Reload))
        toolBar.addAction(self.view.pageAction(QWebPage.Stop))
        toolBar.addWidget(self.locationEdit)

        viewMenu = self.menuBar().addMenu("&View")
        viewSourceAction = QAction("Page Source", self)
        viewSourceAction.triggered.connect(self.viewSource)
        viewMenu.addAction(viewSourceAction)

        effectMenu = self.menuBar().addMenu("&Effect")
        effectMenu.addAction("Highlight all links", self.highlightAllLinks)

        self.rotateAction = QAction(
                self.style().standardIcon(QStyle.SP_FileDialogDetailedView),
                "Turn images upside down", self, checkable=True,
                toggled=self.rotateImages)
        effectMenu.addAction(self.rotateAction)

        toolsMenu = self.menuBar().addMenu("&Tools")
        toolsMenu.addAction("Remove GIF images", self.removeGifImages)
        toolsMenu.addAction("Remove all inline frames",
                self.removeInlineFrames)
        toolsMenu.addAction("Remove all object elements",
                self.removeObjectElements)
        toolsMenu.addAction("Remove all embedded elements",
                self.removeEmbeddedElements)

        self.setCentralWidget(self.view)

    def viewSource(self):
        accessManager = self.view.page().networkAccessManager()
        request = QNetworkRequest(self.view.url())
        reply = accessManager.get(request)
        reply.finished.connect(self.slotSourceDownloaded)

    def slotSourceDownloaded(self):
        reply = self.sender()
        self.textEdit = QTextEdit()
        self.textEdit.setAttribute(Qt.WA_DeleteOnClose)
        self.textEdit.show()
        self.textEdit.setPlainText(QTextStream(reply).readAll())
        self.textEdit.resize(600, 400)
        reply.deleteLater()

    def adjustLocation(self):
        self.locationEdit.setText(self.view.url().toString())

    def changeLocation(self):
        url = QUrl.fromUserInput(self.locationEdit.text())
        self.view.load(url)
        self.view.setFocus()

    def adjustTitle(self):
        if 0 < self.progress < 100:
            self.setWindowTitle("%s (%s%%)" % (self.view.title(), self.progress))
        else:
            self.setWindowTitle(self.view.title())

    def setProgress(self, p):
        self.progress = p
        self.adjustTitle()

    def finishLoading(self):
        self.progress = 100
        self.adjustTitle()
        self.view.page().mainFrame().evaluateJavaScript(self.jQuery)
        self.rotateImages(self.rotateAction.isChecked())

    def highlightAllLinks(self):
        code = """$('a').each( function () { $(this).css('background-color', 'yellow') } )"""
        self.view.page().mainFrame().evaluateJavaScript(code)

    def rotateImages(self, invert):
        if invert:
            code = """
                $('img').each( function () {
                    $(this).css('-webkit-transition', '-webkit-transform 2s');
                    $(this).css('-webkit-transform', 'rotate(180deg)')
                } )"""
        else:
            code = """
                $('img').each( function () {
                    $(this).css('-webkit-transition', '-webkit-transform 2s');
                    $(this).css('-webkit-transform', 'rotate(0deg)')
                } )"""

        self.view.page().mainFrame().evaluateJavaScript(code)

    def removeGifImages(self):
        code = "$('[src*=gif]').remove()"
        self.view.page().mainFrame().evaluateJavaScript(code)

    def removeInlineFrames(self):
        code = "$('iframe').remove()"
        self.view.page().mainFrame().evaluateJavaScript(code)

    def removeObjectElements(self):
        code = "$('object').remove()"
        self.view.page().mainFrame().evaluateJavaScript(code)

    def removeEmbeddedElements(self):
        code = "$('embed').remove()"
        self.view.page().mainFrame().evaluateJavaScript(code)


if __name__ == '__main__':

    import sys

    app = QApplication(sys.argv)

    if len(sys.argv) > 1:
        url = QUrl(sys.argv[1])
    else:
        url = QUrl('http://www.google.com/ncr')

    browser = MainWindow(url)
    browser.show()

    sys.exit(app.exec_())
gpl-3.0
-4,662,010,328,858,383,000
7,342,818,595,177,115,000
36.56872
83
0.628737
false
nexusriot/cinder
cinder/openstack/common/scheduler/filters/availability_zone_filter.py
26
1256
# Copyright (c) 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.openstack.common.scheduler import filters


class AvailabilityZoneFilter(filters.BaseHostFilter):
    """Filters Hosts by availability zone."""

    # Availability zones do not change within a request
    run_filter_once_per_request = True

    def host_passes(self, host_state, filter_properties):
        spec = filter_properties.get('request_spec', {})
        props = spec.get('resource_properties', {})
        availability_zone = props.get('availability_zone')

        if availability_zone:
            return availability_zone == host_state.service['availability_zone']
        return True
apache-2.0
-4,693,819,223,876,841,000
7,247,177,113,185,713,000
38.25
79
0.710191
false
KohlsTechnology/ansible
lib/ansible/playbook/play.py
11
12526
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible import constants as C
from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.vars.manager import preprocess_vars

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


__all__ = ['Play']


class Play(Base, Taggable, Become):

    """
    A play is a language feature that represents a list of roles and/or
    task/handler blocks to execute on a given set of hosts.

    Usage:

       Play.load(datastructure) -> Play
       Play.something(...)
    """

    # =================================================================================
    _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True)

    # Facts
    _fact_path = FieldAttribute(isa='string', default=None)
    _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
    _gather_subset = FieldAttribute(isa='barelist', default=None, always_post_validate=True)
    _gather_timeout = FieldAttribute(isa='int', default=None, always_post_validate=True)

    # Variable Attributes
    _vars_files = FieldAttribute(isa='list', default=[], priority=99)
    _vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True)
    _vault_password = FieldAttribute(isa='string', always_post_validate=True)

    # Role Attributes
    _roles = FieldAttribute(isa='list', default=[], priority=90)

    # Block (Task) Lists Attributes
    _handlers = FieldAttribute(isa='list', default=[])
    _pre_tasks = FieldAttribute(isa='list', default=[])
    _post_tasks = FieldAttribute(isa='list', default=[])
    _tasks = FieldAttribute(isa='list', default=[])

    # Flag/Setting Attributes
    _force_handlers = FieldAttribute(isa='bool', always_post_validate=True)
    _max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
    _serial = FieldAttribute(isa='list', default=[], always_post_validate=True)
    _strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
    _order = FieldAttribute(isa='string', always_post_validate=True)

    # =================================================================================

    def __init__(self):
        super(Play, self).__init__()

        self._included_conditional = None
        self._included_path = None
        self._removed_hosts = []
        self.ROLE_CACHE = {}

    def __repr__(self):
        return self.get_name()

    def get_name(self):
        ''' return the name of the Play '''
        return self._attributes.get('name')

    @staticmethod
    def load(data, variable_manager=None, loader=None, vars=None):
        if ('name' not in data or data['name'] is None) and 'hosts' in data:
            if isinstance(data['hosts'], list):
                data['name'] = ','.join(data['hosts'])
            else:
                data['name'] = data['hosts']
        p = Play()
        if vars:
            p.vars = vars.copy()
        return p.load_data(data, variable_manager=variable_manager, loader=loader)

    def preprocess_data(self, ds):
        '''
        Adjusts play datastructure to cleanup old/legacy items
        '''

        if not isinstance(ds, dict):
            raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))

        # The use of 'user' in the Play datastructure was deprecated to
        # line up with the same change for Tasks, due to the fact that
        # 'user' conflicted with the user module.
        if 'user' in ds:
            # this should never happen, but error out with a helpful message
            # to the user if it does...
            if 'remote_user' in ds:
                raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. "
                                         "The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)

            ds['remote_user'] = ds['user']
            del ds['user']

        return super(Play, self).preprocess_data(ds)

    def _load_tasks(self, attr, ds):
        '''
        Loads a list of blocks from a list which may be mixed tasks/blocks.
        Bare tasks outside of a block are given an implicit block.
        '''
        try:
            return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
        except AssertionError as e:
            raise AnsibleParserError("A malformed block was encountered while loading tasks", obj=self._ds, orig_exc=e)

    def _load_pre_tasks(self, attr, ds):
        '''
        Loads a list of blocks from a list which may be mixed tasks/blocks.
        Bare tasks outside of a block are given an implicit block.
        '''
        try:
            return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
        except AssertionError as e:
            raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)

    def _load_post_tasks(self, attr, ds):
        '''
        Loads a list of blocks from a list which may be mixed tasks/blocks.
        Bare tasks outside of a block are given an implicit block.
        '''
        try:
            return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
        except AssertionError as e:
            raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)

    def _load_handlers(self, attr, ds):
        '''
        Loads a list of blocks from a list which may be mixed handlers/blocks.
        Bare handlers outside of a block are given an implicit block.
        '''
        try:
            return self._extend_value(
                self.handlers,
                load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
                prepend=True
            )
        except AssertionError as e:
            raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)

    def _load_roles(self, attr, ds):
        '''
        Loads and returns a list of RoleInclude objects from the datastructure
        list of role definitions and creates the Role from those objects
        '''

        if ds is None:
            ds = []

        try:
            role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
        except AssertionError as e:
            raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)

        roles = []
        for ri in role_includes:
            roles.append(Role.load(ri, play=self))
        return roles

    def _load_vars_prompt(self, attr, ds):
        new_ds = preprocess_vars(ds)

        vars_prompts = []
        if new_ds is not None:
            for prompt_data in new_ds:
                if 'name' not in prompt_data:
                    display.deprecated("Using the 'short form' for vars_prompt has been deprecated", version="2.7")
                    for vname, prompt in prompt_data.items():
                        vars_prompts.append(dict(
                            name=vname,
                            prompt=prompt,
                            default=None,
                            private=None,
                            confirm=None,
                            encrypt=None,
                            salt_size=None,
                            salt=None,
                        ))
                else:
                    vars_prompts.append(prompt_data)
        return vars_prompts

    def _compile_roles(self):
        '''
        Handles the role compilation step, returning a flat list of tasks
        with the lowest level dependencies first. For example, if a role R
        has a dependency D1, which also has a dependency D2, the tasks from
        D2 are merged first, followed by D1, and lastly by the tasks from
        the parent role R last. This is done for all roles in the Play.
        '''

        block_list = []

        if len(self.roles) > 0:
            for r in self.roles:
                block_list.extend(r.compile(play=self))

        return block_list

    def compile_roles_handlers(self):
        '''
        Handles the role handler compilation step, returning a flat list of Handlers
        This is done for all roles in the Play.
        '''

        block_list = []

        if len(self.roles) > 0:
            for r in self.roles:
                block_list.extend(r.get_handler_blocks(play=self))

        return block_list

    def compile(self):
        '''
        Compiles and returns the task list for this play, compiled from the
        roles (which are themselves compiled recursively) and/or the list of
        tasks specified in the play.
        '''

        # create a block containing a single flush handlers meta
        # task, so we can be sure to run handlers at certain points
        # of the playbook execution
        flush_block = Block.load(
            data={'meta': 'flush_handlers'},
            play=self,
            variable_manager=self._variable_manager,
            loader=self._loader
        )

        block_list = []

        block_list.extend(self.pre_tasks)
        block_list.append(flush_block)
        block_list.extend(self._compile_roles())
        block_list.extend(self.tasks)
        block_list.append(flush_block)
        block_list.extend(self.post_tasks)
        block_list.append(flush_block)

        return block_list

    def get_vars(self):
        return self.vars.copy()

    def get_vars_files(self):
        if self.vars_files is None:
            return []
        return self.vars_files

    def get_handlers(self):
        return self.handlers[:]

    def get_roles(self):
        return self.roles[:]

    def get_tasks(self):
        tasklist = []
        for task in self.pre_tasks + self.tasks + self.post_tasks:
            if isinstance(task, Block):
                tasklist.append(task.block + task.rescue + task.always)
            else:
                tasklist.append(task)
        return tasklist

    def serialize(self):
        data = super(Play, self).serialize()

        roles = []
        for role in self.get_roles():
            roles.append(role.serialize())
        data['roles'] = roles
        data['included_path'] = self._included_path

        return data

    def deserialize(self, data):
        super(Play, self).deserialize(data)

        self._included_path = data.get('included_path', None)
        if 'roles' in data:
            role_data = data.get('roles', [])
            roles = []
            for role in role_data:
                r = Role()
                r.deserialize(role)
                roles.append(r)

            setattr(self, 'roles', roles)
            del data['roles']

    def copy(self):
        new_me = super(Play, self).copy()

        new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
        new_me._included_conditional = self._included_conditional
        new_me._included_path = self._included_path

        return new_me
gpl-3.0
1,846,551,805,586,845,700
6,937,962,779,431,417,000
36.169139
135
0.602507
false
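The compile() method in the record above interleaves the play's stages in a fixed order, with the flush_handlers meta block appended after the pre_tasks, tasks, and post_tasks stages. A minimal, framework-free sketch of just that ordering (illustrative names only, not Ansible's real API):

def compile_play(pre_tasks, role_tasks, tasks, post_tasks):
    # Stand-in for the Block built from {'meta': 'flush_handlers'} above.
    flush = {'meta': 'flush_handlers'}
    compiled = []
    compiled.extend(pre_tasks)       # pre_tasks run first...
    compiled.append(flush)           # ...then their handlers are flushed
    compiled.extend(role_tasks)      # role tasks, lowest dependencies first
    compiled.extend(tasks)           # the play's own task list
    compiled.append(flush)
    compiled.extend(post_tasks)
    compiled.append(flush)
    return compiled

print(compile_play(['setup'], ['role:common'], ['deploy'], ['notify']))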
Plain-Andy-legacy/android_external_chromium_org
build/android/pylib/device/adb_wrapper_test.py
36
2716
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Tests for the AdbWrapper class.""" import os import tempfile import time import unittest from pylib.device import adb_wrapper from pylib.device import device_errors class TestAdbWrapper(unittest.TestCase): def setUp(self): devices = adb_wrapper.AdbWrapper.GetDevices() assert devices, 'A device must be attached' self._adb = devices[0] self._adb.WaitForDevice() @staticmethod def _MakeTempFile(contents): """Make a temporary file with the given contents. Args: contents: string to write to the temporary file. Returns: The absolute path to the file. """ fi, path = tempfile.mkstemp() with os.fdopen(fi, 'wb') as f: f.write(contents) return path def testShell(self): output = self._adb.Shell('echo test', expect_rc=0) self.assertEqual(output.strip(), 'test') output = self._adb.Shell('echo test') self.assertEqual(output.strip(), 'test') self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Shell, 'echo test', expect_rc=1) def testPushPull(self): path = self._MakeTempFile('foo') device_path = '/data/local/tmp/testfile.txt' local_tmpdir = os.path.dirname(path) self._adb.Push(path, device_path) self.assertEqual(self._adb.Shell('cat %s' % device_path), 'foo') self._adb.Pull(device_path, local_tmpdir) with open(os.path.join(local_tmpdir, 'testfile.txt'), 'r') as f: self.assertEqual(f.read(), 'foo') def testInstall(self): path = self._MakeTempFile('foo') self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Install, path) def testForward(self): self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Forward, 0, 0) def testUninstall(self): self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Uninstall, 'some.nonexistant.package') def testRebootWaitForDevice(self): self._adb.Reboot() print 'waiting for device to reboot...' while self._adb.GetState() == 'device': time.sleep(1) self._adb.WaitForDevice() self.assertEqual(self._adb.GetState(), 'device') print 'waiting for package manager...' while 'package:' not in self._adb.Shell('pm path android'): time.sleep(1) def testRootRemount(self): self._adb.Root() while True: try: self._adb.Shell('start') break except device_errors.AdbCommandFailedError: time.sleep(1) self._adb.Remount() if __name__ == '__main__': unittest.main()
bsd-3-clause
-2,491,002,642,480,168,400
-2,147,493,104,382,941,700
28.204301
79
0.664948
false
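The Shell(expect_rc=...) idiom exercised in the test above can be approximated without the pylib tree. A rough, hypothetical stand-in built on subprocess; it assumes an adb binary on PATH and one attached device, and unlike the real wrapper (which recovers the on-device exit code) it only sees adb's own return code:

import subprocess

def adb_shell(command, expect_rc=None):
    # Run `adb shell <command>` and capture stdout+stderr combined.
    proc = subprocess.Popen(['adb', 'shell', command],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
    if expect_rc is not None and proc.returncode != expect_rc:
        raise RuntimeError('%r exited with %d, expected %d'
                           % (command, proc.returncode, expect_rc))
    return output.decode('utf-8', 'replace')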
trademob/boto
boto/ec2/autoscale/request.py
152
1549
# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. class Request(object): def __init__(self, connection=None): self.connection = connection self.request_id = '' def __repr__(self): return 'Request:%s' % self.request_id def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'RequestId': self.request_id = value else: setattr(self, name, value)
mit
-7,379,827,204,257,645,000
-6,564,918,693,365,542,000
39.763158
74
0.714009
false
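The Request class above implements boto's startElement/endElement protocol and is normally driven by boto's XML response parser. As a self-contained illustration of the same protocol (not boto's actual parser; the connection argument is dropped for brevity), the stdlib SAX parser can drive an object of that shape:

import xml.sax

class RequestLike(object):
    def __init__(self):
        self.request_id = ''
    def startElement(self, name, attrs):
        return None
    def endElement(self, name, value):
        if name == 'RequestId':
            self.request_id = value
        else:
            setattr(self, name, value)

class Driver(xml.sax.ContentHandler):
    # Feeds buffered element text to the target object, boto-style.
    def __init__(self, target):
        xml.sax.ContentHandler.__init__(self)
        self.target = target
        self.buffer = ''
    def startElement(self, name, attrs):
        self.buffer = ''
        self.target.startElement(name, attrs)
    def characters(self, content):
        self.buffer += content
    def endElement(self, name):
        self.target.endElement(name, self.buffer.strip())

req = RequestLike()
xml.sax.parseString(b'<Response><RequestId>abc-123</RequestId></Response>', Driver(req))
assert req.request_id == 'abc-123'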
extremewaysback/django
tests/forms_tests/tests/test_formsets.py
128
57919
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime from django.forms import ( CharField, DateField, FileField, Form, IntegerField, SplitDateTimeField, ValidationError, formsets, ) from django.forms.formsets import BaseFormSet, formset_factory from django.forms.utils import ErrorList from django.test import SimpleTestCase from django.utils.encoding import force_text class Choice(Form): choice = CharField() votes = IntegerField() # FormSet allows us to use multiple instances of the same form on 1 page. For now, # the best way to create a FormSet is by using the formset_factory function. ChoiceFormSet = formset_factory(Choice) class FavoriteDrinkForm(Form): name = CharField() class BaseFavoriteDrinksFormSet(BaseFormSet): def clean(self): seen_drinks = [] for drink in self.cleaned_data: if drink['name'] in seen_drinks: raise ValidationError('You may only specify a drink once.') seen_drinks.append(drink['name']) class EmptyFsetWontValidate(BaseFormSet): def clean(self): raise ValidationError("Clean method called") # Let's define a FormSet that takes a list of favorite drinks, but raises an # error if there are any duplicates. Used in ``test_clean_hook``, # ``test_regression_6926`` & ``test_regression_12878``. FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm, formset=BaseFavoriteDrinksFormSet, extra=3) # Used in ``test_formset_splitdatetimefield``. class SplitDateTimeForm(Form): when = SplitDateTimeField(initial=datetime.datetime.now) SplitDateTimeFormSet = formset_factory(SplitDateTimeForm) class CustomKwargForm(Form): def __init__(self, *args, **kwargs): self.custom_kwarg = kwargs.pop('custom_kwarg') super(CustomKwargForm, self).__init__(*args, **kwargs) class FormsFormsetTestCase(SimpleTestCase): def make_choiceformset(self, formset_data=None, formset_class=ChoiceFormSet, total_forms=None, initial_forms=0, max_num_forms=0, min_num_forms=0, **kwargs): """ Make a ChoiceFormset from the given formset_data. The data should be given as a list of (choice, votes) tuples. """ kwargs.setdefault('prefix', 'choices') kwargs.setdefault('auto_id', False) if formset_data is None: return formset_class(**kwargs) if total_forms is None: total_forms = len(formset_data) def prefixed(*args): args = (kwargs['prefix'],) + args return '-'.join(args) data = { prefixed('TOTAL_FORMS'): str(total_forms), prefixed('INITIAL_FORMS'): str(initial_forms), prefixed('MAX_NUM_FORMS'): str(max_num_forms), prefixed('MIN_NUM_FORMS'): str(min_num_forms), } for i, (choice, votes) in enumerate(formset_data): data[prefixed(str(i), 'choice')] = choice data[prefixed(str(i), 'votes')] = votes return formset_class(data, **kwargs) def test_basic_formset(self): # A FormSet constructor takes the same arguments as Form. Let's create a FormSet # for adding data. By default, it displays 1 blank form. It can display more, # but we'll look at how to do so later. formset = self.make_choiceformset() self.assertHTMLEqual( str(formset), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /> <input type="hidden" name="choices-INITIAL_FORMS" value="0" /> <input type="hidden" name="choices-MIN_NUM_FORMS" value="0" /> <input type="hidden" name="choices-MAX_NUM_FORMS" value="1000" /> <tr><th>Choice:</th><td><input type="text" name="choices-0-choice" /></td></tr> <tr><th>Votes:</th><td><input type="number" name="choices-0-votes" /></td></tr>""" ) # We treat FormSet pretty much like we would treat a normal Form. 
FormSet has an # is_valid method, and a cleaned_data or errors attribute depending on whether all # the forms passed validation. However, unlike a Form instance, cleaned_data and # errors will be a list of dicts rather than just a single dict. formset = self.make_choiceformset([('Calexico', '100')]) self.assertTrue(formset.is_valid()) self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}]) # If a FormSet was not passed any data, its is_valid and has_changed # methods should return False. formset = self.make_choiceformset() self.assertFalse(formset.is_valid()) self.assertFalse(formset.has_changed()) def test_form_kwargs_formset(self): """ Test that custom kwargs set on the formset instance are passed to the underlying forms. """ FormSet = formset_factory(CustomKwargForm, extra=2) formset = FormSet(form_kwargs={'custom_kwarg': 1}) for form in formset: self.assertTrue(hasattr(form, 'custom_kwarg')) self.assertEqual(form.custom_kwarg, 1) def test_form_kwargs_formset_dynamic(self): """ Test that form kwargs can be passed dynamically in a formset. """ class DynamicBaseFormSet(BaseFormSet): def get_form_kwargs(self, index): return {'custom_kwarg': index} DynamicFormSet = formset_factory(CustomKwargForm, formset=DynamicBaseFormSet, extra=2) formset = DynamicFormSet(form_kwargs={'custom_kwarg': 'ignored'}) for i, form in enumerate(formset): self.assertTrue(hasattr(form, 'custom_kwarg')) self.assertEqual(form.custom_kwarg, i) def test_form_kwargs_empty_form(self): FormSet = formset_factory(CustomKwargForm) formset = FormSet(form_kwargs={'custom_kwarg': 1}) self.assertTrue(hasattr(formset.empty_form, 'custom_kwarg')) self.assertEqual(formset.empty_form.custom_kwarg, 1) def test_formset_validation(self): # FormSet instances can also have an error attribute if validation failed for # any of the forms. formset = self.make_choiceformset([('Calexico', '')]) self.assertFalse(formset.is_valid()) self.assertEqual(formset.errors, [{'votes': ['This field is required.']}]) def test_formset_has_changed(self): # A FormSet instance's has_changed method returns True if any data is # passed to its forms, even if the formset didn't validate blank_formset = self.make_choiceformset([('', '')]) self.assertFalse(blank_formset.has_changed()) # invalid formset test invalid_formset = self.make_choiceformset([('Calexico', '')]) self.assertFalse(invalid_formset.is_valid()) self.assertTrue(invalid_formset.has_changed()) # valid formset test valid_formset = self.make_choiceformset([('Calexico', '100')]) self.assertTrue(valid_formset.is_valid()) self.assertTrue(valid_formset.has_changed()) def test_formset_initial_data(self): # We can also prefill a FormSet with existing data by providing an ``initial`` # argument to the constructor. ``initial`` should be a list of dicts. By default, # an extra blank form is included. initial = [{'choice': 'Calexico', 'votes': 100}] formset = self.make_choiceformset(initial=initial) form_output = [] for form in formset.forms: form_output.append(form.as_ul()) self.assertHTMLEqual( '\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li> <li>Votes: <input type="number" name="choices-0-votes" value="100" /></li> <li>Choice: <input type="text" name="choices-1-choice" /></li> <li>Votes: <input type="number" name="choices-1-votes" /></li>""" ) # Let's simulate what would happen if we submitted this form. 
formset = self.make_choiceformset([('Calexico', '100'), ('', '')], initial_forms=1) self.assertTrue(formset.is_valid()) self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}]) def test_second_form_partially_filled(self): # But the second form was blank! Shouldn't we get some errors? No. If we display # a form as blank, it's ok for it to be submitted as blank. If we fill out even # one of the fields of a blank form though, it will be validated. We may want to # require that at least x number of forms are completed, but we'll show how to # handle that later. formset = self.make_choiceformset([('Calexico', '100'), ('The Decemberists', '')], initial_forms=1) self.assertFalse(formset.is_valid()) self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}]) def test_delete_prefilled_data(self): # If we delete data that was pre-filled, we should get an error. Simply removing # data from form fields isn't the proper way to delete it. We'll see how to # handle that case later. formset = self.make_choiceformset([('', ''), ('', '')], initial_forms=1) self.assertFalse(formset.is_valid()) self.assertEqual( formset.errors, [{'votes': ['This field is required.'], 'choice': ['This field is required.']}, {}] ) def test_displaying_more_than_one_blank_form(self): # Displaying more than 1 blank form ########################################### # We can also display more than 1 empty form at a time. To do so, pass an # extra argument to formset_factory. ChoiceFormSet = formset_factory(Choice, extra=3) formset = ChoiceFormSet(auto_id=False, prefix='choices') form_output = [] for form in formset.forms: form_output.append(form.as_ul()) self.assertHTMLEqual( '\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li> <li>Votes: <input type="number" name="choices-0-votes" /></li> <li>Choice: <input type="text" name="choices-1-choice" /></li> <li>Votes: <input type="number" name="choices-1-votes" /></li> <li>Choice: <input type="text" name="choices-2-choice" /></li> <li>Votes: <input type="number" name="choices-2-votes" /></li>""" ) # Since we displayed every form as blank, we will also accept them back as blank. # This may seem a little strange, but later we will show how to require a minimum # number of forms to be completed. data = { 'choices-TOTAL_FORMS': '3', # the number of forms rendered 'choices-INITIAL_FORMS': '0', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': '', 'choices-0-votes': '', 'choices-1-choice': '', 'choices-1-votes': '', 'choices-2-choice': '', 'choices-2-votes': '', } formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertTrue(formset.is_valid()) self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}]) def test_min_num_displaying_more_than_one_blank_form(self): # We can also display more than 1 empty form by passing the min_num argument # to formset_factory. It will (essentially) increment the extra argument ChoiceFormSet = formset_factory(Choice, extra=1, min_num=1) formset = ChoiceFormSet(auto_id=False, prefix='choices') form_output = [] for form in formset.forms: form_output.append(form.as_ul()) # Min_num forms are required; extra forms can be empty. 
self.assertFalse(formset.forms[0].empty_permitted) self.assertTrue(formset.forms[1].empty_permitted) self.assertHTMLEqual( '\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li> <li>Votes: <input type="number" name="choices-0-votes" /></li> <li>Choice: <input type="text" name="choices-1-choice" /></li> <li>Votes: <input type="number" name="choices-1-votes" /></li>""" ) def test_min_num_displaying_more_than_one_blank_form_with_zero_extra(self): # We can also display more than 1 empty form passing min_num argument ChoiceFormSet = formset_factory(Choice, extra=0, min_num=3) formset = ChoiceFormSet(auto_id=False, prefix='choices') form_output = [] for form in formset.forms: form_output.append(form.as_ul()) self.assertHTMLEqual( '\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li> <li>Votes: <input type="number" name="choices-0-votes" /></li> <li>Choice: <input type="text" name="choices-1-choice" /></li> <li>Votes: <input type="number" name="choices-1-votes" /></li> <li>Choice: <input type="text" name="choices-2-choice" /></li> <li>Votes: <input type="number" name="choices-2-votes" /></li>""" ) def test_single_form_completed(self): # We can just fill out one of the forms. data = { 'choices-TOTAL_FORMS': '3', # the number of forms rendered 'choices-INITIAL_FORMS': '0', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': 'Calexico', 'choices-0-votes': '100', 'choices-1-choice': '', 'choices-1-votes': '', 'choices-2-choice': '', 'choices-2-votes': '', } ChoiceFormSet = formset_factory(Choice, extra=3) formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertTrue(formset.is_valid()) self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}, {}]) def test_formset_validate_max_flag(self): # If validate_max is set and max_num is less than TOTAL_FORMS in the # data, then throw an exception. MAX_NUM_FORMS in the data is # irrelevant here (it's output as a hint for the client but its # value in the returned data is not checked) data = { 'choices-TOTAL_FORMS': '2', # the number of forms rendered 'choices-INITIAL_FORMS': '0', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored 'choices-0-choice': 'Zero', 'choices-0-votes': '0', 'choices-1-choice': 'One', 'choices-1-votes': '1', } ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True) formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.']) def test_formset_validate_min_flag(self): # If validate_min is set and min_num is more than TOTAL_FORMS in the # data, then throw an exception. 
MIN_NUM_FORMS in the data is # irrelevant here (it's output as a hint for the client but its # value in the returned data is not checked) data = { 'choices-TOTAL_FORMS': '2', # the number of forms rendered 'choices-INITIAL_FORMS': '0', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms - should be ignored 'choices-0-choice': 'Zero', 'choices-0-votes': '0', 'choices-1-choice': 'One', 'choices-1-votes': '1', } ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True) formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ['Please submit 3 or more forms.']) def test_second_form_partially_filled_2(self): # And once again, if we try to partially complete a form, validation will fail. data = { 'choices-TOTAL_FORMS': '3', # the number of forms rendered 'choices-INITIAL_FORMS': '0', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': 'Calexico', 'choices-0-votes': '100', 'choices-1-choice': 'The Decemberists', 'choices-1-votes': '', # missing value 'choices-2-choice': '', 'choices-2-votes': '', } ChoiceFormSet = formset_factory(Choice, extra=3) formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertFalse(formset.is_valid()) self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}, {}]) def test_more_initial_data(self): # The extra argument also works when the formset is pre-filled with initial # data. initial = [{'choice': 'Calexico', 'votes': 100}] ChoiceFormSet = formset_factory(Choice, extra=3) formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices') form_output = [] for form in formset.forms: form_output.append(form.as_ul()) self.assertHTMLEqual( '\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li> <li>Votes: <input type="number" name="choices-0-votes" value="100" /></li> <li>Choice: <input type="text" name="choices-1-choice" /></li> <li>Votes: <input type="number" name="choices-1-votes" /></li> <li>Choice: <input type="text" name="choices-2-choice" /></li> <li>Votes: <input type="number" name="choices-2-votes" /></li> <li>Choice: <input type="text" name="choices-3-choice" /></li> <li>Votes: <input type="number" name="choices-3-votes" /></li>""" ) # Make sure retrieving an empty form works, and it shows up in the form list self.assertTrue(formset.empty_form.empty_permitted) self.assertHTMLEqual( formset.empty_form.as_ul(), """<li>Choice: <input type="text" name="choices-__prefix__-choice" /></li> <li>Votes: <input type="number" name="choices-__prefix__-votes" /></li>""" ) def test_formset_with_deletion(self): # FormSets with deletion ###################################################### # We can easily add deletion ability to a FormSet with an argument to # formset_factory. This will add a boolean field to each form instance. 
When # that boolean field is True, the form will be in formset.deleted_forms ChoiceFormSet = formset_factory(Choice, can_delete=True) initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}] formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices') form_output = [] for form in formset.forms: form_output.append(form.as_ul()) self.assertHTMLEqual( '\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li> <li>Votes: <input type="number" name="choices-0-votes" value="100" /></li> <li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li> <li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li> <li>Votes: <input type="number" name="choices-1-votes" value="900" /></li> <li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li> <li>Choice: <input type="text" name="choices-2-choice" /></li> <li>Votes: <input type="number" name="choices-2-votes" /></li> <li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>""" ) # To delete something, we just need to set that form's special delete field to # 'on'. Let's go ahead and delete Fergie. data = { 'choices-TOTAL_FORMS': '3', # the number of forms rendered 'choices-INITIAL_FORMS': '2', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': 'Calexico', 'choices-0-votes': '100', 'choices-0-DELETE': '', 'choices-1-choice': 'Fergie', 'choices-1-votes': '900', 'choices-1-DELETE': 'on', 'choices-2-choice': '', 'choices-2-votes': '', 'choices-2-DELETE': '', } formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertTrue(formset.is_valid()) self.assertEqual( [form.cleaned_data for form in formset.forms], [ {'votes': 100, 'DELETE': False, 'choice': 'Calexico'}, {'votes': 900, 'DELETE': True, 'choice': 'Fergie'}, {}, ] ) self.assertEqual( [form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'choice': 'Fergie'}] ) # If we fill a form with something and then we check the can_delete checkbox for # that form, that form's errors should not make the entire formset invalid since # it's going to be deleted. class CheckForm(Form): field = IntegerField(min_value=100) data = { 'check-TOTAL_FORMS': '3', # the number of forms rendered 'check-INITIAL_FORMS': '2', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'check-MAX_NUM_FORMS': '0', # max number of forms 'check-0-field': '200', 'check-0-DELETE': '', 'check-1-field': '50', 'check-1-DELETE': 'on', 'check-2-field': '', 'check-2-DELETE': '', } CheckFormSet = formset_factory(CheckForm, can_delete=True) formset = CheckFormSet(data, prefix='check') self.assertTrue(formset.is_valid()) # If we remove the deletion flag now we will have our validation back. data['check-1-DELETE'] = '' formset = CheckFormSet(data, prefix='check') self.assertFalse(formset.is_valid()) # Should be able to get deleted_forms from a valid formset even if a # deleted form would have been invalid. class Person(Form): name = CharField() PeopleForm = formset_factory( form=Person, can_delete=True) p = PeopleForm( {'form-0-name': '', 'form-0-DELETE': 'on', # no name! 
'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1, 'form-MIN_NUM_FORMS': 0, 'form-MAX_NUM_FORMS': 1}) self.assertTrue(p.is_valid()) self.assertEqual(len(p.deleted_forms), 1) def test_formsets_with_ordering(self): # FormSets with ordering ###################################################### # We can also add ordering ability to a FormSet with an argument to # formset_factory. This will add an integer field to each form instance. When # form validation succeeds, [form.cleaned_data for form in formset.forms] will have the data in the correct # order specified by the ordering fields. If a number is duplicated in the set # of ordering fields, for instance form 0 and form 3 are both marked as 1, then # the form index is used as a secondary ordering criterion. In order to put # something at the front of the list, you'd need to set its order to 0. ChoiceFormSet = formset_factory(Choice, can_order=True) initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}] formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices') form_output = [] for form in formset.forms: form_output.append(form.as_ul()) self.assertHTMLEqual( '\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li> <li>Votes: <input type="number" name="choices-0-votes" value="100" /></li> <li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li> <li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li> <li>Votes: <input type="number" name="choices-1-votes" value="900" /></li> <li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li> <li>Choice: <input type="text" name="choices-2-choice" /></li> <li>Votes: <input type="number" name="choices-2-votes" /></li> <li>Order: <input type="number" name="choices-2-ORDER" /></li>""" ) data = { 'choices-TOTAL_FORMS': '3', # the number of forms rendered 'choices-INITIAL_FORMS': '2', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': 'Calexico', 'choices-0-votes': '100', 'choices-0-ORDER': '1', 'choices-1-choice': 'Fergie', 'choices-1-votes': '900', 'choices-1-ORDER': '2', 'choices-2-choice': 'The Decemberists', 'choices-2-votes': '500', 'choices-2-ORDER': '0', } formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertTrue(formset.is_valid()) form_output = [] for form in formset.ordered_forms: form_output.append(form.cleaned_data) self.assertEqual(form_output, [ {'votes': 500, 'ORDER': 0, 'choice': 'The Decemberists'}, {'votes': 100, 'ORDER': 1, 'choice': 'Calexico'}, {'votes': 900, 'ORDER': 2, 'choice': 'Fergie'}, ]) def test_empty_ordered_fields(self): # Ordering fields are allowed to be left blank, and if they *are* left blank, # they will be sorted below everything else. 
data = { 'choices-TOTAL_FORMS': '4', # the number of forms rendered 'choices-INITIAL_FORMS': '3', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': 'Calexico', 'choices-0-votes': '100', 'choices-0-ORDER': '1', 'choices-1-choice': 'Fergie', 'choices-1-votes': '900', 'choices-1-ORDER': '2', 'choices-2-choice': 'The Decemberists', 'choices-2-votes': '500', 'choices-2-ORDER': '', 'choices-3-choice': 'Basia Bulat', 'choices-3-votes': '50', 'choices-3-ORDER': '', } ChoiceFormSet = formset_factory(Choice, can_order=True) formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertTrue(formset.is_valid()) form_output = [] for form in formset.ordered_forms: form_output.append(form.cleaned_data) self.assertEqual(form_output, [ {'votes': 100, 'ORDER': 1, 'choice': 'Calexico'}, {'votes': 900, 'ORDER': 2, 'choice': 'Fergie'}, {'votes': 500, 'ORDER': None, 'choice': 'The Decemberists'}, {'votes': 50, 'ORDER': None, 'choice': 'Basia Bulat'}, ]) def test_ordering_blank_fieldsets(self): # Ordering should work with blank fieldsets. data = { 'choices-TOTAL_FORMS': '3', # the number of forms rendered 'choices-INITIAL_FORMS': '0', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms } ChoiceFormSet = formset_factory(Choice, can_order=True) formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertTrue(formset.is_valid()) form_output = [] for form in formset.ordered_forms: form_output.append(form.cleaned_data) self.assertEqual(form_output, []) def test_formset_with_ordering_and_deletion(self): # FormSets with ordering + deletion ########################################### # Let's try throwing ordering and deletion into the same form. ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True) initial = [ {'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}, {'choice': 'The Decemberists', 'votes': 500}, ] formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices') form_output = [] for form in formset.forms: form_output.append(form.as_ul()) self.assertHTMLEqual( '\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li> <li>Votes: <input type="number" name="choices-0-votes" value="100" /></li> <li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li> <li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li> <li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li> <li>Votes: <input type="number" name="choices-1-votes" value="900" /></li> <li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li> <li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li> <li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists" /></li> <li>Votes: <input type="number" name="choices-2-votes" value="500" /></li> <li>Order: <input type="number" name="choices-2-ORDER" value="3" /></li> <li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li> <li>Choice: <input type="text" name="choices-3-choice" /></li> <li>Votes: <input type="number" name="choices-3-votes" /></li> <li>Order: <input type="number" name="choices-3-ORDER" /></li> <li>Delete: <input type="checkbox" name="choices-3-DELETE" /></li>""" ) # Let's delete Fergie, and put The Decemberists ahead of Calexico. 
data = { 'choices-TOTAL_FORMS': '4', # the number of forms rendered 'choices-INITIAL_FORMS': '3', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': 'Calexico', 'choices-0-votes': '100', 'choices-0-ORDER': '1', 'choices-0-DELETE': '', 'choices-1-choice': 'Fergie', 'choices-1-votes': '900', 'choices-1-ORDER': '2', 'choices-1-DELETE': 'on', 'choices-2-choice': 'The Decemberists', 'choices-2-votes': '500', 'choices-2-ORDER': '0', 'choices-2-DELETE': '', 'choices-3-choice': '', 'choices-3-votes': '', 'choices-3-ORDER': '', 'choices-3-DELETE': '', } formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertTrue(formset.is_valid()) form_output = [] for form in formset.ordered_forms: form_output.append(form.cleaned_data) self.assertEqual(form_output, [ {'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': 'The Decemberists'}, {'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': 'Calexico'}, ]) self.assertEqual( [form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': 'Fergie'}] ) def test_invalid_deleted_form_with_ordering(self): # Should be able to get ordered forms from a valid formset even if a # deleted form would have been invalid. class Person(Form): name = CharField() PeopleForm = formset_factory(form=Person, can_delete=True, can_order=True) p = PeopleForm({ 'form-0-name': '', 'form-0-DELETE': 'on', # no name! 'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1, 'form-MIN_NUM_FORMS': 0, 'form-MAX_NUM_FORMS': 1 }) self.assertTrue(p.is_valid()) self.assertEqual(p.ordered_forms, []) def test_clean_hook(self): # FormSet clean hook ########################################################## # FormSets have a hook for doing extra validation that shouldn't be tied to any # particular form. It follows the same pattern as the clean hook on Forms. # We start out with some duplicate data. data = { 'drinks-TOTAL_FORMS': '2', # the number of forms rendered 'drinks-INITIAL_FORMS': '0', # the number of forms with initial data 'drinks-MIN_NUM_FORMS': '0', # min number of forms 'drinks-MAX_NUM_FORMS': '0', # max number of forms 'drinks-0-name': 'Gin and Tonic', 'drinks-1-name': 'Gin and Tonic', } formset = FavoriteDrinksFormSet(data, prefix='drinks') self.assertFalse(formset.is_valid()) # Any errors raised by formset.clean() are available via the # formset.non_form_errors() method. for error in formset.non_form_errors(): self.assertEqual(str(error), 'You may only specify a drink once.') # Make sure we didn't break the valid case. data = { 'drinks-TOTAL_FORMS': '2', # the number of forms rendered 'drinks-INITIAL_FORMS': '0', # the number of forms with initial data 'drinks-MIN_NUM_FORMS': '0', # min number of forms 'drinks-MAX_NUM_FORMS': '0', # max number of forms 'drinks-0-name': 'Gin and Tonic', 'drinks-1-name': 'Bloody Mary', } formset = FavoriteDrinksFormSet(data, prefix='drinks') self.assertTrue(formset.is_valid()) self.assertEqual(formset.non_form_errors(), []) def test_limiting_max_forms(self): # Limiting the maximum number of forms ######################################## # Base case for max_num. # When not passed, max_num will take a high default value, leaving the # number of forms only controlled by the value of the extra parameter. 
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3) formset = LimitedFavoriteDrinkFormSet() form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertHTMLEqual( '\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th> <td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr> <tr><th><label for="id_form-1-name">Name:</label></th> <td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr> <tr><th><label for="id_form-2-name">Name:</label></th> <td><input type="text" name="form-2-name" id="id_form-2-name" /></td></tr>""" ) # If max_num is 0 then no form is rendered at all. LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0) formset = LimitedFavoriteDrinkFormSet() form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertEqual('\n'.join(form_output), "") LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2) formset = LimitedFavoriteDrinkFormSet() form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertHTMLEqual( '\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td> <input type="text" name="form-0-name" id="id_form-0-name" /></td></tr> <tr><th><label for="id_form-1-name">Name:</label></th> <td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""" ) # Ensure that max_num has no effect when extra is less than max_num. LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2) formset = LimitedFavoriteDrinkFormSet() form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertHTMLEqual( '\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th> <td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>""" ) def test_max_num_with_initial_data(self): # max_num with initial data # When not passed, max_num will take a high default value, leaving the # number of forms only controlled by the value of the initial and extra # parameters. initial = [ {'name': 'Fernet and Coke'}, ] LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1) formset = LimitedFavoriteDrinkFormSet(initial=initial) form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertHTMLEqual( '\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th> <td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name" /></td></tr> <tr><th><label for="id_form-1-name">Name:</label></th> <td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""" ) def test_max_num_zero(self): # If max_num is 0 then no form is rendered at all, regardless of extra, # unless initial data is present. 
(This changed in the patch for bug # 20084 -- previously max_num=0 trumped initial data) LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0) formset = LimitedFavoriteDrinkFormSet() form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertEqual('\n'.join(form_output), "") # test that initial trumps max_num initial = [ {'name': 'Fernet and Coke'}, {'name': 'Bloody Mary'}, ] LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0) formset = LimitedFavoriteDrinkFormSet(initial=initial) form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertHTMLEqual( '\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th> <td><input id="id_form-0-name" name="form-0-name" type="text" value="Fernet and Coke" /></td></tr> <tr><th><label for="id_form-1-name">Name:</label></th> <td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>""" ) def test_more_initial_than_max_num(self): # More initial forms than max_num now results in all initial forms # being displayed (but no extra forms). This behavior was changed # from max_num taking precedence in the patch for #20084 initial = [ {'name': 'Gin Tonic'}, {'name': 'Bloody Mary'}, {'name': 'Jack and Coke'}, ] LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2) formset = LimitedFavoriteDrinkFormSet(initial=initial) form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertHTMLEqual( '\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th> <td><input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic" /></td></tr> <tr><th><label for="id_form-1-name">Name:</label></th> <td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr> <tr><th><label for="id_form-2-name">Name:</label></th> <td><input id="id_form-2-name" name="form-2-name" type="text" value="Jack and Coke" /></td></tr>""" ) # One form from initial and extra=3 with max_num=2 should result in the one # initial form and one extra. initial = [ {'name': 'Gin Tonic'}, ] LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2) formset = LimitedFavoriteDrinkFormSet(initial=initial) form_output = [] for form in formset.forms: form_output.append(str(form)) self.assertHTMLEqual( '\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th> <td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr> <tr><th><label for="id_form-1-name">Name:</label></th> <td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""" ) def test_regression_6926(self): # Regression test for #6926 ################################################## # Make sure the management form has the correct prefix. 
formset = FavoriteDrinksFormSet() self.assertEqual(formset.management_form.prefix, 'form') data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MIN_NUM_FORMS': '0', 'form-MAX_NUM_FORMS': '0', } formset = FavoriteDrinksFormSet(data=data) self.assertEqual(formset.management_form.prefix, 'form') formset = FavoriteDrinksFormSet(initial={}) self.assertEqual(formset.management_form.prefix, 'form') def test_regression_12878(self): # Regression test for #12878 ################################################# data = { 'drinks-TOTAL_FORMS': '2', # the number of forms rendered 'drinks-INITIAL_FORMS': '0', # the number of forms with initial data 'drinks-MIN_NUM_FORMS': '0', # min number of forms 'drinks-MAX_NUM_FORMS': '0', # max number of forms 'drinks-0-name': 'Gin and Tonic', 'drinks-1-name': 'Gin and Tonic', } formset = FavoriteDrinksFormSet(data, prefix='drinks') self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ['You may only specify a drink once.']) def test_formset_iteration(self): # Regression tests for #16455 -- formset instances are iterable ChoiceFormset = formset_factory(Choice, extra=3) formset = ChoiceFormset() # confirm iterated formset yields formset.forms forms = list(formset) self.assertEqual(forms, formset.forms) self.assertEqual(len(formset), len(forms)) # confirm indexing of formset self.assertEqual(formset[0], forms[0]) try: formset[3] self.fail('Requesting an invalid formset index should raise an exception') except IndexError: pass # Formsets can override the default iteration order class BaseReverseFormSet(BaseFormSet): def __iter__(self): return reversed(self.forms) def __getitem__(self, idx): return super(BaseReverseFormSet, self).__getitem__(len(self) - idx - 1) ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3) reverse_formset = ReverseChoiceFormset() # confirm that __iter__ modifies rendering order # compare forms from "reverse" formset with forms from original formset self.assertEqual(str(reverse_formset[0]), str(forms[-1])) self.assertEqual(str(reverse_formset[1]), str(forms[-2])) self.assertEqual(len(reverse_formset), len(forms)) def test_formset_nonzero(self): """ Formsets with no forms should still evaluate as true. Regression test for #15722 """ ChoiceFormset = formset_factory(Choice, extra=0) formset = ChoiceFormset() self.assertEqual(len(formset.forms), 0) self.assertTrue(formset) def test_formset_splitdatetimefield(self): """ Formset should also work with SplitDateTimeField(initial=datetime.datetime.now). Regression test for #18709. """ data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-0-when_0': '1904-06-16', 'form-0-when_1': '15:51:33', } formset = SplitDateTimeFormSet(data) self.assertTrue(formset.is_valid()) def test_formset_error_class(self): # Regression tests for #16479 -- formsets form use ErrorList instead of supplied error_class class CustomErrorList(ErrorList): pass formset = FavoriteDrinksFormSet(error_class=CustomErrorList) self.assertEqual(formset.forms[0].error_class, CustomErrorList) def test_formset_calls_forms_is_valid(self): # Regression tests for #18574 -- make sure formsets call # is_valid() on each form. 
class AnotherChoice(Choice): def is_valid(self): self.is_valid_called = True return super(AnotherChoice, self).is_valid() AnotherChoiceFormSet = formset_factory(AnotherChoice) data = { 'choices-TOTAL_FORMS': '1', # number of forms rendered 'choices-INITIAL_FORMS': '0', # number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': 'Calexico', 'choices-0-votes': '100', } formset = AnotherChoiceFormSet(data, auto_id=False, prefix='choices') self.assertTrue(formset.is_valid()) self.assertTrue(all(form.is_valid_called for form in formset.forms)) def test_hard_limit_on_instantiated_forms(self): """A formset has a hard limit on the number of forms instantiated.""" # reduce the default limit of 1000 temporarily for testing _old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM try: formsets.DEFAULT_MAX_NUM = 2 ChoiceFormSet = formset_factory(Choice, max_num=1) # someone fiddles with the mgmt form data... formset = ChoiceFormSet( { 'choices-TOTAL_FORMS': '4', 'choices-INITIAL_FORMS': '0', 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '4', 'choices-0-choice': 'Zero', 'choices-0-votes': '0', 'choices-1-choice': 'One', 'choices-1-votes': '1', 'choices-2-choice': 'Two', 'choices-2-votes': '2', 'choices-3-choice': 'Three', 'choices-3-votes': '3', }, prefix='choices', ) # But we still only instantiate 3 forms self.assertEqual(len(formset.forms), 3) # and the formset isn't valid self.assertFalse(formset.is_valid()) finally: formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM def test_increase_hard_limit(self): """Can increase the built-in forms limit via a higher max_num.""" # reduce the default limit of 1000 temporarily for testing _old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM try: formsets.DEFAULT_MAX_NUM = 3 # for this form, we want a limit of 4 ChoiceFormSet = formset_factory(Choice, max_num=4) formset = ChoiceFormSet( { 'choices-TOTAL_FORMS': '4', 'choices-INITIAL_FORMS': '0', 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '4', 'choices-0-choice': 'Zero', 'choices-0-votes': '0', 'choices-1-choice': 'One', 'choices-1-votes': '1', 'choices-2-choice': 'Two', 'choices-2-votes': '2', 'choices-3-choice': 'Three', 'choices-3-votes': '3', }, prefix='choices', ) # Four forms are instantiated and no exception is raised self.assertEqual(len(formset.forms), 4) finally: formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM def test_non_form_errors_run_full_clean(self): # Regression test for #11160 # If non_form_errors() is called without calling is_valid() first, # it should ensure that full_clean() is called. 
class BaseCustomFormSet(BaseFormSet): def clean(self): raise ValidationError("This is a non-form error") ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet) formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertIsInstance(formset.non_form_errors(), ErrorList) self.assertEqual(list(formset.non_form_errors()), ['This is a non-form error']) def test_validate_max_ignores_forms_marked_for_deletion(self): class CheckForm(Form): field = IntegerField() data = { 'check-TOTAL_FORMS': '2', 'check-INITIAL_FORMS': '0', 'check-MAX_NUM_FORMS': '1', 'check-0-field': '200', 'check-0-DELETE': '', 'check-1-field': '50', 'check-1-DELETE': 'on', } CheckFormSet = formset_factory(CheckForm, max_num=1, validate_max=True, can_delete=True) formset = CheckFormSet(data, prefix='check') self.assertTrue(formset.is_valid()) def test_formset_total_error_count(self): """A valid formset should have 0 total errors.""" data = [ # formset_data, expected error count ([('Calexico', '100')], 0), ([('Calexico', '')], 1), ([('', 'invalid')], 2), ([('Calexico', '100'), ('Calexico', '')], 1), ([('Calexico', ''), ('Calexico', '')], 2), ] for formset_data, expected_error_count in data: formset = self.make_choiceformset(formset_data) self.assertEqual(formset.total_error_count(), expected_error_count) def test_formset_total_error_count_with_non_form_errors(self): data = { 'choices-TOTAL_FORMS': '2', # the number of forms rendered 'choices-INITIAL_FORMS': '0', # the number of forms with initial data 'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored 'choices-0-choice': 'Zero', 'choices-0-votes': '0', 'choices-1-choice': 'One', 'choices-1-votes': '1', } ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True) formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertEqual(formset.total_error_count(), 1) data['choices-1-votes'] = '' formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertEqual(formset.total_error_count(), 2) def test_html_safe(self): formset = self.make_choiceformset() self.assertTrue(hasattr(formset, '__html__')) self.assertEqual(force_text(formset), formset.__html__()) data = { 'choices-TOTAL_FORMS': '1', # the number of forms rendered 'choices-INITIAL_FORMS': '0', # the number of forms with initial data 'choices-MIN_NUM_FORMS': '0', # min number of forms 'choices-MAX_NUM_FORMS': '0', # max number of forms 'choices-0-choice': 'Calexico', 'choices-0-votes': '100', } class Choice(Form): choice = CharField() votes = IntegerField() ChoiceFormSet = formset_factory(Choice) class FormsetAsFooTests(SimpleTestCase): def test_as_table(self): formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertHTMLEqual( formset.as_table(), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /> <input type="hidden" name="choices-INITIAL_FORMS" value="0" /> <input type="hidden" name="choices-MIN_NUM_FORMS" value="0" /> <input type="hidden" name="choices-MAX_NUM_FORMS" value="0" /> <tr><th>Choice:</th><td><input type="text" name="choices-0-choice" value="Calexico" /></td></tr> <tr><th>Votes:</th><td><input type="number" name="choices-0-votes" value="100" /></td></tr>""" ) def test_as_p(self): formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertHTMLEqual( formset.as_p(), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /> <input type="hidden" name="choices-INITIAL_FORMS" value="0" /> <input type="hidden" name="choices-MIN_NUM_FORMS" value="0" /> <input type="hidden" 
name="choices-MAX_NUM_FORMS" value="0" /> <p>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></p> <p>Votes: <input type="number" name="choices-0-votes" value="100" /></p>""" ) def test_as_ul(self): formset = ChoiceFormSet(data, auto_id=False, prefix='choices') self.assertHTMLEqual( formset.as_ul(), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /> <input type="hidden" name="choices-INITIAL_FORMS" value="0" /> <input type="hidden" name="choices-MIN_NUM_FORMS" value="0" /> <input type="hidden" name="choices-MAX_NUM_FORMS" value="0" /> <li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li> <li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>""" ) # Regression test for #11418 ################################################# class ArticleForm(Form): title = CharField() pub_date = DateField() ArticleFormSet = formset_factory(ArticleForm) class TestIsBoundBehavior(SimpleTestCase): def test_no_data_raises_validation_error(self): with self.assertRaises(ValidationError): ArticleFormSet({}).is_valid() def test_with_management_data_attrs_work_fine(self): data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', } formset = ArticleFormSet(data) self.assertEqual(0, formset.initial_form_count()) self.assertEqual(1, formset.total_form_count()) self.assertTrue(formset.is_bound) self.assertTrue(formset.forms[0].is_bound) self.assertTrue(formset.is_valid()) self.assertTrue(formset.forms[0].is_valid()) self.assertEqual([{}], formset.cleaned_data) def test_form_errors_are_caught_by_formset(self): data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-0-title': 'Test', 'form-0-pub_date': '1904-06-16', 'form-1-title': 'Test', 'form-1-pub_date': '', # <-- this date is missing but required } formset = ArticleFormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual([{}, {'pub_date': ['This field is required.']}], formset.errors) def test_empty_forms_are_unbound(self): data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-0-title': 'Test', 'form-0-pub_date': '1904-06-16', } unbound_formset = ArticleFormSet() bound_formset = ArticleFormSet(data) empty_forms = [] empty_forms.append(unbound_formset.empty_form) empty_forms.append(bound_formset.empty_form) # Empty forms should be unbound self.assertFalse(empty_forms[0].is_bound) self.assertFalse(empty_forms[1].is_bound) # The empty forms should be equal. self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p()) class TestEmptyFormSet(SimpleTestCase): def test_empty_formset_is_valid(self): """Test that an empty formset still calls clean()""" EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate) formset = EmptyFsetWontValidateFormset( data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '0'}, prefix="form", ) formset2 = EmptyFsetWontValidateFormset( data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '1', 'form-0-name': 'bah'}, prefix="form", ) self.assertFalse(formset.is_valid()) self.assertFalse(formset2.is_valid()) def test_empty_formset_media(self): """Make sure media is available on empty formset, refs #19545""" class MediaForm(Form): class Media: js = ('some-file.js',) self.assertIn('some-file.js', str(formset_factory(MediaForm, extra=0)().media)) def test_empty_formset_is_multipart(self): """Make sure `is_multipart()` works with empty formset, refs #19545""" class FileForm(Form): file = FileField() self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
bsd-3-clause
466,919,539,685,559,000
6,190,541,155,392,495,000
41.307524
119
0.587061
false
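The tests in the record above assemble management-form POST data by hand many times over. The pattern generalizes into a small helper in the spirit of make_choiceformset's prefixed() closure; this sketch is Django-free so it runs standalone, and only encodes the TOTAL_FORMS/INITIAL_FORMS naming convention visible in the test data:

def formset_post_data(prefix, rows, initial=0, min_num=0, max_num=1000):
    # Management form fields first, then one prefixed entry per field per row.
    data = {
        '%s-TOTAL_FORMS' % prefix: str(len(rows)),
        '%s-INITIAL_FORMS' % prefix: str(initial),
        '%s-MIN_NUM_FORMS' % prefix: str(min_num),
        '%s-MAX_NUM_FORMS' % prefix: str(max_num),
    }
    for i, row in enumerate(rows):
        for field, value in row.items():
            data['%s-%d-%s' % (prefix, i, field)] = value
    return data

data = formset_post_data('choices', [{'choice': 'Calexico', 'votes': '100'}])
assert data['choices-TOTAL_FORMS'] == '1'
assert data['choices-0-choice'] == 'Calexico'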
mcepl/youtube-dl
test/test_iqiyi_sdk_interpreter.py
16
1103
#!/usr/bin/env python from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from test.helper import FakeYDL from youtube_dl.extractor import IqiyiIE class IqiyiIEWithCredentials(IqiyiIE): def _get_login_info(self): return 'foo', 'bar' class WarningLogger(object): def __init__(self): self.messages = [] def warning(self, msg): self.messages.append(msg) def debug(self, msg): pass def error(self, msg): pass class TestIqiyiSDKInterpreter(unittest.TestCase): def test_iqiyi_sdk_interpreter(self): ''' Test the functionality of IqiyiSDKInterpreter by trying to log in If `sign` is incorrect, /validate call throws an HTTP 556 error ''' logger = WarningLogger() ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger})) ie._login() self.assertTrue('unable to log in:' in logger.messages[0]) if __name__ == '__main__': unittest.main()
unlicense
5,101,550,568,591,291,000
2,159,476,563,687,471,400
22.468085
79
0.650952
false
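WarningLogger above is a hand-rolled test double that records warnings so the test can assert on them. For code that logs through the stdlib logging module instead of a youtube-dl-style logger object, the same capture-and-assert pattern looks like this (a sketch, not part of youtube-dl):

import logging

class ListHandler(logging.Handler):
    # Collects formatted messages instead of writing them anywhere.
    def __init__(self):
        logging.Handler.__init__(self)
        self.messages = []
    def emit(self, record):
        self.messages.append(record.getMessage())

log = logging.getLogger('demo')
handler = ListHandler()
log.addHandler(handler)
log.warning('unable to log in: bad sign')
assert 'unable to log in:' in handler.messages[0]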
seiji56/rmaze-2016
logic_code/last_ver/phys/movement.py
1
6199
import herkulex import time import thread import RPi.GPIO as gpio import sensory as sn herkulex.connect("/dev/ttyAMA0", 115200) FR = herkulex.servo(0xfd) FL = herkulex.servo(16) BR = herkulex.servo(10) BL = herkulex.servo(20) DROP = herkulex.servo(50) ALL = herkulex.servo(0xfe) ALL.torque_on() def align(tout = 1, dst_sth = 400): start = time.time() while time.time() - start < tout: vpow = sn.vertcorr(dst_sth) setPow([-vpow, vpow, -vpow, vpow], sn.latcorr(dst_sth), sn.angcorr(dst_sth)) stop() def stop(): ALL.set_servo_speed(1, 0x06) ALL.set_led(0x06) def setPow(pots, latcorr = 0, angcorr = 0): if pots[0] + latcorr + angcorr != 0: FR.set_servo_speed(pots[0] + latcorr + angcorr, 0x06) if pots[1] + latcorr + angcorr != 0: FL.set_servo_speed(pots[1] + latcorr + angcorr, 0x06) if pots[2] - latcorr + angcorr != 0: BR.set_servo_speed(pots[2] - latcorr + angcorr, 0x06) if pots[3] - latcorr + angcorr != 0: BL.set_servo_speed(pots[3] - latcorr + angcorr, 0x06) ALL.set_led(0x06) hasvictim = -1 readmlx = True def mlxvchk(): global hasvictim while readmlx: vic = sn.hasvictim() if vic >= 0: hasvictim = vic walkpow = 1000 walktime = 1.54 sensordfront = 0 sensordback = 0 walkcalib = -50 expft = .5 def walkf(move, dst_sth = 400, col_sth = 500, scstop = False, scuse = False, old = True, corr = False): global readmlx global hasvictim start = time.time() basepow = [-walkpow, walkpow + walkcalib, -walkpow, walkpow + walkcalib] mlxthread = 0 if move[1] == 1: readmlx = True hasvictim = -1 mlxthread = thread.start_new_thread(mlxvchk, ()) if sn.shouldAlign(dst_sth): align(.5) if corr: setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth)) setPow(basepow, 0, 0) if not old: if scstop and scuse: wallstate = [sn.wl(dst_sth), sn.wr(dst_sth)] #l, r tmpws = [sn.wl(dst_sth), sn.wr(dst_sth)] t_curr = 0 while t_curr < move[1] and sn.wl(dst_sth) == wallstate[0] and not sn.wf( dst_sth) and sn.wl(dst_sth) == wallstate[1] and not sn.color(col_sth ): t_start = time.time() while time.time() - t_start < walktime: if corr: setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth)) setPow(basepow, 0, 0) sm = 0 cnt = 0 if sn.wlf() != tmpws[0]: sm += time.time() - expft tmpws[0] = sn.wlf() if sn.wrf() != tmpws[1]: sm += time.time() - expft tmpws[1] = sn.wrf() if cnt > 0: t_start = sm/cnt t_curr += 1 elif scstop and not scuse: wallstate = [sn.wl(dst_sth), sn.wr(dst_sth)] #l, r t_curr = 0 while t_curr < move[1] and sn.wl(dst_sth) == wallstate[0] and not sn.wf( dst_sth) and sn.wl(dst_sth) == wallstate[1] and not sn.color(col_sth ): t_start = time.time() while time.time() - t_start < walktime: if corr: setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth)) setPow(basepow, 0, 0) t_curr += 1 elif not scstop and scuse: tmpws = [sn.wl(dst_sth), sn.wr(dst_sth)] t_curr = 0 while t_curr < move[1] and not sn.wf(dst_sth) and not sn.color(col_sth): t_start = time.time() while time.time() - t_start < walktime: if corr: setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth)) setPow(basepow, 0, 0) sm = 0 cnt = 0 if sn.wlf() != tmpws[0]: sm += time.time() - expft tmpws[0] = sn.wlf() if sn.wrf() != tmpws[1]: sm += time.time() - expft tmpws[1] = sn.wrf() if cnt > 0: t_start = sm/cnt t_curr += 1 else: t_curr = 0 while t_curr < move[1] and not sn.wf(dst_sth) and not sn.color(col_sth): t_start = time.time() while time.time() - t_start < walktime: if corr: setPow(basepow, sn.latcorr(dst_sth), sn.angcorr()) setPow(basepow, 0, 0) t_curr += 1 else: time.sleep(walktime*move[1]) stop() readmlx = False if hasvictim >= 0: act = drop(hasvictim) move = [act, move] 
return move rotpow = 1000 rottime = 1 # per 90 dg def turnr(move, dst_sth = 400): basepow = [rotpow for i in range(4)] setPow(basepow, 0, 0) time.sleep(rottime*move[1]) stop() return move def turnl(move, dst_sth = 400): basepow = [-rotpow for i in range(4)] setPow(basepow, 0, 0) time.sleep(rottime*move[1]) stop() return move def upramp(): basepow = [-walkpow, walkpow + walkcalib, -walkpow, walkpow + walkcalib] while sn.isramp(): setPow(basepow, sn.latcorr(dst_sth), sn.angcorr()) walkf((0, .3)) def downramp(): upramp() gpio.setmode(gpio.BCM) gpio.setup(20, gpio.OUT) gpio.output(20, gpio.LOW) def drop(side): ret = None if side == 0: ret = (1, 2) elif side == 1: ret = (1, 1) elif side == 3: ret = (3, 1) for i in range(5): gpio.output(20, gpio.HIGH) time.sleep(.5) gpio.output(20, gpio.LOW) time.sleep(.5) apply(ret) DROP.set_servo_angle(0, 1, 0x08) time.sleep(1) DROP.set_servo_angle(-95, 1, 0x08) time.sleep(1) DROP.set_servo_speed(1, 0x06) ALL.set_led(0x06) return ret
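# Hypothetical usage sketch (added for illustration; the move tuples and
# thresholds below are made up and not taken from the original module):
#
#     walkf((0, 2), dst_sth=400)   # drive forward for two grid cells
#     turnr((1, 1))                # rotate roughly 90 degrees clockwise
#     turnl((2, 1))                # rotate roughly 90 degrees counter-clockwise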
gpl-3.0
-8,105,393,956,529,942,000
8,464,593,905,269,809,000
28.519048
88
0.490563
false
inspyration/odoo
addons/event/__openerp__.py
5
1447
# -*- coding: utf-8 -*-
{
    'name': 'Events Organisation',
    'version': '0.1',
    'website': 'https://www.odoo.com/page/events',
    'category': 'Tools',
    'summary': 'Trainings, Conferences, Meetings, Exhibitions, Registrations',
    'description': """
Organization and management of Events.
======================================

The event module allows you to efficiently organise events and all related tasks: planification, registration tracking, attendances, etc.

Key Features
------------
* Manage your Events and Registrations
* Use emails to automatically confirm and send acknowledgements for any event registration
""",
    'author': 'OpenERP SA',
    'depends': ['base_setup', 'board', 'email_template', 'marketing'],
    'data': [
        'security/event_security.xml',
        'security/ir.model.access.csv',
        'wizard/event_confirm_view.xml',
        'report/report_event_registration_view.xml',
        'event_view.xml',
        'event_data.xml',
        'res_config_view.xml',
        'email_template.xml',
        'views/event.xml',
        'event_report.xml',
        'views/report_registrationbadge.xml',
    ],
    'demo': [
        'event_demo.xml',
    ],
    'installable': True,
    'auto_install': False,
    'images': ['images/1_event_type_list.jpeg', 'images/2_events.jpeg',
               'images/3_registrations.jpeg', 'images/events_kanban.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
2,316,972,865,876,809,700
-5,492,077,921,254,806,000
33.452381
132
0.62405
false
buckket/twtxt
twtxt/helper.py
1
5887
""" twtxt.helper ~~~~~~~~~~~~ This module implements various helper for use in twtxt. :copyright: (c) 2016-2017 by buckket. :license: MIT, see LICENSE for more details. """ import shlex import subprocess import sys import textwrap import click import pkg_resources from twtxt.mentions import format_mentions from twtxt.parser import parse_iso8601 def style_timeline(tweets, porcelain=False): if porcelain: return "\n".join(style_tweet(tweet, porcelain) for tweet in tweets) else: return "\n{0}\n".format("\n\n".join(filter(None, (style_tweet(tweet, porcelain) for tweet in tweets)))) def style_tweet(tweet, porcelain=False): conf = click.get_current_context().obj["conf"] limit = conf.character_limit if porcelain: return "{nick}\t{url}\t{tweet}".format( nick=tweet.source.nick, url=tweet.source.url, tweet=str(tweet)) else: if sys.stdout.isatty() and not tweet.text.isprintable(): return None styled_text = format_mentions(tweet.text) len_styling = len(styled_text) - len(click.unstyle(styled_text)) final_text = textwrap.shorten(styled_text, limit + len_styling) if limit else styled_text timestamp = tweet.absolute_datetime if conf.use_abs_time else tweet.relative_datetime return "➤ {nick} ({time}):\n{tweet}".format( nick=click.style(tweet.source.nick, bold=True), tweet=final_text, time=click.style(timestamp, dim=True)) def style_source(source, porcelain=False): if porcelain: return "{nick}\t{url}".format( nick=source.nick, url=source.url) else: return "➤ {nick} @ {url}".format( nick=click.style(source.nick, bold=True), url=source.url) def style_source_with_status(source, status, porcelain=False): if porcelain: return "{nick}\t{url}\t{status}\t{content_length}\t{last_modified}".format( nick=source.nick, url=source.url, status=status.status_code, content_length=status.content_length, last_modified=status.last_modified) else: if status.status_code == 200: scolor, smessage = "green", str(status.status_code) elif status: scolor, smessage = "red", str(status.status_code) else: scolor, smessage = "red", "ERROR" return "➤ {nick} @ {url} [{content_length}, {last_modified}] ({status})".format( nick=click.style(source.nick, bold=True, fg=scolor), url=source.url, status=click.style(smessage, fg=scolor), content_length=status.natural_content_length, last_modified=status.natural_last_modified) def validate_created_at(ctx, param, value): if value: try: return parse_iso8601(value) except (ValueError, OverflowError) as e: raise click.BadParameter("{0}.".format(e)) def validate_text(ctx, param, value): conf = click.get_current_context().obj["conf"] if isinstance(value, tuple): value = " ".join(value) if not value and not sys.stdin.isatty(): value = click.get_text_stream("stdin").read() if value: value = value.strip() if conf.character_warning and len(value) > conf.character_warning: click.confirm("✂ Warning: Tweet is longer than {0} characters. 
Are you sure?".format( conf.character_warning), abort=True) return value else: raise click.BadArgumentUsage("Text can’t be empty.") def validate_config_key(ctx, param, value): """Validate a configuration key according to `section.item`.""" if not value: return value try: section, item = value.split(".", 1) except ValueError: raise click.BadArgumentUsage("Given key does not contain a section name.") else: return section, item def run_pre_tweet_hook(hook, options): try: command = shlex.split(hook.format(**options)) except KeyError: click.echo("✗ Invalid variables in pre_tweet_hook.") raise click.Abort try: subprocess.check_output(command, shell=True, universal_newlines=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: click.echo("✗ pre_tweet_hook returned {}.".format(e.returncode)) if e.output: click.echo(e.output) raise click.Abort def run_post_tweet_hook(hook, options): try: command = shlex.split(hook.format(**options)) except KeyError: click.echo("✗ Invalid variables in post_tweet_hook.") return try: subprocess.check_output(command, shell=True, universal_newlines=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: click.echo("✗ post_tweet_hook returned {}.".format(e.returncode)) if e.output: click.echo(e.output) def sort_and_truncate_tweets(tweets, direction, limit): if direction == "descending": return sorted(tweets, reverse=True)[:limit] elif direction == "ascending": if limit < len(tweets): return sorted(tweets)[len(tweets) - limit:] else: return sorted(tweets) else: return [] def generate_user_agent(): try: version = pkg_resources.require("twtxt")[0].version except pkg_resources.DistributionNotFound: version = "unknown" conf = click.get_current_context().obj["conf"] if conf.disclose_identity and conf.nick and conf.twturl: user_agent = "twtxt/{version} (+{url}; @{nick})".format( version=version, url=conf.twturl, nick=conf.nick) else: user_agent = "twtxt/{version}".format(version=version) return {"User-Agent": user_agent}
mit
-6,462,023,614,795,995,000
3,267,038,481,222,612,000
31.787709
111
0.622934
false
fabian4/trove
trove/limits/views.py
7
1896
#    Copyright 2013 OpenStack Foundation
#    All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import datetime

from oslo_utils import timeutils


class LimitView(object):

    def __init__(self, rate_limit):
        self.rate_limit = rate_limit

    def data(self):
        get_utc = datetime.datetime.utcfromtimestamp
        next_avail = get_utc(self.rate_limit.get("resetTime", 0))

        return {"limit": {
            "nextAvailable": timeutils.isotime(at=next_avail),
            "remaining": self.rate_limit.get("remaining", 0),
            "unit": self.rate_limit.get("unit", ""),
            "value": self.rate_limit.get("value", ""),
            "verb": self.rate_limit.get("verb", ""),
            "uri": self.rate_limit.get("URI", ""),
            "regex": self.rate_limit.get("regex", "")
        }}


class LimitViews(object):

    def __init__(self, abs_limits, rate_limits):
        self.abs_limits = abs_limits
        self.rate_limits = rate_limits

    def data(self):
        data = []
        abs_view = dict()
        abs_view["verb"] = "ABSOLUTE"
        for resource_name, abs_limit in self.abs_limits.items():
            abs_view["max_" + resource_name] = abs_limit

        data.append(abs_view)
        for l in self.rate_limits:
            data.append(LimitView(l).data()["limit"])
        return {"limits": data}
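# Minimal usage sketch (added comment, not part of the upstream module).
# The limit dictionaries below are hypothetical sample values, shaped like
# the ones trove's limits API passes in:
#
#     abs_limits = {"instances": 5, "volumes": 10}
#     rate_limits = [{"verb": "POST", "URI": "*", "regex": ".*",
#                     "value": 10, "remaining": 2, "unit": "MINUTE",
#                     "resetTime": 1366055000}]
#     LimitViews(abs_limits, rate_limits).data()
#     # -> {"limits": [{"verb": "ABSOLUTE", "max_instances": 5,
#     #                 "max_volumes": 10}, {"verb": "POST", ...}]}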
apache-2.0
-5,139,845,559,804,796,000
-1,288,773,629,905,713,200
32.263158
78
0.613924
false
berendkleinhaneveld/VTK
ThirdParty/Twisted/twisted/web/test/test_wsgi.py
33
54989
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.web.wsgi}. """ __metaclass__ = type from sys import exc_info from urllib import quote from thread import get_ident import StringIO, cStringIO, tempfile from zope.interface.verify import verifyObject from twisted.python.log import addObserver, removeObserver, err from twisted.python.failure import Failure from twisted.python.threadpool import ThreadPool from twisted.internet.defer import Deferred, gatherResults from twisted.internet import reactor from twisted.internet.error import ConnectionLost from twisted.trial.unittest import TestCase from twisted.web import http from twisted.web.resource import IResource, Resource from twisted.web.server import Request, Site, version from twisted.web.wsgi import WSGIResource from twisted.web.test.test_web import DummyChannel class SynchronousThreadPool: """ A single-threaded implementation of part of the L{ThreadPool} interface. This implementation calls functions synchronously rather than running them in a thread pool. It is used to make the tests which are not directly for thread-related behavior deterministic. """ def callInThread(self, f, *a, **kw): """ Call C{f(*a, **kw)} in this thread rather than scheduling it to be called in a thread. """ try: f(*a, **kw) except: # callInThread doesn't let exceptions propagate to the caller. # None is always returned and any exception raised gets logged # later on. err(None, "Callable passed to SynchronousThreadPool.callInThread failed") class SynchronousReactorThreads: """ A single-threaded implementation of part of the L{IReactorThreads} interface. This implementation assumes that it will only be invoked from the reactor thread, so it calls functions synchronously rather than trying to schedule them to run in the reactor thread. It is used in conjunction with L{SynchronousThreadPool} to make the tests which are not directly for thread-related behavior deterministic. """ def callFromThread(self, f, *a, **kw): """ Call C{f(*a, **kw)} in this thread which should also be the reactor thread. """ f(*a, **kw) class WSGIResourceTests(TestCase): def setUp(self): """ Create a L{WSGIResource} with synchronous threading objects and a no-op application object. This is useful for testing certain things about the resource implementation which are unrelated to WSGI. """ self.resource = WSGIResource( SynchronousReactorThreads(), SynchronousThreadPool(), lambda environ, startResponse: None) def test_interfaces(self): """ L{WSGIResource} implements L{IResource} and stops resource traversal. """ verifyObject(IResource, self.resource) self.assertTrue(self.resource.isLeaf) def test_unsupported(self): """ A L{WSGIResource} cannot have L{IResource} children. Its C{getChildWithDefault} and C{putChild} methods raise L{RuntimeError}. """ self.assertRaises( RuntimeError, self.resource.getChildWithDefault, "foo", Request(DummyChannel(), False)) self.assertRaises( RuntimeError, self.resource.putChild, "foo", Resource()) class WSGITestsMixin: """ @ivar channelFactory: A no-argument callable which will be invoked to create a new HTTP channel to associate with request objects. """ channelFactory = DummyChannel def setUp(self): self.threadpool = SynchronousThreadPool() self.reactor = SynchronousReactorThreads() def lowLevelRender( self, requestFactory, applicationFactory, channelFactory, method, version, resourceSegments, requestSegments, query=None, headers=[], body=None, safe=''): """ @param method: A C{str} giving the request method to use. 
@param version: A C{str} like C{'1.1'} giving the request version. @param resourceSegments: A C{list} of unencoded path segments which specifies the location in the resource hierarchy at which the L{WSGIResource} will be placed, eg C{['']} for I{/}, C{['foo', 'bar', '']} for I{/foo/bar/}, etc. @param requestSegments: A C{list} of unencoded path segments giving the request URI. @param query: A C{list} of two-tuples of C{str} giving unencoded query argument keys and values. @param headers: A C{list} of two-tuples of C{str} giving request header names and corresponding values. @param safe: A C{str} giving the bytes which are to be considered I{safe} for inclusion in the request URI and not quoted. @return: A L{Deferred} which will be called back with a two-tuple of the arguments passed which would be passed to the WSGI application object for this configuration and request (ie, the environment and start_response callable). """ root = WSGIResource( self.reactor, self.threadpool, applicationFactory()) resourceSegments.reverse() for seg in resourceSegments: tmp = Resource() tmp.putChild(seg, root) root = tmp channel = channelFactory() channel.site = Site(root) request = requestFactory(channel, False) for k, v in headers: request.requestHeaders.addRawHeader(k, v) request.gotLength(0) if body: request.content.write(body) request.content.seek(0) uri = '/' + '/'.join([quote(seg, safe) for seg in requestSegments]) if query is not None: uri += '?' + '&'.join(['='.join([quote(k, safe), quote(v, safe)]) for (k, v) in query]) request.requestReceived(method, uri, 'HTTP/' + version) return request def render(self, *a, **kw): result = Deferred() def applicationFactory(): def application(*args): environ, startResponse = args result.callback(args) startResponse('200 OK', []) return iter(()) return application self.lowLevelRender( Request, applicationFactory, self.channelFactory, *a, **kw) return result def requestFactoryFactory(self, requestClass=Request): d = Deferred() def requestFactory(*a, **kw): request = requestClass(*a, **kw) # If notifyFinish is called after lowLevelRender returns, it won't # do the right thing, because the request will have already # finished. One might argue that this is a bug in # Request.notifyFinish. request.notifyFinish().chainDeferred(d) return request return d, requestFactory def getContentFromResponse(self, response): return response.split('\r\n\r\n', 1)[1] class EnvironTests(WSGITestsMixin, TestCase): """ Tests for the values in the C{environ} C{dict} passed to the application object by L{twisted.web.wsgi.WSGIResource}. """ def environKeyEqual(self, key, value): def assertEnvironKeyEqual((environ, startResponse)): self.assertEqual(environ[key], value) return assertEnvironKeyEqual def test_environIsDict(self): """ L{WSGIResource} calls the application object with an C{environ} parameter which is exactly of type C{dict}. """ d = self.render('GET', '1.1', [], ['']) def cbRendered((environ, startResponse)): self.assertIdentical(type(environ), dict) d.addCallback(cbRendered) return d def test_requestMethod(self): """ The C{'REQUEST_METHOD'} key of the C{environ} C{dict} passed to the application contains the HTTP method in the request (RFC 3875, section 4.1.12). """ get = self.render('GET', '1.1', [], ['']) get.addCallback(self.environKeyEqual('REQUEST_METHOD', 'GET')) # Also make sure a different request method shows up as a different # value in the environ dict. 
post = self.render('POST', '1.1', [], ['']) post.addCallback(self.environKeyEqual('REQUEST_METHOD', 'POST')) return gatherResults([get, post]) def test_scriptName(self): """ The C{'SCRIPT_NAME'} key of the C{environ} C{dict} passed to the application contains the I{abs_path} (RFC 2396, section 3) to this resource (RFC 3875, section 4.1.13). """ root = self.render('GET', '1.1', [], ['']) root.addCallback(self.environKeyEqual('SCRIPT_NAME', '')) emptyChild = self.render('GET', '1.1', [''], ['']) emptyChild.addCallback(self.environKeyEqual('SCRIPT_NAME', '/')) leaf = self.render('GET', '1.1', ['foo'], ['foo']) leaf.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo')) container = self.render('GET', '1.1', ['foo', ''], ['foo', '']) container.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo/')) internal = self.render('GET', '1.1', ['foo'], ['foo', 'bar']) internal.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo')) unencoded = self.render( 'GET', '1.1', ['foo', '/', 'bar\xff'], ['foo', '/', 'bar\xff']) # The RFC says "(not URL-encoded)", even though that makes # interpretation of SCRIPT_NAME ambiguous. unencoded.addCallback( self.environKeyEqual('SCRIPT_NAME', '/foo///bar\xff')) return gatherResults([ root, emptyChild, leaf, container, internal, unencoded]) def test_pathInfo(self): """ The C{'PATH_INFO'} key of the C{environ} C{dict} passed to the application contains the suffix of the request URI path which is not included in the value for the C{'SCRIPT_NAME'} key (RFC 3875, section 4.1.5). """ assertKeyEmpty = self.environKeyEqual('PATH_INFO', '') root = self.render('GET', '1.1', [], ['']) root.addCallback(self.environKeyEqual('PATH_INFO', '/')) emptyChild = self.render('GET', '1.1', [''], ['']) emptyChild.addCallback(assertKeyEmpty) leaf = self.render('GET', '1.1', ['foo'], ['foo']) leaf.addCallback(assertKeyEmpty) container = self.render('GET', '1.1', ['foo', ''], ['foo', '']) container.addCallback(assertKeyEmpty) internalLeaf = self.render('GET', '1.1', ['foo'], ['foo', 'bar']) internalLeaf.addCallback(self.environKeyEqual('PATH_INFO', '/bar')) internalContainer = self.render('GET', '1.1', ['foo'], ['foo', '']) internalContainer.addCallback(self.environKeyEqual('PATH_INFO', '/')) unencoded = self.render('GET', '1.1', [], ['foo', '/', 'bar\xff']) unencoded.addCallback( self.environKeyEqual('PATH_INFO', '/foo///bar\xff')) return gatherResults([ root, leaf, container, internalLeaf, internalContainer, unencoded]) def test_queryString(self): """ The C{'QUERY_STRING'} key of the C{environ} C{dict} passed to the application contains the portion of the request URI after the first I{?} (RFC 3875, section 4.1.7). """ missing = self.render('GET', '1.1', [], [''], None) missing.addCallback(self.environKeyEqual('QUERY_STRING', '')) empty = self.render('GET', '1.1', [], [''], []) empty.addCallback(self.environKeyEqual('QUERY_STRING', '')) present = self.render('GET', '1.1', [], [''], [('foo', 'bar')]) present.addCallback(self.environKeyEqual('QUERY_STRING', 'foo=bar')) unencoded = self.render('GET', '1.1', [], [''], [('/', '/')]) unencoded.addCallback(self.environKeyEqual('QUERY_STRING', '%2F=%2F')) # "?" is reserved in the <searchpart> portion of a URL. However, it # seems to be a common mistake of clients to forget to quote it. So, # make sure we handle that invalid case. 
doubleQuestion = self.render( 'GET', '1.1', [], [''], [('foo', '?bar')], safe='?') doubleQuestion.addCallback( self.environKeyEqual('QUERY_STRING', 'foo=?bar')) return gatherResults([ missing, empty, present, unencoded, doubleQuestion]) def test_contentType(self): """ The C{'CONTENT_TYPE'} key of the C{environ} C{dict} passed to the application contains the value of the I{Content-Type} request header (RFC 3875, section 4.1.3). """ missing = self.render('GET', '1.1', [], ['']) missing.addCallback(self.environKeyEqual('CONTENT_TYPE', '')) present = self.render( 'GET', '1.1', [], [''], None, [('content-type', 'x-foo/bar')]) present.addCallback(self.environKeyEqual('CONTENT_TYPE', 'x-foo/bar')) return gatherResults([missing, present]) def test_contentLength(self): """ The C{'CONTENT_LENGTH'} key of the C{environ} C{dict} passed to the application contains the value of the I{Content-Length} request header (RFC 3875, section 4.1.2). """ missing = self.render('GET', '1.1', [], ['']) missing.addCallback(self.environKeyEqual('CONTENT_LENGTH', '')) present = self.render( 'GET', '1.1', [], [''], None, [('content-length', '1234')]) present.addCallback(self.environKeyEqual('CONTENT_LENGTH', '1234')) return gatherResults([missing, present]) def test_serverName(self): """ The C{'SERVER_NAME'} key of the C{environ} C{dict} passed to the application contains the best determination of the server hostname possible, using either the value of the I{Host} header in the request or the address the server is listening on if that header is not present (RFC 3875, section 4.1.14). """ missing = self.render('GET', '1.1', [], ['']) # 10.0.0.1 value comes from a bit far away - # twisted.test.test_web.DummyChannel.transport.getHost().host missing.addCallback(self.environKeyEqual('SERVER_NAME', '10.0.0.1')) present = self.render( 'GET', '1.1', [], [''], None, [('host', 'example.org')]) present.addCallback(self.environKeyEqual('SERVER_NAME', 'example.org')) return gatherResults([missing, present]) def test_serverPort(self): """ The C{'SERVER_PORT'} key of the C{environ} C{dict} passed to the application contains the port number of the server which received the request (RFC 3875, section 4.1.15). """ portNumber = 12354 def makeChannel(): channel = DummyChannel() channel.transport = DummyChannel.TCP() channel.transport.port = portNumber return channel self.channelFactory = makeChannel d = self.render('GET', '1.1', [], ['']) d.addCallback(self.environKeyEqual('SERVER_PORT', str(portNumber))) return d def test_serverProtocol(self): """ The C{'SERVER_PROTOCOL'} key of the C{environ} C{dict} passed to the application contains the HTTP version number received in the request (RFC 3875, section 4.1.16). """ old = self.render('GET', '1.0', [], ['']) old.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.0')) new = self.render('GET', '1.1', [], ['']) new.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.1')) return gatherResults([old, new]) def test_remoteAddr(self): """ The C{'REMOTE_ADDR'} key of the C{environ} C{dict} passed to the application contains the address of the client making the request. """ d = self.render('GET', '1.1', [], ['']) d.addCallback(self.environKeyEqual('REMOTE_ADDR', '192.168.1.1')) return d def test_headers(self): """ HTTP request headers are copied into the C{environ} C{dict} passed to the application with a C{HTTP_} prefix added to their names. 
""" singleValue = self.render( 'GET', '1.1', [], [''], None, [('foo', 'bar'), ('baz', 'quux')]) def cbRendered((environ, startResponse)): self.assertEqual(environ['HTTP_FOO'], 'bar') self.assertEqual(environ['HTTP_BAZ'], 'quux') singleValue.addCallback(cbRendered) multiValue = self.render( 'GET', '1.1', [], [''], None, [('foo', 'bar'), ('foo', 'baz')]) multiValue.addCallback(self.environKeyEqual('HTTP_FOO', 'bar,baz')) withHyphen = self.render( 'GET', '1.1', [], [''], None, [('foo-bar', 'baz')]) withHyphen.addCallback(self.environKeyEqual('HTTP_FOO_BAR', 'baz')) multiLine = self.render( 'GET', '1.1', [], [''], None, [('foo', 'bar\n\tbaz')]) multiLine.addCallback(self.environKeyEqual('HTTP_FOO', 'bar \tbaz')) return gatherResults([singleValue, multiValue, withHyphen, multiLine]) def test_wsgiVersion(self): """ The C{'wsgi.version'} key of the C{environ} C{dict} passed to the application has the value C{(1, 0)} indicating that this is a WSGI 1.0 container. """ versionDeferred = self.render('GET', '1.1', [], ['']) versionDeferred.addCallback(self.environKeyEqual('wsgi.version', (1, 0))) return versionDeferred def test_wsgiRunOnce(self): """ The C{'wsgi.run_once'} key of the C{environ} C{dict} passed to the application is set to C{False}. """ once = self.render('GET', '1.1', [], ['']) once.addCallback(self.environKeyEqual('wsgi.run_once', False)) return once def test_wsgiMultithread(self): """ The C{'wsgi.multithread'} key of the C{environ} C{dict} passed to the application is set to C{True}. """ thread = self.render('GET', '1.1', [], ['']) thread.addCallback(self.environKeyEqual('wsgi.multithread', True)) return thread def test_wsgiMultiprocess(self): """ The C{'wsgi.multiprocess'} key of the C{environ} C{dict} passed to the application is set to C{False}. """ process = self.render('GET', '1.1', [], ['']) process.addCallback(self.environKeyEqual('wsgi.multiprocess', False)) return process def test_wsgiURLScheme(self): """ The C{'wsgi.url_scheme'} key of the C{environ} C{dict} passed to the application has the request URL scheme. """ # XXX Does this need to be different if the request is for an absolute # URL? def channelFactory(): channel = DummyChannel() channel.transport = DummyChannel.SSL() return channel self.channelFactory = DummyChannel httpDeferred = self.render('GET', '1.1', [], ['']) httpDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'http')) self.channelFactory = channelFactory httpsDeferred = self.render('GET', '1.1', [], ['']) httpsDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'https')) return gatherResults([httpDeferred, httpsDeferred]) def test_wsgiErrors(self): """ The C{'wsgi.errors'} key of the C{environ} C{dict} passed to the application is a file-like object (as defined in the U{Input and Errors Streams<http://www.python.org/dev/peps/pep-0333/#input-and-error-streams>} section of PEP 333) which converts bytes written to it into events for the logging system. 
""" events = [] addObserver(events.append) self.addCleanup(removeObserver, events.append) errors = self.render('GET', '1.1', [], ['']) def cbErrors((environ, startApplication)): errors = environ['wsgi.errors'] errors.write('some message\n') errors.writelines(['another\nmessage\n']) errors.flush() self.assertEqual(events[0]['message'], ('some message\n',)) self.assertEqual(events[0]['system'], 'wsgi') self.assertTrue(events[0]['isError']) self.assertEqual(events[1]['message'], ('another\nmessage\n',)) self.assertEqual(events[1]['system'], 'wsgi') self.assertTrue(events[1]['isError']) self.assertEqual(len(events), 2) errors.addCallback(cbErrors) return errors class InputStreamTestMixin(WSGITestsMixin): """ A mixin for L{TestCase} subclasses which defines a number of tests against L{_InputStream}. The subclass is expected to create a file-like object to be wrapped by an L{_InputStream} under test. """ def getFileType(self): raise NotImplementedError( "%s.getFile must be implemented" % (self.__class__.__name__,)) def _renderAndReturnReaderResult(self, reader, content): contentType = self.getFileType() class CustomizedRequest(Request): def gotLength(self, length): # Always allocate a file of the specified type, instead of # using the base behavior of selecting one depending on the # length. self.content = contentType() def appFactoryFactory(reader): result = Deferred() def applicationFactory(): def application(*args): environ, startResponse = args result.callback(reader(environ['wsgi.input'])) startResponse('200 OK', []) return iter(()) return application return result, applicationFactory d, appFactory = appFactoryFactory(reader) self.lowLevelRender( CustomizedRequest, appFactory, DummyChannel, 'PUT', '1.1', [], [''], None, [], content) return d def test_readAll(self): """ Calling L{_InputStream.read} with no arguments returns the entire input stream. """ bytes = "some bytes are here" d = self._renderAndReturnReaderResult(lambda input: input.read(), bytes) d.addCallback(self.assertEqual, bytes) return d def test_readSome(self): """ Calling L{_InputStream.read} with an integer returns that many bytes from the input stream, as long as it is less than or equal to the total number of bytes available. """ bytes = "hello, world." d = self._renderAndReturnReaderResult(lambda input: input.read(3), bytes) d.addCallback(self.assertEqual, "hel") return d def test_readMoreThan(self): """ Calling L{_InputStream.read} with an integer that is greater than the total number of bytes in the input stream returns all bytes in the input stream. """ bytes = "some bytes are here" d = self._renderAndReturnReaderResult( lambda input: input.read(len(bytes) + 3), bytes) d.addCallback(self.assertEqual, bytes) return d def test_readTwice(self): """ Calling L{_InputStream.read} a second time returns bytes starting from the position after the last byte returned by the previous read. """ bytes = "some bytes, hello" def read(input): input.read(3) return input.read() d = self._renderAndReturnReaderResult(read, bytes) d.addCallback(self.assertEqual, bytes[3:]) return d def test_readNone(self): """ Calling L{_InputStream.read} with C{None} as an argument returns all bytes in the input stream. """ bytes = "the entire stream" d = self._renderAndReturnReaderResult( lambda input: input.read(None), bytes) d.addCallback(self.assertEqual, bytes) return d def test_readNegative(self): """ Calling L{_InputStream.read} with a negative integer as an argument returns all bytes in the input stream. 
""" bytes = "all of the input" d = self._renderAndReturnReaderResult( lambda input: input.read(-1), bytes) d.addCallback(self.assertEqual, bytes) return d def test_readline(self): """ Calling L{_InputStream.readline} with no argument returns one line from the input stream. """ bytes = "hello\nworld" d = self._renderAndReturnReaderResult( lambda input: input.readline(), bytes) d.addCallback(self.assertEqual, "hello\n") return d def test_readlineSome(self): """ Calling L{_InputStream.readline} with an integer returns at most that many bytes, even if it is not enough to make up a complete line. COMPATIBILITY NOTE: the size argument is excluded from the WSGI specification, but is provided here anyhow, because useful libraries such as python stdlib's cgi.py assume their input file-like-object supports readline with a size argument. If you use it, be aware your application may not be portable to other conformant WSGI servers. """ bytes = "goodbye\nworld" d = self._renderAndReturnReaderResult( lambda input: input.readline(3), bytes) d.addCallback(self.assertEqual, "goo") return d def test_readlineMoreThan(self): """ Calling L{_InputStream.readline} with an integer which is greater than the number of bytes in the next line returns only the next line. """ bytes = "some lines\nof text" d = self._renderAndReturnReaderResult( lambda input: input.readline(20), bytes) d.addCallback(self.assertEqual, "some lines\n") return d def test_readlineTwice(self): """ Calling L{_InputStream.readline} a second time returns the line following the line returned by the first call. """ bytes = "first line\nsecond line\nlast line" def readline(input): input.readline() return input.readline() d = self._renderAndReturnReaderResult(readline, bytes) d.addCallback(self.assertEqual, "second line\n") return d def test_readlineNone(self): """ Calling L{_InputStream.readline} with C{None} as an argument returns one line from the input stream. """ bytes = "this is one line\nthis is another line" d = self._renderAndReturnReaderResult( lambda input: input.readline(None), bytes) d.addCallback(self.assertEqual, "this is one line\n") return d def test_readlineNegative(self): """ Calling L{_InputStream.readline} with a negative integer as an argument returns one line from the input stream. """ bytes = "input stream line one\nline two" d = self._renderAndReturnReaderResult( lambda input: input.readline(-1), bytes) d.addCallback(self.assertEqual, "input stream line one\n") return d def test_readlines(self): """ Calling L{_InputStream.readlines} with no arguments returns a list of all lines from the input stream. """ bytes = "alice\nbob\ncarol" d = self._renderAndReturnReaderResult( lambda input: input.readlines(), bytes) d.addCallback(self.assertEqual, ["alice\n", "bob\n", "carol"]) return d def test_readlinesSome(self): """ Calling L{_InputStream.readlines} with an integer as an argument returns a list of lines from the input stream with the argument serving as an approximate bound on the total number of bytes to read. """ bytes = "123\n456\n789\n0" d = self._renderAndReturnReaderResult( lambda input: input.readlines(5), bytes) def cbLines(lines): # Make sure we got enough lines to make 5 bytes. Anything beyond # that is fine too. self.assertEqual(lines[:2], ["123\n", "456\n"]) d.addCallback(cbLines) return d def test_readlinesMoreThan(self): """ Calling L{_InputStream.readlines} with an integer which is greater than the total number of bytes in the input stream returns a list of all lines from the input. 
""" bytes = "one potato\ntwo potato\nthree potato" d = self._renderAndReturnReaderResult( lambda input: input.readlines(100), bytes) d.addCallback( self.assertEqual, ["one potato\n", "two potato\n", "three potato"]) return d def test_readlinesAfterRead(self): """ Calling L{_InputStream.readlines} after a call to L{_InputStream.read} returns lines starting at the byte after the last byte returned by the C{read} call. """ bytes = "hello\nworld\nfoo" def readlines(input): input.read(7) return input.readlines() d = self._renderAndReturnReaderResult(readlines, bytes) d.addCallback(self.assertEqual, ["orld\n", "foo"]) return d def test_readlinesNone(self): """ Calling L{_InputStream.readlines} with C{None} as an argument returns all lines from the input. """ bytes = "one fish\ntwo fish\n" d = self._renderAndReturnReaderResult( lambda input: input.readlines(None), bytes) d.addCallback(self.assertEqual, ["one fish\n", "two fish\n"]) return d def test_readlinesNegative(self): """ Calling L{_InputStream.readlines} with a negative integer as an argument returns a list of all lines from the input. """ bytes = "red fish\nblue fish\n" d = self._renderAndReturnReaderResult( lambda input: input.readlines(-1), bytes) d.addCallback(self.assertEqual, ["red fish\n", "blue fish\n"]) return d def test_iterable(self): """ Iterating over L{_InputStream} produces lines from the input stream. """ bytes = "green eggs\nand ham\n" d = self._renderAndReturnReaderResult(lambda input: list(input), bytes) d.addCallback(self.assertEqual, ["green eggs\n", "and ham\n"]) return d def test_iterableAfterRead(self): """ Iterating over L{_InputStream} after calling L{_InputStream.read} produces lines from the input stream starting from the first byte after the last byte returned by the C{read} call. """ bytes = "green eggs\nand ham\n" def iterate(input): input.read(3) return list(input) d = self._renderAndReturnReaderResult(iterate, bytes) d.addCallback(self.assertEqual, ["en eggs\n", "and ham\n"]) return d class InputStreamStringIOTests(InputStreamTestMixin, TestCase): """ Tests for L{_InputStream} when it is wrapped around a L{StringIO.StringIO}. """ def getFileType(self): return StringIO.StringIO class InputStreamCStringIOTests(InputStreamTestMixin, TestCase): """ Tests for L{_InputStream} when it is wrapped around a L{cStringIO.StringIO}. """ def getFileType(self): return cStringIO.StringIO class InputStreamTemporaryFileTests(InputStreamTestMixin, TestCase): """ Tests for L{_InputStream} when it is wrapped around a L{tempfile.TemporaryFile}. """ def getFileType(self): return tempfile.TemporaryFile class StartResponseTests(WSGITestsMixin, TestCase): """ Tests for the I{start_response} parameter passed to the application object by L{WSGIResource}. """ def test_status(self): """ The response status passed to the I{start_response} callable is written as the status of the response to the request. 
""" channel = DummyChannel() def applicationFactory(): def application(environ, startResponse): startResponse('107 Strange message', []) return iter(()) return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertTrue( channel.transport.written.getvalue().startswith( 'HTTP/1.1 107 Strange message')) d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def _headersTest(self, appHeaders, expectedHeaders): """ Verify that if the response headers given by C{appHeaders} are passed to the I{start_response} callable, then the response header lines given by C{expectedHeaders} plus I{Server} and I{Date} header lines are included in the response. """ # Make the Date header value deterministic self.patch(http, 'datetimeToString', lambda: 'Tuesday') channel = DummyChannel() def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', appHeaders) return iter(()) return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): response = channel.transport.written.getvalue() headers, rest = response.split('\r\n\r\n', 1) headerLines = headers.split('\r\n')[1:] headerLines.sort() allExpectedHeaders = expectedHeaders + [ 'Date: Tuesday', 'Server: ' + version, 'Transfer-Encoding: chunked'] allExpectedHeaders.sort() self.assertEqual(headerLines, allExpectedHeaders) d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def test_headers(self): """ The headers passed to the I{start_response} callable are included in the response as are the required I{Date} and I{Server} headers and the necessary connection (hop to hop) header I{Transfer-Encoding}. """ return self._headersTest( [('foo', 'bar'), ('baz', 'quux')], ['Baz: quux', 'Foo: bar']) def test_applicationProvidedContentType(self): """ If I{Content-Type} is included in the headers passed to the I{start_response} callable, one I{Content-Type} header is included in the response. """ return self._headersTest( [('content-type', 'monkeys are great')], ['Content-Type: monkeys are great']) def test_applicationProvidedServerAndDate(self): """ If either I{Server} or I{Date} is included in the headers passed to the I{start_response} callable, they are disregarded. """ return self._headersTest( [('server', 'foo'), ('Server', 'foo'), ('date', 'bar'), ('dATE', 'bar')], []) def test_delayedUntilReturn(self): """ Nothing is written in response to a request when the I{start_response} callable is invoked. If the iterator returned by the application object produces only empty strings, the response is written after the last element is produced. """ channel = DummyChannel() intermediateValues = [] def record(): intermediateValues.append(channel.transport.written.getvalue()) def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', [('foo', 'bar'), ('baz', 'quux')]) yield '' record() return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertEqual(intermediateValues, ['']) d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def test_delayedUntilContent(self): """ Nothing is written in response to a request when the I{start_response} callable is invoked. 
Once a non-empty string has been produced by the iterator returned by the application object, the response status and headers are written. """ channel = DummyChannel() intermediateValues = [] def record(): intermediateValues.append(channel.transport.written.getvalue()) def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', [('foo', 'bar')]) yield '' record() yield 'foo' record() return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertFalse(intermediateValues[0]) self.assertTrue(intermediateValues[1]) d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def test_content(self): """ Content produced by the iterator returned by the application object is written to the request as it is produced. """ channel = DummyChannel() intermediateValues = [] def record(): intermediateValues.append(channel.transport.written.getvalue()) def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', [('content-length', '6')]) yield 'foo' record() yield 'bar' record() return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertEqual( self.getContentFromResponse(intermediateValues[0]), 'foo') self.assertEqual( self.getContentFromResponse(intermediateValues[1]), 'foobar') d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def test_multipleStartResponse(self): """ If the I{start_response} callable is invoked multiple times before a data for the response body is produced, the values from the last call are used. """ channel = DummyChannel() def applicationFactory(): def application(environ, startResponse): startResponse('100 Foo', []) startResponse('200 Bar', []) return iter(()) return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertTrue( channel.transport.written.getvalue().startswith( 'HTTP/1.1 200 Bar\r\n')) d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def test_startResponseWithException(self): """ If the I{start_response} callable is invoked with a third positional argument before the status and headers have been written to the response, the status and headers become the newly supplied values. """ channel = DummyChannel() def applicationFactory(): def application(environ, startResponse): startResponse('100 Foo', [], (Exception, Exception("foo"), None)) return iter(()) return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertTrue( channel.transport.written.getvalue().startswith( 'HTTP/1.1 100 Foo\r\n')) d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def test_startResponseWithExceptionTooLate(self): """ If the I{start_response} callable is invoked with a third positional argument after the status and headers have been written to the response, the supplied I{exc_info} values are re-raised to the application. 
""" channel = DummyChannel() class SomeException(Exception): pass try: raise SomeException() except: excInfo = exc_info() reraised = [] def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', []) yield 'foo' try: startResponse('500 ERR', [], excInfo) except: reraised.append(exc_info()) return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertTrue( channel.transport.written.getvalue().startswith( 'HTTP/1.1 200 OK\r\n')) self.assertEqual(reraised[0][0], excInfo[0]) self.assertEqual(reraised[0][1], excInfo[1]) self.assertEqual(reraised[0][2].tb_next, excInfo[2]) d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def test_write(self): """ I{start_response} returns the I{write} callable which can be used to write bytes to the response body without buffering. """ channel = DummyChannel() intermediateValues = [] def record(): intermediateValues.append(channel.transport.written.getvalue()) def applicationFactory(): def application(environ, startResponse): write = startResponse('100 Foo', [('content-length', '6')]) write('foo') record() write('bar') record() return iter(()) return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertEqual( self.getContentFromResponse(intermediateValues[0]), 'foo') self.assertEqual( self.getContentFromResponse(intermediateValues[1]), 'foobar') d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d class ApplicationTests(WSGITestsMixin, TestCase): """ Tests for things which are done to the application object and the iterator it returns. """ def enableThreads(self): self.reactor = reactor self.threadpool = ThreadPool() self.threadpool.start() self.addCleanup(self.threadpool.stop) def test_close(self): """ If the application object returns an iterator which also has a I{close} method, that method is called after iteration is complete. """ channel = DummyChannel() class Result: def __init__(self): self.open = True def __iter__(self): for i in range(3): if self.open: yield str(i) def close(self): self.open = False result = Result() def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', [('content-length', '3')]) return result return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertEqual( self.getContentFromResponse( channel.transport.written.getvalue()), '012') self.assertFalse(result.open) d.addCallback(cbRendered) self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], ['']) return d def test_applicationCalledInThread(self): """ The application object is invoked and iterated in a thread which is not the reactor thread. 
""" self.enableThreads() invoked = [] def applicationFactory(): def application(environ, startResponse): def result(): for i in range(3): invoked.append(get_ident()) yield str(i) invoked.append(get_ident()) startResponse('200 OK', [('content-length', '3')]) return result() return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): self.assertNotIn(get_ident(), invoked) self.assertEqual(len(set(invoked)), 1) d.addCallback(cbRendered) self.lowLevelRender( requestFactory, applicationFactory, DummyChannel, 'GET', '1.1', [], ['']) return d def test_writeCalledFromThread(self): """ The I{write} callable returned by I{start_response} calls the request's C{write} method in the reactor thread. """ self.enableThreads() invoked = [] class ThreadVerifier(Request): def write(self, bytes): invoked.append(get_ident()) return Request.write(self, bytes) def applicationFactory(): def application(environ, startResponse): write = startResponse('200 OK', []) write('foo') return iter(()) return application d, requestFactory = self.requestFactoryFactory(ThreadVerifier) def cbRendered(ignored): self.assertEqual(set(invoked), set([get_ident()])) d.addCallback(cbRendered) self.lowLevelRender( requestFactory, applicationFactory, DummyChannel, 'GET', '1.1', [], ['']) return d def test_iteratedValuesWrittenFromThread(self): """ Strings produced by the iterator returned by the application object are written to the request in the reactor thread. """ self.enableThreads() invoked = [] class ThreadVerifier(Request): def write(self, bytes): invoked.append(get_ident()) return Request.write(self, bytes) def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', []) yield 'foo' return application d, requestFactory = self.requestFactoryFactory(ThreadVerifier) def cbRendered(ignored): self.assertEqual(set(invoked), set([get_ident()])) d.addCallback(cbRendered) self.lowLevelRender( requestFactory, applicationFactory, DummyChannel, 'GET', '1.1', [], ['']) return d def test_statusWrittenFromThread(self): """ The response status is set on the request object in the reactor thread. """ self.enableThreads() invoked = [] class ThreadVerifier(Request): def setResponseCode(self, code, message): invoked.append(get_ident()) return Request.setResponseCode(self, code, message) def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', []) return iter(()) return application d, requestFactory = self.requestFactoryFactory(ThreadVerifier) def cbRendered(ignored): self.assertEqual(set(invoked), set([get_ident()])) d.addCallback(cbRendered) self.lowLevelRender( requestFactory, applicationFactory, DummyChannel, 'GET', '1.1', [], ['']) return d def test_connectionClosedDuringIteration(self): """ If the request connection is lost while the application object is being iterated, iteration is stopped. """ class UnreliableConnection(Request): """ This is a request which pretends its connection is lost immediately after the first write is done to it. 
""" def write(self, bytes): self.connectionLost(Failure(ConnectionLost("No more connection"))) self.badIter = False def appIter(): yield "foo" self.badIter = True raise Exception("Should not have gotten here") def applicationFactory(): def application(environ, startResponse): startResponse('200 OK', []) return appIter() return application d, requestFactory = self.requestFactoryFactory(UnreliableConnection) def cbRendered(ignored): self.assertFalse(self.badIter, "Should not have resumed iteration") d.addCallback(cbRendered) self.lowLevelRender( requestFactory, applicationFactory, DummyChannel, 'GET', '1.1', [], ['']) return self.assertFailure(d, ConnectionLost) def _internalServerErrorTest(self, application): channel = DummyChannel() def applicationFactory(): return application d, requestFactory = self.requestFactoryFactory() def cbRendered(ignored): errors = self.flushLoggedErrors(RuntimeError) self.assertEqual(len(errors), 1) self.assertTrue( channel.transport.written.getvalue().startswith( 'HTTP/1.1 500 Internal Server Error')) d.addCallback(cbRendered) request = self.lowLevelRender( requestFactory, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) return d def test_applicationExceptionBeforeStartResponse(self): """ If the application raises an exception before calling I{start_response} then the response status is I{500} and the exception is logged. """ def application(environ, startResponse): raise RuntimeError("This application had some error.") return self._internalServerErrorTest(application) def test_applicationExceptionAfterStartResponse(self): """ If the application calls I{start_response} but then raises an exception before any data is written to the response then the response status is I{500} and the exception is logged. """ def application(environ, startResponse): startResponse('200 OK', []) raise RuntimeError("This application had some error.") return self._internalServerErrorTest(application) def _connectionClosedTest(self, application, responseContent): channel = DummyChannel() def applicationFactory(): return application d, requestFactory = self.requestFactoryFactory() # Capture the request so we can disconnect it later on. requests = [] def requestFactoryWrapper(*a, **kw): requests.append(requestFactory(*a, **kw)) return requests[-1] def ebRendered(ignored): errors = self.flushLoggedErrors(RuntimeError) self.assertEqual(len(errors), 1) response = channel.transport.written.getvalue() self.assertTrue(response.startswith('HTTP/1.1 200 OK')) # Chunked transfer-encoding makes this a little messy. self.assertIn(responseContent, response) d.addErrback(ebRendered) request = self.lowLevelRender( requestFactoryWrapper, applicationFactory, lambda: channel, 'GET', '1.1', [], [''], None, []) # By now the connection should be closed. self.assertTrue(channel.transport.disconnected) # Give it a little push to go the rest of the way. requests[0].connectionLost(Failure(ConnectionLost("All gone"))) return d def test_applicationExceptionAfterWrite(self): """ If the application raises an exception after the response status has already been sent then the connection is closed and the exception is logged. 
""" responseContent = ( 'Some bytes, triggering the server to start sending the response') def application(environ, startResponse): startResponse('200 OK', []) yield responseContent raise RuntimeError("This application had some error.") return self._connectionClosedTest(application, responseContent) def test_applicationCloseException(self): """ If the application returns a closeable iterator and the C{close} method raises an exception when called then the connection is still closed and the exception is logged. """ responseContent = 'foo' class Application(object): def __init__(self, environ, startResponse): startResponse('200 OK', []) def __iter__(self): yield responseContent def close(self): raise RuntimeError("This application had some error.") return self._connectionClosedTest(Application, responseContent)
bsd-3-clause
-8,947,990,660,216,422,000
-802,605,255,208,695,900
34.002546
85
0.591573
false
aurofable/medhack-server
venv/lib/python2.7/encodings/punycode.py
586
6813
# -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492

Written by Martin v. Löwis.
"""

import codecs

##################### Encoding #####################################

def segregate(str):
    """3.1 Basic code point segregation"""
    base = []
    extended = {}
    for c in str:
        if ord(c) < 128:
            base.append(c)
        else:
            extended[c] = 1
    extended = extended.keys()
    extended.sort()
    return "".join(base).encode("ascii"),extended

def selective_len(str, max):
    """Return the length of str, considering only characters below max."""
    res = 0
    for c in str:
        if ord(c) < max:
            res += 1
    return res

def selective_find(str, char, index, pos):
    """Return a pair (index, pos), indicating the next occurrence of
    char in str. index is the position of the character considering
    only ordinals up to and including char, and pos is the position in
    the full string. index/pos is the starting position in the full
    string."""

    l = len(str)
    while 1:
        pos += 1
        if pos == l:
            return (-1, -1)
        c = str[pos]
        if c == char:
            return index+1, pos
        elif c < char:
            index += 1

def insertion_unsort(str, extended):
    """3.2 Insertion unsort coding"""
    oldchar = 0x80
    result = []
    oldindex = -1
    for c in extended:
        index = pos = -1
        char = ord(c)
        curlen = selective_len(str, char)
        delta = (curlen+1) * (char - oldchar)
        while 1:
            index,pos = selective_find(str,c,index,pos)
            if index == -1:
                break
            delta += index - oldindex
            result.append(delta-1)
            oldindex = index
            delta = 0
        oldchar = char
    return result

def T(j, bias):
    # Punycode parameters: tmin = 1, tmax = 26, base = 36
    res = 36 * (j + 1) - bias
    if res < 1: return 1
    if res > 26: return 26
    return res

digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
    """3.3 Generalized variable-length integers"""
    result = []
    j = 0
    while 1:
        t = T(j, bias)
        if N < t:
            result.append(digits[N])
            return result
        result.append(digits[t + ((N - t) % (36 - t))])
        N = (N - t) // (36 - t)
        j += 1

def adapt(delta, first, numchars):
    if first:
        delta //= 700
    else:
        delta //= 2
    delta += delta // numchars
    # ((base - tmin) * tmax) // 2 == 455
    divisions = 0
    while delta > 455:
        delta = delta // 35 # base - tmin
        divisions += 36
    bias = divisions + (36 * delta // (delta + 38))
    return bias

def generate_integers(baselen, deltas):
    """3.4 Bias adaptation"""
    # Punycode parameters: initial bias = 72, damp = 700, skew = 38
    result = []
    bias = 72
    for points, delta in enumerate(deltas):
        s = generate_generalized_integer(delta, bias)
        result.extend(s)
        bias = adapt(delta, points==0, baselen+points+1)
    return "".join(result)

def punycode_encode(text):
    base, extended = segregate(text)
    base = base.encode("ascii")
    deltas = insertion_unsort(text, extended)
    extended = generate_integers(len(base), deltas)
    if base:
        return base + "-" + extended
    return extended

##################### Decoding #####################################

def decode_generalized_number(extended, extpos, bias, errors):
    """3.3 Generalized variable-length integers"""
    result = 0
    w = 1
    j = 0
    while 1:
        try:
            char = ord(extended[extpos])
        except IndexError:
            if errors == "strict":
                raise UnicodeError, "incomplete punicode string"
            return extpos + 1, None
        extpos += 1
        if 0x41 <= char <= 0x5A: # A-Z
            digit = char - 0x41
        elif 0x30 <= char <= 0x39:
            digit = char - 22 # 0x30-26
        elif errors == "strict":
            raise UnicodeError("Invalid extended code point '%s'"
                               % extended[extpos])
        else:
            return extpos, None
        t = T(j, bias)
        result += digit * w
        if digit < t:
            return extpos, result
        w = w * (36 - t)
        j += 1

def insertion_sort(base, extended, errors):
    """3.2 Insertion unsort coding"""
    char = 0x80
    pos = -1
    bias = 72
    extpos = 0
    while extpos < len(extended):
        newpos, delta = decode_generalized_number(extended, extpos,
                                                  bias, errors)
        if delta is None:
            # There was an error in decoding. We can't continue because
            # synchronization is lost.
            return base
        pos += delta+1
        char += pos // (len(base) + 1)
        if char > 0x10FFFF:
            if errors == "strict":
                raise UnicodeError, ("Invalid character U+%x" % char)
            char = ord('?')
        pos = pos % (len(base) + 1)
        base = base[:pos] + unichr(char) + base[pos:]
        bias = adapt(delta, (extpos == 0), len(base))
        extpos = newpos
    return base

def punycode_decode(text, errors):
    pos = text.rfind("-")
    if pos == -1:
        base = ""
        extended = text
    else:
        base = text[:pos]
        extended = text[pos+1:]
    base = unicode(base, "ascii", errors)
    extended = extended.upper()
    return insertion_sort(base, extended, errors)

### Codec APIs

class Codec(codecs.Codec):

    def encode(self,input,errors='strict'):
        res = punycode_encode(input)
        return res, len(input)

    def decode(self,input,errors='strict'):
        if errors not in ('strict', 'replace', 'ignore'):
            raise UnicodeError, "Unsupported error handling "+errors
        res = punycode_decode(input, errors)
        return res, len(input)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return punycode_encode(input)

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        if self.errors not in ('strict', 'replace', 'ignore'):
            raise UnicodeError, "Unsupported error handling "+self.errors
        return punycode_decode(input, self.errors)

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='punycode',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
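# Round-trip sketch (added comment, not in the upstream module): under
# Python 2 this codec is reachable through the standard codec machinery:
#
#     >>> u"b\xfccher".encode("punycode")   # u"bücher"
#     'bcher-kva'
#     >>> 'bcher-kva'.decode("punycode")
#     u'b\xfccher'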
mit
8,471,891,741,313,882,000
-7,241,086,619,788,721,000
27.62605
74
0.552033
false
ryneeverett/cartridge
cartridge/shop/translation.py
5
1121
from modeltranslation.translator import translator, TranslationOptions

from mezzanine.conf import settings
from mezzanine.core.translation import (TranslatedDisplayable,
                                        TranslatedRichText)

from cartridge.shop.models import (Category, Product, ProductOption,
                                   ProductImage, ProductVariation)


class TranslatedProduct(TranslatedDisplayable, TranslatedRichText):
    fields = ()


class TranslatedProductImage(TranslationOptions):
    fields = ('description',)


class TranslatedProductOption(TranslationOptions):
    fields = ('name',)


class TranslatedProductVariation(TranslationOptions):
    fields = tuple(('option%s' % opt[0]
                    for opt in settings.SHOP_OPTION_TYPE_CHOICES))


class TranslatedCategory(TranslatedRichText):
    fields = ()


translator.register(Product, TranslatedProduct)
translator.register(ProductImage, TranslatedProductImage)
translator.register(ProductOption, TranslatedProductOption)
translator.register(ProductVariation, TranslatedProductVariation)
translator.register(Category, TranslatedCategory)
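A quick sketch of what the TranslatedProductVariation fields expression evaluates to; the SHOP_OPTION_TYPE_CHOICES value below is hypothetical, as the real one comes from the project's settings:

# Illustrative only: each option type id becomes an 'option<N>' field name.
SHOP_OPTION_TYPE_CHOICES = ((1, 'Size'), (2, 'Colour'))
fields = tuple('option%s' % opt[0] for opt in SHOP_OPTION_TYPE_CHOICES)
assert fields == ('option1', 'option2')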
bsd-2-clause
-328,367,419,562,413,760
4,644,854,545,825,629,000
31.970588
70
0.753791
false
FBRTMaka/ooi-ui-services
ooiservices/tests/test_routes.py
2
7359
#!/usr/bin/env python
'''
Specific testing of routes.
'''
__author__ = 'Edna Donoughe'

import unittest
import json
from base64 import b64encode
from flask import url_for
from ooiservices.app import create_app, db
from ooiservices.app.models import PlatformDeployment, InstrumentDeployment, Stream, StreamParameter
from ooiservices.app.models import Organization, User, UserScope
import flask.ext.whooshalchemy as whooshalchemy
import datetime as dt

app = create_app('TESTING_CONFIG')
app.config['WHOOSH_BASE'] = 'ooiservices/whoosh_index'
whooshalchemy.whoosh_index(app, PlatformDeployment)

'''
These tests are additional to the normal testing performed by coverage;
each of these tests is meant to validate model logic outside of db management.
'''


class UserTestCase(unittest.TestCase):

    def setUp(self):
        self.app = app
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        test_username = 'admin'
        test_password = 'test'
        Organization.insert_org()
        User.insert_user(username=test_username, password=test_password)
        self.client = self.app.test_client(use_cookies=False)
        UserScope.insert_scopes()
        admin = User.query.filter_by(user_name='admin').first()
        scope = UserScope.query.filter_by(scope_name='user_admin').first()
        admin.scopes.append(scope)
        db.session.add(admin)
        db.session.commit()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def get_api_headers(self, username, password):
        return {
            'Authorization': 'Basic ' + b64encode(
                (username + ':' + password).encode('utf-8')).decode('utf-8'),
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }

    # Test [GET] /platform_deployments - 'main.get_platform_deployments'
    def test_route_get_platform_deployments(self):
        # Create a sample data set.
        platform_ref = PlatformDeployment(reference_designator='CE01ISSM')
        db.session.add(platform_ref)
        db.session.commit()
        platform_ref2 = PlatformDeployment(reference_designator='GS05MOAS-PG002')
        db.session.add(platform_ref2)
        db.session.commit()
        content_type = 'application/json'
        response = self.client.get(url_for('main.get_platform_deployments'), content_type=content_type)
        all_data = response.data
        expected_data = json.loads(response.data)
        self.assertTrue(response.status_code == 200)
        response = self.client.get(url_for('main.get_platform_deployment', id='CE01ISSM'), content_type=content_type)
        self.assertTrue(response.status_code == 200)
        response = self.client.get('/platform_deployments?array_id=3')
        self.assertTrue(response.status_code == 200)
        response = self.client.get('/platform_deployments?array_id=999')
        self.assertTrue(response.status_code == 200)
        data = json.loads(response.data)
        no_data = {'platform_deployments': []}
        self.assertTrue(data == no_data)
        response = self.client.get('/platform_deployments?ref_id="GS05MOAS-PG002"')
        self.assertTrue(response.status_code == 200)
        # Search for a nonexistent platform; all platforms are returned.
        response = self.client.get('/platform_deployments?ref_id="badthing"')
        self.assertTrue(response.status_code == 200)
        data = json.loads(response.data)
        self.assertTrue(data == expected_data)
        response = self.client.get('/platform_deployments?search="CE01ISSM"')
        self.assertTrue(response.status_code == 200)

    # Test [GET] /parameters - 'main.get_parameters'
    def test_get_parameters(self):
        '''
        parameter(id=preferred_timestamp):
        {
          "data_type": null,
          "id": 1,
          "long_name": null,
          "parameter_name": "preferred_timestamp",
          "short_name": null,
          "standard_name": null,
          "units": null
        }
        '''
        content_type = 'application/json'
        # Create a sample data set.
        parameter_name = StreamParameter(stream_parameter_name='preferred_timestamp')
        db.session.add(parameter_name)
        db.session.commit()
        # Get all parameters
        response = self.client.get(url_for('main.get_parameters'), content_type=content_type)
        self.assertTrue(response.status_code == 200)
        # Get parameter
        response = self.client.get(url_for('main.get_parameter', id='preferred_timestamp'), content_type=content_type)
        self.assertTrue(response.status_code == 200)

    def test_organization(self):
        content_type = 'application/json'
        # Get all organizations
        response = self.client.get('/organization', content_type=content_type)
        self.assertEquals(response.status_code, 200)
        data = json.loads(response.data)
        expectation = {u'organizations': [{u'id': 1,
                                           u'organization_long_name': None,
                                           u'organization_name': u'RPS ASA',
                                           u'image_url': None}]}
        self.assertEquals(data, expectation)
        # Get organization by id
        response = self.client.get('/organization/1', content_type=content_type)
        self.assertEquals(response.status_code, 200)
        data = json.loads(response.data)
        self.assertEquals(data, expectation['organizations'][0])
        # Get a nonexistent organization (bad id value); expect failure
        response = self.client.get('/organization/999', content_type=content_type)
        self.assertEquals(response.status_code, 204)

    # # Test [GET] /display_name - 'main.get_display_name'
    # def test_get_display_name(self):
    #
    #     content_type = 'application/json'
    #
    #     # Create a sample data set.
    #     platform_ref = VocabNames(reference_designator='CE01ISSM', level_one='Endurance', level_two='OR Inshore Surface Mooring')
    #     db.session.add(platform_ref)
    #     db.session.commit()
    #
    #     platform_ref2 = VocabNames(reference_designator='CE01ISSM-MFC31', level_one='Endurance', level_two='OR Inshore Surface Mooring',
    #                                level_three='Multi-Function Node')
    #     db.session.add(platform_ref2)
    #     db.session.commit()
    #     response = self.client.get(url_for('main.get_display_name', reference_designator='CE01ISSM-MFC31'), content_type=content_type)
    #     self.assertEquals(response.status_code, 200)
    #
    #     response = self.client.get(url_for('main.get_display_name'), content_type=content_type)
    #     self.assertEquals(response.status_code, 204)
    #
    #     response = self.client.get(url_for('main.get_display_name', reference_designator='GS03FLMA-RIXXX'), content_type=content_type)
    #     self.assertEquals(response.status_code, 204)
    #
    #     response = self.client.get(url_for('main.get_display_name', reference_designator='GS03FLMA-RIXXX-BAD'), content_type=content_type)
    #     self.assertEquals(response.status_code, 204)
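A quick sketch of what get_api_headers produces — standard HTTP Basic auth; the credentials are the test fixtures defined in setUp above:

# Illustrative only; matches the header construction in get_api_headers.
from base64 import b64encode

creds = b64encode('admin:test'.encode('utf-8')).decode('utf-8')
# creds == 'YWRtaW46dGVzdA=='
headers = {'Authorization': 'Basic ' + creds,
           'Accept': 'application/json',
           'Content-Type': 'application/json'}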
apache-2.0
-8,972,024,312,827,844,000
4,406,342,214,411,464,700
40.342697
140
0.625221
false
devinbalkind/eden
models/00_db.py
4
6534
# -*- coding: utf-8 -*-
"""
    Import Modules
    Configure the Database
    Instantiate Classes
"""

if settings.get_L10n_languages_readonly():
    # Make the Language files read-only for improved performance
    T.is_writable = False

get_vars = request.get_vars

# Are we running in debug mode?
request_debug = get_vars.get("debug", None)
s3.debug = request_debug or settings.get_base_debug()
if request_debug:
    # Also override log level:
    settings.log.level = "debug"
if s3.debug:
    # Reload all modules every request
    # Doesn't catch s3cfg or s3/*
    from gluon.custom_import import track_changes
    track_changes(True)

import datetime

try:
    import json  # try stdlib (Python 2.6)
except ImportError:
    try:
        import simplejson as json  # try external module
    except:
        import gluon.contrib.simplejson as json  # fallback to pure-Python module

########################
# Database Configuration
########################

migrate = settings.get_base_migrate()
fake_migrate = settings.get_base_fake_migrate()

if migrate:
    check_reserved = ["mysql", "postgres"]
else:
    check_reserved = None

(db_string, pool_size) = settings.get_database_string()
if db_string.find("sqlite") != -1:
    db = DAL(db_string,
             check_reserved=check_reserved,
             migrate_enabled = migrate,
             fake_migrate_all = fake_migrate,
             lazy_tables = not migrate)
    # on SQLite 3.6.19+ this enables foreign key support (included in Python 2.7+)
    # db.executesql("PRAGMA foreign_keys=ON")
else:
    try:
        if db_string.find("mysql") != -1:
            # Use MySQLdb where available (pymysql has given broken pipes)
            # - done automatically now, no need to add this manually
            #try:
            #    import MySQLdb
            #    from gluon.dal import MySQLAdapter
            #    MySQLAdapter.driver = MySQLdb
            #except ImportError:
            #    # Fallback to pymysql
            #    pass
            if check_reserved:
                check_reserved = ["postgres"]
            db = DAL(db_string,
                     check_reserved = check_reserved,
                     pool_size = pool_size,
                     migrate_enabled = migrate,
                     lazy_tables = not migrate)
        else:
            # PostgreSQL
            if check_reserved:
                check_reserved = ["mysql"]
            db = DAL(db_string,
                     check_reserved = check_reserved,
                     pool_size = pool_size,
                     migrate_enabled = migrate,
                     lazy_tables = not migrate)
    except:
        db_type = db_string.split(":", 1)[0]
        db_location = db_string.split("@", 1)[1]
        raise(HTTP(503, "Cannot connect to %s Database: %s" % (db_type, db_location)))

current.db = db
db.set_folder("upload")

# Sessions Storage
if settings.get_base_session_memcache():
    # Store sessions in Memcache
    from gluon.contrib.memcache import MemcacheClient
    cache.memcache = MemcacheClient(request,
                                    [settings.get_base_session_memcache()])
    from gluon.contrib.memdb import MEMDB
    session.connect(request, response, db=MEMDB(cache.memcache))

####################################################################
# Instantiate Classes from Modules                                 #
# - store instances in current to be accessible from other modules #
####################################################################

from gluon.tools import Mail
mail = Mail()
current.mail = mail

from gluon.storage import Messages
messages = Messages(T)
current.messages = messages
ERROR = Messages(T)
current.ERROR = ERROR

# Import the S3 Framework
if update_check_needed:
    # Reload the Field definitions
    reload(s3base.s3fields)
else:
    import s3 as s3base

# Set up logger (before any module attempts to use it!)
import s3log
s3log.S3Log.setup()

# AAA
current.auth = auth = s3base.AuthS3()

# Use session for persistent per-user variables
# - beware of a user having multiple tabs open!
# - don't save callables or class instances as these can't be pickled
if not session.s3:
    session.s3 = Storage()

# Use username instead of email address for logins
# - would probably require further customisation
#   to get this fully-working within Eden as it's not a Tested configuration
#auth.settings.login_userfield = "username"

auth.settings.hmac_key = settings.get_auth_hmac_key()
auth.define_tables(migrate=migrate, fake_migrate=fake_migrate)

current.audit = audit = s3base.S3Audit(migrate=migrate,
                                       fake_migrate=fake_migrate)

# Shortcuts for models/controllers/views
s3_has_role = auth.s3_has_role
s3_has_permission = auth.s3_has_permission
s3_logged_in_person = auth.s3_logged_in_person

# CRUD
s3.crud = Storage()

# S3 Custom Validators and Widgets, imported here into the global
# namespace in order to access them without the s3base namespace prefix
s3_action_buttons = s3base.S3CRUD.action_buttons
s3_fullname = s3base.s3_fullname
S3ResourceHeader = s3base.S3ResourceHeader
from s3.s3navigation import s3_rheader_tabs
from s3.s3validators import *
from s3.s3widgets import *
from s3.s3data import *

# GIS Module
gis = s3base.GIS()
current.gis = gis

# s3_request
s3_request = s3base.s3_request

# Field Selectors
FS = s3base.FS

# S3XML
s3xml = s3base.S3XML()
current.xml = s3xml

# Messaging
msg = s3base.S3Msg()
current.msg = msg

# Sync
sync = s3base.S3Sync()
current.sync = sync

# -----------------------------------------------------------------------------
def s3_clear_session():

    # CRUD last opened records (rcvars)
    s3base.s3_remove_last_record_id()

    # Session-owned records
    if "owned_records" in session:
        del session["owned_records"]

    if "s3" in session:
        s3 = session.s3
        opts = ["hrm", "report_options", "utc_offset", "deduplicate"]
        for o in opts:
            if o in s3:
                del s3[o]

# -----------------------------------------------------------------------------
def s3_auth_on_login(form):
    """
        Actions to be performed upon successful login
            Do not redirect from here!
    """
    s3_clear_session()

# -----------------------------------------------------------------------------
def s3_auth_on_logout(user):
    """
        Actions to be performed after logout
            Do not redirect from here!
    """
    s3_clear_session()

# END =========================================================================
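For clarity, a sketch of what the connection-failure branch extracts from a web2py connection string; the URI here is a made-up example:

# Hypothetical connection string, mirroring the error-branch parsing above.
db_string = "postgres://eden:secret@db.example.org:5432/sahana"
db_type = db_string.split(":", 1)[0]      # 'postgres'
db_location = db_string.split("@", 1)[1]  # 'db.example.org:5432/sahana'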
mit
6,000,285,428,019,583,000
2,050,675,274,096,701,700
28.300448
86
0.5955
false
playingaround2017/test123
gamera/pixmaps/img2img.py
2
2515
#
# This file has been taken from wxpython (see the file
# wx/tools/img2img.py in the wxpython source distribution)
#
# Copyright (c) 1998 Julian Smart, Robert Roebling et al
#
# This program may be freely used, copied and distributed under
# the terms of the wxWindows Library Licence, Version 3. See
# the file "copyright" of the wxpython distribution from
# http://wxpython.org/ for details.
#
"""
Common routines for the image converter utilities.
"""
import sys, os, glob, getopt, string
import wx

if wx.Platform == "__WXGTK__":
    # some bitmap related things need to have a wxApp initialized...
    app = wx.PySimpleApp()

wx.InitAllImageHandlers()


def convert(file, maskClr, outputDir, outputName, outType, outExt):
    if string.lower(os.path.splitext(file)[1]) == ".ico":
        icon = wx.Icon(file, wx.BITMAP_TYPE_ICO)
        img = wx.BitmapFromIcon(icon)
    else:
        img = wx.Bitmap(file, wx.BITMAP_TYPE_ANY)

    if not img.Ok():
        return 0, file + " failed to load!"
    else:
        if maskClr:
            om = img.GetMask()
            mask = wx.MaskColour(img, maskClr)
            img.SetMask(mask)
            if om is not None:
                om.Destroy()
        if outputName:
            newname = outputName
        else:
            newname = os.path.join(outputDir,
                                   os.path.basename(os.path.splitext(file)[0]) + outExt)
        if img.SaveFile(newname, outType):
            return 1, file + " converted to " + newname
        else:
            img = wx.ImageFromBitmap(img)
            if img.SaveFile(newname, outType):
                return 1, "ok"
            else:
                return 0, file + " failed to save!"


def main(args, outType, outExt, doc):
    if not args or ("-h" in args):
        print doc
        return

    outputDir = ""
    maskClr = None
    outputName = None

    try:
        opts, fileArgs = getopt.getopt(args, "m:n:o:")
    except getopt.GetoptError:
        # Print the caller-supplied usage text (the original printed this
        # module's own __doc__ here, which was inconsistent with the other
        # error paths).
        print doc
        return

    for opt, val in opts:
        if opt == "-m":
            maskClr = val
        elif opt == "-n":
            outputName = val
        elif opt == "-o":
            outputDir = val

    if not fileArgs:
        print doc
        return

    for arg in fileArgs:
        for file in glob.glob(arg):
            if not os.path.isfile(file):
                continue
            ok, msg = convert(file, maskClr, outputDir, outputName,
                              outType, outExt)
            print msg
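The module is meant to be driven by thin wrapper scripts; a plausible wrapper follows (Python 2, matching the module; the target type, extension and usage text are illustrative, mirroring how wxPython's img2png-style tools call main):

# Hypothetical wrapper script, e.g. an img2png equivalent.
import sys
import wx
import img2img

doc = "img2png.py [-m mask-colour] [-n output-name] [-o output-dir] files..."

if __name__ == "__main__":
    img2img.main(sys.argv[1:], wx.BITMAP_TYPE_PNG, ".png", doc)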
gpl-2.0
3,033,087,872,703,939,000
8,306,284,134,938,484,000
26.637363
88
0.561431
false
topiaruss/django-filer
filer/migrations/0012_renaming_folderpermissions.py
49
10617
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): db.rename_column('filer_folderpermission', 'can_edit_new', 'can_edit') db.rename_column('filer_folderpermission', 'can_read_new', 'can_read') db.rename_column('filer_folderpermission', 'can_add_children_new', 'can_add_children') def backwards(self, orm): db.rename_column('filer_folderpermission', 'can_edit', 'can_edit_new') db.rename_column('filer_folderpermission', 'can_read', 'can_read_new') db.rename_column('filer_folderpermission', 'can_add_children', 'can_add_children_new') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'filer.clipboard': { 'Meta': {'object_name': 'Clipboard'}, 'files': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'in_clipboards'", 'symmetrical': 
'False', 'through': "orm['filer.ClipboardItem']", 'to': "orm['filer.File']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filer_clipboards'", 'to': "orm['auth.User']"}) }, 'filer.clipboarditem': { 'Meta': {'object_name': 'ClipboardItem'}, 'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Clipboard']"}), 'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'filer.file': { 'Meta': {'object_name': 'File'}, '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}), 'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'filer.folder': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'filer.folderpermission': { 'Meta': {'object_name': 
'FolderPermission'}, 'can_add_children': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'can_edit': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'can_read': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'everybody': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Folder']", 'null': 'True', 'blank': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'filer.image': { 'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']}, '_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), '_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}), 'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}) } } complete_apps = ['filer']
bsd-3-clause
52,429,665,744,392,330
5,521,475,071,533,122,000
82.598425
206
0.558067
false
ogenstad/ansible
lib/ansible/modules/cloud/google/gcp_healthcheck.py
48
15302
#!/usr/bin/python # Copyright 2017 Google Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_healthcheck version_added: "2.4" short_description: Create, Update or Destroy a Healthcheck. description: - Create, Update or Destroy a Healthcheck. Currently only HTTP and HTTPS Healthchecks are supported. Healthchecks are used to monitor individual instances, managed instance groups and/or backend services. Healtchecks are reusable. - Visit U(https://cloud.google.com/compute/docs/load-balancing/health-checks) for an overview of Healthchecks on GCP. - See U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for API details on HTTP Healthchecks. - See U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks) for more details on the HTTPS Healtcheck API. requirements: - "python >= 2.6" - "google-api-python-client >= 1.6.2" - "google-auth >= 0.9.0" - "google-auth-httplib2 >= 0.0.2" notes: - Only supports HTTP and HTTPS Healthchecks currently. author: - "Tom Melendez (@supertom) <[email protected]>" options: check_interval: description: - How often (in seconds) to send a health check. default: 5 healthcheck_name: description: - Name of the Healthcheck. required: true healthcheck_type: description: - Type of Healthcheck. required: true choices: ["HTTP", "HTTPS"] host_header: description: - The value of the host header in the health check request. If left empty, the public IP on behalf of which this health check is performed will be used. required: true default: "" port: description: - The TCP port number for the health check request. The default value is 443 for HTTPS and 80 for HTTP. request_path: description: - The request path of the HTTPS health check request. required: false default: "/" state: description: State of the Healthcheck. required: true choices: ["present", "absent"] timeout: description: - How long (in seconds) to wait for a response before claiming failure. It is invalid for timeout to have a greater value than check_interval. default: 5 unhealthy_threshold: description: - A so-far healthy instance will be marked unhealthy after this many consecutive failures. default: 2 healthy_threshold: description: - A so-far unhealthy instance will be marked healthy after this many consecutive successes. 
default: 2 service_account_email: description: - service account email service_account_permissions: version_added: "2.0" description: - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) choices: [ "bigquery", "cloud-platform", "compute-ro", "compute-rw", "useraccounts-ro", "useraccounts-rw", "datastore", "logging-write", "monitoring", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email" ] credentials_file: description: - Path to the JSON file associated with the service account email project_id: description: - Your GCP project ID ''' EXAMPLES = ''' - name: Create Minimum HealthCheck gcp_healthcheck: service_account_email: "{{ service_account_email }}" credentials_file: "{{ credentials_file }}" project_id: "{{ project_id }}" healthcheck_name: my-healthcheck healthcheck_type: HTTP state: present - name: Create HTTP HealthCheck gcp_healthcheck: service_account_email: "{{ service_account_email }}" credentials_file: "{{ credentials_file }}" project_id: "{{ project_id }}" healthcheck_name: my-healthcheck healthcheck_type: HTTP host: my-host request_path: /hc check_interval: 10 timeout: 30 unhealthy_threshhold: 2 healthy_threshhold: 1 state: present - name: Create HTTPS HealthCheck gcp_healthcheck: service_account_email: "{{ service_account_email }}" credentials_file: "{{ credentials_file }}" project_id: "{{ project_id }}" healthcheck_name: "{{ https_healthcheck }}" healthcheck_type: HTTPS host_header: my-host request_path: /hc check_interval: 5 timeout: 5 unhealthy_threshold: 2 healthy_threshold: 1 state: present ''' RETURN = ''' state: description: state of the Healthcheck returned: Always. type: str sample: present healthcheck_name: description: Name of the Healthcheck returned: Always type: str sample: my-url-map healthcheck_type: description: Type of the Healthcheck returned: Always type: str sample: HTTP healthcheck: description: GCP Healthcheck dictionary returned: Always. Refer to GCP documentation for detailed field descriptions. type: dict sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" } ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.gcp import get_google_api_client, GCPUtils USER_AGENT_PRODUCT = 'ansible-healthcheck' USER_AGENT_VERSION = '0.0.1' def _validate_healthcheck_params(params): """ Validate healthcheck params. Simple validation has already assumed by AnsibleModule. :param params: Ansible dictionary containing configuration. :type params: ``dict`` :return: True or raises ValueError :rtype: ``bool`` or `class:ValueError` """ if params['timeout'] > params['check_interval']: raise ValueError("timeout (%s) is greater than check_interval (%s)" % ( params['timeout'], params['check_interval'])) return (True, '') def _build_healthcheck_dict(params): """ Reformat services in Ansible Params for GCP. :param params: Params from AnsibleModule object :type params: ``dict`` :param project_id: The GCP project ID. :type project_id: ``str`` :return: dictionary suitable for submission to GCP HealthCheck (HTTP/HTTPS) API. 
:rtype ``dict`` """ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name') if 'timeout' in gcp_dict: gcp_dict['timeoutSec'] = gcp_dict['timeout'] del gcp_dict['timeout'] if 'checkInterval' in gcp_dict: gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval'] del gcp_dict['checkInterval'] if 'hostHeader' in gcp_dict: gcp_dict['host'] = gcp_dict['hostHeader'] del gcp_dict['hostHeader'] if 'healthcheckType' in gcp_dict: del gcp_dict['healthcheckType'] return gcp_dict def _get_req_resource(client, resource_type): if resource_type == 'HTTPS': return (client.httpsHealthChecks(), 'httpsHealthCheck') else: return (client.httpHealthChecks(), 'httpHealthCheck') def get_healthcheck(client, name, project_id=None, resource_type='HTTP'): """ Get a Healthcheck from GCP. :param client: An initialized GCE Compute Disovery resource. :type client: :class: `googleapiclient.discovery.Resource` :param name: Name of the Url Map. :type name: ``str`` :param project_id: The GCP project ID. :type project_id: ``str`` :return: A dict resp from the respective GCP 'get' request. :rtype: ``dict`` """ try: resource, entity_name = _get_req_resource(client, resource_type) args = {'project': project_id, entity_name: name} req = resource.get(**args) return GCPUtils.execute_api_client_req(req, raise_404=False) except: raise def create_healthcheck(client, params, project_id, resource_type='HTTP'): """ Create a new Healthcheck. :param client: An initialized GCE Compute Disovery resource. :type client: :class: `googleapiclient.discovery.Resource` :param params: Dictionary of arguments from AnsibleModule. :type params: ``dict`` :return: Tuple with changed status and response dict :rtype: ``tuple`` in the format of (bool, dict) """ gcp_dict = _build_healthcheck_dict(params) try: resource, _ = _get_req_resource(client, resource_type) args = {'project': project_id, 'body': gcp_dict} req = resource.insert(**args) return_data = GCPUtils.execute_api_client_req(req, client, raw=False) if not return_data: return_data = get_healthcheck(client, name=params['healthcheck_name'], project_id=project_id) return (True, return_data) except: raise def delete_healthcheck(client, name, project_id, resource_type='HTTP'): """ Delete a Healthcheck. :param client: An initialized GCE Compute Disover resource. :type client: :class: `googleapiclient.discovery.Resource` :param name: Name of the Url Map. :type name: ``str`` :param project_id: The GCP project ID. :type project_id: ``str`` :return: Tuple with changed status and response dict :rtype: ``tuple`` in the format of (bool, dict) """ try: resource, entity_name = _get_req_resource(client, resource_type) args = {'project': project_id, entity_name: name} req = resource.delete(**args) return_data = GCPUtils.execute_api_client_req(req, client) return (True, return_data) except: raise def update_healthcheck(client, healthcheck, params, name, project_id, resource_type='HTTP'): """ Update a Healthcheck. If the healthcheck has not changed, the update will not occur. :param client: An initialized GCE Compute Disovery resource. :type client: :class: `googleapiclient.discovery.Resource` :param healthcheck: Name of the Url Map. :type healthcheck: ``dict`` :param params: Dictionary of arguments from AnsibleModule. :type params: ``dict`` :param name: Name of the Url Map. :type name: ``str`` :param project_id: The GCP project ID. 
:type project_id: ``str`` :return: Tuple with changed status and response dict :rtype: ``tuple`` in the format of (bool, dict) """ gcp_dict = _build_healthcheck_dict(params) ans = GCPUtils.are_params_equal(healthcheck, gcp_dict) if ans: return (False, 'no update necessary') try: resource, entity_name = _get_req_resource(client, resource_type) args = {'project': project_id, entity_name: name, 'body': gcp_dict} req = resource.update(**args) return_data = GCPUtils.execute_api_client_req( req, client=client, raw=False) return (True, return_data) except: raise def main(): module = AnsibleModule(argument_spec=dict( healthcheck_name=dict(required=True), healthcheck_type=dict(required=True, choices=['HTTP', 'HTTPS']), request_path=dict(required=False, default='/'), check_interval=dict(required=False, type='int', default=5), healthy_threshold=dict(required=False, type='int', default=2), unhealthy_threshold=dict(required=False, type='int', default=2), host_header=dict(required=False, type='str', default=''), timeout=dict(required=False, type='int', default=5), port=dict(required=False, type='int'), state=dict(choices=['absent', 'present'], default='present'), service_account_email=dict(), service_account_permissions=dict(type='list'), credentials_file=dict(), project_id=dict(), ), ) client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT, user_agent_version=USER_AGENT_VERSION) params = {} params['healthcheck_name'] = module.params.get('healthcheck_name') params['healthcheck_type'] = module.params.get('healthcheck_type') params['request_path'] = module.params.get('request_path') params['check_interval'] = module.params.get('check_interval') params['healthy_threshold'] = module.params.get('healthy_threshold') params['unhealthy_threshold'] = module.params.get('unhealthy_threshold') params['host_header'] = module.params.get('host_header') params['timeout'] = module.params.get('timeout') params['port'] = module.params.get('port', None) params['state'] = module.params.get('state') if not params['port']: params['port'] = 80 if params['healthcheck_type'] == 'HTTPS': params['port'] = 443 try: _validate_healthcheck_params(params) except Exception as e: module.fail_json(msg=e.message, changed=False) changed = False json_output = {'state': params['state']} healthcheck = get_healthcheck(client, name=params['healthcheck_name'], project_id=conn_params['project_id'], resource_type=params['healthcheck_type']) if not healthcheck: if params['state'] == 'absent': # Doesn't exist in GCE, and state==absent. changed = False module.fail_json( msg="Cannot delete unknown healthcheck: %s" % (params['healthcheck_name'])) else: # Create changed, json_output['healthcheck'] = create_healthcheck(client, params=params, project_id=conn_params['project_id'], resource_type=params['healthcheck_type']) elif params['state'] == 'absent': # Delete changed, json_output['healthcheck'] = delete_healthcheck(client, name=params['healthcheck_name'], project_id=conn_params['project_id'], resource_type=params['healthcheck_type']) else: changed, json_output['healthcheck'] = update_healthcheck(client, healthcheck=healthcheck, params=params, name=params['healthcheck_name'], project_id=conn_params['project_id'], resource_type=params['healthcheck_type']) json_output['changed'] = changed json_output.update(params) module.exit_json(**json_output) if __name__ == '__main__': main()
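The _build_healthcheck_dict helper above mostly renames Ansible-style keys to the camelCase names the GCP API expects; a standalone sketch of the same transformation, using plain dicts and no GCPUtils dependency (the input values are made up):

# Illustrative only -- mirrors the renames performed in _build_healthcheck_dict.
params = {'checkInterval': 10, 'timeout': 5, 'hostHeader': 'my-host'}
gcp = dict(params)
gcp['checkIntervalSec'] = gcp.pop('checkInterval')
gcp['timeoutSec'] = gcp.pop('timeout')
gcp['host'] = gcp.pop('hostHeader')
# gcp == {'checkIntervalSec': 10, 'timeoutSec': 5, 'host': 'my-host'}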
gpl-3.0
420,648,934,423,893,500
826,144,315,809,685,900
33.386517
110
0.611293
false
goodwinnk/intellij-community
python/lib/Lib/site-packages/django/contrib/admin/views/template.py
88
3255
from django import template, forms
from django.contrib.admin.views.decorators import staff_member_required
from django.template import loader
from django.shortcuts import render_to_response
from django.contrib.sites.models import Site
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages


def template_validator(request):
    """
    Displays the template validator form, which finds and displays template
    syntax errors.
    """
    # get a dict of {site_id : settings_module} for the validator
    settings_modules = {}
    for mod in settings.ADMIN_FOR:
        settings_module = import_module(mod)
        settings_modules[settings_module.SITE_ID] = settings_module
    site_list = Site.objects.in_bulk(settings_modules.keys()).values()
    if request.POST:
        form = TemplateValidatorForm(settings_modules, site_list,
                                     data=request.POST)
        if form.is_valid():
            messages.info(request, 'The template is valid.')
    else:
        form = TemplateValidatorForm(settings_modules, site_list)
    return render_to_response('admin/template_validator.html', {
        'title': 'Template validator',
        'form': form,
    }, context_instance=template.RequestContext(request))
template_validator = staff_member_required(template_validator)


class TemplateValidatorForm(forms.Form):
    site = forms.ChoiceField(_('site'))
    template = forms.CharField(
        _('template'), widget=forms.Textarea({'rows': 25, 'cols': 80}))

    def __init__(self, settings_modules, site_list, *args, **kwargs):
        self.settings_modules = settings_modules
        super(TemplateValidatorForm, self).__init__(*args, **kwargs)
        self.fields['site'].choices = [(s.id, s.name) for s in site_list]

    def clean_template(self):
        # Get the settings module. If the site isn't set, we don't raise an
        # error since the site field will.
        try:
            site_id = int(self.cleaned_data.get('site', None))
        except (ValueError, TypeError):
            return
        settings_module = self.settings_modules.get(site_id, None)
        if settings_module is None:
            return

        # So that inheritance works in the site's context, register a new
        # function for "extends" that uses the site's TEMPLATE_DIRS instead.
        def new_do_extends(parser, token):
            node = loader.do_extends(parser, token)
            node.template_dirs = settings_module.TEMPLATE_DIRS
            return node
        register = template.Library()
        register.tag('extends', new_do_extends)
        template.builtins.append(register)

        # Now validate the template using the new TEMPLATE_DIRS, making sure to
        # reset the extends function in any case.
        error = None
        template_string = self.cleaned_data['template']
        try:
            tmpl = loader.get_template_from_string(template_string)
            tmpl.render(template.Context({}))
        except template.TemplateSyntaxError, e:
            error = e
        template.builtins.remove(register)
        if error:
            # Re-raise the error saved above (the original referenced the
            # except-clause variable 'e' here, which only works because
            # Python 2 leaks it out of the except block).
            raise forms.ValidationError(error.args)
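The override trick in clean_template works because old (pre-1.8) Django resolves tags through template.builtins; a minimal sketch of the same install/uninstall pattern, with illustrative names and directories:

# Hypothetical standalone version of the tag-override pattern above.
register = template.Library()

def my_extends(parser, token):
    node = loader.do_extends(parser, token)       # build the normal node
    node.template_dirs = ['/srv/site/templates']  # hypothetical dirs
    return node

register.tag('extends', my_extends)
template.builtins.append(register)   # install the override...
# ... parse and render templates here ...
template.builtins.remove(register)   # ...and always uninstall it afterwards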
apache-2.0
-8,008,044,259,720,698,000
-8,493,815,980,616,134,000
40.202532
79
0.658986
false
percipient/django-two-factor-auth
two_factor/forms.py
4
5418
from binascii import unhexlify
from time import time

from django import forms
from django.forms import ModelForm, Form
from django.utils.translation import ugettext_lazy as _

from django_otp.forms import OTPAuthenticationFormMixin
from django_otp.oath import totp
from django_otp.plugins.otp_totp.models import TOTPDevice

from two_factor.utils import totp_digits

try:
    from otp_yubikey.models import RemoteYubikeyDevice, YubikeyDevice
except ImportError:
    RemoteYubikeyDevice = YubikeyDevice = None

from .models import (PhoneDevice, get_available_phone_methods,
                     get_available_methods)


class MethodForm(forms.Form):
    method = forms.ChoiceField(label=_("Method"),
                               initial='generator',
                               widget=forms.RadioSelect)

    def __init__(self, **kwargs):
        super(MethodForm, self).__init__(**kwargs)
        self.fields['method'].choices = get_available_methods()


class PhoneNumberMethodForm(ModelForm):
    method = forms.ChoiceField(widget=forms.RadioSelect, label=_('Method'))

    class Meta:
        model = PhoneDevice
        fields = 'number', 'method',

    def __init__(self, **kwargs):
        super(PhoneNumberMethodForm, self).__init__(**kwargs)
        self.fields['method'].choices = get_available_phone_methods()


class PhoneNumberForm(ModelForm):
    class Meta:
        model = PhoneDevice
        fields = 'number',


class DeviceValidationForm(forms.Form):
    token = forms.IntegerField(label=_("Token"), min_value=1,
                               max_value=int('9' * totp_digits()))

    error_messages = {
        'invalid_token': _('Entered token is not valid.'),
    }

    def __init__(self, device, **args):
        super(DeviceValidationForm, self).__init__(**args)
        self.device = device

    def clean_token(self):
        token = self.cleaned_data['token']
        if not self.device.verify_token(token):
            raise forms.ValidationError(self.error_messages['invalid_token'])
        return token


class YubiKeyDeviceForm(DeviceValidationForm):
    token = forms.CharField(label=_("YubiKey"))

    error_messages = {
        'invalid_token': _("The YubiKey could not be verified."),
    }

    def clean_token(self):
        self.device.public_id = self.cleaned_data['token'][:-32]
        return super(YubiKeyDeviceForm, self).clean_token()


class TOTPDeviceForm(forms.Form):
    token = forms.IntegerField(label=_("Token"), min_value=0,
                               max_value=int('9' * totp_digits()))

    error_messages = {
        'invalid_token': _('Entered token is not valid.'),
    }

    def __init__(self, key, user, metadata=None, **kwargs):
        super(TOTPDeviceForm, self).__init__(**kwargs)
        self.key = key
        self.tolerance = 1
        self.t0 = 0
        self.step = 30
        self.drift = 0
        self.digits = totp_digits()
        self.user = user
        self.metadata = metadata or {}

    @property
    def bin_key(self):
        """
        The secret key as a binary string.
        """
        return unhexlify(self.key.encode())

    def clean_token(self):
        token = self.cleaned_data.get('token')
        validated = False
        t0s = [self.t0]
        key = self.bin_key
        if 'valid_t0' in self.metadata:
            t0s.append(int(time()) - self.metadata['valid_t0'])
        for t0 in t0s:
            for offset in range(-self.tolerance, self.tolerance):
                if totp(key, self.step, t0, self.digits, self.drift + offset) == token:
                    self.drift = offset
                    self.metadata['valid_t0'] = int(time()) - t0
                    validated = True
        if not validated:
            raise forms.ValidationError(self.error_messages['invalid_token'])
        return token

    def save(self):
        return TOTPDevice.objects.create(user=self.user, key=self.key,
                                         tolerance=self.tolerance, t0=self.t0,
                                         step=self.step, drift=self.drift,
                                         digits=self.digits,
                                         name='default')


class DisableForm(forms.Form):
    understand = forms.BooleanField(label=_("Yes, I am sure"))


class AuthenticationTokenForm(OTPAuthenticationFormMixin, Form):
    otp_token = forms.IntegerField(label=_("Token"), min_value=1,
                                   max_value=int('9' * totp_digits()))

    def __init__(self, user, initial_device, **kwargs):
        """
        `initial_device` is either the user's default device, or the backup
        device when the user chooses to enter a backup token. The token will
        be verified against all devices, it is not limited to the given
        device.
        """
        super(AuthenticationTokenForm, self).__init__(**kwargs)
        self.user = user

        # YubiKey generates an OTP of 44 characters (not digits). So if the
        # user's primary device is a YubiKey, replace the otp_token
        # IntegerField with a CharField.
        if RemoteYubikeyDevice and YubikeyDevice and \
                isinstance(initial_device, (RemoteYubikeyDevice, YubikeyDevice)):
            self.fields['otp_token'] = forms.CharField(label=_('YubiKey'))

    def clean(self):
        self.clean_otp(self.user)
        return self.cleaned_data


class BackupTokenForm(AuthenticationTokenForm):
    otp_token = forms.CharField(label=_("Token"))
mit
-230,798,069,632,795,520
3,419,911,612,558,788,600
32.652174
97
0.608712
false
jonyroda97/redbot-amigosprovaveis
lib/youtube_dl/extractor/sevenplus.py
24
2923
# coding: utf-8
from __future__ import unicode_literals

import re

from .brightcove import BrightcoveNewIE
from ..compat import compat_str
from ..utils import (
    try_get,
    update_url_query,
)


class SevenPlusIE(BrightcoveNewIE):
    IE_NAME = '7plus'
    _VALID_URL = r'https?://(?:www\.)?7plus\.com\.au/(?P<path>[^?]+\?.*?\bepisode-id=(?P<id>[^&#]+))'
    _TESTS = [{
        'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',
        'info_dict': {
            'id': 'MTYS7-003',
            'ext': 'mp4',
            'title': 'S7 E3 - Wind Surf',
            'description': 'md5:29c6a69f21accda7601278f81b46483d',
            'uploader_id': '5303576322001',
            'upload_date': '20171201',
            'timestamp': 1512106377,
            'series': 'Mighty Ships',
            'season_number': 7,
            'episode_number': 3,
            'episode': 'Wind Surf',
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        }
    }, {
        'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        path, episode_id = re.match(self._VALID_URL, url).groups()

        media = self._download_json(
            'https://videoservice.swm.digital/playback', episode_id, query={
                'appId': '7plus',
                'deviceType': 'web',
                'platformType': 'web',
                'accountId': 5303576322001,
                'referenceId': 'ref:' + episode_id,
                'deliveryId': 'csai',
                'videoType': 'vod',
            })['media']

        # 'sources' is a list of source dicts, so default to an empty list
        # (the original defaulted to {}, which would iterate dict keys).
        for source in media.get('sources', []):
            src = source.get('src')
            if not src:
                continue
            source['src'] = update_url_query(src, {'rule': ''})

        info = self._parse_brightcove_metadata(media, episode_id)

        content = self._download_json(
            'https://component-cdn.swm.digital/content/' + path,
            episode_id, headers={
                'market-id': 4,
            }, fatal=False) or {}
        for item in content.get('items', []):
            if item.get('componentData', {}).get('componentType') == 'infoPanel':
                for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:
                    value = item.get(src_key)
                    if value:
                        info[dst_key] = value
                info['series'] = try_get(
                    item, lambda x: x['seriesLogo']['name'], compat_str)
                mobj = re.search(r'^S(\d+)\s+E(\d+)\s+-\s+(.+)$', info['title'])
                if mobj:
                    info.update({
                        'season_number': int(mobj.group(1)),
                        'episode_number': int(mobj.group(2)),
                        'episode': mobj.group(3),
                    })

        return info
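For reference, what _VALID_URL captures from the first test URL, using plain re with no youtube-dl machinery:

# Illustrative only; demonstrates the named groups used by _real_extract.
import re

pattern = r'https?://(?:www\.)?7plus\.com\.au/(?P<path>[^?]+\?.*?\bepisode-id=(?P<id>[^&#]+))'
m = re.match(pattern, 'https://7plus.com.au/MTYS?episode-id=MTYS7-003')
assert m.group('path') == 'MTYS?episode-id=MTYS7-003'  # fed to the content CDN
assert m.group('id') == 'MTYS7-003'                    # the episode id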
gpl-3.0
7,601,320,592,360,644,000
-1,154,277,212,893,448,200
33.797619
101
0.47417
false
tpsatish95/Python-Workshop
Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/xml/etree/ElementTree.py
74
57368
"""Lightweight XML support for Python. XML is an inherently hierarchical data format, and the most natural way to represent it is with a tree. This module has two classes for this purpose: 1. ElementTree represents the whole XML document as a tree and 2. Element represents a single node in this tree. Interactions with the whole document (reading and writing to/from files) are usually done on the ElementTree level. Interactions with a single XML element and its sub-elements are done on the Element level. Element is a flexible container object designed to store hierarchical data structures in memory. It can be described as a cross between a list and a dictionary. Each Element has a number of properties associated with it: 'tag' - a string containing the element's name. 'attributes' - a Python dictionary storing the element's attributes. 'text' - a string containing the element's text content. 'tail' - an optional string containing text after the element's end tag. And a number of child elements stored in a Python sequence. To create an element instance, use the Element constructor, or the SubElement factory function. You can also use the ElementTree class to wrap an element structure and convert it to and from XML. """ #--------------------------------------------------------------------- # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/psf/license for licensing details. # # ElementTree # Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. # # [email protected] # http://www.pythonware.com # -------------------------------------------------------------------- # The ElementTree toolkit is # # Copyright (c) 1999-2008 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # -------------------------------------------------------------------- __all__ = [ # public symbols "Comment", "dump", "Element", "ElementTree", "fromstring", "fromstringlist", "iselement", "iterparse", "parse", "ParseError", "PI", "ProcessingInstruction", "QName", "SubElement", "tostring", "tostringlist", "TreeBuilder", "VERSION", "XML", "XMLID", "XMLParser", "register_namespace", ] VERSION = "1.3.0" import sys import re import warnings import io import contextlib from . import ElementPath class ParseError(SyntaxError): """An error when parsing an XML document. 
In addition to its exception value, a ParseError contains two extra attributes: 'code' - the specific exception code 'position' - the line and column of the error """ pass # -------------------------------------------------------------------- def iselement(element): """Return True if *element* appears to be an Element.""" return hasattr(element, 'tag') class Element: """An XML element. This class is the reference implementation of the Element interface. An element's length is its number of subelements. That means if you you want to check if an element is truly empty, you should check BOTH its length AND its text attribute. The element tag, attribute names, and attribute values can be either bytes or strings. *tag* is the element name. *attrib* is an optional dictionary containing element attributes. *extra* are additional element attributes given as keyword arguments. Example form: <tag attrib>text<child/>...</tag>tail """ tag = None """The element's name.""" attrib = None """Dictionary of the element's attributes.""" text = None """ Text before first subelement. This is either a string or the value None. Note that if there is no text, this attribute may be either None or the empty string, depending on the parser. """ tail = None """ Text after this element's end tag, but before the next sibling element's start tag. This is either a string or the value None. Note that if there was no text, this attribute may be either None or an empty string, depending on the parser. """ def __init__(self, tag, attrib={}, **extra): if not isinstance(attrib, dict): raise TypeError("attrib must be dict, not %s" % ( attrib.__class__.__name__,)) attrib = attrib.copy() attrib.update(extra) self.tag = tag self.attrib = attrib self._children = [] def __repr__(self): return "<Element %s at 0x%x>" % (repr(self.tag), id(self)) def makeelement(self, tag, attrib): """Create a new element with the same type. *tag* is a string containing the element name. *attrib* is a dictionary containing the element attributes. Do not call this method, use the SubElement factory function instead. """ return self.__class__(tag, attrib) def copy(self): """Return copy of current element. This creates a shallow copy. Subelements will be shared with the original tree. """ elem = self.makeelement(self.tag, self.attrib) elem.text = self.text elem.tail = self.tail elem[:] = self return elem def __len__(self): return len(self._children) def __bool__(self): warnings.warn( "The behavior of this method will change in future versions. " "Use specific 'len(elem)' or 'elem is not None' test instead.", FutureWarning, stacklevel=2 ) return len(self._children) != 0 # emulate old behaviour, for now def __getitem__(self, index): return self._children[index] def __setitem__(self, index, element): # if isinstance(index, slice): # for elt in element: # assert iselement(elt) # else: # assert iselement(element) self._children[index] = element def __delitem__(self, index): del self._children[index] def append(self, subelement): """Add *subelement* to the end of this element. The new element will appear in document order after the last existing subelement (or directly after the text, if it's the first subelement), but before the end tag for this element. """ self._assert_is_element(subelement) self._children.append(subelement) def extend(self, elements): """Append subelements from a sequence. *elements* is a sequence with zero or more elements. 
""" for element in elements: self._assert_is_element(element) self._children.extend(elements) def insert(self, index, subelement): """Insert *subelement* at position *index*.""" self._assert_is_element(subelement) self._children.insert(index, subelement) def _assert_is_element(self, e): # Need to refer to the actual Python implementation, not the # shadowing C implementation. if not isinstance(e, _Element_Py): raise TypeError('expected an Element, not %s' % type(e).__name__) def remove(self, subelement): """Remove matching subelement. Unlike the find methods, this method compares elements based on identity, NOT ON tag value or contents. To remove subelements by other means, the easiest way is to use a list comprehension to select what elements to keep, and then use slice assignment to update the parent element. ValueError is raised if a matching element could not be found. """ # assert iselement(element) self._children.remove(subelement) def getchildren(self): """(Deprecated) Return all subelements. Elements are returned in document order. """ warnings.warn( "This method will be removed in future versions. " "Use 'list(elem)' or iteration over elem instead.", DeprecationWarning, stacklevel=2 ) return self._children def find(self, path, namespaces=None): """Find first matching element by tag name or path. *path* is a string having either an element tag or an XPath, *namespaces* is an optional mapping from namespace prefix to full name. Return the first matching element, or None if no element was found. """ return ElementPath.find(self, path, namespaces) def findtext(self, path, default=None, namespaces=None): """Find text for first matching element by tag name or path. *path* is a string having either an element tag or an XPath, *default* is the value to return if the element was not found, *namespaces* is an optional mapping from namespace prefix to full name. Return text content of first matching element, or default value if none was found. Note that if an element is found having no text content, the empty string is returned. """ return ElementPath.findtext(self, path, default, namespaces) def findall(self, path, namespaces=None): """Find all matching subelements by tag name or path. *path* is a string having either an element tag or an XPath, *namespaces* is an optional mapping from namespace prefix to full name. Returns list containing all matching elements in document order. """ return ElementPath.findall(self, path, namespaces) def iterfind(self, path, namespaces=None): """Find all matching subelements by tag name or path. *path* is a string having either an element tag or an XPath, *namespaces* is an optional mapping from namespace prefix to full name. Return an iterable yielding all matching elements in document order. """ return ElementPath.iterfind(self, path, namespaces) def clear(self): """Reset element. This function removes all subelements, clears all attributes, and sets the text and tail attributes to None. """ self.attrib.clear() self._children = [] self.text = self.tail = None def get(self, key, default=None): """Get element attribute. Equivalent to attrib.get, but some implementations may handle this a bit more efficiently. *key* is what attribute to look for, and *default* is what to return if the attribute was not found. Returns a string containing the attribute value, or the default if attribute was not found. """ return self.attrib.get(key, default) def set(self, key, value): """Set element attribute. 
Equivalent to attrib[key] = value, but some implementations may handle this a bit more efficiently. *key* is what attribute to set, and *value* is the attribute value to set it to. """ self.attrib[key] = value def keys(self): """Get list of attribute names. Names are returned in an arbitrary order, just like an ordinary Python dict. Equivalent to attrib.keys() """ return self.attrib.keys() def items(self): """Get element attributes as a sequence. The attributes are returned in arbitrary order. Equivalent to attrib.items(). Return a list of (name, value) tuples. """ return self.attrib.items() def iter(self, tag=None): """Create tree iterator. The iterator loops over the element and all subelements in document order, returning all elements with a matching tag. If the tree structure is modified during iteration, new or removed elements may or may not be included. To get a stable set, use the list() function on the iterator, and loop over the resulting list. *tag* is what tags to look for (default is to return all elements) Return an iterator containing all the matching elements. """ if tag == "*": tag = None if tag is None or self.tag == tag: yield self for e in self._children: yield from e.iter(tag) # compatibility def getiterator(self, tag=None): # Change for a DeprecationWarning in 1.4 warnings.warn( "This method will be removed in future versions. " "Use 'elem.iter()' or 'list(elem.iter())' instead.", PendingDeprecationWarning, stacklevel=2 ) return list(self.iter(tag)) def itertext(self): """Create text iterator. The iterator loops over the element and all subelements in document order, returning all inner text. """ tag = self.tag if not isinstance(tag, str) and tag is not None: return if self.text: yield self.text for e in self: yield from e.itertext() if e.tail: yield e.tail def SubElement(parent, tag, attrib={}, **extra): """Subelement factory which creates an element instance, and appends it to an existing parent. The element tag, attribute names, and attribute values can be either bytes or Unicode strings. *parent* is the parent element, *tag* is the subelement's name, *attrib* is an optional dictionary containing element attributes, *extra* are additional attributes given as keyword arguments. """ attrib = attrib.copy() attrib.update(extra) element = parent.makeelement(tag, attrib) parent.append(element) return element def Comment(text=None): """Comment element factory. This function creates a special element which the standard serializer serializes as an XML comment. *text* is a string containing the comment string. """ element = Element(Comment) element.text = text return element def ProcessingInstruction(target, text=None): """Processing Instruction element factory. This function creates a special element which the standard serializer serializes as an XML processing instruction. *target* is a string containing the processing instruction, *text* is a string containing the processing instruction contents, if any. """ element = Element(ProcessingInstruction) element.text = target if text: element.text = element.text + " " + text return element PI = ProcessingInstruction class QName: """Qualified name wrapper. This class can be used to wrap a QName attribute value in order to get proper namespace handling on output. *text_or_uri* is a string containing the QName value either in the form {uri}local, or if the tag argument is given, the URI part of a QName. 
*tag* is an optional argument which if given, will make the first argument (text_or_uri) be interpreted as a URI, and this argument (tag) be interpreted as a local name. """ def __init__(self, text_or_uri, tag=None): if tag: text_or_uri = "{%s}%s" % (text_or_uri, tag) self.text = text_or_uri def __str__(self): return self.text def __repr__(self): return '<QName %r>' % (self.text,) def __hash__(self): return hash(self.text) def __le__(self, other): if isinstance(other, QName): return self.text <= other.text return self.text <= other def __lt__(self, other): if isinstance(other, QName): return self.text < other.text return self.text < other def __ge__(self, other): if isinstance(other, QName): return self.text >= other.text return self.text >= other def __gt__(self, other): if isinstance(other, QName): return self.text > other.text return self.text > other def __eq__(self, other): if isinstance(other, QName): return self.text == other.text return self.text == other def __ne__(self, other): if isinstance(other, QName): return self.text != other.text return self.text != other # -------------------------------------------------------------------- class ElementTree: """An XML element hierarchy. This class also provides support for serialization to and from standard XML. *element* is an optional root element node, *file* is an optional file handle or file name of an XML file whose contents will be used to initialize the tree with. """ def __init__(self, element=None, file=None): # assert element is None or iselement(element) self._root = element # first node if file: self.parse(file) def getroot(self): """Return root element of this tree.""" return self._root def _setroot(self, element): """Replace root element of this tree. This will discard the current contents of the tree and replace it with the given element. Use with care! """ # assert iselement(element) self._root = element def parse(self, source, parser=None): """Load external XML document into element tree. *source* is a file name or file object, *parser* is an optional parser instance that defaults to XMLParser. ParseError is raised if the parser fails to parse the document. Returns the root element of the given source document. """ close_source = False if not hasattr(source, "read"): source = open(source, "rb") close_source = True try: if parser is None: # If no parser was specified, create a default XMLParser parser = XMLParser() if hasattr(parser, '_parse_whole'): # The default XMLParser, when it comes from an accelerator, # can define an internal _parse_whole API for efficiency. # It can be used to parse the whole source without feeding # it with chunks. self._root = parser._parse_whole(source) return self._root while True: data = source.read(65536) if not data: break parser.feed(data) self._root = parser.close() return self._root finally: if close_source: source.close() def iter(self, tag=None): """Create and return tree iterator for the root element. The iterator loops over all elements in this tree, in document order. *tag* is a string with the tag name to iterate over (default is to return all elements). """ # assert self._root is not None return self._root.iter(tag) # compatibility def getiterator(self, tag=None): # Change for a DeprecationWarning in 1.4 warnings.warn( "This method will be removed in future versions. " "Use 'tree.iter()' or 'list(tree.iter())' instead.", PendingDeprecationWarning, stacklevel=2 ) return list(self.iter(tag)) def find(self, path, namespaces=None): """Find first matching element by tag name or path. 
Same as getroot().find(path), which is Element.find() *path* is a string having either an element tag or an XPath, *namespaces* is an optional mapping from namespace prefix to full name. Return the first matching element, or None if no element was found. """ # assert self._root is not None if path[:1] == "/": path = "." + path warnings.warn( "This search is broken in 1.3 and earlier, and will be " "fixed in a future version. If you rely on the current " "behaviour, change it to %r" % path, FutureWarning, stacklevel=2 ) return self._root.find(path, namespaces) def findtext(self, path, default=None, namespaces=None): """Find text for first matching element by tag name or path. Same as getroot().findtext(path), which is Element.findtext() *path* is a string having either an element tag or an XPath, *namespaces* is an optional mapping from namespace prefix to full name. Return the text of the first matching element, or the default value if no element was found. """ # assert self._root is not None if path[:1] == "/": path = "." + path warnings.warn( "This search is broken in 1.3 and earlier, and will be " "fixed in a future version. If you rely on the current " "behaviour, change it to %r" % path, FutureWarning, stacklevel=2 ) return self._root.findtext(path, default, namespaces) def findall(self, path, namespaces=None): """Find all matching subelements by tag name or path. Same as getroot().findall(path), which is Element.findall(). *path* is a string having either an element tag or an XPath, *namespaces* is an optional mapping from namespace prefix to full name. Return list containing all matching elements in document order. """ # assert self._root is not None if path[:1] == "/": path = "." + path warnings.warn( "This search is broken in 1.3 and earlier, and will be " "fixed in a future version. If you rely on the current " "behaviour, change it to %r" % path, FutureWarning, stacklevel=2 ) return self._root.findall(path, namespaces) def iterfind(self, path, namespaces=None): """Find all matching subelements by tag name or path. Same as getroot().iterfind(path), which is element.iterfind() *path* is a string having either an element tag or an XPath, *namespaces* is an optional mapping from namespace prefix to full name. Return an iterable yielding all matching elements in document order. """ # assert self._root is not None if path[:1] == "/": path = "." + path warnings.warn( "This search is broken in 1.3 and earlier, and will be " "fixed in a future version. If you rely on the current " "behaviour, change it to %r" % path, FutureWarning, stacklevel=2 ) return self._root.iterfind(path, namespaces) def write(self, file_or_filename, encoding=None, xml_declaration=None, default_namespace=None, method=None, *, short_empty_elements=True): """Write element tree to a file as XML. Arguments: *file_or_filename* -- file name or a file object opened for writing *encoding* -- the output encoding (default: US-ASCII) *xml_declaration* -- bool indicating if an XML declaration should be added to the output. If None, an XML declaration is added if encoding IS NOT either of: US-ASCII, UTF-8, or Unicode *default_namespace* -- sets the default XML namespace (for "xmlns") *method* -- either "xml" (default), "html", "text", or "c14n" *short_empty_elements* -- controls the formatting of elements that contain no content. 
If True (default) they are emitted as a single self-closed tag, otherwise they are emitted as a pair of start/end tags """ if not method: method = "xml" elif method not in _serialize: raise ValueError("unknown method %r" % method) if not encoding: if method == "c14n": encoding = "utf-8" else: encoding = "us-ascii" else: encoding = encoding.lower() with _get_writer(file_or_filename, encoding) as write: if method == "xml" and (xml_declaration or (xml_declaration is None and encoding not in ("utf-8", "us-ascii", "unicode"))): declared_encoding = encoding if encoding == "unicode": # Retrieve the default encoding for the xml declaration import locale declared_encoding = locale.getpreferredencoding() write("<?xml version='1.0' encoding='%s'?>\n" % ( declared_encoding,)) if method == "text": _serialize_text(write, self._root) else: qnames, namespaces = _namespaces(self._root, default_namespace) serialize = _serialize[method] serialize(write, self._root, qnames, namespaces, short_empty_elements=short_empty_elements) def write_c14n(self, file): # lxml.etree compatibility. use output method instead return self.write(file, method="c14n") # -------------------------------------------------------------------- # serialization support @contextlib.contextmanager def _get_writer(file_or_filename, encoding): # returns text write method and release all resources after using try: write = file_or_filename.write except AttributeError: # file_or_filename is a file name if encoding == "unicode": file = open(file_or_filename, "w") else: file = open(file_or_filename, "w", encoding=encoding, errors="xmlcharrefreplace") with file: yield file.write else: # file_or_filename is a file-like object # encoding determines if it is a text or binary writer if encoding == "unicode": # use a text writer as is yield write else: # wrap a binary writer with TextIOWrapper with contextlib.ExitStack() as stack: if isinstance(file_or_filename, io.BufferedIOBase): file = file_or_filename elif isinstance(file_or_filename, io.RawIOBase): file = io.BufferedWriter(file_or_filename) # Keep the original file open when the BufferedWriter is # destroyed stack.callback(file.detach) else: # This is to handle passed objects that aren't in the # IOBase hierarchy, but just have a write method file = io.BufferedIOBase() file.writable = lambda: True file.write = write try: # TextIOWrapper uses this methods to determine # if BOM (for UTF-16, etc) should be added file.seekable = file_or_filename.seekable file.tell = file_or_filename.tell except AttributeError: pass file = io.TextIOWrapper(file, encoding=encoding, errors="xmlcharrefreplace", newline="\n") # Keep the original file open when the TextIOWrapper is # destroyed stack.callback(file.detach) yield file.write def _namespaces(elem, default_namespace=None): # identify namespaces used in this tree # maps qnames to *encoded* prefix:local names qnames = {None: None} # maps uri:s to prefixes namespaces = {} if default_namespace: namespaces[default_namespace] = "" def add_qname(qname): # calculate serialized qname representation try: if qname[:1] == "{": uri, tag = qname[1:].rsplit("}", 1) prefix = namespaces.get(uri) if prefix is None: prefix = _namespace_map.get(uri) if prefix is None: prefix = "ns%d" % len(namespaces) if prefix != "xml": namespaces[uri] = prefix if prefix: qnames[qname] = "%s:%s" % (prefix, tag) else: qnames[qname] = tag # default element else: if default_namespace: # FIXME: can this be handled in XML 1.0? 
raise ValueError( "cannot use non-qualified names with " "default_namespace option" ) qnames[qname] = qname except TypeError: _raise_serialization_error(qname) # populate qname and namespaces table for elem in elem.iter(): tag = elem.tag if isinstance(tag, QName): if tag.text not in qnames: add_qname(tag.text) elif isinstance(tag, str): if tag not in qnames: add_qname(tag) elif tag is not None and tag is not Comment and tag is not PI: _raise_serialization_error(tag) for key, value in elem.items(): if isinstance(key, QName): key = key.text if key not in qnames: add_qname(key) if isinstance(value, QName) and value.text not in qnames: add_qname(value.text) text = elem.text if isinstance(text, QName) and text.text not in qnames: add_qname(text.text) return qnames, namespaces def _serialize_xml(write, elem, qnames, namespaces, short_empty_elements, **kwargs): tag = elem.tag text = elem.text if tag is Comment: write("<!--%s-->" % text) elif tag is ProcessingInstruction: write("<?%s?>" % text) else: tag = qnames[tag] if tag is None: if text: write(_escape_cdata(text)) for e in elem: _serialize_xml(write, e, qnames, None, short_empty_elements=short_empty_elements) else: write("<" + tag) items = list(elem.items()) if items or namespaces: if namespaces: for v, k in sorted(namespaces.items(), key=lambda x: x[1]): # sort on prefix if k: k = ":" + k write(" xmlns%s=\"%s\"" % ( k, _escape_attrib(v) )) for k, v in sorted(items): # lexical order if isinstance(k, QName): k = k.text if isinstance(v, QName): v = qnames[v.text] else: v = _escape_attrib(v) write(" %s=\"%s\"" % (qnames[k], v)) if text or len(elem) or not short_empty_elements: write(">") if text: write(_escape_cdata(text)) for e in elem: _serialize_xml(write, e, qnames, None, short_empty_elements=short_empty_elements) write("</" + tag + ">") else: write(" />") if elem.tail: write(_escape_cdata(elem.tail)) HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr", "img", "input", "isindex", "link", "meta", "param") try: HTML_EMPTY = set(HTML_EMPTY) except NameError: pass def _serialize_html(write, elem, qnames, namespaces, **kwargs): tag = elem.tag text = elem.text if tag is Comment: write("<!--%s-->" % _escape_cdata(text)) elif tag is ProcessingInstruction: write("<?%s?>" % _escape_cdata(text)) else: tag = qnames[tag] if tag is None: if text: write(_escape_cdata(text)) for e in elem: _serialize_html(write, e, qnames, None) else: write("<" + tag) items = list(elem.items()) if items or namespaces: if namespaces: for v, k in sorted(namespaces.items(), key=lambda x: x[1]): # sort on prefix if k: k = ":" + k write(" xmlns%s=\"%s\"" % ( k, _escape_attrib(v) )) for k, v in sorted(items): # lexical order if isinstance(k, QName): k = k.text if isinstance(v, QName): v = qnames[v.text] else: v = _escape_attrib_html(v) # FIXME: handle boolean attributes write(" %s=\"%s\"" % (qnames[k], v)) write(">") ltag = tag.lower() if text: if ltag == "script" or ltag == "style": write(text) else: write(_escape_cdata(text)) for e in elem: _serialize_html(write, e, qnames, None) if ltag not in HTML_EMPTY: write("</" + tag + ">") if elem.tail: write(_escape_cdata(elem.tail)) def _serialize_text(write, elem): for part in elem.itertext(): write(part) if elem.tail: write(elem.tail) _serialize = { "xml": _serialize_xml, "html": _serialize_html, "text": _serialize_text, # this optional method is imported at the end of the module # "c14n": _serialize_c14n, } def register_namespace(prefix, uri): """Register a namespace prefix. 
The registry is global, and any existing mapping for either the given prefix or the namespace URI will be removed. *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and attributes in this namespace will be serialized with prefix if possible. ValueError is raised if prefix is reserved or is invalid. """ if re.match(r"ns\d+$", prefix): raise ValueError("Prefix format reserved for internal use") for k, v in list(_namespace_map.items()): if k == uri or v == prefix: del _namespace_map[k] _namespace_map[uri] = prefix _namespace_map = { # "well-known" namespace prefixes "http://www.w3.org/XML/1998/namespace": "xml", "http://www.w3.org/1999/xhtml": "html", "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", "http://schemas.xmlsoap.org/wsdl/": "wsdl", # xml schema "http://www.w3.org/2001/XMLSchema": "xs", "http://www.w3.org/2001/XMLSchema-instance": "xsi", # dublin core "http://purl.org/dc/elements/1.1/": "dc", } # For tests and troubleshooting register_namespace._namespace_map = _namespace_map def _raise_serialization_error(text): raise TypeError( "cannot serialize %r (type %s)" % (text, type(text).__name__) ) def _escape_cdata(text): # escape character data try: # it's worth avoiding do-nothing calls for strings that are # shorter than 500 characters, or so. Assume that's, by far, # the most common case in most applications. if "&" in text: text = text.replace("&", "&amp;") if "<" in text: text = text.replace("<", "&lt;") if ">" in text: text = text.replace(">", "&gt;") return text except (TypeError, AttributeError): _raise_serialization_error(text) def _escape_attrib(text): # escape attribute value try: if "&" in text: text = text.replace("&", "&amp;") if "<" in text: text = text.replace("<", "&lt;") if ">" in text: text = text.replace(">", "&gt;") if "\"" in text: text = text.replace("\"", "&quot;") if "\n" in text: text = text.replace("\n", "&#10;") return text except (TypeError, AttributeError): _raise_serialization_error(text) def _escape_attrib_html(text): # escape attribute value try: if "&" in text: text = text.replace("&", "&amp;") if ">" in text: text = text.replace(">", "&gt;") if "\"" in text: text = text.replace("\"", "&quot;") return text except (TypeError, AttributeError): _raise_serialization_error(text) # -------------------------------------------------------------------- def tostring(element, encoding=None, method=None, *, short_empty_elements=True): """Generate string representation of XML element. All subelements are included. If encoding is "unicode", a string is returned. Otherwise a bytestring is returned. *element* is an Element instance, *encoding* is an optional output encoding defaulting to US-ASCII, *method* is an optional output method which can be one of "xml" (default), "html", "text" or "c14n". Returns an (optionally) encoded string containing the XML data. 
""" stream = io.StringIO() if encoding == 'unicode' else io.BytesIO() ElementTree(element).write(stream, encoding, method=method, short_empty_elements=short_empty_elements) return stream.getvalue() class _ListDataStream(io.BufferedIOBase): """An auxiliary stream accumulating into a list reference.""" def __init__(self, lst): self.lst = lst def writable(self): return True def seekable(self): return True def write(self, b): self.lst.append(b) def tell(self): return len(self.lst) def tostringlist(element, encoding=None, method=None, *, short_empty_elements=True): lst = [] stream = _ListDataStream(lst) ElementTree(element).write(stream, encoding, method=method, short_empty_elements=short_empty_elements) return lst def dump(elem): """Write element tree or element structure to sys.stdout. This function should be used for debugging only. *elem* is either an ElementTree, or a single Element. The exact output format is implementation dependent. In this version, it's written as an ordinary XML file. """ # debugging if not isinstance(elem, ElementTree): elem = ElementTree(elem) elem.write(sys.stdout, encoding="unicode") tail = elem.getroot().tail if not tail or tail[-1] != "\n": sys.stdout.write("\n") # -------------------------------------------------------------------- # parsing def parse(source, parser=None): """Parse XML document into element tree. *source* is a filename or file object containing XML data, *parser* is an optional parser instance defaulting to XMLParser. Return an ElementTree instance. """ tree = ElementTree() tree.parse(source, parser) return tree def iterparse(source, events=None, parser=None): """Incrementally parse XML document into ElementTree. This class also reports what's going on to the user based on the *events* it is initialized with. The supported events are the strings "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get detailed namespace information). If *events* is omitted, only "end" events are reported. *source* is a filename or file object containing XML data, *events* is a list of events to report back, *parser* is an optional parser instance. Returns an iterator providing (event, elem) pairs. """ close_source = False if not hasattr(source, "read"): source = open(source, "rb") close_source = True return _IterParseIterator(source, events, parser, close_source) class XMLPullParser: def __init__(self, events=None, *, _parser=None): # The _parser argument is for internal use only and must not be relied # upon in user code. It will be removed in a future release. # See http://bugs.python.org/issue17741 for more details. # _elementtree.c expects a list, not a deque self._events_queue = [] self._index = 0 self._parser = _parser or XMLParser(target=TreeBuilder()) # wire up the parser for event reporting if events is None: events = ("end",) self._parser._setevents(self._events_queue, events) def feed(self, data): """Feed encoded data to parser.""" if self._parser is None: raise ValueError("feed() called after end of stream") if data: try: self._parser.feed(data) except SyntaxError as exc: self._events_queue.append(exc) def _close_and_return_root(self): # iterparse needs this to set its root attribute properly :( root = self._parser.close() self._parser = None return root def close(self): """Finish feeding data to parser. Unlike XMLParser, does not return the root element. Use read_events() to consume elements from XMLPullParser. 
""" self._close_and_return_root() def read_events(self): """Return an iterator over currently available (event, elem) pairs. Events are consumed from the internal event queue as they are retrieved from the iterator. """ events = self._events_queue while True: index = self._index try: event = events[self._index] # Avoid retaining references to past events events[self._index] = None except IndexError: break index += 1 # Compact the list in a O(1) amortized fashion # As noted above, _elementree.c needs a list, not a deque if index * 2 >= len(events): events[:index] = [] self._index = 0 else: self._index = index if isinstance(event, Exception): raise event else: yield event class _IterParseIterator: def __init__(self, source, events, parser, close_source=False): # Use the internal, undocumented _parser argument for now; When the # parser argument of iterparse is removed, this can be killed. self._parser = XMLPullParser(events=events, _parser=parser) self._file = source self._close_file = close_source self.root = self._root = None def __next__(self): while 1: for event in self._parser.read_events(): return event if self._parser._parser is None: self.root = self._root if self._close_file: self._file.close() raise StopIteration # load event buffer data = self._file.read(16 * 1024) if data: self._parser.feed(data) else: self._root = self._parser._close_and_return_root() def __iter__(self): return self def XML(text, parser=None): """Parse XML document from string constant. This function can be used to embed "XML Literals" in Python code. *text* is a string containing XML data, *parser* is an optional parser instance, defaulting to the standard XMLParser. Returns an Element instance. """ if not parser: parser = XMLParser(target=TreeBuilder()) parser.feed(text) return parser.close() def XMLID(text, parser=None): """Parse XML document from string constant for its IDs. *text* is a string containing XML data, *parser* is an optional parser instance, defaulting to the standard XMLParser. Returns an (Element, dict) tuple, in which the dict maps element id:s to elements. """ if not parser: parser = XMLParser(target=TreeBuilder()) parser.feed(text) tree = parser.close() ids = {} for elem in tree.iter(): id = elem.get("id") if id: ids[id] = elem return tree, ids # Parse XML document from string constant. Alias for XML(). fromstring = XML def fromstringlist(sequence, parser=None): """Parse XML document from sequence of string fragments. *sequence* is a list of other sequence, *parser* is an optional parser instance, defaulting to the standard XMLParser. Returns an Element instance. """ if not parser: parser = XMLParser(target=TreeBuilder()) for text in sequence: parser.feed(text) return parser.close() # -------------------------------------------------------------------- class TreeBuilder: """Generic element structure builder. This builder converts a sequence of start, data, and end method calls to a well-formed element structure. You can use this class to build an element structure using a custom XML parser, or a parser for some other XML-like format. *element_factory* is an optional element factory which is called to create new Element instances, as necessary. 
""" def __init__(self, element_factory=None): self._data = [] # data collector self._elem = [] # element stack self._last = None # last element self._tail = None # true if we're after an end tag if element_factory is None: element_factory = Element self._factory = element_factory def close(self): """Flush builder buffers and return toplevel document Element.""" assert len(self._elem) == 0, "missing end tags" assert self._last is not None, "missing toplevel element" return self._last def _flush(self): if self._data: if self._last is not None: text = "".join(self._data) if self._tail: assert self._last.tail is None, "internal error (tail)" self._last.tail = text else: assert self._last.text is None, "internal error (text)" self._last.text = text self._data = [] def data(self, data): """Add text to current element.""" self._data.append(data) def start(self, tag, attrs): """Open new element and return it. *tag* is the element name, *attrs* is a dict containing element attributes. """ self._flush() self._last = elem = self._factory(tag, attrs) if self._elem: self._elem[-1].append(elem) self._elem.append(elem) self._tail = 0 return elem def end(self, tag): """Close and return current Element. *tag* is the element name. """ self._flush() self._last = self._elem.pop() assert self._last.tag == tag,\ "end tag mismatch (expected %s, got %s)" % ( self._last.tag, tag) self._tail = 1 return self._last # also see ElementTree and TreeBuilder class XMLParser: """Element structure builder for XML source data based on the expat parser. *html* are predefined HTML entities (not supported currently), *target* is an optional target object which defaults to an instance of the standard TreeBuilder class, *encoding* is an optional encoding string which if given, overrides the encoding specified in the XML file: http://www.iana.org/assignments/character-sets """ def __init__(self, html=0, target=None, encoding=None): try: from xml.parsers import expat except ImportError: try: import pyexpat as expat except ImportError: raise ImportError( "No module named expat; use SimpleXMLTreeBuilder instead" ) parser = expat.ParserCreate(encoding, "}") if target is None: target = TreeBuilder() # underscored names are provided for compatibility only self.parser = self._parser = parser self.target = self._target = target self._error = expat.error self._names = {} # name memo cache # main callbacks parser.DefaultHandlerExpand = self._default if hasattr(target, 'start'): parser.StartElementHandler = self._start if hasattr(target, 'end'): parser.EndElementHandler = self._end if hasattr(target, 'data'): parser.CharacterDataHandler = target.data # miscellaneous callbacks if hasattr(target, 'comment'): parser.CommentHandler = target.comment if hasattr(target, 'pi'): parser.ProcessingInstructionHandler = target.pi # Configure pyexpat: buffering, new-style attribute handling. parser.buffer_text = 1 parser.ordered_attributes = 1 parser.specified_attributes = 1 self._doctype = None self.entity = {} try: self.version = "Expat %d.%d.%d" % expat.version_info except AttributeError: pass # unknown def _setevents(self, events_queue, events_to_report): # Internal API for XMLPullParser # events_to_report: a list of events to report during parsing (same as # the *events* of XMLPullParser's constructor. # events_queue: a list of actual parsing events that will be populated # by the underlying parser. 
# parser = self._parser append = events_queue.append for event_name in events_to_report: if event_name == "start": parser.ordered_attributes = 1 parser.specified_attributes = 1 def handler(tag, attrib_in, event=event_name, append=append, start=self._start): append((event, start(tag, attrib_in))) parser.StartElementHandler = handler elif event_name == "end": def handler(tag, event=event_name, append=append, end=self._end): append((event, end(tag))) parser.EndElementHandler = handler elif event_name == "start-ns": def handler(prefix, uri, event=event_name, append=append): append((event, (prefix or "", uri or ""))) parser.StartNamespaceDeclHandler = handler elif event_name == "end-ns": def handler(prefix, event=event_name, append=append): append((event, None)) parser.EndNamespaceDeclHandler = handler else: raise ValueError("unknown event %r" % event_name) def _raiseerror(self, value): err = ParseError(value) err.code = value.code err.position = value.lineno, value.offset raise err def _fixname(self, key): # expand qname, and convert name string to ascii, if possible try: name = self._names[key] except KeyError: name = key if "}" in name: name = "{" + name self._names[key] = name return name def _start(self, tag, attr_list): # Handler for expat's StartElementHandler. Since ordered_attributes # is set, the attributes are reported as a list of alternating # attribute name,value. fixname = self._fixname tag = fixname(tag) attrib = {} if attr_list: for i in range(0, len(attr_list), 2): attrib[fixname(attr_list[i])] = attr_list[i+1] return self.target.start(tag, attrib) def _end(self, tag): return self.target.end(self._fixname(tag)) def _default(self, text): prefix = text[:1] if prefix == "&": # deal with undefined entities try: data_handler = self.target.data except AttributeError: return try: data_handler(self.entity[text[1:-1]]) except KeyError: from xml.parsers import expat err = expat.error( "undefined entity %s: line %d, column %d" % (text, self.parser.ErrorLineNumber, self.parser.ErrorColumnNumber) ) err.code = 11 # XML_ERROR_UNDEFINED_ENTITY err.lineno = self.parser.ErrorLineNumber err.offset = self.parser.ErrorColumnNumber raise err elif prefix == "<" and text[:9] == "<!DOCTYPE": self._doctype = [] # inside a doctype declaration elif self._doctype is not None: # parse doctype contents if prefix == ">": self._doctype = None return text = text.strip() if not text: return self._doctype.append(text) n = len(self._doctype) if n > 2: type = self._doctype[1] if type == "PUBLIC" and n == 4: name, type, pubid, system = self._doctype if pubid: pubid = pubid[1:-1] elif type == "SYSTEM" and n == 3: name, type, system = self._doctype pubid = None else: return if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) elif self.doctype != self._XMLParser__doctype: # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) self._doctype = None def doctype(self, name, pubid, system): """(Deprecated) Handle doctype declaration *name* is the Doctype name, *pubid* is the public identifier, and *system* is the system identifier. """ warnings.warn( "This method of XMLParser is deprecated. 
Define doctype() " "method on the TreeBuilder target.", DeprecationWarning, ) # sentinel, if doctype is redefined in a subclass __doctype = doctype def feed(self, data): """Feed encoded data to parser.""" try: self.parser.Parse(data, 0) except self._error as v: self._raiseerror(v) def close(self): """Finish feeding data to parser and return element structure.""" try: self.parser.Parse("", 1) # end of data except self._error as v: self._raiseerror(v) try: close_handler = self.target.close except AttributeError: pass else: return close_handler() finally: # get rid of circular references del self.parser, self._parser del self.target, self._target # Import the C accelerators try: # Element is going to be shadowed by the C implementation. We need to keep # the Python version of it accessible for some "creative" uses by external code # (see tests) _Element_Py = Element # Element, SubElement, ParseError, TreeBuilder, XMLParser from _elementtree import * except ImportError: pass
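For reference, a minimal usage sketch of the API implemented above, run against the stdlib xml.etree.ElementTree; the tag names and data are illustrative assumptions, not part of the original file.

import xml.etree.ElementTree as ET

# Build a small tree with the Element/SubElement factories defined above.
root = ET.Element("catalog", {"version": "1.0"})  # hypothetical tag/attrs
item = ET.SubElement(root, "item", id="1")
item.text = "first"
ET.SubElement(root, "item", id="2")

# tostring() with encoding="unicode" returns str instead of bytes.
xml_text = ET.tostring(root, encoding="unicode")

# Parse it back and query with find()/findall().
parsed = ET.fromstring(xml_text)
assert parsed.find("item").get("id") == "1"
assert [e.get("id") for e in parsed.findall("item")] == ["1", "2"]

# Incremental parsing with the XMLPullParser class defined above.
pull = ET.XMLPullParser(events=("start", "end"))
pull.feed(xml_text)
for event, elem in pull.read_events():
    print(event, elem.tag)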
apache-2.0
-4,124,182,136,306,886,700
8,493,523,749,603,553,000
33.413917
79
0.569987
false
edisonlz/fruit
web_project/base/site-packages/django_extensions/tests/test_compile_pyc.py
38
2083
import os import six import fnmatch from django.test import TestCase from django.core.management import call_command from django_extensions.management.utils import get_project_root class CompilePycTests(TestCase): def setUp(self): self._settings = os.environ.get('DJANGO_SETTINGS_MODULE') os.environ['DJANGO_SETTINGS_MODULE'] = 'django_extensions.settings' def tearDown(self): if self._settings: os.environ['DJANGO_SETTINGS_MODULE'] = self._settings def _find_pyc(self, path, mask='*.pyc'): pyc_glob = [] for root, dirs, filenames in os.walk(path): for filename in fnmatch.filter(filenames, mask): pyc_glob.append(os.path.join(root, filename)) return pyc_glob def test_compiles_pyc_files(self): with self.settings(BASE_DIR=get_project_root()): call_command('clean_pyc') pyc_glob = self._find_pyc(get_project_root()) self.assertEqual(len(pyc_glob), 0) with self.settings(BASE_DIR=get_project_root()): call_command('compile_pyc') pyc_glob = self._find_pyc(get_project_root()) self.assertTrue(len(pyc_glob) > 0) with self.settings(BASE_DIR=get_project_root()): call_command('clean_pyc') def test_takes_path(self): out = six.StringIO() project_root = os.path.join(get_project_root(), 'tests', 'testapp') with self.settings(BASE_DIR=get_project_root()): call_command('clean_pyc', path=project_root) pyc_glob = self._find_pyc(project_root) self.assertEqual(len(pyc_glob), 0) with self.settings(BASE_DIR=get_project_root()): call_command('compile_pyc', verbosity=2, path=project_root, stdout=out) expected = ['Compiling %s...' % fn for fn in sorted(self._find_pyc(project_root, mask='*.py'))] output = out.getvalue().splitlines() self.assertEqual(expected, sorted(output)) with self.settings(BASE_DIR=get_project_root()): call_command('clean_pyc')
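A minimal sketch of how the two management commands exercised by these tests are invoked outside the test suite; the settings module mirrors the setUp() above, and the target path is an assumption for illustration.

import os
import django
from django.core.management import call_command

# Assumed settings module, matching the test's setUp().
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_extensions.settings')
django.setup()

# Byte-compile every .py file under a hypothetical project path, then clean up.
call_command('compile_pyc', verbosity=2, path='/tmp/exampleproject')
call_command('clean_pyc', path='/tmp/exampleproject')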
apache-2.0
-3,806,818,739,411,689,000
-522,205,131,196,716,740
39.843137
83
0.62506
false
wakatime/wakatime
wakatime/packages/urllib3/connectionpool.py
94
35358
from __future__ import absolute_import import errno import logging import sys import warnings from socket import error as SocketError, timeout as SocketTimeout import socket from .exceptions import ( ClosedPoolError, ProtocolError, EmptyPoolError, HeaderParsingError, HostChangedError, LocationValueError, MaxRetryError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, InsecureRequestWarning, NewConnectionError, ) from .packages.ssl_match_hostname import CertificateError from .packages import six from .packages.six.moves import queue from .connection import ( port_by_scheme, DummyConnection, HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, HTTPException, BaseSSLError, ) from .request import RequestMethods from .response import HTTPResponse from .util.connection import is_connection_dropped from .util.request import set_file_position from .util.response import assert_header_parsing from .util.retry import Retry from .util.timeout import Timeout from .util.url import get_host, Url if six.PY2: # Queue is imported for side effects on MS Windows import Queue as _unused_module_Queue # noqa: F401 xrange = six.moves.xrange log = logging.getLogger(__name__) _Default = object() # Pool objects class ConnectionPool(object): """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. """ scheme = None QueueCls = queue.LifoQueue def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") self.host = _ipv6_host(host).lower() self._proxy_host = host.lower() self.port = port def __str__(self): return '%s(host=%r, port=%r)' % (type(self).__name__, self.host, self.port) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() # Return False to re-raise any potential exceptions return False def close(self): """ Close all pooled connections and disable the pool. """ pass # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`httplib.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`httplib.HTTPConnection`. :param strict: Causes BadStatusLine to be raised if the status line can't be parsed as a valid HTTP/1.0 or 1.1 status line, passed into :class:`httplib.HTTPConnection`. .. note:: Only works in Python 2. This parameter is ignored in Python 3. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. 
This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.connectionpool.ProxyManager`" :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.connectionpool.ProxyManager`" :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. """ scheme = 'http' ConnectionCls = HTTPConnection ResponseCls = HTTPResponse def __init__(self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, **conn_kw): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) self.strict = strict if not isinstance(timeout, Timeout): timeout = Timeout.from_float(timeout) if retries is None: retries = Retry.DEFAULT self.timeout = timeout self.retries = retries self.pool = self.QueueCls(maxsize) self.block = block self.proxy = _proxy self.proxy_headers = _proxy_headers or {} # Fill the queue up so that doing get() on it will block properly for _ in xrange(maxsize): self.pool.put(None) # These are mostly for testing and debugging purposes. self.num_connections = 0 self.num_requests = 0 self.conn_kw = conn_kw if self.proxy: # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. # We cannot know if the user has added default socket options, so we cannot replace the # list. self.conn_kw.setdefault('socket_options', []) def _new_conn(self): """ Return a fresh :class:`HTTPConnection`. """ self.num_connections += 1 log.debug("Starting new HTTP connection (%d): %s", self.num_connections, self.host) conn = self.ConnectionCls(host=self.host, port=self.port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw) return conn def _get_conn(self, timeout=None): """ Get a connection. Will return a pooled connection if one is available. If no connections are available and :prop:`.block` is ``False``, then a fresh connection is returned. :param timeout: Seconds to wait before giving up and raising :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and :prop:`.block` is ``True``. """ conn = None try: conn = self.pool.get(block=self.block, timeout=timeout) except AttributeError: # self.pool is None raise ClosedPoolError(self, "Pool is closed.") except queue.Empty: if self.block: raise EmptyPoolError(self, "Pool reached maximum size and no more " "connections are allowed.") pass # Oh well, we'll create a new connection then # If this is a persistent connection, check if it got disconnected if conn and is_connection_dropped(conn): log.debug("Resetting dropped connection: %s", self.host) conn.close() if getattr(conn, 'auto_open', 1) == 0: # This is a proxied connection that has been mutated by # httplib._tunnel() and cannot be reused (since it would # attempt to bypass the proxy) conn = None return conn or self._new_conn() def _put_conn(self, conn): """ Put a connection back into the pool. :param conn: Connection object for the current host and port as returned by :meth:`._new_conn` or :meth:`._get_conn`. 
If the pool is already full, the connection is closed and discarded because we exceeded maxsize. If connections are discarded frequently, then maxsize should be increased. If the pool is closed, then the connection will be closed and discarded. """ try: self.pool.put(conn, block=False) return # Everything is dandy, done. except AttributeError: # self.pool is None. pass except queue.Full: # This should never happen if self.block == True log.warning( "Connection pool is full, discarding connection: %s", self.host) # Connection never got put back into the pool, close it. if conn: conn.close() def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ pass def _prepare_proxy(self, conn): # Nothing to do for HTTP connections. pass def _get_timeout(self, timeout): """ Helper that always returns a :class:`urllib3.util.Timeout` """ if timeout is _Default: return self.timeout.clone() if isinstance(timeout, Timeout): return timeout.clone() else: # User passed us an int/float. This is for backwards compatibility, # can be removed later return Timeout.from_float(timeout) def _raise_timeout(self, err, url, timeout_value): """Is the error actually a timeout? Will raise a ReadTimeout or pass""" if isinstance(err, SocketTimeout): raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) # See the above comment about EAGAIN in Python 3. In Python 2 we have # to specifically catch it and throw the timeout error if hasattr(err, 'errno') and err.errno in _blocking_errnos: raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) # Catch possible read timeouts thrown as SSL errors. If not the # case, rethrow the original. We need to do this because of: # http://bugs.python.org/issue10272 if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6 raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) def _make_request(self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw): """ Perform a request on a given urllib connection object taken from our pool. :param conn: a connection from one of our connection pools :param timeout: Socket timeout in seconds for the request. This can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. """ self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout # Trigger any extra validation we need to do. try: self._validate_conn(conn) except (SocketTimeout, BaseSSLError) as e: # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) raise # conn.request() calls httplib.*.request, not the method in # urllib3.request. It also calls makefile (recv) on the socket. if chunked: conn.request_chunked(method, url, **httplib_request_kw) else: conn.request(method, url, **httplib_request_kw) # Reset the timeout for the recv() on the socket read_timeout = timeout_obj.read_timeout # App Engine doesn't have a sock attr if getattr(conn, 'sock', None): # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. 
Instead of catching # the exception and assuming all BadStatusLine exceptions are read # timeouts, check for a zero timeout before making the request. if read_timeout == 0: raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % read_timeout) if read_timeout is Timeout.DEFAULT_TIMEOUT: conn.sock.settimeout(socket.getdefaulttimeout()) else: # None or a value conn.sock.settimeout(read_timeout) # Receive the response from the server try: try: # Python 2.7, use buffering of HTTP responses httplib_response = conn.getresponse(buffering=True) except TypeError: # Python 2.6 and older, Python 3 try: httplib_response = conn.getresponse() except Exception as e: # Remove the TypeError from the exception chain in Python 3; # otherwise it looks like a programming error was the cause. six.raise_from(e, None) except (SocketTimeout, BaseSSLError, SocketError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise # AppEngine doesn't have a version attr. http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, method, url, http_version, httplib_response.status, httplib_response.length) try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 log.warning( 'Failed to parse headers (url=%s): %s', self._absolute_url(url), hpe, exc_info=True) return httplib_response def _absolute_url(self, path): return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url def close(self): """ Close all pooled connections and disable the pool. """ # Disable access to the pool old_pool, self.pool = self.pool, None try: while True: conn = old_pool.get(block=False) if conn: conn.close() except queue.Empty: pass # Done. def is_same_host(self, url): """ Check if the given ``url`` is a member of the same host as this connection pool. """ if url.startswith('/'): return True # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) host = _ipv6_host(host).lower() # Use explicit default port for comparison when none is given if self.port and not port: port = port_by_scheme.get(scheme) elif not self.port and port == port_by_scheme.get(scheme): port = None return (scheme, host, port) == (self.scheme, self.host, self.port) def urlopen(self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param body: Data to send in the request body (useful for creating POST requests, see HTTPConnectionPool.post_url for more convenience). :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. 
:param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When False, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get('preload_content', True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] <https://github.com/shazow/urllib3/issues/651> release_this_conn = release_conn # Merge the proxy headers. Only do this in HTTP. We have to copy the # headers dict so we can safely change it without those changes being # reflected in anyone else's copy. 
if self.scheme == 'http': headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) if is_new_proxy_conn: self._prepare_proxy(conn) # Make the request on the httplib connection object. httplib_response = self._make_request(conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked) # If we're going to release the connection in ``finally:``, then # the response doesn't need to know about the connection. Otherwise # it will also try to release it and we'll have a double-release # mess. response_conn = conn if not release_conn else None # Pass method to Response for length checking response_kw['request_method'] = method # Import httplib's response into our own wrapper object response = self.ResponseCls.from_httplib(httplib_response, pool=self, connection=response_conn, retries=retries, **response_kw) # Everything went great! clean_exit = True except queue.Empty: # Timed out by queue. raise EmptyPoolError(self, "No pool connections are available.") except (TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError, CertificateError) as e: # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False if isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError('Cannot connect to proxy.', e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError('Connection aborted.', e) retries = retries.increment(method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]) retries.sleep() # Keep track of the error for the retry warning. err = e finally: if not clean_exit: # We hit some kind of exception, handled or otherwise. We need # to throw the connection away unless explicitly told not to. # Close the connection, set the variable to None, and make sure # we put the None back in the pool to avoid leaking it. conn = conn and conn.close() release_this_conn = True if release_this_conn: # Put the connection back to be reused. If the connection is # expired then it will be None, which will get replaced with a # fresh connection during _get_conn. self._put_conn(conn) if not conn: # Try again log.warning("Retrying (%r) after connection " "broken by '%r': %s", retries, err, url) return self.urlopen(method, url, body, headers, retries, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, body_pos=body_pos, **response_kw) def drain_and_release_conn(response): try: # discard any remaining response body, the connection will be # released back to the pool once the entire response is read response.read() except (TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError) as e: pass # Handle redirect? 
redirect_location = redirect and response.get_redirect_location() if redirect_location: if response.status == 303: method = 'GET' try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_redirect: # Drain and release the connection for this response, since # we're not returning it to be released manually. drain_and_release_conn(response) raise return response # drain and return the connection to the pool before recursing drain_and_release_conn(response) retries.sleep_for_retry(response) log.debug("Redirecting %s -> %s", url, redirect_location) return self.urlopen( method, redirect_location, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, body_pos=body_pos, **response_kw) # Check if we should retry the HTTP response. has_retry_after = bool(response.getheader('Retry-After')) if retries.is_retry(method, response.status, has_retry_after): try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_status: # Drain and release the connection for this response, since # we're not returning it to be released manually. drain_and_release_conn(response) raise return response # drain and return the connection to the pool before recursing drain_and_release_conn(response) retries.sleep(response) log.debug("Retry: %s", url) return self.urlopen( method, url, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, body_pos=body_pos, **response_kw) return response class HTTPSConnectionPool(HTTPConnectionPool): """ Same as :class:`.HTTPConnectionPool`, but HTTPS. When Python is compiled with the :mod:`ssl` module, then :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, instead of :class:`.HTTPSConnection`. :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, ``assert_hostname`` and ``host`` in this order to verify connections. If ``assert_hostname`` is False, no verification is done. The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket. """ scheme = 'https' ConnectionCls = HTTPSConnection def __init__(self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, ssl_version=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None, **conn_kw): HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, block, headers, retries, _proxy, _proxy_headers, **conn_kw) if ca_certs and cert_reqs is None: cert_reqs = 'CERT_REQUIRED' self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint def _prepare_conn(self, conn): """ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` and establish the tunnel if proxy is used. 
""" if isinstance(conn, VerifiedHTTPSConnection): conn.set_cert(key_file=self.key_file, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint) conn.ssl_version = self.ssl_version return conn def _prepare_proxy(self, conn): """ Establish tunnel connection early, because otherwise httplib would improperly set Host: header to proxy's IP:port. """ # Python 2.7+ try: set_tunnel = conn.set_tunnel except AttributeError: # Platform-specific: Python 2.6 set_tunnel = conn._set_tunnel if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older set_tunnel(self._proxy_host, self.port) else: set_tunnel(self._proxy_host, self.port, self.proxy_headers) conn.connect() def _new_conn(self): """ Return a fresh :class:`httplib.HTTPSConnection`. """ self.num_connections += 1 log.debug("Starting new HTTPS connection (%d): %s", self.num_connections, self.host) if not self.ConnectionCls or self.ConnectionCls is DummyConnection: raise SSLError("Can't connect to HTTPS URL because the SSL " "module is not available.") actual_host = self.host actual_port = self.port if self.proxy is not None: actual_host = self.proxy.host actual_port = self.proxy.port conn = self.ConnectionCls(host=actual_host, port=actual_port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw) return self._prepare_conn(conn) def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` conn.connect() if not conn.is_verified: warnings.warn(( 'Unverified HTTPS request is being made. ' 'Adding certificate verification is strongly advised. See: ' 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' '#ssl-warnings'), InsecureRequestWarning) def connection_from_url(url, **kw): """ Given a url, return an :class:`.ConnectionPool` instance of its host. This is a shortcut for not having to parse out the scheme, host, and port of the url before creating an :class:`.ConnectionPool` instance. :param url: Absolute URL string that must include the scheme. Port is optional. :param \\**kw: Passes additional parameters to the constructor of the appropriate :class:`.ConnectionPool`. Useful for specifying things like timeout, maxsize, headers, etc. Example:: >>> conn = connection_from_url('http://google.com/') >>> r = conn.request('GET', '/') """ scheme, host, port = get_host(url) port = port or port_by_scheme.get(scheme, 80) if scheme == 'https': return HTTPSConnectionPool(host, port=port, **kw) else: return HTTPConnectionPool(host, port=port, **kw) def _ipv6_host(host): """ Process IPv6 address literals """ # httplib doesn't like it when we include brackets in IPv6 addresses # Specifically, if we include brackets but also pass the port then # httplib crazily doubles up the square brackets on the Host header. # Instead, we need to make sure we never pass ``None`` as the port. # However, for backward compatibility reasons we can't actually # *assert* that. See http://bugs.python.org/issue28539 # # Also if an IPv6 address literal has a zone identifier, the # percent sign might be URIencoded, convert it back into ASCII if host.startswith('[') and host.endswith(']'): host = host.replace('%25', '%').strip('[]') return host
bsd-3-clause
-3,788,248,291,605,174,000
-5,995,499,386,859,953,000
38.069613
99
0.59565
false
codasus/django-blogages
blogages/django/core/files/uploadhandler.py
136
7193
""" Base file upload handler classes, and the built-in concrete subclasses """ try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.files.uploadedfile import TemporaryUploadedFile, InMemoryUploadedFile from django.utils import importlib __all__ = ['UploadFileException','StopUpload', 'SkipFile', 'FileUploadHandler', 'TemporaryFileUploadHandler', 'MemoryFileUploadHandler', 'load_handler', 'StopFutureHandlers'] class UploadFileException(Exception): """ Any error having to do with uploading files. """ pass class StopUpload(UploadFileException): """ This exception is raised when an upload must abort. """ def __init__(self, connection_reset=False): """ If ``connection_reset`` is ``True``, Django knows will halt the upload without consuming the rest of the upload. This will cause the browser to show a "connection reset" error. """ self.connection_reset = connection_reset def __unicode__(self): if self.connection_reset: return u'StopUpload: Halt current upload.' else: return u'StopUpload: Consume request data, then halt.' class SkipFile(UploadFileException): """ This exception is raised by an upload handler that wants to skip a given file. """ pass class StopFutureHandlers(UploadFileException): """ Upload handers that have handled a file and do not want future handlers to run should raise this exception instead of returning None. """ pass class FileUploadHandler(object): """ Base class for streaming upload handlers. """ chunk_size = 64 * 2 ** 10 #: The default chunk size is 64 KB. def __init__(self, request=None): self.file_name = None self.content_type = None self.content_length = None self.charset = None self.request = request def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): """ Handle the raw input from the client. Parameters: :input_data: An object that supports reading via .read(). :META: ``request.META``. :content_length: The (integer) value of the Content-Length header from the client. :boundary: The boundary from the Content-Type header. Be sure to prepend two '--'. """ pass def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None): """ Signal that a new file has been started. Warning: As with any data from the client, you should not trust content_length (and sometimes won't even get it). """ self.field_name = field_name self.file_name = file_name self.content_type = content_type self.content_length = content_length self.charset = charset if content_type_extra is None: content_type_extra = {} self.content_type_extra = content_type_extra def receive_data_chunk(self, raw_data, start): """ Receive data from the streamed upload parser. ``start`` is the position in the file of the chunk. """ raise NotImplementedError() def file_complete(self, file_size): """ Signal that a file has completed. File size corresponds to the actual size accumulated by all the chunks. Subclasses should return a valid ``UploadedFile`` object. """ raise NotImplementedError() def upload_complete(self): """ Signal that the upload is complete. Subclasses should perform cleanup that is necessary for this handler. """ pass class TemporaryFileUploadHandler(FileUploadHandler): """ Upload handler that streams data into a temporary file. 
""" def __init__(self, *args, **kwargs): super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs) def new_file(self, file_name, *args, **kwargs): """ Create the file object to append to as data is coming in. """ super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs) self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset) def receive_data_chunk(self, raw_data, start): self.file.write(raw_data) def file_complete(self, file_size): self.file.seek(0) self.file.size = file_size return self.file class MemoryFileUploadHandler(FileUploadHandler): """ File upload handler to stream uploads into memory (used for small files). """ def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): """ Use the content_length to signal whether or not this handler should be in use. """ # Check the content-length header to see if we should # If the post is too large, we cannot use the Memory handler. if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE: self.activated = False else: self.activated = True def new_file(self, *args, **kwargs): super(MemoryFileUploadHandler, self).new_file(*args, **kwargs) if self.activated: self.file = StringIO() raise StopFutureHandlers() def receive_data_chunk(self, raw_data, start): """ Add the data to the StringIO file. """ if self.activated: self.file.write(raw_data) else: return raw_data def file_complete(self, file_size): """ Return a file object if we're activated. """ if not self.activated: return self.file.seek(0) return InMemoryUploadedFile( file = self.file, field_name = self.field_name, name = self.file_name, content_type = self.content_type, size = file_size, charset = self.charset ) def load_handler(path, *args, **kwargs): """ Given a path to a handler, return an instance of that handler. E.g.:: >>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request) <TemporaryFileUploadHandler object at 0x...> """ i = path.rfind('.') module, attr = path[:i], path[i+1:] try: mod = importlib.import_module(module) except ImportError, e: raise ImproperlyConfigured('Error importing upload handler module %s: "%s"' % (module, e)) except ValueError, e: raise ImproperlyConfigured('Error importing upload handler module. Is FILE_UPLOAD_HANDLERS a correctly defined list or tuple?') try: cls = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured('Module "%s" does not define a "%s" upload handler backend' % (module, attr)) return cls(*args, **kwargs)
mit
-4,240,780,207,435,017,000
4,337,760,458,356,220,000
31.844749
135
0.62255
false
Zephor5/scrapy
scrapy/spiders/__init__.py
134
3606
""" Base class for Scrapy spiders See documentation in docs/topics/spiders.rst """ import logging import warnings from scrapy import signals from scrapy.http import Request from scrapy.utils.trackref import object_ref from scrapy.utils.url import url_is_from_spider from scrapy.utils.deprecate import create_deprecated_class from scrapy.exceptions import ScrapyDeprecationWarning class Spider(object_ref): """Base class for scrapy spiders. All spiders must inherit from this class. """ name = None custom_settings = None def __init__(self, name=None, **kwargs): if name is not None: self.name = name elif not getattr(self, 'name', None): raise ValueError("%s must have a name" % type(self).__name__) self.__dict__.update(kwargs) if not hasattr(self, 'start_urls'): self.start_urls = [] @property def logger(self): logger = logging.getLogger(self.name) return logging.LoggerAdapter(logger, {'spider': self}) def log(self, message, level=logging.DEBUG, **kw): """Log the given message at the given log level This helper wraps a log call to the logger within the spider, but you can use it directly (e.g. Spider.logger.info('msg')) or use any other Python logger too. """ self.logger.log(level, message, **kw) @classmethod def from_crawler(cls, crawler, *args, **kwargs): spider = cls(*args, **kwargs) spider._set_crawler(crawler) return spider def set_crawler(self, crawler): warnings.warn("set_crawler is deprecated, instantiate and bound the " "spider to this crawler with from_crawler method " "instead.", category=ScrapyDeprecationWarning, stacklevel=2) assert not hasattr(self, 'crawler'), "Spider already bounded to a " \ "crawler" self._set_crawler(crawler) def _set_crawler(self, crawler): self.crawler = crawler self.settings = crawler.settings crawler.signals.connect(self.close, signals.spider_closed) def start_requests(self): for url in self.start_urls: yield self.make_requests_from_url(url) def make_requests_from_url(self, url): return Request(url, dont_filter=True) def parse(self, response): raise NotImplementedError @classmethod def update_settings(cls, settings): settings.setdict(cls.custom_settings or {}, priority='spider') @classmethod def handles_request(cls, request): return url_is_from_spider(request.url, cls) @staticmethod def close(spider, reason): closed = getattr(spider, 'closed', None) if callable(closed): return closed(reason) def __str__(self): return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self)) __repr__ = __str__ BaseSpider = create_deprecated_class('BaseSpider', Spider) class ObsoleteClass(object): def __init__(self, message): self.message = message def __getattr__(self, name): raise AttributeError(self.message) spiders = ObsoleteClass( '"from scrapy.spider import spiders" no longer works - use ' '"from scrapy.spiderloader import SpiderLoader" and instantiate ' 'it with your project settings"' ) # Top-level imports from scrapy.spiders.crawl import CrawlSpider, Rule from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider from scrapy.spiders.sitemap import SitemapSpider
bsd-3-clause
-6,761,880,524,788,717,000
953,312,531,293,705,500
29.820513
78
0.642263
false
euphorie/Euphorie
src/euphorie/client/tests/test_functional_country.py
1
2524
# coding=utf-8 from euphorie.client.tests.utils import addSurvey from euphorie.client.tests.utils import registerUserInClient from euphorie.content.tests.utils import BASIC_SURVEY from euphorie.testing import EuphorieFunctionalTestCase import urllib class CountryFunctionalTests(EuphorieFunctionalTestCase): def test_surveys_filtered_by_language(self): survey = """<sector xmlns="http://xml.simplon.biz/euphorie/survey/1.0"> <title>Sector</title> <survey> <title>Survey</title> <language>en</language> </survey> </sector>""" survey_nl = """<sector xmlns="http://xml.simplon.biz/euphorie/survey/1.0"> <title>Branche</title> <survey> <title>Vragenlijst</title> <language>nl</language> </survey> </sector>""" # noqa self.loginAsPortalOwner() addSurvey(self.portal, survey) addSurvey(self.portal, survey_nl) browser = self.get_browser() # Pass the language as URL parameter to ensure that we get the NL # version browser.open("%s?language=nl" % self.portal.client.absolute_url()) registerUserInClient(browser, link="Registreer") # Note, this used to test that the URL was that of the client, # in the correct country (nl), with `?language=nl-NL` appended. # I don't see where in the code this language URL parameter would # come from, so I remove it in this test as well. self.assertEqual(browser.url, "http://nohost/plone/client/nl") self.assertEqual( browser.getControl(name="survey").options, ["branche/vragenlijst"] ) browser.open("%s?language=en" % self.portal.client["nl"].absolute_url()) self.assertEqual(browser.getControl(name="survey").options, ["sector/survey"]) def test_must_select_valid_survey(self): self.loginAsPortalOwner() addSurvey(self.portal, BASIC_SURVEY) browser = self.get_browser() browser.open(self.portal.client["nl"].absolute_url()) registerUserInClient(browser) data = urllib.urlencode( {"action": "new", "survey": "", "title:utf8:ustring": "Foo"} ) browser.handleErrors = False browser.open(browser.url, data) self.assertEqual(browser.url, "http://nohost/plone/client/nl")
gpl-2.0
-3,930,878,734,704,798,700
8,435,964,840,025,946,000
44.071429
86
0.603011
false
dan-bowerman/PAGER
PAGER_Scripts/publishService.py
1
21916
# -*- coding: UTF-8 -*- """Functions to aid in publishing a service in the publication workflow. """ import glob import json import os import random import string import urllib2 import xml.dom.minidom as DOM from xml.sax.saxutils import escape import arcpy import checkError def createMXD(inFolder, template,logs): """Create MXD from the layers in the folder. Args: inFolder: Path of folder to work from. template: Template MXD file. logs: log list holds all log items for current publication Returns: A new MXD file. """ checkError.printLog(logs,"Creating MXD...") arcpy.env.workspace = inFolder #Open the template mxd = arcpy.mapping.MapDocument(template) #Save the template to a new MXD, specific for this data mxd.saveACopy(inFolder + "\\" + "publishMXD.mxd") #Reopen the new file mxd = None mxd = arcpy.mapping.MapDocument(inFolder + "\\" + "publishMXD.mxd") #Add layer #http://help.arcgis.com/en/arcgisdesktop/10.0/help/index.html#/AddLayer/00s300000025000000/ #http://gis.stackexchange.com/questions/4882/how-do-i-add-a-shapefile-in-arcgis-via-python shps = arcpy.ListFeatureClasses("*.shp", "") dataFrame = arcpy.mapping.ListDataFrames(mxd, "*")[0] if shps: for shp in shps: newLayer = arcpy.mapping.Layer(inFolder + "\\" + shp) arcpy.mapping.AddLayer(dataFrame, newLayer, "BOTTOM") applyRandomSymbology(mxd, dataFrame, 0,logs) del newLayer mxd.save() checkError.printLog(logs,"Publishing MXD created") else: #If there's no shapefile checkError.printLog(logs,"No shapefile. Check payload folder") return mxd def applyRandomSymbology(mxd, dataFrame, layerIndex,logs): """Change the specified layer's symbology to a random colour. Args: mxd: MXD file. dataFrame: DataFrame object of the MXD file. layerIndex: Index value of layer. logs: log list holds all log items for current publication """ #Layer you want to update updateLayer = arcpy.mapping.ListLayers(mxd, "", dataFrame)[layerIndex] #Grab the properties of the layer desc = arcpy.Describe(updateLayer) groupLayerFile = None if desc.shapeType == 'Point' or desc.shapeType == 'Polygon' or desc.shapeType == 'Polyline': groupLayerFile = arcpy.mapping.Layer(r"%s\%sColours.lyr" % (os.path.dirname(__file__), desc.shapeType)) else: return groupLayerList = arcpy.mapping.ListLayers(groupLayerFile) groupLayersCount = len(groupLayerList) #Start with 1 because the first layer of groupLayerList is a group layer randomNumber = random.randint(1, groupLayersCount - 1) #Select random layer you want to apply to updateLayer sourceLayer = groupLayerList[randomNumber] arcpy.mapping.UpdateLayer(dataFrame, updateLayer, sourceLayer, True) #Use ArcGIS for Server REST API to get the list of map services that is already #published def getCatalog(server, port,logs): """Use ArcGIS for Server REST API to get the list of map service that are already published. Args: server: Domain of server to connect to. port: Port of server to connect to. logs: log list holds all log items for current publication Returns: List of map services. 
""" serviceList = [] baseUrl = "http://{}:{}/arcgis/rest/services".format(server, port) catalog = json.load(urllib2.urlopen(baseUrl + "/" + "?f=json")) if "error" in catalog: return services = catalog['services'] for service in services: response = json.load(urllib2.urlopen(baseUrl + '/' + service['name'] + '/' + service['type'] + "?f=json")) serviceList.append(service['name']) folders = catalog['folders'] for folderName in folders: catalog = json.load(urllib2.urlopen(baseUrl + "/" + folderName + "?f=json")) if "error" in catalog: return services = catalog['services'] for service in services: response = json.load(urllib2.urlopen(baseUrl + '/' + service['name'] + '/' + service['type'] + "?f=json")) serviceList.append(service['name']) return serviceList def serviceStatus(serviceFullName,smallKey, smallKeyFolder, server, port,geocatUrl, geocatUsername, geocatPassword,logs): """Check the status of a pubilshed service. Args: smallKey: Small key of current payload. SmallKeyFolder: folder of current payload serviceFullName: Name of the service. server: Domain of server to connect to. port: Port of server to connect to. geocatUrl: geocat Url geocatUsername: geocat user name geocatPassword: geocat password logs: log list holds all log items for current publication Returns: A string - 'ERROR' or 'SUCCESS'. """ status = 'SUCCESS' baseUrl = "http://{}:{}/arcgis/rest/services".format(server, port) response = json.load(urllib2.urlopen(baseUrl + '/' + serviceFullName + '/' + 'MapServer' + "?f=json")) if "error" in response: status = 'ERROR' else: #check further if there is any records returned queryUrl = baseUrl + '/' + serviceFullName + '/' + 'MapServer' queryUrl= queryUrl + "/0/query?where=1%3D1&text=&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&outFields=&returnGeometry=true&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=true&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=false&f=json" response= json.load(urllib2.urlopen(queryUrl)) if "error" in response: status ="ERROR" checkError.printLog(logs,"Service " + smallKey + " returns error.") onlineResources.updateErrorStatus(smallKey, smallKeyFolder, RESTENDPOINTNOTFOUND['code'], geocatUrl, geocatUsername, geocatPassword) return status #July 8, 2014 - Not working def makeDescriptor(smallkey, baseUrl,logs): """Use the makeDescriptor service to create a JSON descriptor file. Assumption: The JSON file exists in a folder. Args: smallKey: Small key of current payload. baseUrl: Base URL of makeDescriptor service. logs: log list holds all log items for current publication """ makeDescriptorUrl = baseUrl + '/' + smallkey print "make descriptorUrl:"+ makeDescriptorUrl response = json.load(urllib2.urlopen(makeDescriptorUrl + "?f=json")) if 'Error' in response: checkError.printLog(logs,response['Error']) else: checkError.printLog(logs,response['msg']) def getFrenchText(prop): """Gets the French text for the given property and returns a string. Must be parent node that contains "gmd:LocalisedCharacterString" as a direct child. Args: prop: Nodelist object to retrieve text from. Returns: String of French text (or empty if none exists). 
""" try: localisedString = prop.item(0).getElementsByTagName("gmd:LocalisedCharacterString") if localisedString.length > 0 and localisedString.item(0).hasChildNodes(): return localisedString.item(0).firstChild.data else: return "" except: return "" def getEnglishText(prop): """Gets the English text for the given property and returns a string. Must be parent node that contains "gco:CharacterString" as a direct child. Args: prop: Nodelist object to retrieve text from. Returns: String of English text (or empty if none exists). """ try: characterString = prop.item(0).getElementsByTagName("gco:CharacterString") if characterString.length > 0 and characterString.item(0).hasChildNodes(): return characterString.item(0).firstChild.data else: return "" except: return "" def joinStrings((strings), separator=" / "): """Joins strings divided by a separator string and returns the result. Filters out empty strings. Args: (strings): Tuple of strings (i.e. (englishText, frenchText)). separator (optional): Separator string (default = " / "). Returns: The joined string. """ return separator.join(filter(None, strings)) def setServiceProperties(prop, doc, propList): """Sets WMS/WFS service properties using propList dictionary values. Args: prop: DOM element node/property to be altered. doc: DOM Document instance. propList: Dictionary of WMS/WFS properties. """ if prop.firstChild.data in propList: propValue = propList.get(prop.firstChild.data) if prop.nextSibling.hasChildNodes(): prop.nextSibling.firstChild.replaceWholeText(propValue) else: txt = doc.createTextNode(propValue) prop.nextSibling.appendChild(txt) def escapeSpecialCharacters(propList): """Substitutes special characters in dictionary with an escape sequence and returns a dictionary. See: http://resources.arcgis.com/en/help/main/10.2/index.html#//00sq00000082000000 Args: propList: Dictionary of WMS/WFS properties to be parsed. Returns: Dictionary with substituted escape sequences. """ chars = {"\"": "&quot;", "'": "&apos;"} for k, v in propList.items(): #Uses xml.sax.saxutils.escape with custom entities for single/double #quotes propList[k] = escape(v, chars) return propList def getFirstElement(nodeList, tagName): """Gets the first child element of a node list specified by a tag name and returns a node list object. Args: nodeList: Node list object to be searched. tagName: Element name to search for. Returns: A NodeList object. """ return nodeList.item(0).getElementsByTagName(tagName) def getMetadata(workspace, smallKey): """Gets the metadata records (Eng/Fr) from supplied XML and returns a dictionary. Args: workspace: Absolute path of workspace folder. smallKey: Small key of current payload. Returns: A dictionary filled with metadata records. 
""" #WMS/WFS combined property list with default values propList = {u"title": u"", u"abstract": u"", u"keyword": u"", u"contactPerson": u"Inquiry Centre / Informathèque", u"individualName": u"Inquiry Centre / Informathèque", u"contactPosition": u"", u"positionName": u"", u"contactOrganization": u"Environment Canada / Environnement Canada", u"providerName": u"Environment Canada / Environnement Canada", u"address": u"10 Wellington, 23rd Floor / 10, rue Wellington, 23e étage", u"deliveryPoint": u"10 Wellington, 23rd Floor / 10, rue Wellington, 23e étage", u"addressType": u"", u"city": u"Gatineau", u"stateOrProvince": u"QC", u"administrativeArea": u"QC", u"postCode": u"K1A0H3", u"postalCode": u"K1A0H3", u"country": u"Canada", u"contactVoiceTelephone": u"800-668-6767", u"phone": u"800-668-6767", u"contactFacsimileTelephone": u"819-994-1412", u"facsimile": u"819-994-1412", u"contactElectronicMailAddress": u"[email protected]", u"electronicMailAddress": u"[email protected]", u"fees": u"None / Aucun", u"accessConstraints": u""} metadataXML = os.path.abspath(os.path.join(workspace, "..", smallKey + ".xml")) doc = DOM.parse(metadataXML) identificationInfoNode = doc.getElementsByTagName("gmd:identificationInfo") #Drill down to title node citationNode = getFirstElement(identificationInfoNode, "gmd:citation") titleNode = getFirstElement(citationNode, "gmd:title") propList["title"] = joinStrings((getEnglishText(titleNode), getFrenchText(titleNode))) #Drill down to abstract node abstractNode = getFirstElement(identificationInfoNode, "gmd:abstract") propList["abstract"] = joinStrings((getEnglishText(abstractNode), getFrenchText(abstractNode))) #Drill down to position node pointOfContactNode = getFirstElement(identificationInfoNode, "gmd:pointOfContact") positionNameNode = getFirstElement(pointOfContactNode, "gmd:positionName") propList["contactPosition"] = joinStrings((getEnglishText(positionNameNode), getFrenchText(positionNameNode))) propList["positionName"] = propList["contactPosition"] #Drill down to first keyword node descriptiveKeywordsNode = getFirstElement(identificationInfoNode, "gmd:descriptiveKeywords") keywordNode = getFirstElement(descriptiveKeywordsNode, "gmd:keyword") propList["keyword"] = joinStrings((getEnglishText(keywordNode), getFrenchText(keywordNode)), ", ") #Drill down to constraints node resourceConstraintsNode = getFirstElement(identificationInfoNode, "gmd:resourceConstraints") otherConstraintsNode = getFirstElement(resourceConstraintsNode, "gmd:otherConstraints") propList["accessConstraints"] = joinStrings((getEnglishText(otherConstraintsNode), getFrenchText(otherConstraintsNode))) return propList def enableCapabilities(soeType, sddraft, smallKey, workspace,logs): """Enable capabilities for the service and set maxRecordCount. Args: soeType: List of capabilities. sddraft: Path to Service Definition Draft file. smallKey: Small key of current payload. workspace: Absolute path of workspace folder. logs: log list holds all log items for current publication Returns: Path to output .sddraft file. """ #Properties dictionary for WMS/WFS Service propList = getMetadata(workspace, smallKey) propList = escapeSpecialCharacters(propList) #New maxRecordCount to set for publishing services (default: 1000) maxRecordCount = 10000 #New maxInstances to set for publishing services (default: 2) maxInstances = 1 #Read the sddraft xml. doc = DOM.parse(sddraft) #Find all elements named TypeName. This is where the server object #extension (SOE) names are defined. 
typeNames = doc.getElementsByTagName('TypeName') for typeName in typeNames: #Get the TypeName whose properties we want to modify. if typeName.firstChild.data in soeType: extension = typeName.parentNode for extElement in extension.childNodes: #Enabled SOE. if extElement.tagName == 'Enabled': extElement.firstChild.data = 'true' #Set WMS/WFS service properties if typeName.firstChild.data == "WMSServer" or typeName.firstChild.data == "WFSServer": svcExtension = typeName.parentNode for extElement in svcExtension.childNodes: if extElement.tagName == "Props": for propArray in extElement.childNodes: for propSetProperty in propArray.childNodes: for prop in propSetProperty.childNodes: if prop.nodeType == 1 and prop.tagName == "Key": setServiceProperties(prop, doc, propList) #Set maxRecordCount for MapServer services elif typeName.firstChild.data == "MapServer": svcConfiguration = typeName.parentNode for svcConfigElement in svcConfiguration.childNodes: if svcConfigElement.tagName == "Definition": for definitionElement in svcConfigElement.childNodes: if definitionElement.tagName == "ConfigurationProperties": for propArray in definitionElement.childNodes: for propSet in propArray.childNodes: for prop in propSet.childNodes: if prop.tagName == "Key": if prop.firstChild.data == "maxRecordCount": prop.nextSibling.firstChild.data = maxRecordCount print "maxRecordCount set to: %s" % str(maxRecordCount) if definitionElement.tagName == "Props": for propArray in definitionElement.childNodes: for propSet in propArray.childNodes: for prop in propSet.childNodes: if prop.tagName == "Key": if prop.firstChild.data == "MaxInstances": prop.nextSibling.firstChild.data = maxInstances print "maxInstances set to: %s" % str(maxInstances) print "WMS/WFS service properties set" #Output to a new sddraft outXML = os.path.join(workspace, "ogcEnabledSDDraft.sddraft") if os.path.exists(outXML): os.remove(outXML) f = open(outXML, 'w') f.write(doc.toxml(encoding="utf-8")) f.close() checkError.printLog(logs,"Service definition created with %s enabled" % ", ".join(map(str, soeType))) checkError.printLog(logs,"") del f, doc return outXML def addFileSizeToJson(smallKey, smallKeyFolder, shpFolder): """Add the file size in bytes of the .shp file to the JSON descriptor. Args: smallKey: Small key of current payload. smallKeyFolder: Folder in which data is unzipped to. shpFolder: Folder containing the .shp file. """ os.chdir(shpFolder) for file in glob.glob("*.shp"): shpFileName = file shpFilePath = os.path.join(shpFolder, shpFileName) sizeInBytes = os.path.getsize(shpFilePath) jsonPath = os.path.join(smallKeyFolder, smallKey) + '.json' with open(jsonPath) as f: data = json.load(f) data["config"]["File_Size"] = sizeInBytes with open(jsonPath, "w") as f: json.dump(data, f) def publishMXD(inFolder, mxd, connPath, serviceName, folder, logs, summary=None, tags=None): """Publish the service. Args: inFolder: Absolute path of workspace folder. mxd: MXD file to publish. connPath: Path to connection file that is used to connect to a GIS Server. serviceName: Name of the service. folder: Name of the folder to publish in. logs: log list holds all log items for current publication summary (optional): A string that represents the Item Description Summary (default=None). tags (optional): A string that represents the Item Description Tags (default=None). 
""" workspace = inFolder checkError.printLog(logs,"Publishing MXD in: " + workspace) # Provide other service details service = serviceName sddraft = workspace + "/" + service + '.sddraft' sd = workspace + "/" + service + '.sd' folderName = folder # make sure the folder is registered with the server, if not, add it to the # datastore #if workspace not in [i[2] for i in arcpy.ListDataStoreItems(connPath, 'FOLDER')]: # # both the client and server paths are the same # dsStatus = arcpy.AddDataStoreItem(connPath, "FOLDER", "Workspace for " + service, workspace, workspace) # print "Data store: " + str(dsStatus) # Create service definition draft # Data will be copied to server # Syntax: CreateMapSDDraft(map_document, out_sddraft, service_name, # {server_type}, {connection_file_path}, {copy_data_to_server}, # {folder_name}, {summary}, {tags}) arcpy.mapping.CreateMapSDDraft(mxd, sddraft, service, 'ARCGIS_SERVER', connPath, True, folderName, summary, tags) #Modify the sd to enable wms, wfs, and then wcs capabilities on the service soeType = ['WMSServer', 'WFSServer', 'GeoJSONServer'] ogcSDDraft = enableCapabilities(soeType, sddraft, service, workspace,logs) # Analyze the service definition draft analysis = arcpy.mapping.AnalyzeForSD(ogcSDDraft) # Print errors, warnings, and messages returned from the analysis checkError.printLog(logs,"The following information was returned during analysis of the MXD:") for key in ('messages', 'warnings', 'errors'): checkError.printLog(logs,'----' + key.upper() + '---') vars = analysis[key] errorList ="" if not vars: checkError.printLog(logs,' None') else: for ((message, code), layerlist) in vars.iteritems(): errorList= ' '+ message+ ' CODE %i' % code errorList = errorList+ ' applies to:' for layer in layerlist: errorList= errorList+ layer.name, checkError.printLog(logs,errorList) # Stage and upload the service if the sddraft analysis did not contain # errors if analysis['errors'] == {}: # Execute StageService. This creates the service definition. arcpy.StageService_server(ogcSDDraft, sd) # Execute UploadServiceDefinition. This uploads the service definition # and publishes the service. arcpy.UploadServiceDefinition_server(sd, connPath) checkError.printLog(logs, "Service successfully published") del ogcSDDraft else: checkError.printLog(logs,analysis['errors']) checkError.printLog(logs,"Service could not be published because errors were found during analysis.")
mit
7,134,202,663,343,142,000
1,158,388,652,262,387,700
38.198569
425
0.646632
false
tsnoam/python-telegram-bot
tests/test_update.py
1
2618
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2016 # Leandro Toledo de Souza <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains a object that represents Tests for Telegram Update""" import os import unittest import sys sys.path.append('.') import telegram from tests.base import BaseTest class UpdateTest(BaseTest, unittest.TestCase): """This object represents Tests for Telegram Update.""" def setUp(self): self.update_id = 868573637 self.message = {'message_id': 319, 'from': {'id': 12173560, 'first_name': "Leandro", 'last_name': "S.", 'username': "leandrotoledo"}, 'chat': {'id': 12173560, 'type': 'private', 'first_name': "Leandro", 'last_name': "S.", 'username': "leandrotoledo"}, 'date': 1441644592, 'text': "Update Test"} self.json_dict = { 'update_id': self.update_id, 'message': self.message } def test_update_de_json(self): update = telegram.Update.de_json(self.json_dict) self.assertEqual(update.update_id, self.update_id) self.assertTrue(isinstance(update.message, telegram.Message)) def test_update_to_json(self): update = telegram.Update.de_json(self.json_dict) self.assertTrue(self.is_json(update.to_json())) def test_update_to_dict(self): update = telegram.Update.de_json(self.json_dict) self.assertTrue(self.is_dict(update.to_dict())) self.assertEqual(update['update_id'], self.update_id) self.assertTrue(isinstance(update['message'], telegram.Message)) if __name__ == '__main__': unittest.main()
gpl-3.0
5,405,163,838,036,369,000
-5,749,183,713,368,245,000
34.863014
77
0.60084
false
jjgomera/pychemqt
lib/mEoS/MM.py
1
7485
#!/usr/bin/python3 # -*- coding: utf-8 -*- '''Pychemqt, Chemical Engineering Process simulator Copyright (C) 2009-2017, Juan José Gómez Romera <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.''' from unittest import TestCase from lib import unidades from lib.meos import MEoS class MM(MEoS): """Multiparameter equation of state for hexamethyldisiloxane""" name = "hexamethyldisiloxane" CASNumber = "107-46-0" formula = "C6H18OSi2" synonym = "MM" _refPropName = "MM" _coolPropName = "MM" rhoc = unidades.Density(304.4043888253152) Tc = unidades.Temperature(518.69997204) Pc = unidades.Pressure(1939.39, "kPa") M = 162.3768 # g/mol Tt = unidades.Temperature(204.93) Tb = unidades.Temperature(373.401) f_acent = 0.418 momentoDipolar = unidades.DipoleMoment(0.801, "Debye") id = 1376 Fi1 = {"ao_log": [1, 3], "pow": [0, 1], "ao_pow": [72.110754, -10.431499], "ao_exp": [18.59, 29.58, 19.74, 4.87], "titao": [20/Tc, 1400/Tc, 3600/Tc, 6300/Tc]} f = 8.314472 CP1 = {"ao": 51.894/f, "an": [741.34e-3/f, -416e-6/f, 70e-9/f], "pow": [1, 2, 3]} thol = { "__type__": "Helmholtz", "__name__": "Helmholtz equation of state for hexamethyldisiloxane of " "Thol (2015).", "__doi__": {"autor": "Thol, M., Dubberke, F.H., Rutkai, G., Windmann, " "T., Köster, A., Span, R., Vrabec, J.", "title": "Fundamental equation of state correlation for " "hexamethyldisiloxane based on experimental and " "molecular simulation data", "ref": "Fluid Phase Equilibria 418 (2016) 133-151", "doi": "10.1016/j.fluid.2015.09.047"}, "R": 8.3144621, "cp": Fi1, "ref": "OTO", "Tmin": Tt, "Tmax": 1200.0, "Pmax": 600000.0, "rhomax": 5.266, "M": 162.3768, "Tc": 518.7, "rhoc": 1.653, "Pc": 1931.1, "nr1": [0.5063651e-1, 8.604724, -9.179684, -1.146325, 0.4878559], "d1": [4, 1, 1, 2, 3], "t1": [1, 0.346, 0.46, 1.01, 0.59], "nr2": [-2.434088, -1.621326, 0.6239872, -2.306057, -0.5555096e-1], "d2": [1, 3, 2, 2, 7], "t2": [2.600, 3.330, 0.750, 2.950, 0.930], "c2": [2, 2, 1, 2, 1], "gamma2": [1]*5, "nr3": [9.385015, -2.493508, -3.308032, -0.1885803, -0.9883865e-1, 0.1111090, 0.1061928, -0.1452454e-1], "d3": [1, 1, 3, 3, 1, 2, 3, 1], "t3": [1.33, 1.68, 1.7, 3.08, 5.41, 1.4, 1.1, 5.3], "alfa3": [1.0334, 1.544, 1.113, 1.113, 1.11, 7.2, 1.45, 4.73], "beta3": [0.4707, 0.32, 0.404, 0.517, 0.432, 7.2, 1.2, 35.8], "gamma3": [1.7754, 0.692, 1.242, 0.421, 0.406, 0.163, 0.795, 0.88], "epsilon3": [0.8927, 0.5957, 0.559, 1.056, 1.3, 0.106, 0.181, 0.525], "nr4": []} colonna = { "__type__": "Helmholtz", "__name__": "Helmholtz equation of state for hexamethyldisiloxane of " "Colonna (2006).", "__doi__": {"autor": "Colonna, P., Nannan, N.R., Guardone, A., " "Lemmon, E.W.", "title": "Multiparameter Equations of State for Selected " "Siloxanes", "ref": "Fluid Phase Equilibria, 244:193-211, 2006.", "doi": "10.1016/j.fluid.2006.04.015"}, "R": 8.314472, "cp": CP1, "ref": "NBP", "Tmin": 273.0, "Tmax": 673.0, "Pmax": 30000.0, "rhomax": 5.21, "nr1": [1.01686012, -2.19713029, 0.75443188, -0.68003426, 0.19082162, 
0.10530133e-2], "d1": [1, 1, 1, 2, 3, 7], "t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875], "nr2": [0.6284595, 0.30903042e-1, -0.83948727, -0.20262381, -0.35131597e-1, 0.25902341e-1], "d2": [2, 5, 1, 4, 3, 4], "t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.0], "c2": [1, 1, 2, 2, 3, 3], "gamma2": [1]*6} eq = thol, colonna _vapor_Pressure = { "eq": 3, "n": [-0.850230e1, 0.380300e1, -0.341500e1, -0.467900e1, -0.310600e1], "t": [1.0, 1.5, 1.98, 3.86, 14.6]} _liquid_Density = { "eq": 1, "n": [0.4003e1, -0.6406e1, 0.115e2, -0.1004e2, 0.4e1], "t": [0.436, 0.827, 1.24, 1.7, 2.23]} _vapor_Density = { "eq": 2, "n": [-0.37421e1, -0.37087e2, 0.7546e2, -0.7167e2, -68.69, -178.4], "t": [0.428, 1.79, 2.28, 2.8, 7, 15.4]} class Test(TestCase): def test_thol(self): # Test in thesis # Thol, M. # Empirical Multiparameter Equations of State Based on Molecular # Simulation and Hybrid Data Sets # PhD thesis, Ruhr-Universität Bochum, 2015. # Appendix A, Pag 259 # The two first point are inverted in table st = MM(T=250, rhom=0.0001, eq="thol") self.assertEqual(round(st.P.MPa, 11), 2.0772979e-4) self.assertEqual(round(st.cpM.JmolK, 5), 216.58261) self.assertEqual(round(st.w, 5), 115.31572) self.assertEqual(round(st.hM.Jmol, 4), 1715.1951) self.assertEqual(round(st.sM.JmolK, 6), 38.943461) self.assertEqual(round(st.aM.Jmol, 3), -10097.968) st = MM(T=250, rhom=5, eq="thol") self.assertEqual(round(st.P.MPa, 7), 2.3550378) self.assertEqual(round(st.cpM.JmolK, 5), 290.08361) self.assertEqual(round(st.w, 4), 1068.3855) self.assertEqual(round(st.hM.Jmol, 3), -38660.057) self.assertEqual(round(st.sM.JmolK, 5), -126.50074) self.assertEqual(round(st.aM.Jmol, 4), -7505.8794) st = MM(T=400, rhom=0.05, eq="thol") self.assertEqual(round(st.P.MPa, 8), 0.15367468) self.assertEqual(round(st.cpM.JmolK, 5), 293.72933) self.assertEqual(round(st.w, 5), 134.70433) self.assertEqual(round(st.hM.Jmol, 3), 38493.817) self.assertEqual(round(st.sM.JmolK, 6), 99.143187) self.assertEqual(round(st.aM.Jmol, 4), -4236.9519) st = MM(T=400, rhom=4.5, eq="thol") self.assertEqual(round(st.P.MPa, 6), 40.937214) self.assertEqual(round(st.cpM.JmolK, 5), 339.40133) self.assertEqual(round(st.w, 5), 930.21218) self.assertEqual(round(st.hM.Jmol, 3), 13672.106) self.assertEqual(round(st.sM.JmolK, 6), 11.063873) self.assertEqual(round(st.aM.Jmol, 5), 149.39757) st = MM(T=560, rhom=4.5, eq="thol") self.assertEqual(round(st.P.MPa, 5), 123.02530) self.assertEqual(round(st.cpM.JmolK, 5), 387.27687) self.assertEqual(round(st.w, 4), 1132.8991) self.assertEqual(round(st.hM.Jmol, 3), 83661.457) self.assertEqual(round(st.sM.JmolK, 5), 119.31484) self.assertEqual(round(st.aM.Jmol, 3), -10493.807)
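A short usage sketch following the pattern of the test cases above; the import assumes pychemqt's package layout and the state point is illustrative:

# Evaluate the hexamethyldisiloxane equation of state at one state point.
from lib.mEoS.MM import MM  # assumes pychemqt's package layout

st = MM(T=300, rhom=4.8, eq="thol")  # liquid-like state point, illustrative
print(st.P.MPa)       # pressure, MPa
print(st.cpM.JmolK)   # molar isobaric heat capacity, J/(mol.K)
print(st.w)           # speed of sound, m/s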
gpl-3.0
-2,683,181,906,396,066,300
-497,272,952,476,042,240
38.373684
79
0.546718
false
40223219/2015_midterm
static/Brython3.1.1-20150328-091302/Lib/heapq.py
628
18065
"""Heap queue algorithm (a.k.a. priority queue). Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for all k, counting elements from 0. For the sake of comparison, non-existing elements are considered to be infinite. The interesting property of a heap is that a[0] is always its smallest element. Usage: heap = [] # creates an empty heap heappush(heap, item) # pushes a new item on the heap item = heappop(heap) # pops the smallest item from the heap item = heap[0] # smallest item on the heap without popping it heapify(x) # transforms list into a heap, in-place, in linear time item = heapreplace(heap, item) # pops and returns smallest item, and adds # new item; the heap size is unchanged Our API differs from textbook heap algorithms as follows: - We use 0-based indexing. This makes the relationship between the index for a node and the indexes for its children slightly less obvious, but is more suitable since Python uses 0-based indexing. - Our heappop() method returns the smallest item, not the largest. These two make it possible to view the heap as a regular Python list without surprises: heap[0] is the smallest item, and heap.sort() maintains the heap invariant! """ # Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger __about__ = """Heap queues [explanation by François Pinard] Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for all k, counting elements from 0. For the sake of comparison, non-existing elements are considered to be infinite. The interesting property of a heap is that a[0] is always its smallest element. The strange invariant above is meant to be an efficient memory representation for a tournament. The numbers below are `k', not a[k]: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In an usual binary tournament we see in sports, each cell is the winner over the two cells it tops, and we can trace the winner down the tree to see all opponents s/he had. However, in many computer applications of such tournaments, we do not need to trace the history of a winner. To be more memory efficient, when a winner is promoted, we try to replace it by something else at a lower level, and the rule becomes that a cell and the two cells it tops contain three different items, but the top cell "wins" over the two topped cells. If this heap invariant is protected at all time, index 0 is clearly the overall winner. The simplest algorithmic way to remove it and find the "next" winner is to move some loser (let's say cell 30 in the diagram above) into the 0 position, and then percolate this new 0 down the tree, exchanging values, until the invariant is re-established. This is clearly logarithmic on the total number of items in the tree. By iterating over all items, you get an O(n ln n) sort. A nice feature of this sort is that you can efficiently insert new items while the sort is going on, provided that the inserted items are not "better" than the last 0'th element you extracted. This is especially useful in simulation contexts, where the tree holds all incoming events, and the "win" condition means the smallest scheduled time. When an event schedule other events for execution, they are scheduled into the future, so they can easily go into the heap. So, a heap is a good structure for implementing schedulers (this is what I used for my MIDI sequencer :-). 
Various structures for implementing schedulers have been extensively studied, and heaps are good for this, as they are reasonably speedy, the speed is almost constant, and the worst case is not much different than the average case. However, there are other representations which are more efficient overall, yet the worst cases might be terrible. Heaps are also very useful in big disk sorts. You most probably all know that a big sort implies producing "runs" (which are pre-sorted sequences, which size is usually related to the amount of CPU memory), followed by a merging passes for these runs, which merging is often very cleverly organised[1]. It is very important that the initial sort produces the longest runs possible. Tournaments are a good way to that. If, using all the memory available to hold a tournament, you replace and percolate items that happen to fit the current run, you'll produce runs which are twice the size of the memory for random input, and much better for input fuzzily ordered. Moreover, if you output the 0'th item on disk and get an input which may not fit in the current tournament (because the value "wins" over the last output value), it cannot fit in the heap, so the size of the heap decreases. The freed memory could be cleverly reused immediately for progressively building a second heap, which grows at exactly the same rate the first heap is melting. When the first heap completely vanishes, you switch heaps and start a new run. Clever and quite effective! In a word, heaps are useful memory structures to know. I use them in a few applications, and I think it is good to keep a `heap' module around. :-) -------------------- [1] The disk balancing algorithms which are current, nowadays, are more annoying than clever, and this is a consequence of the seeking capabilities of the disks. On devices which cannot seek, like big tape drives, the story was quite different, and one had to be very clever to ensure (far in advance) that each tape movement will be the most effective possible (that is, will best participate at "progressing" the merge). Some tapes were even able to read backwards, and this was also used to avoid the rewinding time. Believe me, real good tape sorts were quite spectacular to watch! From all times, sorting has always been a Great Art! :-) """ __all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', 'nlargest', 'nsmallest', 'heappushpop'] from itertools import islice, count, tee, chain def heappush(heap, item): """Push item onto heap, maintaining the heap invariant.""" heap.append(item) _siftdown(heap, 0, len(heap)-1) def heappop(heap): """Pop the smallest item off the heap, maintaining the heap invariant.""" lastelt = heap.pop() # raises appropriate IndexError if heap is empty if heap: returnitem = heap[0] heap[0] = lastelt _siftup(heap, 0) else: returnitem = lastelt return returnitem def heapreplace(heap, item): """Pop and return the current smallest value, and add the new item. This is more efficient than heappop() followed by heappush(), and can be more appropriate when using a fixed-size heap. Note that the value returned may be larger than item! 
That constrains reasonable uses of this routine unless written as part of a conditional replacement: if item > heap[0]: item = heapreplace(heap, item) """ returnitem = heap[0] # raises appropriate IndexError if heap is empty heap[0] = item _siftup(heap, 0) return returnitem def heappushpop(heap, item): """Fast version of a heappush followed by a heappop.""" if heap and heap[0] < item: item, heap[0] = heap[0], item _siftup(heap, 0) return item def heapify(x): """Transform list into a heap, in-place, in O(len(x)) time.""" n = len(x) # Transform bottom-up. The largest index there's any point to looking at # is the largest with a child index in-range, so must have 2*i + 1 < n, # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. for i in reversed(range(n//2)): _siftup(x, i) def _heappushpop_max(heap, item): """Maxheap version of a heappush followed by a heappop.""" if heap and item < heap[0]: item, heap[0] = heap[0], item _siftup_max(heap, 0) return item def _heapify_max(x): """Transform list into a maxheap, in-place, in O(len(x)) time.""" n = len(x) for i in reversed(range(n//2)): _siftup_max(x, i) def nlargest(n, iterable): """Find the n largest elements in a dataset. Equivalent to: sorted(iterable, reverse=True)[:n] """ if n < 0: return [] it = iter(iterable) result = list(islice(it, n)) if not result: return result heapify(result) _heappushpop = heappushpop for elem in it: _heappushpop(result, elem) result.sort(reverse=True) return result def nsmallest(n, iterable): """Find the n smallest elements in a dataset. Equivalent to: sorted(iterable)[:n] """ if n < 0: return [] it = iter(iterable) result = list(islice(it, n)) if not result: return result _heapify_max(result) _heappushpop = _heappushpop_max for elem in it: _heappushpop(result, elem) result.sort() return result # 'heap' is a heap at all indices >= startpos, except possibly for pos. pos # is the index of a leaf with a possibly out-of-order value. Restore the # heap invariant. def _siftdown(heap, startpos, pos): newitem = heap[pos] # Follow the path to the root, moving parents down until finding a place # newitem fits. while pos > startpos: parentpos = (pos - 1) >> 1 parent = heap[parentpos] if newitem < parent: heap[pos] = parent pos = parentpos continue break heap[pos] = newitem # The child indices of heap index pos are already heaps, and we want to make # a heap at index pos too. We do this by bubbling the smaller child of # pos up (and so on with that child's children, etc) until hitting a leaf, # then using _siftdown to move the oddball originally at index pos into place. # # We *could* break out of the loop as soon as we find a pos where newitem <= # both its children, but turns out that's not a good idea, and despite that # many books write the algorithm that way. During a heap pop, the last array # element is sifted in, and that tends to be large, so that comparing it # against values starting from the root usually doesn't pay (= usually doesn't # get us out of the loop early). See Knuth, Volume 3, where this is # explained and quantified in an exercise. # # Cutting the # of comparisons is important, since these routines have no # way to extract "the priority" from an array element, so that intelligence # is likely to be hiding in custom comparison methods, or in array elements # storing (priority, record) tuples. Comparisons are thus potentially # expensive. 
# # On random arrays of length 1000, making this change cut the number of # comparisons made by heapify() a little, and those made by exhaustive # heappop() a lot, in accord with theory. Here are typical results from 3 # runs (3 just to demonstrate how small the variance is): # # Compares needed by heapify Compares needed by 1000 heappops # -------------------------- -------------------------------- # 1837 cut to 1663 14996 cut to 8680 # 1855 cut to 1659 14966 cut to 8678 # 1847 cut to 1660 15024 cut to 8703 # # Building the heap by using heappush() 1000 times instead required # 2198, 2148, and 2219 compares: heapify() is more efficient, when # you can use it. # # The total compares needed by list.sort() on the same lists were 8627, # 8627, and 8632 (this should be compared to the sum of heapify() and # heappop() compares): list.sort() is (unsurprisingly!) more efficient # for sorting. def _siftup(heap, pos): endpos = len(heap) startpos = pos newitem = heap[pos] # Bubble up the smaller child until hitting a leaf. childpos = 2*pos + 1 # leftmost child position while childpos < endpos: # Set childpos to index of smaller child. rightpos = childpos + 1 if rightpos < endpos and not heap[childpos] < heap[rightpos]: childpos = rightpos # Move the smaller child up. heap[pos] = heap[childpos] pos = childpos childpos = 2*pos + 1 # The leaf at pos is empty now. Put newitem there, and bubble it up # to its final resting place (by sifting its parents down). heap[pos] = newitem _siftdown(heap, startpos, pos) def _siftdown_max(heap, startpos, pos): 'Maxheap variant of _siftdown' newitem = heap[pos] # Follow the path to the root, moving parents down until finding a place # newitem fits. while pos > startpos: parentpos = (pos - 1) >> 1 parent = heap[parentpos] if parent < newitem: heap[pos] = parent pos = parentpos continue break heap[pos] = newitem def _siftup_max(heap, pos): 'Maxheap variant of _siftup' endpos = len(heap) startpos = pos newitem = heap[pos] # Bubble up the larger child until hitting a leaf. childpos = 2*pos + 1 # leftmost child position while childpos < endpos: # Set childpos to index of larger child. rightpos = childpos + 1 if rightpos < endpos and not heap[rightpos] < heap[childpos]: childpos = rightpos # Move the larger child up. heap[pos] = heap[childpos] pos = childpos childpos = 2*pos + 1 # The leaf at pos is empty now. Put newitem there, and bubble it up # to its final resting place (by sifting its parents down). heap[pos] = newitem _siftdown_max(heap, startpos, pos) # If available, use C implementation #_heapq does not exist in brython, so lets just comment it out. #try: # from _heapq import * #except ImportError: # pass def merge(*iterables): '''Merge multiple sorted inputs into a single sorted output. Similar to sorted(itertools.chain(*iterables)) but returns a generator, does not pull the data into memory all at once, and assumes that each of the input streams is already sorted (smallest to largest). 
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] ''' _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration _len = len h = [] h_append = h.append for itnum, it in enumerate(map(iter, iterables)): try: next = it.__next__ h_append([next(), itnum, next]) except _StopIteration: pass heapify(h) while _len(h) > 1: try: while True: v, itnum, next = s = h[0] yield v s[0] = next() # raises StopIteration when exhausted _heapreplace(h, s) # restore heap condition except _StopIteration: _heappop(h) # remove empty iterator if h: # fast case when only a single iterator remains v, itnum, next = h[0] yield v yield from next.__self__ # Extend the implementations of nsmallest and nlargest to use a key= argument _nsmallest = nsmallest def nsmallest(n, iterable, key=None): """Find the n smallest elements in a dataset. Equivalent to: sorted(iterable, key=key)[:n] """ # Short-cut for n==1 is to use min() when len(iterable)>0 if n == 1: it = iter(iterable) head = list(islice(it, 1)) if not head: return [] if key is None: return [min(chain(head, it))] return [min(chain(head, it), key=key)] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key)[:n] # When key is none, use simpler decoration if key is None: it = zip(iterable, count()) # decorate result = _nsmallest(n, it) return [r[0] for r in result] # undecorate # General case, slowest method in1, in2 = tee(iterable) it = zip(map(key, in1), count(), in2) # decorate result = _nsmallest(n, it) return [r[2] for r in result] # undecorate _nlargest = nlargest def nlargest(n, iterable, key=None): """Find the n largest elements in a dataset. Equivalent to: sorted(iterable, key=key, reverse=True)[:n] """ # Short-cut for n==1 is to use max() when len(iterable)>0 if n == 1: it = iter(iterable) head = list(islice(it, 1)) if not head: return [] if key is None: return [max(chain(head, it))] return [max(chain(head, it), key=key)] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key, reverse=True)[:n] # When key is none, use simpler decoration if key is None: it = zip(iterable, count(0,-1)) # decorate result = _nlargest(n, it) return [r[0] for r in result] # undecorate # General case, slowest method in1, in2 = tee(iterable) it = zip(map(key, in1), count(0,-1), in2) # decorate result = _nlargest(n, it) return [r[2] for r in result] # undecorate if __name__ == "__main__": # Simple sanity test heap = [] data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] for item in data: heappush(heap, item) sort = [] while heap: sort.append(heappop(heap)) print(sort) import doctest doctest.testmod()
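The file above is the pure-Python heapq; a minimal usage sketch (assuming it is importable as heapq, as in brython) showing how the two push-pop variants differ and how the selection helpers behave:

# Minimal sanity sketch for the API above; assumes this module is on the
# import path as "heapq".
from heapq import heapify, heappushpop, heapreplace, nlargest, nsmallest, merge

h = [5, 9, 8, 7, 6]
heapify(h)                      # h[0] is now the smallest element, 5

# heappushpop pushes first, then pops: it returns min(item, old smallest),
# so an item smaller than the root never enters the heap.
assert heappushpop(h, 1) == 1
# heapreplace pops first, then pushes: it always returns the old smallest,
# even when the new item is smaller.
assert heapreplace(h, 1) == 5

assert nlargest(2, [3, 1, 4, 1, 5], key=abs) == [5, 4]
assert nsmallest(2, [3, 1, 4, 1, 5]) == [1, 1]
assert list(merge([1, 3], [2, 4])) == [1, 2, 3, 4]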
gpl-3.0
4,348,989,329,506,305,500
5,936,227,295,028,690,000
36.870021
81
0.646701
false
alexlib/Qt-Python-Binding-Examples
windows/custom_win_flags.py
1
1096
#!/usr/bin/env python
#-*- coding:utf-8 -*-

"""
Demo of custom window flags: clicking the button opens a child window
created with the QtCore.Qt.Sheet flag (a Mac OS X-style sheet attached
to its parent window).

Tested environment: Mac OS X 10.6.8
"""

import sys

try:
    from PySide import QtCore
    from PySide import QtGui
except ImportError:
    from PyQt4 import QtCore
    from PyQt4 import QtGui


class SheetWin(QtGui.QWidget):
    def __init__(self, parent=None):
        super(SheetWin, self).__init__(parent)

        self.setWindowFlags(QtCore.Qt.Sheet)

        btn = QtGui.QPushButton("close", self)
        btn.move(10, 10)
        btn.clicked.connect(self.close)


class Demo(QtGui.QWidget):
    def __init__(self):
        super(Demo, self).__init__()

        x, y, w, h = 500, 200, 300, 400
        self.setGeometry(x, y, w, h)

        btn = QtGui.QPushButton("btn", self)
        btn.clicked.connect(self.btn_cb)

    def btn_cb(self):
        sw_obj = SheetWin(self)
        sw_obj.show()

    def show_and_raise(self):
        self.show()
        self.raise_()


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)

    demo = Demo()
    demo.show_and_raise()

    sys.exit(app.exec_())
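For comparison with Qt.Sheet above, a hedged sketch of another setWindowFlags combination from the same Qt 4 enum; the flag names are real Qt 4 flags, but the class itself is illustrative and reuses the QtCore/QtGui imports from this file:

class ToolWin(QtGui.QWidget):
    """Illustrative only: a frameless, always-on-top tool window."""
    def __init__(self, parent=None):
        super(ToolWin, self).__init__(parent)
        # setWindowFlags() replaces the current flags; OR the hints together.
        self.setWindowFlags(QtCore.Qt.Tool |
                            QtCore.Qt.FramelessWindowHint |
                            QtCore.Qt.WindowStaysOnTopHint)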
bsd-3-clause
6,210,434,554,326,081,000
-2,492,767,846,081,983,500
18.245614
46
0.57573
false
ContinuumIO/dask
dask/cache.py
2
2000
from .callbacks import Callback
from timeit import default_timer
from numbers import Number
import sys

overhead = sys.getsizeof(1.23) * 4 + sys.getsizeof(()) * 4


class Cache(Callback):
    """ Use cache for computation

    Examples
    --------

    >>> cache = Cache(1e9)  # doctest: +SKIP

    The cache can be used locally as a context manager around ``compute`` or
    ``get`` calls:

    >>> with cache:  # doctest: +SKIP
    ...     result = x.compute()

    You can also register a cache globally, so that it works for all
    computations:

    >>> cache.register()  # doctest: +SKIP
    >>> cache.unregister()  # doctest: +SKIP
    """

    def __init__(self, cache, *args, **kwargs):
        try:
            import cachey
        except ImportError as ex:
            raise ImportError(
                'Cache requires cachey, "{ex}" problem '
                "importing".format(ex=str(ex))
            ) from ex
        self._nbytes = cachey.nbytes
        if isinstance(cache, Number):
            cache = cachey.Cache(cache, *args, **kwargs)
        else:
            assert not args and not kwargs
        self.cache = cache
        self.starttimes = dict()

    def _start(self, dsk):
        self.durations = dict()
        overlap = set(dsk) & set(self.cache.data)
        for key in overlap:
            dsk[key] = self.cache.data[key]

    def _pretask(self, key, dsk, state):
        self.starttimes[key] = default_timer()

    def _posttask(self, key, value, dsk, state, id):
        duration = default_timer() - self.starttimes[key]
        deps = state["dependencies"][key]
        if deps:
            duration += max(self.durations.get(k, 0) for k in deps)
        self.durations[key] = duration
        nb = self._nbytes(value) + overhead + sys.getsizeof(key) * 4
        self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)

    def _finish(self, dsk, state, errored):
        self.starttimes.clear()
        self.durations.clear()
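A short usage sketch matching the docstring above; it assumes the optional cachey dependency is installed, and the array computation is an arbitrary stand-in for real work:

import dask.array as da
from dask.cache import Cache

# An arbitrary dask computation standing in for real work.
x = da.ones((1000, 1000), chunks=(100, 100)).sum()

cache = Cache(1e9)   # cachey manages a roughly 1 GB budget of intermediates
with cache:          # local use: computations inside the block populate it
    first = x.compute()
with cache:          # repeated work can now be served from the cache
    second = x.compute()
assert first == second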
bsd-3-clause
1,661,562,194,783,245,300
7,789,420,904,793,590,000
29.769231
87
0.572
false
HybridF5/tempest
tempest/api/volume/test_volumes_negative.py
5
13622
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from tempest.api.volume import base from tempest.common.utils import data_utils from tempest.common import waiters from tempest.lib import exceptions as lib_exc from tempest import test class VolumesV2NegativeTest(base.BaseVolumeTest): @classmethod def setup_clients(cls): super(VolumesV2NegativeTest, cls).setup_clients() cls.client = cls.volumes_client @classmethod def resource_setup(cls): super(VolumesV2NegativeTest, cls).resource_setup() cls.name_field = cls.special_fields['name_field'] # Create a test shared instance and volume for attach/detach tests cls.volume = cls.create_volume() cls.mountpoint = "/dev/vdc" @test.attr(type=['negative']) @test.idempotent_id('f131c586-9448-44a4-a8b0-54ca838aa43e') def test_volume_get_nonexistent_volume_id(self): # Should not be able to get a non-existent volume self.assertRaises(lib_exc.NotFound, self.client.show_volume, str(uuid.uuid4())) @test.attr(type=['negative']) @test.idempotent_id('555efa6e-efcd-44ef-8a3b-4a7ca4837a29') def test_volume_delete_nonexistent_volume_id(self): # Should not be able to delete a non-existent Volume self.assertRaises(lib_exc.NotFound, self.client.delete_volume, str(uuid.uuid4())) @test.attr(type=['negative']) @test.idempotent_id('1ed83a8a-682d-4dfb-a30e-ee63ffd6c049') def test_create_volume_with_invalid_size(self): # Should not be able to create volume with invalid size # in request v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.BadRequest, self.client.create_volume, size='#$%', display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('9387686f-334f-4d31-a439-33494b9e2683') def test_create_volume_with_out_passing_size(self): # Should not be able to create volume without passing size # in request v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.BadRequest, self.client.create_volume, size='', display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('41331caa-eaf4-4001-869d-bc18c1869360') def test_create_volume_with_size_zero(self): # Should not be able to create volume with size zero v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.BadRequest, self.client.create_volume, size='0', display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('8b472729-9eba-446e-a83b-916bdb34bef7') def test_create_volume_with_size_negative(self): # Should not be able to create volume with size negative v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.BadRequest, self.client.create_volume, size='-1', display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('10254ed8-3849-454e-862e-3ab8e6aa01d2') def test_create_volume_with_nonexistent_volume_type(self): # Should not be able to create volume with non-existent volume type v_name = 
data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.NotFound, self.client.create_volume, size='1', volume_type=str(uuid.uuid4()), display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('0c36f6ae-4604-4017-b0a9-34fdc63096f9') def test_create_volume_with_nonexistent_snapshot_id(self): # Should not be able to create volume with non-existent snapshot v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.NotFound, self.client.create_volume, size='1', snapshot_id=str(uuid.uuid4()), display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('47c73e08-4be8-45bb-bfdf-0c4e79b88344') def test_create_volume_with_nonexistent_source_volid(self): # Should not be able to create volume with non-existent source volume v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.NotFound, self.client.create_volume, size='1', source_volid=str(uuid.uuid4()), display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('0186422c-999a-480e-a026-6a665744c30c') def test_update_volume_with_nonexistent_volume_id(self): v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.NotFound, self.client.update_volume, volume_id=str(uuid.uuid4()), display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('e66e40d6-65e6-4e75-bdc7-636792fa152d') def test_update_volume_with_invalid_volume_id(self): v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.NotFound, self.client.update_volume, volume_id='#$%%&^&^', display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('72aeca85-57a5-4c1f-9057-f320f9ea575b') def test_update_volume_with_empty_volume_id(self): v_name = data_utils.rand_name('Volume') metadata = {'Type': 'work'} self.assertRaises(lib_exc.NotFound, self.client.update_volume, volume_id='', display_name=v_name, metadata=metadata) @test.attr(type=['negative']) @test.idempotent_id('30799cfd-7ee4-446c-b66c-45b383ed211b') def test_get_invalid_volume_id(self): # Should not be able to get volume with invalid id self.assertRaises(lib_exc.NotFound, self.client.show_volume, '#$%%&^&^') @test.attr(type=['negative']) @test.idempotent_id('c6c3db06-29ad-4e91-beb0-2ab195fe49e3') def test_get_volume_without_passing_volume_id(self): # Should not be able to get volume when empty ID is passed self.assertRaises(lib_exc.NotFound, self.client.show_volume, '') @test.attr(type=['negative']) @test.idempotent_id('1f035827-7c32-4019-9240-b4ec2dbd9dfd') def test_delete_invalid_volume_id(self): # Should not be able to delete volume when invalid ID is passed self.assertRaises(lib_exc.NotFound, self.client.delete_volume, '!@#$%^&*()') @test.attr(type=['negative']) @test.idempotent_id('441a1550-5d44-4b30-af0f-a6d402f52026') def test_delete_volume_without_passing_volume_id(self): # Should not be able to delete volume when empty ID is passed self.assertRaises(lib_exc.NotFound, self.client.delete_volume, '') @test.attr(type=['negative']) @test.idempotent_id('f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6') @test.services('compute') def test_attach_volumes_with_nonexistent_volume_id(self): srv_name = data_utils.rand_name('Instance') server = self.create_server( name=srv_name, wait_until='ACTIVE') self.addCleanup(waiters.wait_for_server_termination, self.servers_client, server['id']) 
self.addCleanup(self.servers_client.delete_server, server['id']) self.assertRaises(lib_exc.NotFound, self.client.attach_volume, str(uuid.uuid4()), instance_uuid=server['id'], mountpoint=self.mountpoint) @test.attr(type=['negative']) @test.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a') def test_detach_volumes_with_invalid_volume_id(self): self.assertRaises(lib_exc.NotFound, self.client.detach_volume, 'xxx') @test.attr(type=['negative']) @test.idempotent_id('e0c75c74-ee34-41a9-9288-2a2051452854') def test_volume_extend_with_size_smaller_than_original_size(self): # Extend volume with smaller size than original size. extend_size = 0 self.assertRaises(lib_exc.BadRequest, self.client.extend_volume, self.volume['id'], new_size=extend_size) @test.attr(type=['negative']) @test.idempotent_id('5d0b480d-e833-439f-8a5a-96ad2ed6f22f') def test_volume_extend_with_non_number_size(self): # Extend volume when size is non number. extend_size = 'abc' self.assertRaises(lib_exc.BadRequest, self.client.extend_volume, self.volume['id'], new_size=extend_size) @test.attr(type=['negative']) @test.idempotent_id('355218f1-8991-400a-a6bb-971239287d92') def test_volume_extend_with_None_size(self): # Extend volume with None size. extend_size = None self.assertRaises(lib_exc.BadRequest, self.client.extend_volume, self.volume['id'], new_size=extend_size) @test.attr(type=['negative']) @test.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb') def test_volume_extend_with_nonexistent_volume_id(self): # Extend volume size when volume is nonexistent. extend_size = int(self.volume['size']) + 1 self.assertRaises(lib_exc.NotFound, self.client.extend_volume, str(uuid.uuid4()), new_size=extend_size) @test.attr(type=['negative']) @test.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115') def test_volume_extend_without_passing_volume_id(self): # Extend volume size when passing volume id is None. extend_size = int(self.volume['size']) + 1 self.assertRaises(lib_exc.NotFound, self.client.extend_volume, None, new_size=extend_size) @test.attr(type=['negative']) @test.idempotent_id('ac6084c0-0546-45f9-b284-38a367e0e0e2') def test_reserve_volume_with_nonexistent_volume_id(self): self.assertRaises(lib_exc.NotFound, self.client.reserve_volume, str(uuid.uuid4())) @test.attr(type=['negative']) @test.idempotent_id('eb467654-3dc1-4a72-9b46-47c29d22654c') def test_unreserve_volume_with_nonexistent_volume_id(self): self.assertRaises(lib_exc.NotFound, self.client.unreserve_volume, str(uuid.uuid4())) @test.attr(type=['negative']) @test.idempotent_id('449c4ed2-ecdd-47bb-98dc-072aeccf158c') def test_reserve_volume_with_negative_volume_status(self): # Mark volume as reserved. self.client.reserve_volume(self.volume['id']) # Mark volume which is marked as reserved before self.assertRaises(lib_exc.BadRequest, self.client.reserve_volume, self.volume['id']) # Unmark volume as reserved. 
self.client.unreserve_volume(self.volume['id']) @test.attr(type=['negative']) @test.idempotent_id('0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f') def test_list_volumes_with_nonexistent_name(self): v_name = data_utils.rand_name('Volume') params = {self.name_field: v_name} fetched_volume = self.client.list_volumes(params=params)['volumes'] self.assertEqual(0, len(fetched_volume)) @test.attr(type=['negative']) @test.idempotent_id('9ca17820-a0e7-4cbd-a7fa-f4468735e359') def test_list_volumes_detail_with_nonexistent_name(self): v_name = data_utils.rand_name('Volume') params = {self.name_field: v_name} fetched_volume = \ self.client.list_volumes(detail=True, params=params)['volumes'] self.assertEqual(0, len(fetched_volume)) @test.attr(type=['negative']) @test.idempotent_id('143b279b-7522-466b-81be-34a87d564a7c') def test_list_volumes_with_invalid_status(self): params = {'status': 'null'} fetched_volume = self.client.list_volumes(params=params)['volumes'] self.assertEqual(0, len(fetched_volume)) @test.attr(type=['negative']) @test.idempotent_id('ba94b27b-be3f-496c-a00e-0283b373fa75') def test_list_volumes_detail_with_invalid_status(self): params = {'status': 'null'} fetched_volume = \ self.client.list_volumes(detail=True, params=params)['volumes'] self.assertEqual(0, len(fetched_volume)) class VolumesV1NegativeTest(VolumesV2NegativeTest): _api_version = 1 _name = 'display_name'
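One more negative case in the same assertRaises style, as a hedged sketch of how the class extends; it would live inside VolumesV2NegativeTest, and the idempotent_id below is a placeholder UUID, not one registered in tempest:

    @test.attr(type=['negative'])
    @test.idempotent_id('00000000-0000-0000-0000-000000000000')  # placeholder
    def test_unreserve_volume_with_invalid_volume_id(self):
        # Should not be able to unreserve volume when invalid ID is passed
        self.assertRaises(lib_exc.NotFound,
                          self.client.unreserve_volume,
                          '!@#$%^&*()')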
apache-2.0
-6,824,398,652,621,045,000
-7,378,212,568,259,302,000
43.957096
78
0.634855
false
codecakes/random_games
single_linkedlist.py
1
8155
## Singly Linked List ## Each Node is relatively Head to the next node it refers to. ## The Linked List can be broken down into: ## Nodes. """ Single Linked List can: - Insert at Head, Tail, Inbetween - Delete at Head, Tail, Inbetween - Add another LinkedList to Existing """ class LinkedNode(object): """ Nodes have following attributes: 1. Node Value 2. Next Node 3. Head Node? 4. Last Node? """ def __init__(self): self.val = None self.nxt = None #this is telling the length of orphan node self or the length of chain #so far including the self node self.len = 1 #set the following if part of a linked list chain self.head = 0 self.last = 0 #this ptr in each node tells the head node ptr self.headptr = None def setVal(self, val): self.val = val def getVal(self): return self.val def getLen(self): return self.len def setNxt(self, other): self.nxt = other #O(1) def setHead(self): """If there is a successive node, set this as Head node""" if self.hasNxt() and self.head==0: self.head = 1 self.headptr = self def setLast(self): """If this is the last node, set this as Last node""" if not self.hasNxt() and self.last==0: self.last = 1 def insertHead(self, newNode): """Insert newNode as Head node""" if self.isHead(): node = self else: node = self.headptr if node: newNode.len = 1 newNode.setNxt(node) #O(1) newNode.setHead() def insertLast(self, newNode): """insert newNode as Last node""" newNode.setLast() node = self #O(k<=n) while not node.isLast(): node = node.getNxt() node.last = 0 node.setNxt(newNode) newNode.len = node.len + 1 newNode.headptr = self.headptr def getNxt(self): return self.nxt def hasNxt(self): return self.getNxt() != None def disconnectNxt(self): if self.hasNxt(): self.nxt = None self.head = 0 def isHead(self): return self.head == 1 def isLast(self): return self.last == 1 class SingleLinkedList(object): def __init__(self, link_node): self.head_node = link_node self.last_node = None self.master_ln = 0 self.updateHead(self.head_node) def add_node(self, val): assert self.head_node == self.last_node.headptr newNode = LinkedNode() newNode.setVal(val) self.last_node.setNxt(newNode) self.last_node = newNode newNode.len = self.master_ln + 1 self.master_ln = newNode.len newNode.headptr = self.head_node def deleteNode(self, val): prev = node = self.head_node node_val = node.val while node_val != val and node.hasNxt(): prev = node node = node.getNxt() node_val = node.val if node_val == val: break if node_val == val: if node.isLast(): #if its last node prev.disconnectNxt() head = prev.headptr elif node.isHead(): #if its head node nxt = node.getNxt() node.disconnectNxt() nxt.setHead() head = nxt elif node.hasNxt(): #if its somewhere between nxt = node.getNxt() node.disconnectNxt() nxt.len = prev.len + 1 prev.setNxt(nxt) head = prev.headptr self.updateHead(head) def updateHead(self, headptr): """ Set each node's headptr to head node of Chain. 
Set incremental length as node increases """ node = headptr self.head_node = node.headptr = headptr node.head = 1 node.len = 1 ln = node.len #till the end of chain while node.hasNxt(): #get next node node = node.getNxt() #Set each node's headptr to head node of Chain node.headptr = headptr node.head = 0 #Set incremental length as node increases node.len = ln + 1 ln = node.len node.setLast() self.last_node = node self.master_ln = ln assert node.headptr.len == 1 def updateList(self, otherlist): """Merge another linked list from end to current linked list""" other = otherlist.head_node if other.isHead(): other.head = 0 #Ripple headptr and inc length across nodes self.last_node.setNxt(other) self.updateHead(self.head_node) def insertPos(self, val, pos): """Insert newNode as position pos if legit. Current Pos is always 1 and relatively current node is the start node. But each node gives absolute chain/linked-list length. """ if pos < 0 or pos > self.master_ln: return newNode = LinkedNode() newNode.setVal(val) if pos == self.master_ln: self.last_node.insertLast(newNode) self.master_ln += 1 #newNode.headptr = self.head_node self.last_node = newNode return elif pos == self.head_node.len: self.head_node.insertHead(newNode) self.head_node = newNode self.updateHead(self.head_node) return node = self.head_node while node.len < pos-1: node = node.getNxt() if node.len == pos-1: break assert node.len == pos-1 nxt_node = node.getNxt() node.setNxt(newNode) #newNode has nxt_node's postion newNode.setNxt(nxt_node) #nxt_node's position is incremented by 1 self.updateHead(self.head_node) return #just for testing if __name__ == "__main__": a,b,c,d,n = [LinkedNode() for _ in xrange(5)] n.setVal(1) map(lambda x: x[0].setVal(x[1]), ((a,2), (b,3), (c,4), (d,5))) n.setNxt(a) a.setNxt(b) b.setNxt(c) L = SingleLinkedList(n) node = L.head_node print "="*10 while node.hasNxt(): print node.val print node.headptr.val print node.len print node = node.getNxt() print node.val print node.headptr.val print node.len L.insertPos(40, 2) #L.insertPos(1, 30) node = L.head_node print "="*10 while node.hasNxt(): print node.val print node.headptr.val print node.len print node = node.getNxt() print node.val print node.headptr.val print node.len L.deleteNode(40) L.deleteNode(3) L.deleteNode(1) L.deleteNode(2) print "="*10 node = L.head_node while node.hasNxt(): print node.val print node.headptr.val print node.len print node = node.getNxt() print node.val print node.headptr.val print node.len L.add_node(40) L.insertPos(20, 1) print "="*10 node = L.head_node while node.hasNxt(): print node.val print node.headptr.val print node.len print node = node.getNxt() print node.val print node.headptr.val print node.len f,g,h = [LinkedNode() for _ in xrange(3)] map(lambda x: x[0].setVal(x[1]), ((f,300), (g,45), (h, 56))) f.setNxt(g) g.setNxt(h) R = SingleLinkedList(f) L.updateList(R) print "="*10 node = L.head_node while node.hasNxt(): print node.val print node.headptr.val print node.len print node = node.getNxt() print node.val print node.headptr.val print node.len
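A small worked example of the 1-based position semantics insertPos implements, run against the classes above under Python 2 (head insert first, then an append at pos == master_ln):

# Worked example of insertPos: the head node has len == 1, and inserting
# at pos == master_ln appends after the current last node.
head = LinkedNode()
head.setVal('a')
chain = SingleLinkedList(head)
chain.add_node('b')        # chain: a -> b           (master_ln == 2)
chain.insertPos('x', 1)    # head insert: x -> a -> b
chain.insertPos('y', 3)    # append:      x -> a -> b -> y
assert chain.head_node.val == 'x'
assert chain.last_node.val == 'y'
assert chain.master_ln == 4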
mit
-4,902,148,599,703,186,000
-971,895,365,504,262,500
25.917492
79
0.527406
false
googlefonts/fontbakery
Lib/fontbakery/reporters/__init__.py
1
4250
""" Separation of Concerns Disclaimer: While created specifically for checking fonts and font-families this module has no domain knowledge about fonts. It can be used for any kind of (document) checking. Please keep it so. It will be valuable for other domains as well. Domain specific knowledge should be encoded only in the Profile (Checks, Conditions) and MAYBE in *customized* reporters e.g. subclasses. """ from collections import Counter from fontbakery.checkrunner import ( DEBUG , INFO , WARN , ERROR , STARTCHECK , SKIP , PASS , FAIL , ENDCHECK , SECTIONSUMMARY , START , END ) from fontbakery.errors import ProtocolViolationError class FontbakeryReporter: def __init__(self, is_async=False, runner=None, output_file=None, loglevels=None): self._started = None self._ended = None self._order = None self._results = [] # ENDCHECK events in order of appearance self._indexes = {} self._tick = 0 self._counter = Counter() self.loglevels = loglevels # Runner should know if it is async! self.is_async = is_async self.runner = runner self._worst_check_status = None self.output_file = output_file def run(self, order=None): """ self.runner must be present """ for event in self.runner.run(order=order): self.receive(event) @property def order(self): return self._order def write(self): if self.output_file is not None: raise NotImplementedError( f'{type(self)} does not implement the "write" method, ' 'but it has an "output_file".' ) # reporters without an output file do nothing here def _get_key(self, identity): section, check, iterargs = identity return (str(section) if section else section , str(check) if check else check , iterargs ) def _get_index(self, identity): key = self._get_key(identity) try: return self._indexes[key] except KeyError: self._indexes[key] = len(self._indexes) return self._indexes[key] def _set_order(self, order): self._order = tuple(order) length = len(self._order) self._counter['(not finished)'] = length - len(self._results) self._indexes = dict(zip(map(self._get_key, self._order), range(length))) def _cleanup(self, event): pass def _output(self, event): pass def _register(self, event): status, message, identity = event self._tick += 1 if status == START: self._set_order(message) self._started = event if status == END: self._ended = event if status == ENDCHECK: self._results.append(event) self._counter[message.name] += 1 self._counter['(not finished)'] -= 1 @property def worst_check_status(self): """ Returns a status or None if there was no check result """ return self._worst_check_status def receive(self, event): status, message, identity = event if self._started is None and status != START: raise ProtocolViolationError(f'Received Event before status START:' f' {status} {message}.') if self._ended: status, message, identity = event raise ProtocolViolationError(f'Received Event after status END:' f' {status} {message}.') if status is ENDCHECK and (self._worst_check_status is None \ or self._worst_check_status < message): # we only record ENDCHECK, because check runner may in the future # have tools to upgrade/downgrade the actually worst status # this should be future proof. self._worst_check_status = message self._register(event) self._cleanup(event) self._output(event)
apache-2.0
754,277,490,275,333,200
-8,394,810,045,013,369,000
31.442748
86
0.567294
false
JeyZeta/Dangerous
Dangerous/Golismero/thirdparty_libs/dns/zone.py
26
37794
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS Zones.""" from __future__ import generators import sys import re import dns.exception import dns.name import dns.node import dns.rdataclass import dns.rdatatype import dns.rdata import dns.rrset import dns.tokenizer import dns.ttl import dns.grange class BadZone(dns.exception.DNSException): """The zone is malformed.""" pass class NoSOA(BadZone): """The zone has no SOA RR at its origin.""" pass class NoNS(BadZone): """The zone has no NS RRset at its origin.""" pass class UnknownOrigin(BadZone): """The zone's origin is unknown.""" pass class Zone(object): """A DNS zone. A Zone is a mapping from names to nodes. The zone object may be treated like a Python dictionary, e.g. zone[name] will retrieve the node associated with that name. The I{name} may be a dns.name.Name object, or it may be a string. In the either case, if the name is relative it is treated as relative to the origin of the zone. @ivar rdclass: The zone's rdata class; the default is class IN. @type rdclass: int @ivar origin: The origin of the zone. @type origin: dns.name.Name object @ivar nodes: A dictionary mapping the names of nodes in the zone to the nodes themselves. @type nodes: dict @ivar relativize: should names in the zone be relativized? @type relativize: bool @cvar node_factory: the factory used to create a new node @type node_factory: class or callable """ node_factory = dns.node.Node __slots__ = ['rdclass', 'origin', 'nodes', 'relativize'] def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True): """Initialize a zone object. @param origin: The origin of the zone. @type origin: dns.name.Name object @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int""" self.rdclass = rdclass self.origin = origin self.nodes = {} self.relativize = relativize def __eq__(self, other): """Two zones are equal if they have the same origin, class, and nodes. @rtype: bool """ if not isinstance(other, Zone): return False if self.rdclass != other.rdclass or \ self.origin != other.origin or \ self.nodes != other.nodes: return False return True def __ne__(self, other): """Are two zones not equal? 
@rtype: bool """ return not self.__eq__(other) def _validate_name(self, name): if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) elif not isinstance(name, dns.name.Name): raise KeyError("name parameter must be convertable to a DNS name") if name.is_absolute(): if not name.is_subdomain(self.origin): raise KeyError("name parameter must be a subdomain of the zone origin") if self.relativize: name = name.relativize(self.origin) return name def __getitem__(self, key): key = self._validate_name(key) return self.nodes[key] def __setitem__(self, key, value): key = self._validate_name(key) self.nodes[key] = value def __delitem__(self, key): key = self._validate_name(key) del self.nodes[key] def __iter__(self): return self.nodes.iterkeys() def iterkeys(self): return self.nodes.iterkeys() def keys(self): return self.nodes.keys() def itervalues(self): return self.nodes.itervalues() def values(self): return self.nodes.values() def iteritems(self): return self.nodes.iteritems() def items(self): return self.nodes.items() def get(self, key): key = self._validate_name(key) return self.nodes.get(key) def __contains__(self, other): return other in self.nodes def find_node(self, name, create=False): """Find a node in the zone, possibly creating it. @param name: the name of the node to find @type name: dns.name.Name object or string @param create: should the node be created if it doesn't exist? @type create: bool @raises KeyError: the name is not known and create was not specified. @rtype: dns.node.Node object """ name = self._validate_name(name) node = self.nodes.get(name) if node is None: if not create: raise KeyError node = self.node_factory() self.nodes[name] = node return node def get_node(self, name, create=False): """Get a node in the zone, possibly creating it. This method is like L{find_node}, except it returns None instead of raising an exception if the node does not exist and creation has not been requested. @param name: the name of the node to find @type name: dns.name.Name object or string @param create: should the node be created if it doesn't exist? @type create: bool @rtype: dns.node.Node object or None """ try: node = self.find_node(name, create) except KeyError: node = None return node def delete_node(self, name): """Delete the specified node if it exists. It is not an error if the node does not exist. """ name = self._validate_name(name) if self.nodes.has_key(name): del self.nodes[name] def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE, create=False): """Look for rdata with the specified name and type in the zone, and return an rdataset encapsulating it. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. The rdataset returned is not a copy; changes to it will change the zone. KeyError is raised if the name or type are not found. Use L{get_rdataset} if you want to have None returned instead. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string @param create: should the node and rdataset be created if they do not exist? 
@type create: bool @raises KeyError: the node or rdata could not be found @rtype: dns.rrset.RRset object """ name = self._validate_name(name) if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) node = self.find_node(name, create) return node.find_rdataset(self.rdclass, rdtype, covers, create) def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE, create=False): """Look for rdata with the specified name and type in the zone, and return an rdataset encapsulating it. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. The rdataset returned is not a copy; changes to it will change the zone. None is returned if the name or type are not found. Use L{find_rdataset} if you want to have KeyError raised instead. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string @param create: should the node and rdataset be created if they do not exist? @type create: bool @rtype: dns.rrset.RRset object """ try: rdataset = self.find_rdataset(name, rdtype, covers, create) except KeyError: rdataset = None return rdataset def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE): """Delete the rdataset matching I{rdtype} and I{covers}, if it exists at the node specified by I{name}. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. It is not an error if the node does not exist, or if there is no matching rdataset at the node. If the node has no rdatasets after the deletion, it will itself be deleted. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string """ name = self._validate_name(name) if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) node = self.get_node(name) if not node is None: node.delete_rdataset(self.rdclass, rdtype, covers) if len(node) == 0: self.delete_node(name) def replace_rdataset(self, name, replacement): """Replace an rdataset at name. It is not an error if there is no rdataset matching I{replacement}. Ownership of the I{replacement} object is transferred to the zone; in other words, this method does not store a copy of I{replacement} at the node, it stores I{replacement} itself. If the I{name} node does not exist, it is created. @param name: the owner name @type name: DNS.name.Name object or string @param replacement: the replacement rdataset @type replacement: dns.rdataset.Rdataset """ if replacement.rdclass != self.rdclass: raise ValueError('replacement.rdclass != zone.rdclass') node = self.find_node(name, True) node.replace_rdataset(replacement) def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE): """Look for rdata with the specified name and type in the zone, and return an RRset encapsulating it. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. This method is less efficient than the similar L{find_rdataset} because it creates an RRset instead of returning the matching rdataset. 
It may be more convenient for some uses since it returns an object which binds the owner name to the rdata. This method may not be used to create new nodes or rdatasets; use L{find_rdataset} instead. KeyError is raised if the name or type are not found. Use L{get_rrset} if you want to have None returned instead. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string @raises KeyError: the node or rdata could not be found @rtype: dns.rrset.RRset object """ name = self._validate_name(name) if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers) rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers) rrset.update(rdataset) return rrset def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE): """Look for rdata with the specified name and type in the zone, and return an RRset encapsulating it. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. This method is less efficient than the similar L{get_rdataset} because it creates an RRset instead of returning the matching rdataset. It may be more convenient for some uses since it returns an object which binds the owner name to the rdata. This method may not be used to create new nodes or rdatasets; use L{find_rdataset} instead. None is returned if the name or type are not found. Use L{find_rrset} if you want to have KeyError raised instead. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string @rtype: dns.rrset.RRset object """ try: rrset = self.find_rrset(name, rdtype, covers) except KeyError: rrset = None return rrset def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY, covers=dns.rdatatype.NONE): """Return a generator which yields (name, rdataset) tuples for all rdatasets in the zone which have the specified I{rdtype} and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default, then all rdatasets will be matched. @param rdtype: int or string @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string """ if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) for (name, node) in self.iteritems(): for rds in node: if rdtype == dns.rdatatype.ANY or \ (rds.rdtype == rdtype and rds.covers == covers): yield (name, rds) def iterate_rdatas(self, rdtype=dns.rdatatype.ANY, covers=dns.rdatatype.NONE): """Return a generator which yields (name, ttl, rdata) tuples for all rdatas in the zone which have the specified I{rdtype} and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default, then all rdatas will be matched. 
@param rdtype: int or string @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string """ if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) for (name, node) in self.iteritems(): for rds in node: if rdtype == dns.rdatatype.ANY or \ (rds.rdtype == rdtype and rds.covers == covers): for rdata in rds: yield (name, rds.ttl, rdata) def to_file(self, f, sorted=True, relativize=True, nl=None): """Write a zone to a file. @param f: file or string. If I{f} is a string, it is treated as the name of a file to open. @param sorted: if True, the file will be written with the names sorted in DNSSEC order from least to greatest. Otherwise the names will be written in whatever order they happen to have in the zone's dictionary. @param relativize: if True, domain names in the output will be relativized to the zone's origin (if possible). @type relativize: bool @param nl: The end of line string. If not specified, the output will use the platform's native end-of-line marker (i.e. LF on POSIX, CRLF on Windows, CR on Macintosh). @type nl: string or None """ if sys.hexversion >= 0x02030000: # allow Unicode filenames str_type = basestring else: str_type = str if nl is None: opts = 'w' else: opts = 'wb' if isinstance(f, str_type): f = file(f, opts) want_close = True else: want_close = False try: if sorted: names = self.keys() names.sort() else: names = self.iterkeys() for n in names: l = self[n].to_text(n, origin=self.origin, relativize=relativize) if nl is None: print >> f, l else: f.write(l) f.write(nl) finally: if want_close: f.close() def check_origin(self): """Do some simple checking of the zone's origin. @raises dns.zone.NoSOA: there is no SOA RR @raises dns.zone.NoNS: there is no NS RRset @raises KeyError: there is no origin node """ if self.relativize: name = dns.name.empty else: name = self.origin if self.get_rdataset(name, dns.rdatatype.SOA) is None: raise NoSOA if self.get_rdataset(name, dns.rdatatype.NS) is None: raise NoNS class _MasterReader(object): """Read a DNS master file @ivar tok: The tokenizer @type tok: dns.tokenizer.Tokenizer object @ivar ttl: The default TTL @type ttl: int @ivar last_name: The last name read @type last_name: dns.name.Name object @ivar current_origin: The current origin @type current_origin: dns.name.Name object @ivar relativize: should names in the zone be relativized? @type relativize: bool @ivar zone: the zone @type zone: dns.zone.Zone object @ivar saved_state: saved reader state (used when processing $INCLUDE) @type saved_state: list of (tokenizer, current_origin, last_name, file) tuples. @ivar current_file: the file object of the $INCLUDed file being parsed (None if no $INCLUDE is active). @ivar allow_include: is $INCLUDE allowed? @type allow_include: bool @ivar check_origin: should sanity checks of the origin node be done? The default is True. 
@type check_origin: bool """ def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone, allow_include=False, check_origin=True): if isinstance(origin, (str, unicode)): origin = dns.name.from_text(origin) self.tok = tok self.current_origin = origin self.relativize = relativize self.ttl = 0 self.last_name = None self.zone = zone_factory(origin, rdclass, relativize=relativize) self.saved_state = [] self.current_file = None self.allow_include = allow_include self.check_origin = check_origin def _eat_line(self): while 1: token = self.tok.get() if token.is_eol_or_eof(): break def _rr_line(self): """Process one line from a DNS master file.""" # Name if self.current_origin is None: raise UnknownOrigin token = self.tok.get(want_leading = True) if not token.is_whitespace(): self.last_name = dns.name.from_text(token.value, self.current_origin) else: token = self.tok.get() if token.is_eol_or_eof(): # treat leading WS followed by EOL/EOF as if they were EOL/EOF. return self.tok.unget(token) name = self.last_name if not name.is_subdomain(self.zone.origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone.origin) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError # TTL try: ttl = dns.ttl.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL: ttl = self.ttl # Class try: rdclass = dns.rdataclass.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except: rdclass = self.zone.rdclass if rdclass != self.zone.rdclass: raise dns.exception.SyntaxError("RR class is not zone's class") # Type try: rdtype = dns.rdatatype.from_text(token.value) except: raise dns.exception.SyntaxError("unknown rdatatype '%s'" % token.value) n = self.zone.nodes.get(name) if n is None: n = self.zone.node_factory() self.zone.nodes[name] = n try: rd = dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, False) except dns.exception.SyntaxError: # Catch and reraise. (ty, va) = sys.exc_info()[:2] raise va except: # All exceptions that occur in the processing of rdata # are treated as syntax errors. This is not strictly # correct, but it is correct almost all of the time. # We convert them to syntax errors so that we can emit # helpful filename:line info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va))) rd.choose_relativity(self.zone.origin, self.relativize) covers = rd.covers() rds = n.find_rdataset(rdclass, rdtype, covers, True) rds.add(rd, ttl) def _parse_modify(self, side): # Here we catch everything in '{' '}' in a group so we can replace it # with ''. is_generate1 = re.compile("^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$") is_generate2 = re.compile("^.*\$({(\+|-?)(\d+)}).*$") is_generate3 = re.compile("^.*\$({(\+|-?)(\d+),(\d+)}).*$") # Sometimes there are modifiers in the hostname. These come after # the dollar sign. They are in the form: ${offset[,width[,base]]}. 
        # Make names
        g1 = is_generate1.match(side)
        if g1:
            mod, sign, offset, width, base = g1.groups()
            if sign == '':
                sign = '+'
        g2 = is_generate2.match(side)
        if g2:
            mod, sign, offset = g2.groups()
            if sign == '':
                sign = '+'
            width = 0
            base = 'd'
        g3 = is_generate3.match(side)
        if g3:
            mod, sign, offset, width = g3.groups()
            if sign == '':
                sign = '+'
            base = 'd'

        if not (g1 or g2 or g3):
            mod = ''
            sign = '+'
            offset = 0
            width = 0
            base = 'd'

        if base != 'd':
            raise NotImplementedError

        return mod, sign, offset, width, base

    def _generate_line(self):
        # range lhs [ttl] [class] type rhs [ comment ]
        """Process one line containing the GENERATE statement from a DNS
        master file."""
        if self.current_origin is None:
            raise UnknownOrigin

        token = self.tok.get()
        # Range (required)
        try:
            start, stop, step = dns.grange.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except:
            raise dns.exception.SyntaxError

        # lhs (required)
        try:
            lhs = token.value
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except:
            raise dns.exception.SyntaxError

        # TTL
        try:
            ttl = dns.ttl.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.ttl.BadTTL:
            ttl = self.ttl
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            rdclass = self.zone.rdclass
        if rdclass != self.zone.rdclass:
            raise dns.exception.SyntaxError("RR class is not zone's class")
        # Type
        try:
            rdtype = dns.rdatatype.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except:
            raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
                                            token.value)

        # rhs (required)
        try:
            rhs = token.value
        except:
            raise dns.exception.SyntaxError

        lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs)
        rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs)
        for i in range(start, stop + 1, step):
            # +1 because bind is inclusive and python is exclusive

            if lsign == '+':
                lindex = i + int(loffset)
            elif lsign == '-':
                lindex = i - int(loffset)

            if rsign == '-':
                rindex = i - int(roffset)
            elif rsign == '+':
                rindex = i + int(roffset)

            lzfindex = str(lindex).zfill(int(lwidth))
            rzfindex = str(rindex).zfill(int(rwidth))

            name = lhs.replace('$%s' % (lmod), lzfindex)
            rdata = rhs.replace('$%s' % (rmod), rzfindex)

            self.last_name = dns.name.from_text(name, self.current_origin)
            name = self.last_name
            if not name.is_subdomain(self.zone.origin):
                self._eat_line()
                return
            if self.relativize:
                name = name.relativize(self.zone.origin)

            n = self.zone.nodes.get(name)
            if n is None:
                n = self.zone.node_factory()
                self.zone.nodes[name] = n
            try:
                rd = dns.rdata.from_text(rdclass, rdtype, rdata,
                                         self.current_origin, False)
            except dns.exception.SyntaxError:
                # Catch and reraise.
                (ty, va) = sys.exc_info()[:2]
                raise va
            except:
                # All exceptions that occur in the processing of rdata
                # are treated as syntax errors.  This is not strictly
                # correct, but it is correct almost all of the time.
                # We convert them to syntax errors so that we can emit
                # helpful filename:line info.
(ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va))) rd.choose_relativity(self.zone.origin, self.relativize) covers = rd.covers() rds = n.find_rdataset(rdclass, rdtype, covers, True) rds.add(rd, ttl) def read(self): """Read a DNS master file and build a zone object. @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin """ try: while 1: token = self.tok.get(True, True).unescape() if token.is_eof(): if not self.current_file is None: self.current_file.close() if len(self.saved_state) > 0: (self.tok, self.current_origin, self.last_name, self.current_file, self.ttl) = self.saved_state.pop(-1) continue break elif token.is_eol(): continue elif token.is_comment(): self.tok.get_eol() continue elif token.value[0] == '$': u = token.value.upper() if u == '$TTL': token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError("bad $TTL") self.ttl = dns.ttl.from_text(token.value) self.tok.get_eol() elif u == '$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol() if self.zone.origin is None: self.zone.origin = self.current_origin elif u == '$INCLUDE' and self.allow_include: token = self.tok.get() filename = token.value token = self.tok.get() if token.is_identifier(): new_origin = dns.name.from_text(token.value, \ self.current_origin) self.tok.get_eol() elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError("bad origin in $INCLUDE") else: new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.ttl)) self.current_file = file(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin elif u == '$GENERATE': self._generate_line() else: raise dns.exception.SyntaxError("Unknown master file directive '" + u + "'") continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError, detail: (filename, line_number) = self.tok.where() if detail is None: detail = "syntax error" raise dns.exception.SyntaxError("%s:%d: %s" % (filename, line_number, detail)) # Now that we're done reading, do some basic checking of the zone. if self.check_origin: self.zone.check_origin() def from_text(text, origin = None, rdclass = dns.rdataclass.IN, relativize = True, zone_factory=Zone, filename=None, allow_include=False, check_origin=True): """Build a zone object from a master file format string. @param text: the master file format input @type text: string. @param origin: The origin of the zone; if not specified, the first $ORIGIN statement in the master file will determine the origin of the zone. @type origin: dns.name.Name object or string @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int @param relativize: should names be relativized? The default is True @type relativize: bool @param zone_factory: The zone factory to use @type zone_factory: function returning a Zone @param filename: The filename to emit when describing where an error occurred; the default is '<string>'. @type filename: string @param allow_include: is $INCLUDE allowed? @type allow_include: bool @param check_origin: should sanity checks of the origin node be done? The default is True. 
@type check_origin: bool @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin @rtype: dns.zone.Zone object """ # 'text' can also be a file, but we don't publish that fact # since it's an implementation detail. The official file # interface is from_file(). if filename is None: filename = '<string>' tok = dns.tokenizer.Tokenizer(text, filename) reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory, allow_include=allow_include, check_origin=check_origin) reader.read() return reader.zone def from_file(f, origin = None, rdclass = dns.rdataclass.IN, relativize = True, zone_factory=Zone, filename=None, allow_include=True, check_origin=True): """Read a master file and build a zone object. @param f: file or string. If I{f} is a string, it is treated as the name of a file to open. @param origin: The origin of the zone; if not specified, the first $ORIGIN statement in the master file will determine the origin of the zone. @type origin: dns.name.Name object or string @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int @param relativize: should names be relativized? The default is True @type relativize: bool @param zone_factory: The zone factory to use @type zone_factory: function returning a Zone @param filename: The filename to emit when describing where an error occurred; the default is '<file>', or the value of I{f} if I{f} is a string. @type filename: string @param allow_include: is $INCLUDE allowed? @type allow_include: bool @param check_origin: should sanity checks of the origin node be done? The default is True. @type check_origin: bool @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin @rtype: dns.zone.Zone object """ if sys.hexversion >= 0x02030000: # allow Unicode filenames; turn on universal newline support str_type = basestring opts = 'rU' else: str_type = str opts = 'r' if isinstance(f, str_type): if filename is None: filename = f f = file(f, opts) want_close = True else: if filename is None: filename = '<file>' want_close = False try: z = from_text(f, origin, rdclass, relativize, zone_factory, filename, allow_include, check_origin) finally: if want_close: f.close() return z def from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True): """Convert the output of a zone transfer generator into a zone object. @param xfr: The xfr generator @type xfr: generator of dns.message.Message objects @param relativize: should names be relativized? The default is True. It is essential that the relativize setting matches the one specified to dns.query.xfr(). @type relativize: bool @param check_origin: should sanity checks of the origin node be done? The default is True. @type check_origin: bool @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin @rtype: dns.zone.Zone object """ z = None for r in xfr: if z is None: if relativize: origin = r.origin else: origin = r.answer[0].name rdclass = r.answer[0].rdclass z = zone_factory(origin, rdclass, relativize=relativize) for rrset in r.answer: znode = z.nodes.get(rrset.name) if not znode: znode = z.node_factory() z.nodes[rrset.name] = znode zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype, rrset.covers, True) zrds.update_ttl(rrset.ttl) for rd in rrset: rd.choose_relativity(z.origin, relativize) zrds.add(rd) if check_origin: z.check_origin() return z
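A short usage sketch of the module's constructors (Python 2, matching the code above); the zone text is a minimal illustrative fixture with just enough records to satisfy check_origin():

import dns.zone
import dns.rdatatype

zone_text = """
$TTL 3600
@   IN SOA ns1.example. hostmaster.example. 1 7200 900 1209600 86400
@   IN NS  ns1.example.
ns1 IN A   192.0.2.1
"""

zone = dns.zone.from_text(zone_text, origin='example.')
# Names are relativized to the origin by default.
assert zone.get_rdataset('ns1', dns.rdatatype.A) is not None
for name, ttl, rdata in zone.iterate_rdatas('A'):
    print name, ttl, rdata        # Python 2 print, as in the module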
mit
7,958,869,256,685,804,000
2,856,067,711,282,767,000
35.980431
100
0.571255
false
harshilasu/LinkurApp
y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/ec2/bundleinstance.py
18
2754
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

"""
Represents an EC2 Bundle Task
"""

from boto.ec2.ec2object import EC2Object


class BundleInstanceTask(EC2Object):

    def __init__(self, connection=None):
        super(BundleInstanceTask, self).__init__(connection)
        self.id = None
        self.instance_id = None
        self.progress = None
        self.start_time = None
        self.state = None
        self.bucket = None
        self.prefix = None
        self.upload_policy = None
        self.upload_policy_signature = None
        self.update_time = None
        self.code = None
        self.message = None

    def __repr__(self):
        return 'BundleInstanceTask:%s' % self.id

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'bundleId':
            self.id = value
        elif name == 'instanceId':
            self.instance_id = value
        elif name == 'progress':
            self.progress = value
        elif name == 'startTime':
            self.start_time = value
        elif name == 'state':
            self.state = value
        elif name == 'bucket':
            self.bucket = value
        elif name == 'prefix':
            self.prefix = value
        elif name == 'uploadPolicy':
            self.upload_policy = value
        elif name == 'uploadPolicySignature':
            self.upload_policy_signature = value
        elif name == 'updateTime':
            self.update_time = value
        elif name == 'code':
            self.code = value
        elif name == 'message':
            self.message = value
        else:
            setattr(self, name, value)
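The class above is filled in by boto's SAX response parser rather than constructed by hand. A hedged sketch of the call that returns one, with the argument order as remembered from boto's EC2Connection.bundle_instance; every identifier and the policy document below is fake:

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
# bundle_instance(instance_id, s3_bucket, s3_prefix, s3_upload_policy)
task = conn.bundle_instance('i-12345678',
                            'my-bundle-bucket',
                            'winami-',
                            'eyJmYWtlIjogInBvbGljeSJ9')  # fake base64 policy
print task.state, task.progress   # attributes set by endElement() above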
gpl-3.0
7,155,740,968,091,552,000
7,311,776,022,869,855,000
34.307692
74
0.638707
false
NeovaHealth/odoo
addons/l10n_ve/__openerp__.py
260
2960
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
##############################################################################
#    Module programmed and financed by:
#    Vauxoo, C.A. (<http://vauxoo.com>).
#    Our community team maintains this module:
#    https://launchpad.net/~openerp-venezuela
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Venezuela - Accounting',
    'version': '1.0',
    'author': ['OpenERP SA', 'Vauxoo'],
    'category': 'Localization/Account Charts',
    'description': """
Chart of Accounts for Venezuela.
================================

Venezuela has no chart of accounts mandated by law, but the default chart
proposed here follows accounting best practices that are widely accepted in
Venezuela.

This module has served as the base for more than 1,000 companies: it is
modelled on a mix of the most common accounting software in the Venezuelan
market, so accountants should feel comfortable from their first steps with
OpenERP.

It does not claim to be a complete localization for Venezuela, but it will
help you get started with OpenERP in this country very quickly.

This module gives you:
----------------------
    - Basic taxes for Venezuela.
    - Basic data for running tests with the community localization.
    - What you need to start a company from scratch if your accounting
      needs are basic.

We recommend installing account_anglo_saxon if you want your stock valued as
it is done in Venezuela, without invoices.

If you install this module and select a custom chart, a basic chart will be
proposed, but you will then need to set the default accounts for taxes
manually.
    """,
    'depends': ['account',
                'base_vat',
                'account_chart'
                ],
    'demo': [],
    'data': ['data/account_tax_code.xml',
             'data/account_user_types.xml',
             'data/account_chart.xml',
             'data/account_tax.xml',
             'data/l10n_chart_ve_wizard.xml'
             ],
    'auto_install': False,
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-366,537,307,791,558,700
-5,051,183,828,256,924,000
37.947368
84
0.631081
false
shreddd/newt-2.0
authnz/adapters/template_adapter.py
3
2418
"""Auth Adapter Template File IMPORTANT: NOT A FUNCTIONAL ADAPTER. FUNCTIONS MUST BE IMPLEMENTED Notes: - Each of the functions defined below must return a json serializable object, json_response, or valid HttpResponse object - A json_response creates an HttpResponse object given parameters: - content: string with the contents of the response - status: string with the status of the response - status_code: HTTP status code - error: string with the error message if there is one """ from common.response import json_response import logging import re logger = logging.getLogger("newt." + __name__) def get_status(request): """Returns the current user status Keyword arguments: request -- Django HttpRequest """ pass def login(request): """Logs the user in and returns the status Keyword arguments: request -- Django HttpRequest """ pass def logout(request): """Logs the user out and returns the status Keyword arguments: request -- Django HttpRequest """ pass """A tuple list in the form of: ( (compiled_regex_exp, associated_function, request_required), ... ) Note: The compiled_regex_exp must have named groups corresponding to the arguments of the associated_function Note: if request_required is True, the associated_function must have request as the first argument Example: patterns = ( (re.compile(r'/usage/(?P<path>.+)$'), get_usage, False), (re.compile(r'/image/(?P<query>.+)$'), get_image, False), (re.compile(r'/(?P<path>.+)$'), get_resource, False), ) """ patterns = ( ) def extras_router(request, query): """Maps a query to a function if the pattern matches and returns result Keyword arguments: request -- Django HttpRequest query -- the query to be matched against """ for pattern, func, req in patterns: match = pattern.match(query) if match and req: return func(request, **match.groupdict()) elif match: return func(**match.groupdict()) # Returns an Unimplemented response if no pattern matches return json_response(status="Unimplemented", status_code=501, error="", content="query: %s" % query)
bsd-2-clause
5,113,117,577,913,672,000
-8,619,267,623,563,836,000
27.127907
75
0.624483
false
miptliot/edx-platform
common/lib/capa/capa/tests/test_customrender.py
37
2295
from lxml import etree import unittest import xml.sax.saxutils as saxutils from capa.tests.helpers import test_capa_system from capa import customrender # just a handy shortcut lookup_tag = customrender.registry.get_class_for_tag def extract_context(xml): """ Given an xml element corresponding to the output of test_capa_system.render_template, get back the original context """ return eval(xml.text) def quote_attr(s): return saxutils.quoteattr(s)[1:-1] # don't want the outer quotes class HelperTest(unittest.TestCase): ''' Make sure that our helper function works! ''' def check(self, d): xml = etree.XML(test_capa_system().render_template('blah', d)) self.assertEqual(d, extract_context(xml)) def test_extract_context(self): self.check({}) self.check({1, 2}) self.check({'id', 'an id'}) self.check({'with"quote', 'also"quote'}) class SolutionRenderTest(unittest.TestCase): ''' Make sure solutions render properly. ''' def test_rendering(self): solution = 'To compute unicorns, count them.' xml_str = """<solution id="solution_12">{s}</solution>""".format(s=solution) element = etree.fromstring(xml_str) renderer = lookup_tag('solution')(test_capa_system(), element) self.assertEqual(renderer.id, 'solution_12') # Our test_capa_system "renders" templates to a div with the repr of the context. xml = renderer.get_html() context = extract_context(xml) self.assertEqual(context, {'id': 'solution_12'}) class MathRenderTest(unittest.TestCase): ''' Make sure math renders properly. ''' def check_parse(self, latex_in, mathjax_out): xml_str = """<math>{tex}</math>""".format(tex=latex_in) element = etree.fromstring(xml_str) renderer = lookup_tag('math')(test_capa_system(), element) self.assertEqual(renderer.mathstr, mathjax_out) def test_parsing(self): self.check_parse('$abc$', '[mathjaxinline]abc[/mathjaxinline]') self.check_parse('$abc', '$abc') self.check_parse(r'$\displaystyle 2+2$', '[mathjax] 2+2[/mathjax]') # NOTE: not testing get_html yet because I don't understand why it's doing what it's doing.
agpl-3.0
130,280,251,498,731,040
3,359,159,228,788,607,500
28.805195
102
0.646623
false
anryko/ansible
test/units/modules/network/fortios/test_fortios_user_security_exempt_list.py
21
7891
# Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest from mock import ANY from ansible.module_utils.network.fortios.fortios import FortiOSHandler try: from ansible.modules.network.fortios import fortios_user_security_exempt_list except ImportError: pytest.skip("Could not load required modules for testing", allow_module_level=True) @pytest.fixture(autouse=True) def connection_mock(mocker): connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_security_exempt_list.Connection') return connection_class_mock fos_instance = FortiOSHandler(connection_mock) def test_user_security_exempt_list_creation(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) expected_data = { 'description': 'test_value_3', 'name': 'default_name_4', } set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_user_security_exempt_list_creation_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) expected_data = { 'description': 'test_value_3', 'name': 'default_name_4', } set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_user_security_exempt_list_removal(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} delete_method_mock = 
mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) input_data = { 'username': 'admin', 'state': 'absent', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) delete_method_mock.assert_called_with('user', 'security-exempt-list', mkey=ANY, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_user_security_exempt_list_deletion_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) input_data = { 'username': 'admin', 'state': 'absent', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) delete_method_mock.assert_called_with('user', 'security-exempt-list', mkey=ANY, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_user_security_exempt_list_idempotent(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) expected_data = { 'description': 'test_value_3', 'name': 'default_name_4', } set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 404 def test_user_security_exempt_list_filter_foreign_attributes(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'user_security_exempt_list': { 'random_attribute_not_valid': 'tag', 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) expected_data = { 'description': 'test_value_3', 'name': 'default_name_4', } set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200
gpl-3.0
2,277,146,836,519,351,600
1,481,524,442,769,251,800
35.031963
142
0.670384
false
deandunbar/html2bwml
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
1182
3734
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### import sys from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import SJISDistributionAnalysis from .jpcntx import SJISContextAnalysis from .mbcssm import SJISSMModel from . import constants class SJISProber(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(SJISSMModel) self._mDistributionAnalyzer = SJISDistributionAnalysis() self._mContextAnalyzer = SJISContextAnalysis() self.reset() def reset(self): MultiByteCharSetProber.reset(self) self._mContextAnalyzer.reset() def get_charset_name(self): return "SHIFT_JIS" def feed(self, aBuf): aLen = len(aBuf) for i in range(0, aLen): codingState = self._mCodingSM.next_state(aBuf[i]) if codingState == constants.eError: if constants._debug: sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n') self._mState = constants.eNotMe break elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break elif codingState == constants.eStart: charLen = self._mCodingSM.get_current_charlen() if i == 0: self._mLastChar[1] = aBuf[0] self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:], charLen) self._mDistributionAnalyzer.feed(self._mLastChar, charLen) else: self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3 - charLen], charLen) self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], charLen) self._mLastChar[0] = aBuf[aLen - 1] if self.get_state() == constants.eDetecting: if (self._mContextAnalyzer.got_enough_data() and (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): self._mState = constants.eFoundIt return self.get_state() def get_confidence(self): contxtCf = self._mContextAnalyzer.get_confidence() distribCf = self._mDistributionAnalyzer.get_confidence() return max(contxtCf, distribCf)
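A hedged usage sketch: in normal operation chardet's UniversalDetector drives this prober, but it can be exercised directly. The bytes below are the hiragana greeting "konnichiwa" encoded as Shift_JIS; the confidence value is illustrative.

prober = SJISProber()
data = bytearray(b'\x82\xb1\x82\xf1\x82\xc9\x82\xbf\x82\xcd')  # Shift_JIS bytes
prober.feed(data)
print(prober.get_charset_name())   # SHIFT_JIS
print(prober.get_confidence())     # approaches 1.0 as more evidence is fed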
mit
-8,978,022,916,448,887,000
4,525,154,003,864,666,600
40.032967
78
0.602571
false
liuliwork/django
tests/model_options/models/default_related_name.py
414
1056
from django.db import models class Author(models.Model): first_name = models.CharField(max_length=128) last_name = models.CharField(max_length=128) class Editor(models.Model): name = models.CharField(max_length=128) bestselling_author = models.ForeignKey(Author, models.CASCADE) class Book(models.Model): title = models.CharField(max_length=128) authors = models.ManyToManyField(Author) editor = models.ForeignKey(Editor, models.CASCADE, related_name="edited_books") class Meta: default_related_name = "books" class Store(models.Model): name = models.CharField(max_length=128) address = models.CharField(max_length=128) class Meta: abstract = True default_related_name = "%(app_label)s_%(model_name)ss" class BookStore(Store): available_books = models.ManyToManyField(Book) class EditorStore(Store): editor = models.ForeignKey(Editor, models.CASCADE) available_books = models.ManyToManyField(Book) class Meta: default_related_name = "editor_stores"
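A hedged sketch of what these Meta options buy at query time (assumes the models above are migrated; the data is illustrative):

author = Author.objects.create(first_name='Ursula', last_name='Le Guin')
editor = Editor.objects.create(name='Ed', bestselling_author=author)
book = Book.objects.create(title='A Wizard of Earthsea', editor=editor)
book.authors.add(author)

author.books.all()          # reverse M2M accessor named by Meta.default_related_name
editor.edited_books.all()   # an explicit related_name overrides the default
editor.editor_stores.all()  # per-model override of the abstract base's pattern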
bsd-3-clause
-2,037,479,404,153,098,000
1,017,019,134,896,840,600
24.756098
83
0.706439
false
rghe/ansible
lib/ansible/modules/network/panos/panos_security_rule.py
16
19823
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, techbizdev <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: panos_security_rule short_description: Create security rule policy on PAN-OS devices or Panorama management console. description: - Security policies allow you to enforce rules and take action, and can be as general or specific as needed. The policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the traffic is applied, the more specific rules must precede the more general ones. author: "Ivan Bojer (@ivanbojer), Robert Hagen (@rnh556)" version_added: "2.4" requirements: - pan-python can be obtained from PyPi U(https://pypi.org/project/pan-python/) - pandevice can be obtained from PyPi U(https://pypi.org/project/pandevice/) - xmltodict can be obtained from PyPi U(https://pypi.org/project/xmltodict/) notes: - Checkmode is not supported. - Panorama is supported. options: ip_address: description: - IP address (or hostname) of PAN-OS device being configured. required: true username: description: - Username credentials to use for auth unless I(api_key) is set. default: "admin" password: description: - Password credentials to use for auth unless I(api_key) is set. required: true api_key: description: - API key that can be used instead of I(username)/I(password) credentials. operation: description: - The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete). default: 'add' rule_name: description: - Name of the security rule. required: true rule_type: description: - Type of security rule (version 6.1 of PanOS and above). default: "universal" description: description: - Description for the security rule. tag_name: description: - Administrative tags that can be added to the rule. Note, tags must be already defined. source_zone: description: - List of source zones. default: "any" destination_zone: description: - List of destination zones. default: "any" source_ip: description: - List of source addresses. default: "any" source_user: description: - Use users to enforce policy for individual users or a group of users. default: "any" hip_profiles: description: > - If you are using GlobalProtect with host information profile (HIP) enabled, you can also base the policy on information collected by GlobalProtect. For example, the user access level can be determined HIP that notifies the firewall about the user's local configuration. default: "any" destination_ip: description: - List of destination addresses. default: "any" application: description: - List of applications. default: "any" service: description: - List of services. default: "application-default" log_start: description: - Whether to log at session start. log_end: description: - Whether to log at session end. default: true action: description: - Action to apply once rules maches. default: "allow" group_profile: description: > - Security profile group that is already defined in the system. This property supersedes antivirus, vulnerability, spyware, url_filtering, file_blocking, data_filtering, and wildfire_analysis properties. antivirus: description: - Name of the already defined antivirus profile. vulnerability: description: - Name of the already defined vulnerability profile. 
spyware: description: - Name of the already defined spyware profile. url_filtering: description: - Name of the already defined url_filtering profile. file_blocking: description: - Name of the already defined file_blocking profile. data_filtering: description: - Name of the already defined data_filtering profile. wildfire_analysis: description: - Name of the already defined wildfire_analysis profile. devicegroup: description: > - Device groups are used for the Panorama interaction with Firewall(s). The group must exists on Panorama. If device group is not define we assume that we are contacting Firewall. commit: description: - Commit configuration if changed. type: bool default: 'yes' ''' EXAMPLES = ''' - name: add an SSH inbound rule to devicegroup panos_security_rule: ip_address: '{{ ip_address }}' username: '{{ username }}' password: '{{ password }}' operation: 'add' rule_name: 'SSH permit' description: 'SSH rule test' tag_name: ['ProjectX'] source_zone: ['public'] destination_zone: ['private'] source_ip: ['any'] source_user: ['any'] destination_ip: ['1.1.1.1'] category: ['any'] application: ['ssh'] service: ['application-default'] hip_profiles: ['any'] action: 'allow' devicegroup: 'Cloud Edge' - name: add a rule to allow HTTP multimedia only from CDNs panos_security_rule: ip_address: '10.5.172.91' username: 'admin' password: 'paloalto' operation: 'add' rule_name: 'HTTP Multimedia' description: 'Allow HTTP multimedia only to host at 1.1.1.1' source_zone: ['public'] destination_zone: ['private'] source_ip: ['any'] source_user: ['any'] destination_ip: ['1.1.1.1'] category: ['content-delivery-networks'] application: ['http-video', 'http-audio'] service: ['service-http', 'service-https'] hip_profiles: ['any'] action: 'allow' - name: add a more complex rule that uses security profiles panos_security_rule: ip_address: '{{ ip_address }}' username: '{{ username }}' password: '{{ password }}' operation: 'add' rule_name: 'Allow HTTP w profile' log_start: false log_end: true action: 'allow' antivirus: 'default' vulnerability: 'default' spyware: 'default' url_filtering: 'default' wildfire_analysis: 'default' - name: delete a devicegroup security rule panos_security_rule: ip_address: '{{ ip_address }}' api_key: '{{ api_key }}' operation: 'delete' rule_name: 'Allow telnet' devicegroup: 'DC Firewalls' - name: find a specific security rule panos_security_rule: ip_address: '{{ ip_address }}' password: '{{ password }}' operation: 'find' rule_name: 'Allow RDP to DCs' register: result - debug: msg='{{result.stdout_lines}}' ''' RETURN = ''' # Default return values ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native try: import pan.xapi from pan.xapi import PanXapiError import pandevice from pandevice import base from pandevice import firewall from pandevice import panorama from pandevice import objects from pandevice import policies import xmltodict import json HAS_LIB = True except ImportError: HAS_LIB = False def get_devicegroup(device, devicegroup): dg_list = device.refresh_devices() for group in dg_list: if isinstance(group, pandevice.panorama.DeviceGroup): if group.name == devicegroup: return group return False def get_rulebase(device, devicegroup): # Build the rulebase if isinstance(device, pandevice.firewall.Firewall): rulebase = pandevice.policies.Rulebase() device.add(rulebase) elif isinstance(device, pandevice.panorama.Panorama): dg = panorama.DeviceGroup(devicegroup) device.add(dg) rulebase = policies.PreRulebase() dg.add(rulebase) else: return 
False policies.SecurityRule.refreshall(rulebase) return rulebase def find_rule(rulebase, rule_name): # Search for the rule name rule = rulebase.find(rule_name) if rule: return rule else: return False def rule_is_match(propose_rule, current_rule): match_check = ['name', 'description', 'group_profile', 'antivirus', 'vulnerability', 'spyware', 'url_filtering', 'file_blocking', 'data_filtering', 'wildfire_analysis', 'type', 'action', 'tag', 'log_start', 'log_end'] list_check = ['tozone', 'fromzone', 'source', 'source_user', 'destination', 'category', 'application', 'service', 'hip_profiles'] for check in match_check: propose_check = getattr(propose_rule, check, None) current_check = getattr(current_rule, check, None) if propose_check != current_check: return False for check in list_check: propose_check = getattr(propose_rule, check, []) current_check = getattr(current_rule, check, []) if set(propose_check) != set(current_check): return False return True def create_security_rule(**kwargs): security_rule = policies.SecurityRule( name=kwargs['rule_name'], description=kwargs['description'], fromzone=kwargs['source_zone'], source=kwargs['source_ip'], source_user=kwargs['source_user'], hip_profiles=kwargs['hip_profiles'], tozone=kwargs['destination_zone'], destination=kwargs['destination_ip'], application=kwargs['application'], service=kwargs['service'], category=kwargs['category'], log_start=kwargs['log_start'], log_end=kwargs['log_end'], action=kwargs['action'], type=kwargs['rule_type'] ) if 'tag_name' in kwargs: security_rule.tag = kwargs['tag_name'] # profile settings if 'group_profile' in kwargs: security_rule.group = kwargs['group_profile'] else: if 'antivirus' in kwargs: security_rule.virus = kwargs['antivirus'] if 'vulnerability' in kwargs: security_rule.vulnerability = kwargs['vulnerability'] if 'spyware' in kwargs: security_rule.spyware = kwargs['spyware'] if 'url_filtering' in kwargs: security_rule.url_filtering = kwargs['url_filtering'] if 'file_blocking' in kwargs: security_rule.file_blocking = kwargs['file_blocking'] if 'data_filtering' in kwargs: security_rule.data_filtering = kwargs['data_filtering'] if 'wildfire_analysis' in kwargs: security_rule.wildfire_analysis = kwargs['wildfire_analysis'] return security_rule def add_rule(rulebase, sec_rule): if rulebase: rulebase.add(sec_rule) sec_rule.create() return True else: return False def update_rule(rulebase, nat_rule): if rulebase: rulebase.add(nat_rule) nat_rule.apply() return True else: return False def main(): argument_spec = dict( ip_address=dict(required=True), password=dict(no_log=True), username=dict(default='admin'), api_key=dict(no_log=True), operation=dict(default='add', choices=['add', 'update', 'delete', 'find']), rule_name=dict(required=True), description=dict(default=''), tag_name=dict(type='list'), destination_zone=dict(type='list', default=['any']), source_zone=dict(type='list', default=['any']), source_ip=dict(type='list', default=["any"]), source_user=dict(type='list', default=['any']), destination_ip=dict(type='list', default=["any"]), category=dict(type='list', default=['any']), application=dict(type='list', default=['any']), service=dict(type='list', default=['application-default']), hip_profiles=dict(type='list', default=['any']), group_profile=dict(), antivirus=dict(), vulnerability=dict(), spyware=dict(), url_filtering=dict(), file_blocking=dict(), data_filtering=dict(), wildfire_analysis=dict(), log_start=dict(type='bool', default=False), log_end=dict(type='bool', default=True), 
rule_type=dict(default='universal'), action=dict(default='allow'), devicegroup=dict(), commit=dict(type='bool', default=True) ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_one_of=[['api_key', 'password']]) if not HAS_LIB: module.fail_json(msg='Missing required libraries.') ip_address = module.params["ip_address"] password = module.params["password"] username = module.params['username'] api_key = module.params['api_key'] operation = module.params['operation'] rule_name = module.params['rule_name'] description = module.params['description'] tag_name = module.params['tag_name'] source_zone = module.params['source_zone'] source_ip = module.params['source_ip'] source_user = module.params['source_user'] hip_profiles = module.params['hip_profiles'] destination_zone = module.params['destination_zone'] destination_ip = module.params['destination_ip'] application = module.params['application'] service = module.params['service'] category = module.params['category'] log_start = module.params['log_start'] log_end = module.params['log_end'] action = module.params['action'] group_profile = module.params['group_profile'] antivirus = module.params['antivirus'] vulnerability = module.params['vulnerability'] spyware = module.params['spyware'] url_filtering = module.params['url_filtering'] file_blocking = module.params['file_blocking'] data_filtering = module.params['data_filtering'] wildfire_analysis = module.params['wildfire_analysis'] rule_type = module.params['rule_type'] devicegroup = module.params['devicegroup'] commit = module.params['commit'] # Create the device with the appropriate pandevice type device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key) # If Panorama, validate the devicegroup dev_group = None if devicegroup and isinstance(device, panorama.Panorama): dev_group = get_devicegroup(device, devicegroup) if dev_group: device.add(dev_group) else: module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup) # Get the rulebase rulebase = get_rulebase(device, dev_group) # Which action shall we take on the object? if operation == "find": # Search for the object match = find_rule(rulebase, rule_name) # If found, format and return the result if match: match_dict = xmltodict.parse(match.element_str()) module.exit_json( stdout_lines=json.dumps(match_dict, indent=2), msg='Rule matched' ) else: module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name) elif operation == "delete": # Search for the object match = find_rule(rulebase, rule_name) # If found, delete it if match: try: if commit: match.delete() except PanXapiError as exc: module.fail_json(msg=to_native(exc)) module.exit_json(changed=True, msg='Rule \'%s\' successfully deleted' % rule_name) else: module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' 
% rule_name) elif operation == "add": new_rule = create_security_rule( rule_name=rule_name, description=description, tag_name=tag_name, source_zone=source_zone, destination_zone=destination_zone, source_ip=source_ip, source_user=source_user, destination_ip=destination_ip, category=category, application=application, service=service, hip_profiles=hip_profiles, group_profile=group_profile, antivirus=antivirus, vulnerability=vulnerability, spyware=spyware, url_filtering=url_filtering, file_blocking=file_blocking, data_filtering=data_filtering, wildfire_analysis=wildfire_analysis, log_start=log_start, log_end=log_end, rule_type=rule_type, action=action ) # Search for the rule. Fail if found. match = find_rule(rulebase, rule_name) if match: if rule_is_match(match, new_rule): module.exit_json(changed=False, msg='Rule \'%s\' is already in place' % rule_name) else: module.fail_json(msg='Rule \'%s\' already exists. Use operation: \'update\' to change it.' % rule_name) else: try: changed = add_rule(rulebase, new_rule) if changed and commit: device.commit(sync=True) except PanXapiError as exc: module.fail_json(msg=to_native(exc)) module.exit_json(changed=changed, msg='Rule \'%s\' successfully added' % rule_name) elif operation == 'update': # Search for the rule. Update if found. match = find_rule(rulebase, rule_name) if match: try: new_rule = create_security_rule( rule_name=rule_name, description=description, tag_name=tag_name, source_zone=source_zone, destination_zone=destination_zone, source_ip=source_ip, source_user=source_user, destination_ip=destination_ip, category=category, application=application, service=service, hip_profiles=hip_profiles, group_profile=group_profile, antivirus=antivirus, vulnerability=vulnerability, spyware=spyware, url_filtering=url_filtering, file_blocking=file_blocking, data_filtering=data_filtering, wildfire_analysis=wildfire_analysis, log_start=log_start, log_end=log_end, rule_type=rule_type, action=action ) changed = update_rule(rulebase, new_rule) if changed and commit: device.commit(sync=True) except PanXapiError as exc: module.fail_json(msg=to_native(exc)) module.exit_json(changed=changed, msg='Rule \'%s\' successfully updated' % rule_name) else: module.fail_json(msg='Rule \'%s\' does not exist. Use operation: \'add\' to add it.' % rule_name) if __name__ == '__main__': main()
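A hedged sketch of using the module's helper functions directly with pandevice, outside Ansible; the host, credentials and rule name are placeholders:

from pandevice import firewall

fw = firewall.Firewall('192.0.2.10', 'admin', 'password')  # placeholders
rulebase = get_rulebase(fw, None)         # refreshes SecurityRule objects
rule = find_rule(rulebase, 'SSH permit')
if rule:
    print(rule.element_str())             # same XML the 'find' operation returns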
gpl-3.0
-489,197,606,165,063,700
-2,247,652,600,270,300,000
34.335116
141
0.60339
false
Jai-Chaudhary/termite-data-server
web2py/scripts/tickets2db.py
38
1391
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import os
import time
import stat
import datetime

from gluon.utils import md5_hash
from gluon.restricted import RestrictedError, TicketStorage
from gluon import DAL

SLEEP_MINUTES = 5

errors_path = os.path.join(request.folder, 'errors')

try:
    db_string = open(os.path.join(request.folder, 'private',
                     'ticket_storage.txt')).read().replace('\r', '').replace('\n', '').strip()
except:
    db_string = 'sqlite://storage.db'

db_path = os.path.join(request.folder, 'databases')

tk_db = DAL(db_string, folder=db_path, auto_import=True)
ts = TicketStorage(db=tk_db)
tk_table = ts._get_table(
    db=tk_db, tablename=ts.tablename, app=request.application)


hashes = {}

while 1:
    if request.tickets_db:
        print "You are already storing tickets in the database"
        sys.exit(1)
    for file in os.listdir(errors_path):
        filename = os.path.join(errors_path, file)

        modified_time = os.stat(filename)[stat.ST_MTIME]
        modified_time = datetime.datetime.fromtimestamp(modified_time)
        ticket_id = file
        ticket_data = open(filename).read()
        tk_table.insert(ticket_id=ticket_id,
                        ticket_data=ticket_data,
                        created_datetime=modified_time
                        )
        tk_db.commit()
        os.unlink(filename)

    time.sleep(SLEEP_MINUTES * 60)
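A hedged companion sketch: reading stored tickets back out through the same DAL handles the script sets up (run inside the same web2py environment):

rows = tk_db(tk_table.id > 0).select(orderby=~tk_table.created_datetime)
for row in rows:
    print('%s %s' % (row.ticket_id, row.created_datetime))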
bsd-3-clause
-4,174,154,504,113,759,000
-6,084,640,598,165,290,000
26.27451
134
0.641984
false
proxysh/Safejumper-for-Mac
buildlinux/env32/lib/python2.7/site-packages/cffi/cffi_opcode.py
64
5477
class CffiOp(object): def __init__(self, op, arg): self.op = op self.arg = arg def as_c_expr(self): if self.op is None: assert isinstance(self.arg, str) return '(_cffi_opcode_t)(%s)' % (self.arg,) classname = CLASS_NAME[self.op] return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg) def as_python_bytes(self): if self.op is None and self.arg.isdigit(): value = int(self.arg) # non-negative: '-' not in self.arg if value >= 2**31: raise OverflowError("cannot emit %r: limited to 2**31-1" % (self.arg,)) return format_four_bytes(value) if isinstance(self.arg, str): from .ffiplatform import VerificationError raise VerificationError("cannot emit to Python: %r" % (self.arg,)) return format_four_bytes((self.arg << 8) | self.op) def __str__(self): classname = CLASS_NAME.get(self.op, self.op) return '(%s %s)' % (classname, self.arg) def format_four_bytes(num): return '\\x%02X\\x%02X\\x%02X\\x%02X' % ( (num >> 24) & 0xFF, (num >> 16) & 0xFF, (num >> 8) & 0xFF, (num ) & 0xFF) OP_PRIMITIVE = 1 OP_POINTER = 3 OP_ARRAY = 5 OP_OPEN_ARRAY = 7 OP_STRUCT_UNION = 9 OP_ENUM = 11 OP_FUNCTION = 13 OP_FUNCTION_END = 15 OP_NOOP = 17 OP_BITFIELD = 19 OP_TYPENAME = 21 OP_CPYTHON_BLTN_V = 23 # varargs OP_CPYTHON_BLTN_N = 25 # noargs OP_CPYTHON_BLTN_O = 27 # O (i.e. a single arg) OP_CONSTANT = 29 OP_CONSTANT_INT = 31 OP_GLOBAL_VAR = 33 OP_DLOPEN_FUNC = 35 OP_DLOPEN_CONST = 37 OP_GLOBAL_VAR_F = 39 OP_EXTERN_PYTHON = 41 PRIM_VOID = 0 PRIM_BOOL = 1 PRIM_CHAR = 2 PRIM_SCHAR = 3 PRIM_UCHAR = 4 PRIM_SHORT = 5 PRIM_USHORT = 6 PRIM_INT = 7 PRIM_UINT = 8 PRIM_LONG = 9 PRIM_ULONG = 10 PRIM_LONGLONG = 11 PRIM_ULONGLONG = 12 PRIM_FLOAT = 13 PRIM_DOUBLE = 14 PRIM_LONGDOUBLE = 15 PRIM_WCHAR = 16 PRIM_INT8 = 17 PRIM_UINT8 = 18 PRIM_INT16 = 19 PRIM_UINT16 = 20 PRIM_INT32 = 21 PRIM_UINT32 = 22 PRIM_INT64 = 23 PRIM_UINT64 = 24 PRIM_INTPTR = 25 PRIM_UINTPTR = 26 PRIM_PTRDIFF = 27 PRIM_SIZE = 28 PRIM_SSIZE = 29 PRIM_INT_LEAST8 = 30 PRIM_UINT_LEAST8 = 31 PRIM_INT_LEAST16 = 32 PRIM_UINT_LEAST16 = 33 PRIM_INT_LEAST32 = 34 PRIM_UINT_LEAST32 = 35 PRIM_INT_LEAST64 = 36 PRIM_UINT_LEAST64 = 37 PRIM_INT_FAST8 = 38 PRIM_UINT_FAST8 = 39 PRIM_INT_FAST16 = 40 PRIM_UINT_FAST16 = 41 PRIM_INT_FAST32 = 42 PRIM_UINT_FAST32 = 43 PRIM_INT_FAST64 = 44 PRIM_UINT_FAST64 = 45 PRIM_INTMAX = 46 PRIM_UINTMAX = 47 _NUM_PRIM = 48 _UNKNOWN_PRIM = -1 _UNKNOWN_FLOAT_PRIM = -2 _UNKNOWN_LONG_DOUBLE = -3 _IO_FILE_STRUCT = -1 PRIMITIVE_TO_INDEX = { 'char': PRIM_CHAR, 'short': PRIM_SHORT, 'int': PRIM_INT, 'long': PRIM_LONG, 'long long': PRIM_LONGLONG, 'signed char': PRIM_SCHAR, 'unsigned char': PRIM_UCHAR, 'unsigned short': PRIM_USHORT, 'unsigned int': PRIM_UINT, 'unsigned long': PRIM_ULONG, 'unsigned long long': PRIM_ULONGLONG, 'float': PRIM_FLOAT, 'double': PRIM_DOUBLE, 'long double': PRIM_LONGDOUBLE, '_Bool': PRIM_BOOL, 'wchar_t': PRIM_WCHAR, 'int8_t': PRIM_INT8, 'uint8_t': PRIM_UINT8, 'int16_t': PRIM_INT16, 'uint16_t': PRIM_UINT16, 'int32_t': PRIM_INT32, 'uint32_t': PRIM_UINT32, 'int64_t': PRIM_INT64, 'uint64_t': PRIM_UINT64, 'intptr_t': PRIM_INTPTR, 'uintptr_t': PRIM_UINTPTR, 'ptrdiff_t': PRIM_PTRDIFF, 'size_t': PRIM_SIZE, 'ssize_t': PRIM_SSIZE, 'int_least8_t': PRIM_INT_LEAST8, 'uint_least8_t': PRIM_UINT_LEAST8, 'int_least16_t': PRIM_INT_LEAST16, 'uint_least16_t': PRIM_UINT_LEAST16, 'int_least32_t': PRIM_INT_LEAST32, 'uint_least32_t': PRIM_UINT_LEAST32, 'int_least64_t': PRIM_INT_LEAST64, 'uint_least64_t': PRIM_UINT_LEAST64, 'int_fast8_t': PRIM_INT_FAST8, 'uint_fast8_t': PRIM_UINT_FAST8, 'int_fast16_t': PRIM_INT_FAST16, 'uint_fast16_t': PRIM_UINT_FAST16, 
'int_fast32_t': PRIM_INT_FAST32, 'uint_fast32_t': PRIM_UINT_FAST32, 'int_fast64_t': PRIM_INT_FAST64, 'uint_fast64_t': PRIM_UINT_FAST64, 'intmax_t': PRIM_INTMAX, 'uintmax_t': PRIM_UINTMAX, } F_UNION = 0x01 F_CHECK_FIELDS = 0x02 F_PACKED = 0x04 F_EXTERNAL = 0x08 F_OPAQUE = 0x10 G_FLAGS = dict([('_CFFI_' + _key, globals()[_key]) for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED', 'F_EXTERNAL', 'F_OPAQUE']]) CLASS_NAME = {} for _name, _value in list(globals().items()): if _name.startswith('OP_') and isinstance(_value, int): CLASS_NAME[_value] = _name[3:]
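To make the encoding concrete, a short sketch of how one opcode serializes; the values follow directly from the constants defined above:

op = CffiOp(OP_PRIMITIVE, PRIM_INT)          # op 1, arg 7
print(op.as_c_expr())        # _CFFI_OP(_CFFI_OP_PRIMITIVE, 7)
print(op.as_python_bytes())  # \x00\x00\x07\x01, i.e. (7 << 8) | 1, big-endian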
gpl-2.0
6,804,674,230,901,391,000
-1,032,626,676,605,556,500
29.597765
78
0.509586
false
buckket/weltklang
lib/rfk/xmpp_bot.py
4
3793
import ast import sys import logging import click from redis import StrictRedis from redis.exceptions import ConnectionError from sleekxmpp import ClientXMPP from sleekxmpp.exceptions import XMPPError import rfk from rfk.helper import get_path import rfk.helper.daemonize class RfKBot(ClientXMPP): def __init__(self, jid, password): super(RfKBot, self).__init__(jid, password) self.add_event_handler('session_start', self.start) self.register_plugin('xep_0004') # Data Forms self.register_plugin('xep_0030') # Service Discovery self.register_plugin('xep_0060') # Publish-Subscribe self.register_plugin('xep_0115') # Entity Capabilities self.register_plugin('xep_0118') # User Tune self.register_plugin('xep_0128') # Service Discovery Extensions self.register_plugin('xep_0163') # Personal Eventing Protocol self.register_plugin('xep_0199') # XMPP Ping self.auto_authorize = True self.auto_subscribe = True def start(self, event): self.send_presence() self.get_roster() self['xep_0115'].update_caps() def send_messages(self, data): try: for recipient in data['recipients']: logging.info('Sending message to {}'.format(recipient)) self.send_message(recipient, data['message']) return True except (KeyError, XMPPError): return False def update_tune(self, data): try: if data['tune']: (artist, title) = (data['tune']['artist'], data['tune']['title']) logging.info('Updating tune: {} - {}'.format(artist, title)) self['xep_0118'].publish_tune(artist=artist, title=title) else: logging.info('Updating tune: None') self['xep_0118'].stop() return True except (KeyError, XMPPError): return False @click.command() @click.option('-j', '--jid', help='JID to use') @click.option('-p', '--password', help='password to use', hide_input=True) @click.option('-f', '--foreground', help='run in foreground', is_flag=True, default=False) def main(jid, password, foreground): rfk.init(enable_geoip=False) if not jid: jid = rfk.CONFIG.get('xmpp', 'jid') if not password: password = rfk.CONFIG.get('xmpp', 'password') if not foreground: rfk.helper.daemonize.createDaemon(get_path()) # Setup logging logging.basicConfig(level=logging.INFO, format='%(levelname)-8s %(message)s') # Setup XMPP instance xmpp = RfKBot(jid, password) # Connect to the XMPP server and start processing XMPP stanzas if xmpp.connect(): xmpp.process(block=False) def message_handler(message): if message and message['type'] == 'message': data = ast.literal_eval(message['data']) try: if data['type'] == 'message': xmpp.send_messages(data) elif data['type'] == 'tune': xmpp.update_tune(data) except (KeyError, TypeError) as err: logging.error('message_handler error: {}'.format(err)) try: redis_client = StrictRedis(host='localhost', port=6379, decode_responses=True) redis_pubsub = redis_client.pubsub(ignore_subscribe_messages=True) redis_pubsub.subscribe('rfk-xmpp') for message in redis_pubsub.listen(): message_handler(message) except (ConnectionError, KeyboardInterrupt): xmpp.disconnect(wait=True) return False else: return False if __name__ == '__main__': sys.exit(main())
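A hedged sketch of the producer side: message_handler expects a repr'd dict on the 'rfk-xmpp' channel (it decodes with ast.literal_eval), so a publisher looks like this; the artist, title and JID are illustrative:

from redis import StrictRedis

r = StrictRedis(host='localhost', port=6379)
r.publish('rfk-xmpp', repr({
    'type': 'tune',
    'tune': {'artist': 'Daft Punk', 'title': 'Harder Better Faster Stronger'},
}))
r.publish('rfk-xmpp', repr({
    'type': 'message',
    'recipients': ['user@example.org'],
    'message': 'Now playing!',
}))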
bsd-3-clause
-8,975,289,672,938,313,000
1,318,851,955,077,231,000
32.27193
90
0.594516
false
kobolabs/calibre
src/calibre/ebooks/pdb/header.py
24
2801
# -*- coding: utf-8 -*-

'''
Read the header data from a pdb file.
'''

__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'

import re
import struct
import time


class PdbHeaderReader(object):

    def __init__(self, stream):
        self.stream = stream
        self.ident = self.identity()
        self.num_sections = self.section_count()
        self.title = self.name()

    def identity(self):
        self.stream.seek(60)
        ident = self.stream.read(8)
        return ident

    def section_count(self):
        self.stream.seek(76)
        return struct.unpack('>H', self.stream.read(2))[0]

    def name(self):
        self.stream.seek(0)
        return re.sub('[^-A-Za-z0-9 ]+', '_', self.stream.read(32).replace('\x00', ''))

    def full_section_info(self, number):
        if number not in range(0, self.num_sections):
            raise ValueError('Not a valid section number %i' % number)

        self.stream.seek(78 + number * 8)
        # Each record entry is a 4-byte offset plus four attribute/id bytes;
        # unpack yields the full 5-tuple.
        offset, a1, a2, a3, a4 = struct.unpack('>LBBBB', self.stream.read(8))
        flags, val = a1, a2 << 16 | a3 << 8 | a4
        return (offset, flags, val)

    def section_offset(self, number):
        if number not in range(0, self.num_sections):
            raise ValueError('Not a valid section number %i' % number)

        self.stream.seek(78 + number * 8)
        return struct.unpack('>LBBBB', self.stream.read(8))[0]

    def section_data(self, number):
        if number not in range(0, self.num_sections):
            raise ValueError('Not a valid section number %i' % number)

        start = self.section_offset(number)
        if number == self.num_sections - 1:
            self.stream.seek(0, 2)
            end = self.stream.tell()
        else:
            end = self.section_offset(number + 1)
        self.stream.seek(start)
        return self.stream.read(end - start)


class PdbHeaderBuilder(object):

    def __init__(self, identity, title):
        # A PDB identity (type + creator) occupies 8 bytes, so pad to 8.
        self.identity = identity.ljust(8, '\x00')[:8]
        self.title = '%s\x00' % re.sub('[^-A-Za-z0-9 ]+', '_', title).ljust(31, '\x00')[:31].encode('ascii', 'replace')

    def build_header(self, section_lengths, out_stream):
        '''
        section_lengths = Length of each section in the file.
        '''
        now = int(time.time())
        nrecords = len(section_lengths)
        out_stream.write(self.title + struct.pack('>HHIIIIII', 0, 0, now, now, 0, 0, 0, 0))
        out_stream.write(self.identity + struct.pack('>IIH', nrecords, 0, nrecords))

        offset = 78 + (8 * nrecords) + 2
        for id, record in enumerate(section_lengths):
            out_stream.write(struct.pack('>LBBBB', long(offset), 0, 0, 0, 0))
            offset += record
        out_stream.write('\x00\x00')
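A hedged round-trip sketch (Python 2, matching the module): build a header for two sections, then read it back.

from cStringIO import StringIO

out = StringIO()
PdbHeaderBuilder('BOOKMOBI', 'My Title').build_header([100, 200], out)

reader = PdbHeaderReader(StringIO(out.getvalue()))
print(reader.ident)          # BOOKMOBI
print(reader.num_sections)   # 2
print(reader.title)          # My Title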
gpl-3.0
6,388,992,786,618,698,000
-4,997,156,897,150,903,000
31.195402
119
0.579436
false
sventech/YAK-server
test_project/test_app/tests/test_social.py
1
10812
from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.contenttypes.models import ContentType from django.core.urlresolvers import reverse from test_project.test_app.models import Post from test_project.test_app.tests.factories import UserFactory, PostFactory, CommentFactory from yak.rest_core.test import SchemaTestCase from yak.rest_social_network.models import Follow, Comment, Tag, Like User = get_user_model() class BaseAPITests(SchemaTestCase): def setUp(self): super(BaseAPITests, self).setUp() self.dev_user = UserFactory() class FlagTestCase(BaseAPITests): def test_users_can_flag_content(self): test_user = UserFactory() content_type = ContentType.objects.get_for_model(Post) flag_url = reverse('flag') data = { 'content_type': content_type.pk, 'object_id': PostFactory().pk } self.assertSchemaPost(flag_url, "$flagRequest", "$flagResponse", data, test_user) class ShareTestCase(BaseAPITests): def test_users_can_share_content(self): test_user = UserFactory() content_type = ContentType.objects.get_for_model(Post) shares_url = reverse('shares-list') data = { 'content_type': content_type.pk, 'object_id': PostFactory().pk, 'shared_with': [test_user.pk] } self.assertSchemaPost(shares_url, "$shareRequest", "$shareResponse", data, self.dev_user) def test_users_can_share_content_multiple_times(self): sharing_user = UserFactory() test_user = UserFactory() content_type = ContentType.objects.get_for_model(Post) shares_url = reverse('shares-list') data = { 'content_type': content_type.pk, 'object_id': PostFactory().pk, 'shared_with': [test_user.pk] } self.assertSchemaPost(shares_url, "$shareRequest", "$shareResponse", data, sharing_user) data['shared_with'] = [self.dev_user.pk] self.assertSchemaPost(shares_url, "$shareRequest", "$shareResponse", data, sharing_user) class LikeTestCase(BaseAPITests): def test_users_can_like_content(self): content_type = ContentType.objects.get_for_model(Post) likes_url = reverse('likes-list') data = { 'content_type': content_type.pk, 'object_id': PostFactory().pk, } self.assertSchemaPost(likes_url, "$likeRequest", "$likeResponse", data, self.dev_user) def test_liked_mixin(self): post = PostFactory() url = reverse("posts-detail", args=[post.pk]) like = Like.objects.create(content_type=ContentType.objects.get_for_model(Post), object_id=post.pk, user=self.dev_user) response = self.assertSchemaGet(url, None, "$postResponse", self.dev_user) self.assertEqual(response.data["liked_id"], like.pk) other_post = PostFactory() url = reverse("posts-detail", args=[other_post.pk]) response = self.assertSchemaGet(url, None, "$postResponse", self.dev_user) self.assertIsNone(response.data["liked_id"]) class CommentTestCase(BaseAPITests): def test_users_can_comment_on_content(self): content_type = ContentType.objects.get_for_model(Post) comments_url = reverse('comments-list') data = { 'content_type': content_type.pk, 'object_id': PostFactory().pk, 'description': 'This is a user comment.' } self.assertSchemaPost(comments_url, "$commentRequest", "$commentResponse", data, self.dev_user) def test_comment_related_tags(self): content_type = ContentType.objects.get_for_model(Post) Comment.objects.create(content_type=content_type, object_id=1, description='Testing of a hashtag. 
#django', user=self.dev_user) tags_url = reverse('tags-list') response = self.assertSchemaGet(tags_url, None, "$tagResponse", self.dev_user) self.assertEqual(response.data['results'][0]['name'], 'django') self.assertIsNotNone(Tag.objects.get(name='django')) def test_comments_for_specific_object(self): test_user = UserFactory() post_content_type = ContentType.objects.get_for_model(Post) post = PostFactory(user=test_user) comment = CommentFactory(content_type=post_content_type, object_id=post.pk) post2 = PostFactory(user=test_user) CommentFactory(content_type=post_content_type, object_id=post2.pk) url = reverse('comments-list') parameters = { 'content_type': post_content_type.pk, 'object_id': post.pk, } response = self.assertSchemaGet(url, parameters, "$commentResponse", self.dev_user) self.assertEqual(len(response.data["results"]), 1) self.assertEqual(response.data["results"][0]["id"], comment.pk) class UserFollowingTestCase(BaseAPITests): def test_user_can_follow_each_other(self): test_user1 = UserFactory() user_content_type = ContentType.objects.get_for_model(User) follow_url = reverse('follows-list') # Dev User to follow Test User 1 data = { 'content_type': user_content_type.pk, 'object_id': test_user1.pk } response = self.assertSchemaPost(follow_url, "$followRequest", "$followResponse", data, self.dev_user) self.assertEqual(response.data['following']['username'], test_user1.username) def test_following_endpoint(self): test_user1 = UserFactory() test_user2 = UserFactory() user_content_type = ContentType.objects.get_for_model(User) # Dev User to follow User 1, User 2 to follow Dev User Follow.objects.create(content_type=user_content_type, object_id=test_user1.pk, user=self.dev_user) Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=test_user2) following_url = reverse('users-following', args=[self.dev_user.pk]) response = self.assertSchemaGet(following_url, None, "$followResponse", self.dev_user) self.assertEqual(len(response.data), 1) self.assertEqual(response.data[0]['following']['username'], test_user1.username) def test_follower_endpoint(self): test_user1 = UserFactory() test_user2 = UserFactory() user_content_type = ContentType.objects.get_for_model(User) # Dev User to follow User 1, User 2 to follow Dev User Follow.objects.create(content_type=user_content_type, object_id=test_user1.pk, user=self.dev_user) Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=test_user2) followers_url = reverse('users-followers', args=[self.dev_user.pk]) response = self.assertSchemaGet(followers_url, None, "$followResponse", self.dev_user) self.assertEqual(len(response.data), 1) self.assertEqual(response.data[0]['follower']['username'], test_user2.username) def test_follow_pagination(self): user_content_type = ContentType.objects.get_for_model(User) for _ in range(0, 30): user = UserFactory() Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=user) followers_url = reverse('users-followers', args=[self.dev_user.pk]) response = self.assertSchemaGet(followers_url, None, "$followResponse", self.dev_user) self.assertEqual(len(response.data), settings.REST_FRAMEWORK['PAGE_SIZE']) response = self.assertSchemaGet(followers_url, {"page": 2}, "$followResponse", self.dev_user) self.assertEqual(len(response.data), 30 - settings.REST_FRAMEWORK['PAGE_SIZE']) def test_user_can_unfollow_user(self): follower = UserFactory() user_content_type = ContentType.objects.get_for_model(User) follow_object = 
Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=follower) follows_url = reverse('follows-detail', kwargs={'pk': follow_object.pk}) # If you are not the follower of the user, you cannot unfollow the user self.assertSchemaDelete(follows_url, self.dev_user, unauthorized=True) # If you are the follower of that user, you can unfollow the user self.assertSchemaDelete(follows_url, follower) # Check that original follow object no longer exists self.assertEqual(Follow.objects.filter(pk=follow_object.pk).exists(), False) def test_user_following_and_follower_count(self): follower1 = UserFactory() follower2 = UserFactory() following = UserFactory() user_content_type = ContentType.objects.get_for_model(User) # Follower setup Follow.objects.create(content_type=user_content_type, object_id=following.pk, user=self.dev_user) Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=follower1) Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=follower2) users_url = reverse('users-detail', kwargs={'pk': self.dev_user.pk}) response = self.assertSchemaGet(users_url, None, "$userResponse", self.dev_user) self.assertEqual(response.data['user_following_count'], 1) self.assertEqual(response.data['user_followers_count'], 2) def test_bulk_follow(self): user1 = UserFactory() user2 = UserFactory() url = reverse('follows-bulk-create') user_content_type = ContentType.objects.get_for_model(User) data = [ {'content_type': user_content_type.pk, 'object_id': user1.pk}, {'content_type': user_content_type.pk, 'object_id': user2.pk} ] self.assertSchemaPost(url, "$followRequest", "$followResponse", data, self.dev_user) self.assertEqual(user1.user_followers_count(), 1) self.assertEqual(user2.user_followers_count(), 1) def test_follow_id(self): follower = UserFactory() user_content_type = ContentType.objects.get_for_model(User) follow_object = Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=follower) url = reverse("users-detail", args=[self.dev_user.pk]) response = self.assertSchemaGet(url, None, "$userResponse", follower) self.assertEqual(response.data['follow_id'], follow_object.pk) not_follower = UserFactory() url = reverse("users-detail", args=[self.dev_user.pk]) response = self.assertSchemaGet(url, None, "$userResponse", not_follower) self.assertIsNone(response.data['follow_id'])
mit
-4,282,615,054,625,166,000
8,122,433,031,369,599,000
46.213974
120
0.654458
false
jwhonce/openshift-ansible
roles/lib_utils/library/oo_azure_rm_publish_image.py
3
10040
# pylint: disable=missing-docstring
# Copyright 2018 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function  # noqa: F401

# import httplib
import json
import os
import time

import requests

from ansible.module_utils.basic import AnsibleModule


class AzurePublisherException(Exception):
    '''Exception class for AzurePublisher'''
    pass


class AzurePublisher(object):
    '''Python class to represent the Azure Publishing portal
       https://cloudpartner.azure.com'''
    # pylint: disable=too-many-arguments
    def __init__(self, publisher_id, client_info, ssl_verify=True,
                 api_version='2017-10-31', debug=False):
        '''
        :publisher_id: string of the publisher id
        :client_info: a dict containing the client_id and client_secret
                      used to get an access_token
        '''
        self._azure_server = 'https://cloudpartner.azure.com/api/publishers/{}'.format(publisher_id)
        self.client_info = client_info
        self.ssl_verify = ssl_verify
        self.api_version = 'api-version={}'.format(api_version)
        self.debug = debug
        # if self.debug:
        #     import httplib
        #     httplib.HTTPSConnection.debuglevel = 1
        #     httplib.HTTPConnection.debuglevel = 1

        self._access_token = None

    @property
    def server(self):
        '''property for server url'''
        return self._azure_server

    @property
    def token(self):
        '''property for the access_token

        curl --data-urlencode "client_id=$AZURE_CLIENT_ID" \
             --data-urlencode "client_secret=$AZURE_CLIENT_SECRET" \
             --data-urlencode "grant_type=client_credentials" \
             --data-urlencode "resource=https://cloudpartner.azure.com" \
             https://login.microsoftonline.com/$AZURE_TENANT_ID/oauth2/token
        '''
        if self._access_token is None:
            url = 'https://login.microsoftonline.com/{}/oauth2/token'.format(self.client_info['tenant_id'])
            data = {
                # bug fix: the original wrapped this value in a set literal
                # ({...}), which cannot be encoded as form data.
                'client_id': self.client_info['client_id'],
                'client_secret': self.client_info['client_secret'],
                'grant_type': 'client_credentials',
                'resource': 'https://cloudpartner.azure.com'
            }

            results = AzurePublisher.request('POST', url, data, {})
            jres = results.json()
            self._access_token = jres['access_token']

        return self._access_token

    def get_offers(self, offer=None, version=None, slot='preview'):
        ''' fetch all offers by publisher id, or a single offer '''
        url = '/offers'

        if offer is not None:
            url += '/{}'.format(offer)

            if version is not None:
                url += '/versions/{}'.format(version)

            if slot == 'preview':
                url += '/slot/{}'.format(slot)

        url += '?{}'.format(self.api_version)

        return self.prepare_action(url)

    def get_operations(self, offer, operation=None, status=None):
        ''' fetch the submission operations for an offer '''
        url = '/offers/{0}/submissions'.format(offer)

        if operation is not None:
            url += '/operations/{0}'.format(operation)

        if not url.endswith('/'):
            url += '/'

        url += '?{0}'.format(self.api_version)

        if status is not None:
            url += '&status={0}'.format(status)

        return self.prepare_action(url, 'GET')

    def cancel_operation(self, offer):
        ''' cancel the current operation on an offer '''
        url = '/offers/{0}/cancel?{1}'.format(offer, self.api_version)

        return self.prepare_action(url, 'POST')

    def publish(self, offer, emails):
        ''' publish an offer '''
        url = '/offers/{0}/publish?{1}'.format(offer, self.api_version)

        data = {
            'metadata': {
                'notification-emails': ','.join(emails),
            }
        }

        return self.prepare_action(url, 'POST', data=data)

    def go_live(self, offer):
        ''' promote a published offer to go-live '''
        url = '/offers/{0}/golive?{1}'.format(offer, self.api_version)

        return self.prepare_action(url, 'POST')

    def create_or_modify_offer(self, offer, data=None, modify=False):
        ''' create or modify an offer '''
        url = '/offers/{0}?{1}'.format(offer, self.api_version)

        headers = None
        if modify:
            headers = {
                'If-Match': '*',
            }

        return self.prepare_action(url, 'PUT', data=data, add_headers=headers)

    def prepare_action(self, url, action='GET', data=None, add_headers=None):
        '''perform the http request

        :action: string of either GET|POST
        '''
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Authorization': 'Bearer {}'.format(self.token)
        }

        if add_headers is not None:
            headers.update(add_headers)

        if data is None:
            data = ''
        else:
            data = json.dumps(data)

        return AzurePublisher.request(action.upper(), self.server + url, data, headers)

    def cancel_and_wait_for_operation(self, params):
        '''cancel the current publish operation and wait for it to complete'''

        # cancel the publish operation
        self.cancel_operation(offer=params['offer'])

        # we need to wait here for 'submissionState' to move to 'canceled'
        while True:
            # fetch operations
            ops = self.get_operations(params['offer'])
            if self.debug:
                print(ops.json())
            if ops.json()[0]['submissionState'] == 'canceled':
                break
            time.sleep(5)

        return ops

    def manage_offer(self, params):
        ''' handle creating or modifying offers '''
        # fetch the offer to verify it exists:
        results = self.get_offers(offer=params['offer'])

        if results.status_code == 200 and params['force']:
            return self.create_or_modify_offer(offer=params['offer'],
                                               data=params['offer_data'],
                                               modify=True)

        return self.create_or_modify_offer(offer=params['offer'], data=params['offer_data'])

    @staticmethod
    def request(action, url, data=None, headers=None, ssl_verify=True):
        req = requests.Request(action.upper(), url, data=data, headers=headers)
        session = requests.Session()
        req_prep = session.prepare_request(req)
        response = session.send(req_prep, verify=ssl_verify)

        return response

    @staticmethod
    def run_ansible(params):
        '''perform the ansible operations'''
        client_info = {
            'tenant_id': params['tenant_id'],
            'client_id': params['client_id'],
            'client_secret': params['client_secret']}

        apc = AzurePublisher(params['publisher'],
                             client_info,
                             debug=params['debug'])

        if params['state'] == 'offer':
            results = apc.manage_offer(params)
        elif params['state'] == 'publish':
            results = apc.publish(offer=params['offer'], emails=params['emails'])
            results.json = lambda: ''
        elif params['state'] == 'cancel_op':
            results = apc.cancel_and_wait_for_operation(params)
        elif params['state'] == 'go_live':
            results = apc.go_live(offer=params['offer'])
        else:
            raise AzurePublisherException('Unsupported query type: {}'.format(params['state']))

        changed = False
        if results.status_code in [200, 201, 202]:
            changed = True

        return {'data': results.json(), 'changed': changed, 'status_code': results.status_code}


# pylint: disable=too-many-branches
def main():
    ''' ansible module for interacting with the Azure publishing portal '''

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='offer', choices=['offer', 'cancel_op', 'go_live', 'publish']),
            force=dict(default=False, type='bool'),
            publisher=dict(default='redhat', type='str'),
            debug=dict(default=False, type='bool'),
            tenant_id=dict(default=os.environ.get('AZURE_TENANT_ID'), type='str'),
            client_id=dict(default=os.environ.get('AZURE_CLIENT_ID'), type='str'),
            client_secret=dict(default=os.environ.get('AZURE_CLIENT_SECRET'), type='str'),
            offer=dict(default=None, type='str'),
            offer_data=dict(default=None, type='dict'),
            emails=dict(default=None, type='list'),
        ),
        required_if=[
            ["state", "offer", ["offer_data"]],
        ],
    )

    # Verify we received credentials, either passed in or from the environment.
    # A valid key being not None or not ''.
    if (module.params['tenant_id'] is None or
            module.params['client_id'] is None or
            module.params['client_secret'] is None):
        return module.fail_json(**{'failed': True,
                                   'msg': 'Please specify tenant_id, client_id, and client_secret'})

    rval = AzurePublisher.run_ansible(module.params)
    if int(rval['status_code']) >= 300:
        rval['msg'] = 'Failed. status_code {}'.format(rval['status_code'])
        return module.fail_json(**rval)

    return module.exit_json(**rval)


if __name__ == '__main__':
    main()
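# --- Editor's note: hedged usage sketch, not part of the original module. ---
# A minimal illustration of driving AzurePublisher directly (outside Ansible),
# assuming the three AZURE_* environment variables are set. The publisher id
# 'redhat' matches the module default; the offer id 'osa' is hypothetical.
#
#     client_info = {
#         'tenant_id': os.environ['AZURE_TENANT_ID'],
#         'client_id': os.environ['AZURE_CLIENT_ID'],
#         'client_secret': os.environ['AZURE_CLIENT_SECRET'],
#     }
#     apc = AzurePublisher('redhat', client_info)
#     resp = apc.get_offers(offer='osa')  # GET .../offers/osa/slot/preview
#     print(resp.status_code, resp.json())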
apache-2.0
-5,332,874,357,817,418,000
-6,049,679,010,468,711,000
33.62069
109
0.582072
false
asimshankar/tensorflow
tensorflow/python/debug/cli/base_ui.py
89
7715
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Class of TensorFlow Debugger (tfdbg) Command-Line Interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse

from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common


class BaseUI(object):
  """Base class of tfdbg user interface."""

  CLI_PROMPT = "tfdbg> "
  CLI_EXIT_COMMANDS = ["exit", "quit"]
  ERROR_MESSAGE_PREFIX = "ERROR: "
  INFO_MESSAGE_PREFIX = "INFO: "

  def __init__(self, on_ui_exit=None, config=None):
    """Constructor of the base class.

    Args:
      on_ui_exit: (`Callable`) the callback to be called when the UI exits.
      config: An instance of `cli_config.CLIConfig()` carrying user-facing
        configurations.
    """

    self._on_ui_exit = on_ui_exit

    self._command_handler_registry = (
        debugger_cli_common.CommandHandlerRegistry())

    self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()

    # Create top-level tab-completion context and register the exit and help
    # commands.
    self._tab_completion_registry.register_tab_comp_context(
        [""], self.CLI_EXIT_COMMANDS +
        [debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
        debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)

    self._config = config or cli_config.CLIConfig()
    self._config_argparser = argparse.ArgumentParser(
        description="config command", usage=argparse.SUPPRESS)
    subparsers = self._config_argparser.add_subparsers()
    set_parser = subparsers.add_parser("set")
    set_parser.add_argument("property_name", type=str)
    set_parser.add_argument("property_value", type=str)
    set_parser = subparsers.add_parser("show")
    self.register_command_handler(
        "config",
        self._config_command_handler,
        self._config_argparser.format_help(),
        prefix_aliases=["cfg"])

  def set_help_intro(self, help_intro):
    """Set an introductory message to the help output of the command registry.

    Args:
      help_intro: (RichTextLines) Rich text lines appended to the beginning of
        the output of the command "help", as introductory information.
    """

    self._command_handler_registry.set_help_intro(help_intro=help_intro)

  def register_command_handler(self,
                               prefix,
                               handler,
                               help_info,
                               prefix_aliases=None):
    """A wrapper around CommandHandlerRegistry.register_command_handler().

    In addition to calling the wrapped register_command_handler() method, this
    method also registers the top-level tab-completion context based on the
    command prefixes and their aliases.

    See the doc string of the wrapped method for more details on the args.

    Args:
      prefix: (str) command prefix.
      handler: (callable) command handler.
      help_info: (str) help information.
      prefix_aliases: (list of str) aliases of the command prefix.
    """

    self._command_handler_registry.register_command_handler(
        prefix, handler, help_info, prefix_aliases=prefix_aliases)

    self._tab_completion_registry.extend_comp_items("", [prefix])
    if prefix_aliases:
      self._tab_completion_registry.extend_comp_items("", prefix_aliases)

  def register_tab_comp_context(self, *args, **kwargs):
    """Wrapper around TabCompletionRegistry.register_tab_comp_context()."""

    self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)

  def run_ui(self,
             init_command=None,
             title=None,
             title_color=None,
             enable_mouse_on_start=True):
    """Run the UI until user- or command- triggered exit.

    Args:
      init_command: (str) Optional command to run on CLI start up.
      title: (str) Optional title to display in the CLI.
      title_color: (str) Optional color of the title, e.g., "yellow".
      enable_mouse_on_start: (bool) Whether the mouse mode is to be enabled on
        start-up.

    Returns:
      An exit token of arbitrary type. Can be None.
    """

    raise NotImplementedError("run_ui() is not implemented in BaseUI")

  def _parse_command(self, command):
    """Parse a command string into prefix and arguments.

    Args:
      command: (str) Command string to be parsed.

    Returns:
      prefix: (str) The command prefix.
      args: (list of str) The command arguments (i.e., not including the
        prefix).
      output_file_path: (str or None) The path to save the screen output
        to (if any).
    """
    command = command.strip()
    if not command:
      return "", [], None

    command_items = command_parser.parse_command(command)
    command_items, output_file_path = command_parser.extract_output_file_path(
        command_items)

    return command_items[0], command_items[1:], output_file_path

  def _analyze_tab_complete_input(self, text):
    """Analyze raw input to tab-completer.

    Args:
      text: (str) the full, raw input text to be tab-completed.

    Returns:
      context: (str) the context str. For example,
        If text == "print_tensor softmax", returns "print_tensor".
        If text == "print", returns "".
        If text == "", returns "".
      prefix: (str) the prefix to be tab-completed, from the last word.
        For example, if text == "print_tensor softmax", returns "softmax".
        If text == "print", returns "print".
        If text == "", returns "".
      except_last_word: (str) the input text, except the last word.
        For example, if text == "print_tensor softmax", returns "print_tensor".
        If text == "print_tensor -a softmax", returns "print_tensor -a".
        If text == "print", returns "".
        If text == "", returns "".
    """
    text = text.lstrip()
    if not text:
      # Empty (top-level) context.
      context = ""
      prefix = ""
      except_last_word = ""
    else:
      items = text.split(" ")
      if len(items) == 1:
        # Single word: top-level context.
        context = ""
        prefix = items[0]
        except_last_word = ""
      else:
        # Multiple words.
        context = items[0]
        prefix = items[-1]
        except_last_word = " ".join(items[:-1]) + " "

    return context, prefix, except_last_word

  @property
  def config(self):
    """Obtain the CLIConfig of this `BaseUI` instance."""
    return self._config

  def _config_command_handler(self, args, screen_info=None):
    """Command handler for the "config" command."""
    del screen_info  # Currently unused.

    parsed = self._config_argparser.parse_args(args)
    if hasattr(parsed, "property_name") and hasattr(parsed, "property_value"):
      # set.
      self._config.set(parsed.property_name, parsed.property_value)
      return self._config.summarize(highlight=parsed.property_name)
    else:
      # show.
      return self._config.summarize()
apache-2.0
-3,713,981,349,140,820,000
2,074,468,352,344,098,000
34.883721
80
0.646533
false
blueshed/blueshed-py
src/blueshed/handlers/basic_auth.py
1
1716
'''
Created on 30 Jan 2014

@author: peterb
'''
import base64
import logging


class BasicAuthMixin(object):
    """
    BasicAuthMixin
    """

    def _request_auth(self, realm):
        if self._headers_written:
            raise Exception('headers have already been written')
        self.set_status(401)
        self.set_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
        self.finish()
        return False

    def get_authenticated_user(self, auth_func, realm):
        """Requests HTTP basic authentication credentials from the client, or
        authenticates the user if credentials are provided."""
        try:
            auth = self.request.headers.get('Authorization')
            if auth is None:
                return self._request_auth(realm)
            if not auth.startswith('Basic '):
                return self._request_auth(realm)
            # base64.decodestring is the Python 2 spelling; on Python 3 this
            # would be base64.decodebytes.
            auth_decoded = base64.decodestring(auth[6:])
            username, password = auth_decoded.split(':', 1)
            if auth_func(self, realm, username, password):
                return True
            else:
                return self._request_auth(realm)
        except Exception:
            logging.exception('basic-auth')
            return self._request_auth(realm)


def basic_auth(realm, auth_func):
    """A decorator that can be used on methods that you wish to protect with
    HTTP basic authentication."""
    def basic_auth_decorator(func):
        def func_replacement(self, *args, **kwargs):
            if self.get_authenticated_user(auth_func, realm):
                return func(self, *args, **kwargs)
        return func_replacement
    return basic_auth_decorator
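# --- Editor's note: hedged usage sketch, not part of the original file. ---
# The mixin calls set_status/set_header/finish and reads self.request.headers,
# so it appears to target a tornado.web.RequestHandler. A minimal protected
# handler might look like this (the realm and credentials are demo values):
#
#     import tornado.web
#
#     def check(handler, realm, username, password):
#         return username == 'admin' and password == 'secret'
#
#     class SecureHandler(BasicAuthMixin, tornado.web.RequestHandler):
#         @basic_auth('my-realm', check)
#         def get(self):
#             self.write('authenticated')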
mit
3,140,551,342,061,185,500
-3,853,713,322,337,761,000
29.642857
86
0.578089
false
stefanocasazza/FrameworkBenchmarks
frameworks/Python/web2py/app/standard/modules/controller.py
24
1725
# -*- coding: utf-8 -*-

from random import randint
from functools import partial
import json as jsonOut

from gluon import current

from database import Dal, RawDal, num_queries


def plaintext():
    current.response.headers['Content-Type'] = 'text/plain'
    return 'Hello, World!'


def json():
    current.response.headers['Content-Type'] = 'application/json'
    return jsonOut.dumps({'message': 'Hello, World!'})


def db():
    current.response.headers['Content-Type'] = 'application/json'
    return jsonOut.dumps(Dal('World').get_world(randint(1, 10000)))


def queries():
    current.response.headers['Content-Type'] = 'application/json'
    db = RawDal() if current.optimized else Dal('World')
    get_world = db.get_world
    r10k = partial(randint, 1, 10000)
    worlds = [get_world(r10k()) for _ in
              xrange(num_queries(current.request.vars.queries))]
    return jsonOut.dumps(worlds)


def updates():
    current.response.headers['Content-Type'] = 'application/json'
    db = RawDal() if current.optimized else Dal('World')
    get_world = db.get_world
    update_world = db.update_world
    r10k = partial(randint, 1, 10000)
    worlds = []
    for wid in (r10k() for _ in xrange(num_queries(current.request.vars.queries))):
        world = get_world(wid)
        newNumber = r10k()
        world['randomNumber'] = newNumber
        worlds.append(world)
        update_world(wid, newNumber)
    return jsonOut.dumps(worlds)


def fortune():
    new_message = {'id': 0, 'message': 'Additional fortune added at request time.'}
    db = RawDal() if current.optimized else Dal('Fortune')
    fortunes = db.get_fortunes(new_message=new_message)
    return current.response.render('fortune.html', fortunes=fortunes)
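# --- Editor's note: hedged usage sketch, not part of the original file. ---
# web2py maps controller functions to URLs through the app's routing, so the
# exact path depends on the deployment; the URL below is illustrative only.
#
#     import requests
#     r = requests.get('http://localhost:8080/standard/default/queries',
#                      params={'queries': 5})
#     print(r.json())  # expect a list of five {'id': ..., 'randomNumber': ...}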
bsd-3-clause
1,366,638,955,515,204,600
-778,588,002,548,017,900
34.204082
83
0.677101
false