Dataset schema (field, type, observed range / distinct values):

  repo_name       string    length 5–100
  path            string    length 4–299
  copies          string    990 distinct values
  size            string    length 4–7
  content         string    length 666–1.03M
  license         string    15 distinct values
  hash            int64     -9,223,351,895,964,839,000 to 9,223,297,778B
  line_mean       float64   3.17–100
  line_max        int64     7–1k
  alpha_frac      float64   0.25–0.98
  autogenerated   bool      1 class
hgl888/linux
scripts/gdb/linux/utils.py
509
4833
#
# gdb helper commands and functions for Linux kernel debugging
#
#  common utilities
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
#  Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#

import gdb


class CachedType:
    def __init__(self, name):
        self._type = None
        self._name = name

    def _new_objfile_handler(self, event):
        self._type = None
        gdb.events.new_objfile.disconnect(self._new_objfile_handler)

    def get_type(self):
        if self._type is None:
            self._type = gdb.lookup_type(self._name)
            if self._type is None:
                raise gdb.GdbError(
                    "cannot resolve type '{0}'".format(self._name))
            if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
                gdb.events.new_objfile.connect(self._new_objfile_handler)
        return self._type


long_type = CachedType("long")


def get_long_type():
    global long_type
    return long_type.get_type()


def offset_of(typeobj, field):
    element = gdb.Value(0).cast(typeobj)
    return int(str(element[field].address).split()[0], 16)


def container_of(ptr, typeobj, member):
    return (ptr.cast(get_long_type()) -
            offset_of(typeobj, member)).cast(typeobj)


class ContainerOf(gdb.Function):
    """Return pointer to containing data structure.

$container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
data structure of the type TYPE in which PTR is the address of ELEMENT.
Note that TYPE and ELEMENT have to be quoted as strings."""

    def __init__(self):
        super(ContainerOf, self).__init__("container_of")

    def invoke(self, ptr, typename, elementname):
        return container_of(ptr,
                            gdb.lookup_type(typename.string()).pointer(),
                            elementname.string())


ContainerOf()


BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
target_endianness = None


def get_target_endianness():
    global target_endianness
    if target_endianness is None:
        endian = gdb.execute("show endian", to_string=True)
        if "little endian" in endian:
            target_endianness = LITTLE_ENDIAN
        elif "big endian" in endian:
            target_endianness = BIG_ENDIAN
        else:
            raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
    return target_endianness


def read_memoryview(inf, start, length):
    return memoryview(inf.read_memory(start, length))


def read_u16(buffer):
    value = [0, 0]

    if type(buffer[0]) is str:
        value[0] = ord(buffer[0])
        value[1] = ord(buffer[1])
    else:
        value[0] = buffer[0]
        value[1] = buffer[1]

    if get_target_endianness() == LITTLE_ENDIAN:
        return value[0] + (value[1] << 8)
    else:
        return value[1] + (value[0] << 8)


def read_u32(buffer):
    if get_target_endianness() == LITTLE_ENDIAN:
        return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
    else:
        return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)


def read_u64(buffer):
    if get_target_endianness() == LITTLE_ENDIAN:
        return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
    else:
        return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)


target_arch = None


def is_target_arch(arch):
    if hasattr(gdb.Frame, 'architecture'):
        return arch in gdb.newest_frame().architecture().name()
    else:
        global target_arch
        if target_arch is None:
            target_arch = gdb.execute("show architecture", to_string=True)
        return arch in target_arch


GDBSERVER_QEMU = 0
GDBSERVER_KGDB = 1
gdbserver_type = None


def get_gdbserver_type():
    def exit_handler(event):
        global gdbserver_type
        gdbserver_type = None
        gdb.events.exited.disconnect(exit_handler)

    def probe_qemu():
        try:
            return gdb.execute("monitor info version", to_string=True) != ""
        except:
            return False

    def probe_kgdb():
        try:
            thread_info = gdb.execute("info thread 2", to_string=True)
            return "shadowCPU0" in thread_info
        except:
            return False

    global gdbserver_type
    if gdbserver_type is None:
        if probe_qemu():
            gdbserver_type = GDBSERVER_QEMU
        elif probe_kgdb():
            gdbserver_type = GDBSERVER_KGDB
        if gdbserver_type is not None and hasattr(gdb, 'events'):
            gdb.events.exited.connect(exit_handler)
    return gdbserver_type


def gdb_eval_or_none(expression):
    try:
        return gdb.parse_and_eval(expression)
    except:
        return None


def dentry_name(d):
    parent = d['d_parent']
    if parent == d or parent == 0:
        return ""
    p = dentry_name(d['d_parent']) + "/"
    return p + d['d_iname'].string()
gpl-2.0
-1,860,262,912,401,114,600
25.266304
78
0.609766
false
hehongliang/tensorflow
tensorflow/python/data/experimental/kernel_tests/function_buffering_resource_test.py
4
9699
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `FunctionBufferingResource` used in prefetching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import threading

from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test


class FunctionBufferingResourceTest(test_base.DatasetTestBase):

  def setUp(self):
    self._event = threading.Event()

  def _create_ds_and_iterator(self, device0, initializable=False):

    def gen():
      for i in range(1, 10):
        yield [float(i)]
        if i == 6:
          self._event.set()

    with ops.device(device0):
      ds = dataset_ops.Dataset.from_generator(gen, (dtypes.float32))
      if initializable:
        ds_iterator = ds.make_initializable_iterator()
      else:
        ds_iterator = ds.make_one_shot_iterator()
    return (ds, ds_iterator)

  def _create_ops(self, ds, ds_iterator, buffer_name, device0, device1):
    ds_iterator_handle = ds_iterator.string_handle()

    @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
    def _remote_fn(h):
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          h, ds.output_types, ds.output_shapes)
      return remote_iterator.get_next()

    target = constant_op.constant(device0)
    with ops.device(device1):
      buffer_resource_handle = prefetching_ops.function_buffering_resource(
          f=_remote_fn.get_concrete_function(),
          output_types=[dtypes.float32],
          target_device=target,
          string_arg=ds_iterator_handle,
          buffer_size=3,
          shared_name=buffer_name)

    with ops.device(device1):
      prefetch_op = prefetching_ops.function_buffering_resource_get_next(
          function_buffer_resource=buffer_resource_handle,
          output_types=[dtypes.float32])
      reset_op = prefetching_ops.function_buffering_resource_reset(
          function_buffer_resource=buffer_resource_handle)
      destroy_op = resource_variable_ops.destroy_resource_op(
          buffer_resource_handle, ignore_lookup_error=True)

    return (prefetch_op, reset_op, destroy_op)

  def _prefetch_fn_helper_one_shot(self, buffer_name, device0, device1):
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})

    ds, ds_iterator = self._create_ds_and_iterator(device0, initializable=False)
    prefetch_op, _, destroy_op = self._create_ops(ds, ds_iterator, buffer_name,
                                                  device0, device1)

    with self.test_session(config=worker_config) as sess:
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [1.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [2.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [3.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [4.0])
      self._event.wait()
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [5.0])
      sess.run(destroy_op)

  def testSameDeviceCPU(self):
    self._prefetch_fn_helper_one_shot("same_device_cpu",
                                      "/job:localhost/replica:0/task:0/cpu:0",
                                      "/job:localhost/replica:0/task:0/cpu:0")

  def testDifferentDeviceCPU(self):
    self._prefetch_fn_helper_one_shot("diff_device_cpu",
                                      "/job:localhost/replica:0/task:0/cpu:0",
                                      "/job:localhost/replica:0/task:0/cpu:1")

  def testDifferentDeviceCPUGPU(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    self._prefetch_fn_helper_one_shot("cpu_gpu",
                                      "/job:localhost/replica:0/task:0/cpu:0",
                                      "/job:localhost/replica:0/task:0/gpu:0")

  def testReinitialization(self):
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})

    device0 = "/job:localhost/replica:0/task:0/cpu:0"
    device1 = "/job:localhost/replica:0/task:0/cpu:1"
    ds, ds_iterator = self._create_ds_and_iterator(device0, initializable=True)
    prefetch_op, reset_op, destroy_op = self._create_ops(
        ds, ds_iterator, "reinit", device0, device1)

    with self.test_session(config=worker_config) as sess:
      sess.run(ds_iterator.initializer)
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [1.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [2.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [3.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [4.0])
      self._event.wait()
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [5.0])
      # Let's reset the function buffering resource and reinitialize the
      # iterator. Should be able to go through this again.
      self._event.clear()
      sess.run(reset_op)
      sess.run(ds_iterator.initializer)
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [1.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [2.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [3.0])
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [4.0])
      self._event.wait()
      elem = sess.run(prefetch_op)
      self.assertEqual(elem, [5.0])
      sess.run(destroy_op)

  def testReinitializationOutOfRange(self):
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})

    device0 = "/job:localhost/replica:0/task:0/cpu:0"
    device1 = "/job:localhost/replica:0/task:0/cpu:1"
    ds, ds_iterator = self._create_ds_and_iterator(device0, initializable=True)
    prefetch_op, reset_op, destroy_op = self._create_ops(
        ds, ds_iterator, "reinit", device0, device1)

    with self.test_session(config=worker_config) as sess:
      sess.run(ds_iterator.initializer)
      for i in range(1, 10):
        elem = sess.run(prefetch_op)
        self.assertEqual(elem, [float(i)])
      # Try fetching after it's over twice to test out end of sequence.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(prefetch_op)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(prefetch_op)

      # Now reset everything and try it out again.
      self._event.clear()
      sess.run(reset_op)
      sess.run(ds_iterator.initializer)
      for i in range(1, 10):
        elem = sess.run(prefetch_op)
        self.assertEqual(elem, [float(i)])
      # Try fetching after it's over twice to test out end of sequence.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(prefetch_op)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(prefetch_op)

      sess.run(destroy_op)

  def testStringsGPU(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    device0 = "/job:localhost/replica:0/task:0/cpu:0"
    device1 = "/job:localhost/replica:0/task:0/gpu:0"

    ds = dataset_ops.Dataset.from_tensor_slices(["a", "b", "c"])
    ds_iterator = ds.make_one_shot_iterator()
    ds_iterator_handle = ds_iterator.string_handle()

    @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
    def _remote_fn(h):
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          h, ds.output_types, ds.output_shapes)
      return remote_iterator.get_next()

    target = constant_op.constant(device0)
    with ops.device(device1):
      buffer_resource_handle = prefetching_ops.function_buffering_resource(
          f=_remote_fn.get_concrete_function(),
          output_types=[dtypes.string],
          target_device=target,
          string_arg=ds_iterator_handle,
          buffer_size=3,
          shared_name="strings")

    with ops.device(device1):
      prefetch_op = prefetching_ops.function_buffering_resource_get_next(
          function_buffer_resource=buffer_resource_handle,
          output_types=[dtypes.string])
      destroy_op = resource_variable_ops.destroy_resource_op(
          buffer_resource_handle, ignore_lookup_error=True)

    with self.cached_session() as sess:
      self.assertEqual([b"a"], sess.run(prefetch_op))
      self.assertEqual([b"b"], sess.run(prefetch_op))
      self.assertEqual([b"c"], sess.run(prefetch_op))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(prefetch_op)

      sess.run(destroy_op)


if __name__ == "__main__":
  test.main()
apache-2.0
1,363,879,257,170,505,000
38.108871
80
0.657181
false
yotchang4s/cafebabepy
src/main/python/json/scanner.py
106
2416
"""JSON token scanner """ import re try: from _json import make_scanner as c_make_scanner except ImportError: c_make_scanner = None __all__ = ['make_scanner'] NUMBER_RE = re.compile( r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', (re.VERBOSE | re.MULTILINE | re.DOTALL)) def py_make_scanner(context): parse_object = context.parse_object parse_array = context.parse_array parse_string = context.parse_string match_number = NUMBER_RE.match strict = context.strict parse_float = context.parse_float parse_int = context.parse_int parse_constant = context.parse_constant object_hook = context.object_hook object_pairs_hook = context.object_pairs_hook memo = context.memo def _scan_once(string, idx): try: nextchar = string[idx] except IndexError: raise StopIteration(idx) if nextchar == '"': return parse_string(string, idx + 1, strict) elif nextchar == '{': return parse_object((string, idx + 1), strict, _scan_once, object_hook, object_pairs_hook, memo) elif nextchar == '[': return parse_array((string, idx + 1), _scan_once) elif nextchar == 'n' and string[idx:idx + 4] == 'null': return None, idx + 4 elif nextchar == 't' and string[idx:idx + 4] == 'true': return True, idx + 4 elif nextchar == 'f' and string[idx:idx + 5] == 'false': return False, idx + 5 m = match_number(string, idx) if m is not None: integer, frac, exp = m.groups() if frac or exp: res = parse_float(integer + (frac or '') + (exp or '')) else: res = parse_int(integer) return res, m.end() elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': return parse_constant('NaN'), idx + 3 elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': return parse_constant('Infinity'), idx + 8 elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': return parse_constant('-Infinity'), idx + 9 else: raise StopIteration(idx) def scan_once(string, idx): try: return _scan_once(string, idx) finally: memo.clear() return _scan_once make_scanner = c_make_scanner or py_make_scanner
bsd-3-clause
8,870,081,426,801,974,000
32.09589
71
0.552152
false
easyw/kicad-3d-models-in-freecad
cadquery/FCAD_script_generator/jst/main_generator.py
1
13014
# -*- coding: utf8 -*-
#!/usr/bin/python
#
# This was originally derived from a cadquery script for generating PDIP models in X3D format
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
#
# Adapted by easyw for STEP and VRML export
# See https://github.com/easyw/kicad-3d-models-in-freecad

## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module

## to run the script just do: freecad scriptName modelName
## e.g. FreeCAD export_conn_jst_xh.py all

## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script

#* These are FreeCAD & cadquery tools                                       *
#* to export generated models in STEP & VRML format.                        *
#*                                                                          *
#* cadquery script for generating JST-XH models in STEP AP214               *
#* Copyright (c) 2016                                                       *
#* Rene Poeschl https://github.com/poeschlr                                 *
#* All trademarks within this guide belong to their legitimate owners.      *
#*                                                                          *
#* This program is free software; you can redistribute it and/or modify     *
#* it under the terms of the GNU General Public License (GPL)               *
#* as published by the Free Software Foundation; either version 2 of        *
#* the License, or (at your option) any later version.                      *
#* for detail see the LICENCE text file.                                    *
#*                                                                          *
#* This program is distributed in the hope that it will be useful,          *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of           *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the             *
#* GNU Library General Public License for more details.                     *
#*                                                                          *
#* You should have received a copy of the GNU Library General Public        *
#* License along with this program; if not, write to the Free Software      *
#* Foundation, Inc.,                                                        *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA              *
#*                                                                          *
#****************************************************************************

__title__ = "main generator for molex connector models"
__author__ = "scripts: maurice and hyOzd; models: see cq_model files"
__Comment__ = '''This generator loads cadquery model scripts and generates step/wrl files for the official kicad library.'''

___ver___ = "1.2 03/12/2017"

save_memory = True  # reducing memory consumption for all generation params
check_Model = True
stop_on_first_error = True
check_log_file = 'check-log.md'
global_3dpath = '../_3Dmodels/'

import sys, os
import traceback
import datetime
from datetime import datetime
from math import sqrt
from collections import namedtuple

sys.path.append("../_tools")
import exportPartToVRML as expVRML
import shaderColors
import add_license as L

import re, fnmatch
import yaml

save_memory = True  # reducing memory consumption for all generation params
check_Model = True
check_log_file = 'check-log.md'

if FreeCAD.GuiUp:
    from PySide import QtCore, QtGui

try:
    # Gui.SendMsgToActiveView("Run")
    # from Gui.Command import *
    Gui.activateWorkbench("CadQueryWorkbench")
    import cadquery as cq
    from Helpers import show
    # CadQuery Gui
except Exception as e:  # catch *all* exceptions
    print(e)
    msg = "missing CadQuery 0.3.0 or later Module!\r\n\r\n"
    msg += "https://github.com/jmwright/cadquery-freecad-module/wiki\n"
    if QtGui is not None:
        reply = QtGui.QMessageBox.information(None, "Info ...", msg)

#######################################################################

#from Gui.Command import *

# Import cad_tools
#sys.path.append("../_tools")
from cqToolsExceptions import *
import cq_cad_tools
# Reload tools
reload(cq_cad_tools)
# Explicitly load all needed functions
from cq_cad_tools import multiFuseObjs_wColors, GetListOfObjects, restore_Main_Tools, \
    exportSTEP, close_CQ_Example, saveFCdoc, z_RotateObject, \
    runGeometryCheck

# Gui.SendMsgToActiveView("Run")
#Gui.activateWorkbench("CadQueryWorkbench")
#import FreeCADGui as Gui

try:
    close_CQ_Example(App, Gui)
except:
    FreeCAD.Console.PrintMessage("can't close example.")

#import FreeCAD, Draft, FreeCADGui
import ImportGui


def export_one_part(module, variant, pincount, configuration, log):
    print('\n##########################################################')
    series_definition = module.series_params
    variant_params = series_definition.variant_params[variant]
    params = variant_params['param_generator'](pincount)

    if module.LICENCE_Info.LIST_license[0] == "":
        LIST_license = L.LIST_int_license
        LIST_license.append("")
    else:
        LIST_license = module.LICENCE_Info.LIST_license

    LIST_license[0] = "Copyright (C) " + datetime.now().strftime("%Y") + ", " + module.LICENCE_Info.STR_licAuthor

    pins_per_row = pincount // series_definition.number_of_rows
    mpn = variant_params['mpn_format_string'].format(pincount=pincount,
                                                     pins_per_row=pins_per_row)
    orientation = configuration['orientation_options'][variant_params['orientation']]
    FileName = configuration['fp_name_format_string'].\
        format(man=series_definition.manufacturer,
               series=series_definition.series,
               mpn=mpn, num_rows=series_definition.number_of_rows,
               pins_per_row=pins_per_row, pitch=series_definition.pitch,
               orientation=orientation, mount_pin=variant_params['mount_pin'])
    FileName = FileName.replace('__', '_')

    lib_name = configuration['lib_name_format_string'].format(man=series_definition.manufacturer)
    fc_mpn = mpn.replace('.', '').replace('-', '_').replace('(', '').replace(')', '')

    ModelName = '{:s}_{:s}'.format(series_definition.manufacturer, fc_mpn)  # For some reason the Model name can not start with a number.
    FreeCAD.Console.PrintMessage('\r\n' + FileName + '\r\n')
    #FileName = modul.all_params[variant].file_name
    Newdoc = FreeCAD.newDocument(ModelName)
    print(Newdoc.Label)
    App.setActiveDocument(ModelName)
    App.ActiveDocument = App.getDocument(ModelName)
    Gui.ActiveDocument = Gui.getDocument(ModelName)

    color_keys = series_definition.color_keys
    obj_suffixes = series_definition.obj_suffixes
    colors = [shaderColors.named_colors[key].getDiffuseInt() for key in color_keys]

    cq_obj_data = module.generate_part(params)

    for i in range(len(cq_obj_data)):
        color_i = colors[i] + (0,)
        show(cq_obj_data[i], color_i)

    doc = FreeCAD.ActiveDocument
    doc.Label = ModelName
    objs = GetListOfObjects(FreeCAD, doc)

    for i in range(len(objs)):
        objs[i].Label = ModelName + obj_suffixes[i]
    restore_Main_Tools()

    out_dir = '{:s}{:s}.3dshapes'.format(global_3dpath, lib_name)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    used_color_keys = color_keys
    export_file_name = out_dir + os.sep + FileName + '.wrl'

    export_objects = []
    for i in range(len(objs)):
        export_objects.append(expVRML.exportObject(freecad_object=objs[i],
                                                   shape_color=color_keys[i],
                                                   face_colors=None))

    scale = 1 / 2.54
    colored_meshes = expVRML.getColoredMesh(Gui, export_objects, scale)
    expVRML.writeVRMLFile(colored_meshes, export_file_name, used_color_keys, LIST_license)

    fusion = multiFuseObjs_wColors(FreeCAD, FreeCADGui,
                                   ModelName, objs, keepOriginals=True)

    exportSTEP(doc, FileName, out_dir, fusion)

    step_path = '{dir:s}/{name:s}.step'.format(dir=out_dir, name=FileName)

    L.addLicenseToStep(out_dir, '{:s}.step'.format(FileName),
                       LIST_license,
                       module.LICENCE_Info.STR_licAuthor,
                       module.LICENCE_Info.STR_licEmail,
                       module.LICENCE_Info.STR_licOrgSys,
                       module.LICENCE_Info.STR_licPreProc)

    FreeCAD.activeDocument().recompute()

    saveFCdoc(App, Gui, doc, FileName, out_dir)

    #FreeCADGui.activateWorkbench("PartWorkbench")
    if save_memory == False and check_Model == False:
        FreeCADGui.SendMsgToActiveView("ViewFit")
        FreeCADGui.activeDocument().activeView().viewAxometric()

    if save_memory == True or check_Model == True:
        doc = FreeCAD.ActiveDocument
        FreeCAD.closeDocument(doc.Name)

    if check_Model == True:
        runGeometryCheck(App, Gui, step_path,
                         log, ModelName, save_memory=save_memory)


def exportSeries(module, configuration, log, model_filter_regobj):
    series_definition = module.series_params
    for variant in series_definition.variant_params:
        #print(variant)
        pinrange = series_definition.variant_params[variant]['pinrange']
        for pins in pinrange:
            try:
                if model_filter_regobj.match(str(pins)):
                    export_one_part(module, variant, pins, configuration, log)
            except GeometryError as e:
                e.print_errors(stop_on_first_error)
                if stop_on_first_error:
                    return -1
            except FreeCADVersionError as e:
                FreeCAD.Console.PrintError(e)
                return -1
    return 0


#########################  ADD MODEL GENERATORS  #########################

sys.path.append("cq_models")
import conn_jst_eh_models
import conn_jst_ph_models
import conn_jst_xh_models
import conn_jst_gh_models

all_series = {
    'eh': conn_jst_eh_models,
    'ph': conn_jst_ph_models,
    'xh': conn_jst_xh_models,
    'gh': conn_jst_gh_models
}

#########################################################################


class argparse():
    def __init__(self):
        self.config = '../_tools/config/connector_config_KLCv3.yaml'
        self.model_filter = '*'
        self.series = all_series.values()

    def parse_args(self, args):
        for arg in args:
            if '=' in arg:
                self.parseValueArg(*arg.split('='))
            else:
                self.argSwitchArg(arg)

    def parseValueArg(self, name, value):
        if name == 'config':
            self.config = value
        elif name == 'pins_filter':
            self.model_filter = value
        elif name == 'log':
            global check_log_file
            check_log_file = value
        elif name == 'series':
            #print("param series:")
            #print(value)
            series_str = value.split(',')
            self.series = []
            for s in series_str:
                if s.lower() in all_series:
                    self.series.append(all_series[s.lower()])

    def argSwitchArg(self, name):
        if name == '?':
            self.print_usage()
            exit()
        elif name == 'disable_check':
            global check_Model
            check_Model = False
        elif name == 'disable_Memory_reduction':
            global save_memory
            save_memory = False
        elif name == 'error_tolerant':
            global stop_on_first_error
            stop_on_first_error = False

    def print_usage(self):
        print("Generator script for phoenix contact 3d models.")
        print('usage: FreeCAD main_generator.py [optional arguments and switches]')
        print('optional arguments:')
        print('\tconfig=[config file]: default:config_phoenix_KLCv3.0.yaml')
        print('\tpins_filter=[filter pincount using linux file filter syntax]')
        print('\tlog=[log file path]')
        print('\tseries=[series name],[series name],...')
        print('switches:')
        print('\tdisable_check')
        print('\tdisable_Memory_reduction')
        print('\terror_tolerant\n')

    def __str__(self):
        return 'config:{:s}, filter:{:s}, series:{:s}, with_plug:{:d}'.format(
            self.config, self.model_filter, str(self.series), self.with_plug)


if __name__ == "__main__" or __name__ == "main_generator":
    FreeCAD.Console.PrintMessage('\r\nRunning...\r\n')

    args = argparse()
    args.parse_args(sys.argv)
    modelfilter = args.model_filter

    with open(args.config, 'r') as config_stream:
        try:
            configuration = yaml.load(config_stream)
        except yaml.YAMLError as exc:
            print(exc)

    model_filter_regobj = re.compile(fnmatch.translate(modelfilter))
    with open(check_log_file, 'w') as log:
        log.write('# Check report for Molex 3d model generation\n')
        for typ in args.series:
            try:
                if exportSeries(typ, configuration, log, model_filter_regobj) != 0:
                    break
            except Exception as exeption:
                traceback.print_exc()
                break

    FreeCAD.Console.PrintMessage('\n\nDone\n')
gpl-2.0
-2,069,341,764,201,125,400
35.15
136
0.599739
false
xuerenlv/PaperWork
my_study/pandas_study/test1.py
1
1044
# -*- coding: utf-8 -*-
'''
Created on Oct 16, 2015

@author: nlp
'''
import sys
import traceback
from store_model import Single_weibo_store
import datetime
from datetime import timedelta
import pprint
import jieba

reload(sys)
sys.setdefaultencoding('utf8')

from sklearn import svm

X = [[0, 0], [1, 1]]
y = [0, 1]
clf = svm.SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
              gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
              random_state=None, shrinking=True, tol=0.001, verbose=False)
clf.fit(X, y)


def is_all_chinese(word):
    for uchar in word:
        if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
            pass
        else:
            return False
    return True


def is_all_alpha(word):
    for one in word:
        if (one >= 'a' and one <= u'z') or (one >= 'A' and one <= u'Z'):
            pass
        else:
            return False
    return True


if __name__ == '__main__':
    x = '[timedelta'
    y = u'薛薛啊结构'
    print is_all_chinese(x)
    print is_all_chinese(y)
    pass
apache-2.0
353,639,054,111,473,700
18.884615
76
0.592843
false
iqas/e2gui
tools/create_picon_sats.py
193
2365
#
# create symlinks for picons
# usage: create_picon_sats lamedb
# run in picon directory.
# It will read the servicenames from the lamedb and create symlinks
# for the servicereference names.
#
# by pieterg, 2008

import os, sys

f = open(sys.argv[1]).readlines()
f = f[f.index("services\n")+1:-3]

while len(f) > 2:
    ref = [int(x, 0x10) for x in f[0][:-1].split(':')]
    name = f[1][:-1]
    name = name.replace('\xc2\x87', '').replace('\xc2\x86', '')
    fields = f[2].split(',')
    if len(fields) and fields[0][0] == 'p':
        provider = fields[0].split(':')[1]
    else:
        provider = 'unknown'

    if ref[4] == 2:
        servicetype = 'radio'
    else:
        ref[4] = 1
        servicetype = 'tv'

    sat = str(ref[1]/16/16/16/16)

    # SID:NS:TSID:ONID:STYPE:UNUSED(channelnumber in enigma1)
    #  X   X  X    X    D     D
    # REFTYPE:FLAGS:STYPE:SID:TSID:ONID:NS:PARENT_SID:PARENT_TSID:UNUSED
    #  D       D     X     X   X    X    X  X          X           X
    refstr = "1:0:%X:%X:%X:%X:%X:0:0:0" % (ref[4], ref[0], ref[2], ref[3], ref[1])
    refstr = refstr.replace(':', '_')
    filename = name + ".png"
    linkname = refstr + ".png"

    filename = filename.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '').replace('*', '_').replace('?', '_').replace(' ', '_').replace('(', '_').replace(')', '_').replace('|', '_')
    provider = provider.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '').replace('*', '_').replace('?', '_').replace(' ', '_').replace('(', '_').replace(')', '_').replace('|', '_')

    filename = filename.replace('\n', '')
    provider = provider.replace('\n', '')

    for i in range(len(filename)):
        if ord(filename[i]) > 127:
            filename = filename[0:i] + '_' + filename[i + 1:]

    for i in range(len(provider)):
        if ord(provider[i]) > 127:
            provider = provider[0:i] + '_' + provider[i + 1:]

    if sat == "65535":
        sat = "cable"
        filename = sat + "_" + provider + "_" + servicetype + "_" + filename
    else:
        filename = sat + "_" + provider + "_" + servicetype + "_" + filename
        sat = sat[0:2] + '.' + sat[-1:] + 'e'  # TODO: west

    try:
        os.makedirs(sat + '/' + servicetype)
    except:
        pass
    try:
        os.rename(linkname, sat + '/' + servicetype + '/' + filename)
    except:
        pass
    try:
        os.symlink(filename, sat + '/' + servicetype + '/' + linkname)
    except:
        pass

    f = f[3:]
gpl-2.0
-7,925,405,888,037,362,000
27.154762
235
0.541226
false
SUSE-Cloud/nova
nova/api/openstack/compute/plugins/v3/agents.py
8
6896
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob.exc

from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import db
from nova import exception
from nova.openstack.common.gettextutils import _

ALIAS = "os-agents"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)


class AgentsIndexTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('agents')
        elem = xmlutil.SubTemplateElement(root, 'agent', selector='agents')
        elem.set('hypervisor')
        elem.set('os')
        elem.set('architecture')
        elem.set('version')
        elem.set('md5hash')
        elem.set('agent_id')
        elem.set('url')
        return xmlutil.MasterTemplate(root, 1)


class AgentController(object):
    """The agent is talking about the guest agent. The host can use this for
    things like accessing files on the disk, configuring networking, or
    running other applications/scripts in the guest while it is running.
    Typically this uses some hypervisor-specific transport to avoid being
    dependent on a working network configuration. Xen, VMware, and VirtualBox
    have guest agents, although the Xen driver is the only one with an
    implementation for managing them in openstack. KVM doesn't really have a
    concept of a guest agent (although one could be written).

    You can find the design of agent update in this link:
    http://wiki.openstack.org/AgentUpdate
    and find the code in nova.virt.xenapi.vmops.VMOps._boot_new_instance.

    In this design we need to update the agent in the guest from the host,
    so we need some interfaces to update the agent info in the host.

    You can find more information about the design of the GuestAgent in
    the following links:
    http://wiki.openstack.org/GuestAgent
    http://wiki.openstack.org/GuestAgentXenStoreCommunication
    """
    @extensions.expected_errors(())
    @wsgi.serializers(xml=AgentsIndexTemplate)
    def index(self, req):
        """Return a list of all agent builds. Filter by hypervisor."""
        context = req.environ['nova.context']
        authorize(context)
        hypervisor = None
        agents = []
        if 'hypervisor' in req.GET:
            hypervisor = req.GET['hypervisor']

        for agent_build in db.agent_build_get_all(context, hypervisor):
            agents.append({'hypervisor': agent_build.hypervisor,
                           'os': agent_build.os,
                           'architecture': agent_build.architecture,
                           'version': agent_build.version,
                           'md5hash': agent_build.md5hash,
                           'agent_id': agent_build.id,
                           'url': agent_build.url})

        return {'agents': agents}

    @extensions.expected_errors((400, 404))
    def update(self, req, id, body):
        """Update an existing agent build."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            para = body['agent']
            url = para['url']
            md5hash = para['md5hash']
            version = para['version']
        except TypeError as e:
            raise webob.exc.HTTPBadRequest()
        except KeyError as e:
            raise webob.exc.HTTPBadRequest(explanation=_(
                "Could not find %s parameter in the request") % e.args[0])

        try:
            db.agent_build_update(context, id,
                                  {'version': version,
                                   'url': url,
                                   'md5hash': md5hash})
        except exception.AgentBuildNotFound as ex:
            raise webob.exc.HTTPNotFound(explanation=ex.format_message())

        return {"agent": {'agent_id': id, 'version': version,
                          'url': url, 'md5hash': md5hash}}

    @extensions.expected_errors(404)
    @wsgi.response(204)
    def delete(self, req, id):
        """Deletes an existing agent build."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            db.agent_build_destroy(context, id)
        except exception.AgentBuildNotFound as ex:
            raise webob.exc.HTTPNotFound(explanation=ex.format_message())

    @extensions.expected_errors((400, 409))
    @wsgi.response(201)
    def create(self, req, body):
        """Creates a new agent build."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            agent = body['agent']
            hypervisor = agent['hypervisor']
            os = agent['os']
            architecture = agent['architecture']
            version = agent['version']
            url = agent['url']
            md5hash = agent['md5hash']
        except TypeError as e:
            raise webob.exc.HTTPBadRequest()
        except KeyError as e:
            raise webob.exc.HTTPBadRequest(explanation=_(
                "Could not find %s parameter in the request") % e.args[0])

        try:
            agent_build_ref = db.agent_build_create(context,
                                                    {'hypervisor': hypervisor,
                                                     'os': os,
                                                     'architecture': architecture,
                                                     'version': version,
                                                     'url': url,
                                                     'md5hash': md5hash})
            agent['agent_id'] = agent_build_ref.id
        except exception.AgentBuildExists as ex:
            raise webob.exc.HTTPConflict(explanation=ex.format_message())
        return {'agent': agent}


class Agents(extensions.V3APIExtensionBase):
    """Agents support."""

    name = "Agents"
    alias = ALIAS
    namespace = "http://docs.openstack.org/compute/ext/agents/api/v3"
    version = 1

    def get_resources(self):
        resource = [extensions.ResourceExtension(ALIAS,
                                                 AgentController())]
        return resource

    def get_controller_extensions(self):
        """It's an abstract function V3APIExtensionBase and the extension
        will not be loaded without it.
        """
        return []
apache-2.0
6,880,636,415,451,997,000
36.075269
78
0.591792
false
ProgVal/Limnoria-test
plugins/Debug/plugin.py
3
6668
#!/usr/bin/python

###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions, and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions, and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#   * Neither the name of the author of this software nor the name of
#     contributors to this software may be used to endorse or promote products
#     derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###

"""
This is for debugging purposes only and you shouldn't load this unless
a Supybot developer requests you to debug some issue.
"""

import supybot.plugins as plugins

import gc
import os
import sys

try:
    import exceptions
except ImportError:  # Python 3
    import builtins
    class exceptions:
        """Pseudo-module"""
        pass
    # Populate the pseudo-module with the built-in exception classes.
    for (key, value) in list(builtins.__dict__.items()):
        if isinstance(value, type) and issubclass(value, Exception):
            setattr(exceptions, key, value)

import supybot.conf as conf
import supybot.utils as utils
import supybot.ircdb as ircdb
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.callbacks as callbacks


def getTracer(fd):
    def tracer(frame, event, _):
        if event == 'call':
            code = frame.f_code
            fd.write('%s: %s\n' % (code.co_filename, code.co_name))
    return tracer


class Debug(callbacks.Privmsg):
    """This plugin provides debugging abilities for Supybot. It
    should not be loaded with a default installation."""
    capability = 'owner'

    def __init__(self, irc):
        # Setup exec command.
        self.__parent = super(Debug, self)
        self.__parent.__init__(irc)
        setattr(self.__class__, 'exec', self.__class__._exec)

    def callCommand(self, name, irc, msg, *args, **kwargs):
        if ircdb.checkCapability(msg.prefix, self.capability):
            self.__parent.callCommand(name, irc, msg, *args, **kwargs)
        else:
            irc.errorNoCapability(self.capability)

    _evalEnv = {'_': None,
                '__': None,
                '___': None,
                }
    _evalEnv.update(globals())

    def eval(self, irc, msg, args, s):
        """<expression>

        Evaluates <expression> (which should be a Python expression) and
        returns its value.  If an exception is raised, reports the
        exception (and logs the traceback to the bot's logfile).
        """
        try:
            self._evalEnv.update(locals())
            x = eval(s, self._evalEnv, self._evalEnv)
            self._evalEnv['___'] = self._evalEnv['__']
            self._evalEnv['__'] = self._evalEnv['_']
            self._evalEnv['_'] = x
            irc.reply(repr(x))
        except SyntaxError as e:
            irc.reply(format('%s: %q', utils.exnToString(e), s))
    eval = wrap(eval, ['text'])

    def _exec(self, irc, msg, args, s):
        """<statement>

        Execs <code>.  Returns success if it didn't raise any exceptions.
        """
        exec(s)
        irc.replySuccess()
    _exec = wrap(_exec, ['text'])

    def simpleeval(self, irc, msg, args, text):
        """<expression>

        Evaluates the given expression.
        """
        try:
            irc.reply(repr(eval(text)))
        except Exception as e:
            irc.reply(utils.exnToString(e))
    simpleeval = wrap(simpleeval, ['text'])

    def exn(self, irc, msg, args, name):
        """<exception name>

        Raises the exception matching <exception name>.
        """
        if isinstance(__builtins__, dict):
            exn = __builtins__[name]
        else:
            exn = getattr(__builtins__, name)
        raise exn(msg.prefix)
    exn = wrap(exn, ['text'])

    def sendquote(self, irc, msg, args, text):
        """<raw IRC message>

        Sends (not queues) the raw IRC message given.
        """
        msg = ircmsgs.IrcMsg(text)
        irc.sendMsg(msg)
    sendquote = wrap(sendquote, ['text'])

    def settrace(self, irc, msg, args, filename):
        """[<filename>]

        Starts tracing function calls to <filename>.  If <filename> is not
        given, sys.stdout is used.  This causes much output.
        """
        if filename:
            fd = open(filename, 'a')
        else:
            fd = sys.stdout
        sys.settrace(getTracer(fd))
        irc.replySuccess()
    settrace = wrap(settrace, [additional('filename')])

    def unsettrace(self, irc, msg, args):
        """takes no arguments

        Stops tracing function calls on stdout.
        """
        sys.settrace(None)
        irc.replySuccess()
    unsettrace = wrap(unsettrace)

    def channeldb(self, irc, msg, args, channel):
        """[<channel>]

        Returns the result of the channeldb converter.
        """
        irc.reply(channel)
    channeldb = wrap(channeldb, ['channeldb'])

    def collect(self, irc, msg, args, times):
        """[<times>]

        Does <times> gc collections, returning the number of objects collected
        each time.  <times> defaults to 1.
        """
        L = []
        while times:
            L.append(gc.collect())
            times -= 1
        irc.reply(format('%L', list(map(str, L))))
    collect = wrap(collect, [additional('positiveInt', 1)])

    def environ(self, irc, msg, args):
        """takes no arguments

        Returns the environment of the supybot process.
        """
        irc.reply(repr(os.environ))
    environ = wrap(environ)


Class = Debug

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
bsd-3-clause
9,131,488,519,622,215,000
32.009901
79
0.620426
false
jerivas/mezzanine
mezzanine/pages/templatetags/pages_tags.py
2
8361
from collections import defaultdict

from django.core.exceptions import ImproperlyConfigured
from django.template import TemplateSyntaxError, Variable
from django.template.loader import get_template
from django.utils.translation import gettext_lazy as _

from mezzanine import template
from mezzanine.pages.models import Page
from mezzanine.utils.urls import home_slug

register = template.Library()


@register.render_tag
def page_menu(context, token):
    """
    Return a list of child pages for the given parent, storing all
    pages in a dict in the context when first called using parents as
    keys for retrieval on subsequent recursive calls from the menu
    template.
    """
    # First arg could be the menu template file name, or the parent page.
    # Also allow for both to be used.
    template_name = None
    parent_page = None
    parts = token.split_contents()[1:]
    for part in parts:
        part = Variable(part).resolve(context)
        if isinstance(part, str):
            template_name = part
        elif isinstance(part, Page):
            parent_page = part
    if template_name is None:
        try:
            template_name = context["menu_template_name"]
        except KeyError:
            error = "No template found for page_menu in: %s" % parts
            raise TemplateSyntaxError(error)
    context["menu_template_name"] = template_name
    if "menu_pages" not in context:
        try:
            user = context["request"].user
            slug = context["request"].path
        except KeyError:
            user = None
            slug = ""
        num_children = lambda id: lambda: len(context["menu_pages"][id])
        has_children = lambda id: lambda: num_children(id)() > 0
        rel = [
            m.__name__.lower() for m in Page.get_content_models() if not m._meta.proxy
        ]
        published = Page.objects.published(for_user=user).select_related(*rel)
        # Store the current page being viewed in the context. Used
        # for comparisons in page.set_menu_helpers.
        if "page" not in context:
            try:
                context.dicts[0]["_current_page"] = published.exclude(
                    content_model="link"
                ).get(slug=slug)
            except Page.DoesNotExist:
                context.dicts[0]["_current_page"] = None
        elif slug:
            context.dicts[0]["_current_page"] = context["page"]
        # Some homepage related context flags. on_home is just a helper
        # indicating we're on the homepage. has_home indicates an actual
        # page object exists for the homepage, which can be used to
        # determine whether or not to show a hard-coded homepage link
        # in the page menu.
        home = home_slug()
        context.dicts[0]["on_home"] = slug == home
        context.dicts[0]["has_home"] = False
        # Maintain a dict of page IDs -> parent IDs for fast
        # lookup in setting page.is_current_or_ascendant in
        # page.set_menu_helpers.
        context.dicts[0]["_parent_page_ids"] = {}
        pages = defaultdict(list)
        for page in published.order_by("_order"):
            page.set_helpers(context)
            context["_parent_page_ids"][page.id] = page.parent_id
            setattr(page, "num_children", num_children(page.id))
            setattr(page, "has_children", has_children(page.id))
            pages[page.parent_id].append(page)
            if page.slug == home:
                context.dicts[0]["has_home"] = True
        # Include menu_pages in all contexts, not only in the
        # block being rendered.
        context.dicts[0]["menu_pages"] = pages
    # ``branch_level`` must be stored against each page so that the
    # calculation of it is correctly applied. This looks weird but if we do
    # the ``branch_level`` as a separate arg to the template tag with the
    # addition performed on it, the addition occurs each time the template
    # tag is called rather than once per level.
    context["branch_level"] = 0
    parent_page_id = None
    if parent_page is not None:
        context["branch_level"] = getattr(parent_page, "branch_level", 0) + 1
        parent_page_id = parent_page.id

    # Build the ``page_branch`` template variable, which is the list of
    # pages for the current parent. Here we also assign the attributes
    # to the page object that determines whether it belongs in the
    # current menu template being rendered.
    context["page_branch"] = context["menu_pages"].get(parent_page_id, [])
    context["page_branch_in_menu"] = False
    for page in context["page_branch"]:
        page.in_menu = page.in_menu_template(template_name)
        page.num_children_in_menu = 0
        if page.in_menu:
            context["page_branch_in_menu"] = True
        for child in context["menu_pages"].get(page.id, []):
            if child.in_menu_template(template_name):
                page.num_children_in_menu += 1
        page.has_children_in_menu = page.num_children_in_menu > 0
        page.branch_level = context["branch_level"]
        page.parent = parent_page
        context["parent_page"] = page.parent
        # Prior to pages having the ``in_menus`` field, pages had two
        # boolean fields ``in_navigation`` and ``in_footer`` for
        # controlling menu inclusion. Attributes and variables
        # simulating these are maintained here for backwards
        # compatibility in templates, but will be removed eventually.
        page.in_navigation = page.in_menu
        page.in_footer = not (not page.in_menu and "footer" in template_name)
        if page.in_navigation:
            context["page_branch_in_navigation"] = True
        if page.in_footer:
            context["page_branch_in_footer"] = True

    t = get_template(template_name)
    return t.render(context.flatten())


@register.as_tag
def models_for_pages(*args):
    """
    Create a select list containing each of the models that subclass the
    ``Page`` model.
    """
    from warnings import warn

    warn(
        "template tag models_for_pages is deprecated, use "
        "PageAdmin.get_content_models instead"
    )
    from mezzanine.pages.admin import PageAdmin

    return PageAdmin.get_content_models()


@register.render_tag
def set_model_permissions(context, token):
    """
    Assigns a permissions dict to the given model, much like Django
    does with its dashboard app list.

    Used within the change list for pages, to implement permission
    checks for the navigation tree.
    """
    model = context[token.split_contents()[1]]
    opts = model._meta
    perm_name = opts.app_label + ".%s_" + opts.object_name.lower()
    request = context["request"]
    setattr(model, "perms", {})
    for perm_type in ("add", "change", "delete"):
        model.perms[perm_type] = request.user.has_perm(perm_name % perm_type)
    return ""


@register.render_tag
def set_page_permissions(context, token):
    """
    Assigns a permissions dict to the given page instance, combining
    Django's permission for the page's model and a permission check
    against the instance itself calling the page's ``can_add``,
    ``can_change`` and ``can_delete`` custom methods.

    Used within the change list for pages, to implement permission
    checks for the navigation tree.
    """
    page = context[token.split_contents()[1]]
    model = page.get_content_model()
    try:
        opts = model._meta
    except AttributeError:
        if model is None:
            error = _(
                "Could not load the model for the following page, "
                "was it removed?"
            )
            obj = page
        else:
            # A missing inner Meta class usually means the Page model
            # hasn't been directly subclassed.
            error = _(
                "An error occurred with the following class. Does "
                "it subclass Page directly?"
            )
            obj = model.__class__.__name__
        raise ImproperlyConfigured(error + " '%s'" % obj)
    perm_name = opts.app_label + ".%s_" + opts.object_name.lower()
    request = context["request"]
    setattr(page, "perms", {})
    for perm_type in ("add", "change", "delete"):
        perm = request.user.has_perm(perm_name % perm_type)
        perm = perm and getattr(model, "can_%s" % perm_type)(request)
        page.perms[perm_type] = perm
    return ""
bsd-2-clause
-5,751,617,969,671,375,000
39.391304
86
0.626121
false
deonwu/Goku
scripts/GokuCtrl/ipa4django/views/simpleviews.py
1
6103
import types
import logging
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django import template
from django.db.models.query import QuerySet
import re
from django.core import serializers


class ViewHandler(object):
    def __init__(self, func, param_validator=None):
        self.target = func
        if func.func_defaults:
            reqiured_args_count = func.func_code.co_argcount - len(func.func_defaults)
        else:
            reqiured_args_count = func.func_code.co_argcount
        var_names = func.func_code.co_varnames
        self.reqiured_args = var_names[:reqiured_args_count]
        self.options_args = var_names[reqiured_args_count:]
        self.param_validator = param_validator

    def __parse_args(self, r, args, params):
        args = list(args)
        args.reverse()
        param = []
        for name in self.reqiured_args:
            if name in ['r', 'request']:
                param.append(r)
                continue
            if not params.has_key(name):
                if args:
                    param.append(args.pop())
                    continue
                else:
                    raise Exception, "Not found required parameter '%s'" % name
            param.append(params[name])

        kw_param = {}
        for name in self.options_args:
            if params.has_key(name):
                kw_param[str(name)] = params[name]
        return (param, kw_param)

    def __call__(self, r, *args, **kwargs):
        param, kw_args = self.__parse_args(r, args, r.REQUEST)
        for k, v in kw_args.iteritems():
            kwargs[k] = v
        if callable(self.param_validator):
            self.param_validator(r, self.target, param, kwargs)
        return self.target(*param, **kwargs)


class SimpleViews(object):
    def __init__(self, view):
        self.view = self.__import_views(view)
        self.cached = {}
        self._pre_handler = None
        self.__param_validator = None
        if hasattr(self.view, "pre_handler"):
            self._pre_handler = getattr(self.view, "pre_handler")
        if hasattr(self.view, "__validation__"):
            self.__param_validator = getattr(self.view, "__validation__")

    def pre_handler(self, r, url):
        if self._pre_handler:
            return self._pre_handler(r, url)
        else:
            return None

    def __call__(self, request, url, **kwargs):
        pre = self.pre_handler(request, url)
        if pre is not None:
            return self.result_router(pre, request)

        if "/" in url:
            patterns = url.split("/")
            handle_url, args = patterns[0], patterns[1:]
        else:
            handle_url, args = url, ()

        h = None
        if self.cached.has_key(handle_url):
            h = self.cached[handle_url]
        else:
            try:
                obj = getattr(self.view, handle_url)
            except AttributeError, e:
                try:
                    obj = getattr(self.view, "default_view")
                except:
                    raise e
            h = ViewHandler(obj, self.__param_validator)
            self.cached[handle_url] = h

        return self.result_router(h(request, *args, **kwargs), request)

    def result_router(self, r, request):
        mime_types = ['text/javascript', ]
        if isinstance(r, HttpResponse):
            return r
        elif isinstance(r, types.TupleType):
            if isinstance(r[0], QuerySet):
                return HttpResponse(serializers.serialize("mixed_json", r))
            elif len(r) == 1 and re.match("^(http|redirect):", r[0]):
                url = r[0].replace("redirect:", "")
                return HttpResponseRedirect(url)
            elif len(r) == 2 and r[0] in mime_types:
                return HttpResponse(r[1], r[0])
            elif len(r) == 2:
                temp, context = r
                return render_to_response(temp, context,
                        context_instance=template.RequestContext(request))
            return render_to_response(*r)
        elif isinstance(r, basestring):
            return HttpResponse(r)
        else:
            return HttpResponse(serializers.serialize("mixed_json", r),
                                'application/json')

    def __import_views(self, v):
        if isinstance(v, types.ModuleType):
            return v
        elif isinstance(v, basestring):
            return __import__(v, globals(), locals(), ["__name__"], -1)
        elif isinstance(v, types.ClassType):
            return v()
        raise Exception, "Not supported views '%s'." % v


class SimpleUrl(SimpleViews):
    def __init__(self, handler):
        handler = handler.split(".")
        module_name, handler = ".".join(handler[:-1]), handler[-1]
        SimpleViews.__init__(self, module_name)
        self.action_url = handler
        #self.target = ViewHandler(self.__import_url(handler))

    def __call__(self, request, **kwargs):
        return SimpleViews.__call__(self, request, self.action_url, **kwargs)


def anti_crack(r, *args):
    from anticrack.image import picChecker
    # Instantiate the checker; the original referenced an undefined name here.
    checker = picChecker()
    (code, image) = checker.createChecker()
    sessionId = r.COOKIES.get('sessionid', None)
    if sessionId is not None:
        cache_key = "anti_%s" % sessionId
        memcache.add(key=cache_key, value=code, time=60 * 5, namespace='global')
        logging.debug("save anti-crack code for:%s-->%s" % (sessionId, code))
    return HttpResponse(image.getdata, mimetype='image/gif')


def anti_verify(r, code=""):
    old_code = None
    sessionId = r.COOKIES.get('sessionid', None)
    if sessionId is not None:
        cache_key = "anti_%s" % sessionId
        old_code = memcache.get(key=cache_key, namespace='global')
        logging.debug("get anti-crack code for:%s-->%s" % (sessionId, old_code))
    return code == old_code
mit
-487,352,504,734,629,100
34.277457
92
0.539407
false
Semi-global/edx-platform
cms/djangoapps/contentstore/course_group_config.py
70
13178
""" Class for manipulating groups configuration on a course object. """ import json import logging from util.db import generate_int_id, MYSQL_MAX_INT from django.utils.translation import ugettext as _ from contentstore.utils import reverse_usage_url from xmodule.partitions.partitions import UserPartition from xmodule.split_test_module import get_split_user_partitions from openedx.core.djangoapps.course_groups.partition_scheme import get_cohorted_user_partition MINIMUM_GROUP_ID = 100 RANDOM_SCHEME = "random" COHORT_SCHEME = "cohort" # Note: the following content group configuration strings are not # translated since they are not visible to users. CONTENT_GROUP_CONFIGURATION_DESCRIPTION = 'The groups in this configuration can be mapped to cohort groups in the LMS.' CONTENT_GROUP_CONFIGURATION_NAME = 'Content Group Configuration' log = logging.getLogger(__name__) class GroupConfigurationsValidationError(Exception): """ An error thrown when a group configurations input is invalid. """ pass class GroupConfiguration(object): """ Prepare Group Configuration for the course. """ def __init__(self, json_string, course, configuration_id=None): """ Receive group configuration as a json (`json_string`), deserialize it and validate. """ self.configuration = GroupConfiguration.parse(json_string) self.course = course self.assign_id(configuration_id) self.assign_group_ids() self.validate() @staticmethod def parse(json_string): """ Deserialize given json that represents group configuration. """ try: configuration = json.loads(json_string) except ValueError: raise GroupConfigurationsValidationError(_("invalid JSON")) configuration["version"] = UserPartition.VERSION return configuration def validate(self): """ Validate group configuration representation. """ if not self.configuration.get("name"): raise GroupConfigurationsValidationError(_("must have name of the configuration")) if len(self.configuration.get('groups', [])) < 1: raise GroupConfigurationsValidationError(_("must have at least one group")) def assign_id(self, configuration_id=None): """ Assign id for the json representation of group configuration. """ if configuration_id: self.configuration['id'] = int(configuration_id) else: self.configuration['id'] = generate_int_id( MINIMUM_GROUP_ID, MYSQL_MAX_INT, GroupConfiguration.get_used_ids(self.course) ) def assign_group_ids(self): """ Assign ids for the group_configuration's groups. """ used_ids = [g.id for p in self.course.user_partitions for g in p.groups] # Assign ids to every group in configuration. for group in self.configuration.get('groups', []): if group.get('id') is None: group["id"] = generate_int_id(MINIMUM_GROUP_ID, MYSQL_MAX_INT, used_ids) used_ids.append(group["id"]) @staticmethod def get_used_ids(course): """ Return a list of IDs that already in use. """ return set([p.id for p in course.user_partitions]) def get_user_partition(self): """ Get user partition for saving in course. """ return UserPartition.from_json(self.configuration) @staticmethod def _get_usage_info(course, unit, item, usage_info, group_id, scheme_name=None): """ Get usage info for unit/module. 
""" unit_url = reverse_usage_url( 'container_handler', course.location.course_key.make_usage_key(unit.location.block_type, unit.location.name) ) usage_dict = {'label': u"{} / {}".format(unit.display_name, item.display_name), 'url': unit_url} if scheme_name == RANDOM_SCHEME: validation_summary = item.general_validation_message() usage_dict.update({'validation': validation_summary.to_json() if validation_summary else None}) usage_info[group_id].append(usage_dict) return usage_info @staticmethod def get_content_experiment_usage_info(store, course): """ Get usage information for all Group Configurations currently referenced by a split_test instance. """ split_tests = store.get_items(course.id, qualifiers={'category': 'split_test'}) return GroupConfiguration._get_content_experiment_usage_info(store, course, split_tests) @staticmethod def get_split_test_partitions_with_usage(store, course): """ Returns json split_test group configurations updated with usage information. """ usage_info = GroupConfiguration.get_content_experiment_usage_info(store, course) configurations = [] for partition in get_split_user_partitions(course.user_partitions): configuration = partition.to_json() configuration['usage'] = usage_info.get(partition.id, []) configurations.append(configuration) return configurations @staticmethod def _get_content_experiment_usage_info(store, course, split_tests): # pylint: disable=unused-argument """ Returns all units names, their urls and validation messages. Returns: {'user_partition_id': [ { 'label': 'Unit 1 / Experiment 1', 'url': 'url_to_unit_1', 'validation': {'message': 'a validation message', 'type': 'warning'} }, { 'label': 'Unit 2 / Experiment 2', 'url': 'url_to_unit_2', 'validation': {'message': 'another validation message', 'type': 'error'} } ], } """ usage_info = {} for split_test in split_tests: if split_test.user_partition_id not in usage_info: usage_info[split_test.user_partition_id] = [] unit = split_test.get_parent() if not unit: log.warning("Unable to find parent for split_test %s", split_test.location) continue usage_info = GroupConfiguration._get_usage_info( course=course, unit=unit, item=split_test, usage_info=usage_info, group_id=split_test.user_partition_id, scheme_name=RANDOM_SCHEME ) return usage_info @staticmethod def get_content_groups_usage_info(store, course): """ Get usage information for content groups. """ items = store.get_items(course.id, settings={'group_access': {'$exists': True}}) return GroupConfiguration._get_content_groups_usage_info(course, items) @staticmethod def _get_content_groups_usage_info(course, items): """ Returns all units names and their urls. This will return only groups for the cohort user partition. Returns: {'group_id': [ { 'label': 'Unit 1 / Problem 1', 'url': 'url_to_unit_1' }, { 'label': 'Unit 2 / Problem 2', 'url': 'url_to_unit_2' } ], } """ usage_info = {} for item, group_id in GroupConfiguration._iterate_items_and_content_group_ids(course, items): if group_id not in usage_info: usage_info[group_id] = [] unit = item.get_parent() if not unit: log.warning("Unable to find parent for component %s", item.location) continue usage_info = GroupConfiguration._get_usage_info( course, unit=unit, item=item, usage_info=usage_info, group_id=group_id ) return usage_info @staticmethod def get_content_groups_items_usage_info(store, course): """ Get usage information on items for content groups. 
""" items = store.get_items(course.id, settings={'group_access': {'$exists': True}}) return GroupConfiguration._get_content_groups_items_usage_info(course, items) @staticmethod def _get_content_groups_items_usage_info(course, items): """ Returns all items names and their urls. This will return only groups for the cohort user partition. Returns: {'group_id': [ { 'label': 'Problem 1 / Problem 1', 'url': 'url_to_item_1' }, { 'label': 'Problem 2 / Problem 2', 'url': 'url_to_item_2' } ], } """ usage_info = {} for item, group_id in GroupConfiguration._iterate_items_and_content_group_ids(course, items): if group_id not in usage_info: usage_info[group_id] = [] usage_info = GroupConfiguration._get_usage_info( course, unit=item, item=item, usage_info=usage_info, group_id=group_id ) return usage_info @staticmethod def _iterate_items_and_content_group_ids(course, items): """ Iterate through items and content group IDs in a course. This will yield group IDs *only* for cohort user partitions. Yields: tuple of (item, group_id) """ content_group_configuration = get_cohorted_user_partition(course) if content_group_configuration is not None: for item in items: if hasattr(item, 'group_access') and item.group_access: group_ids = item.group_access.get(content_group_configuration.id, []) for group_id in group_ids: yield item, group_id @staticmethod def update_usage_info(store, course, configuration): """ Update usage information for particular Group Configuration. Returns json of particular group configuration updated with usage information. """ configuration_json = None # Get all Experiments that use particular Group Configuration in course. if configuration.scheme.name == RANDOM_SCHEME: split_tests = store.get_items( course.id, category='split_test', content={'user_partition_id': configuration.id} ) configuration_json = configuration.to_json() usage_information = GroupConfiguration._get_content_experiment_usage_info(store, course, split_tests) configuration_json['usage'] = usage_information.get(configuration.id, []) elif configuration.scheme.name == COHORT_SCHEME: # In case if scheme is "cohort" configuration_json = GroupConfiguration.update_content_group_usage_info(store, course, configuration) return configuration_json @staticmethod def update_content_group_usage_info(store, course, configuration): """ Update usage information for particular Content Group Configuration. Returns json of particular content group configuration updated with usage information. """ usage_info = GroupConfiguration.get_content_groups_usage_info(store, course) content_group_configuration = configuration.to_json() for group in content_group_configuration['groups']: group['usage'] = usage_info.get(group['id'], []) return content_group_configuration @staticmethod def get_or_create_content_group(store, course): """ Returns the first user partition from the course which uses the CohortPartitionScheme, or generates one if no such partition is found. The created partition is not saved to the course until the client explicitly creates a group within the partition and POSTs back. 
""" content_group_configuration = get_cohorted_user_partition(course) if content_group_configuration is None: content_group_configuration = UserPartition( id=generate_int_id(MINIMUM_GROUP_ID, MYSQL_MAX_INT, GroupConfiguration.get_used_ids(course)), name=CONTENT_GROUP_CONFIGURATION_NAME, description=CONTENT_GROUP_CONFIGURATION_DESCRIPTION, groups=[], scheme_id=COHORT_SCHEME ) return content_group_configuration.to_json() content_group_configuration = GroupConfiguration.update_content_group_usage_info( store, course, content_group_configuration ) return content_group_configuration
license: agpl-3.0
hash: -6,284,373,591,785,163,000
line_mean: 35.203297
line_max: 119
alpha_frac: 0.591896
autogenerated: false
repo_name: ciex/motor
path: lib/werkzeug/datastructures.py
copies: 73
size: 83980
# -*- coding: utf-8 -*- """ werkzeug.datastructures ~~~~~~~~~~~~~~~~~~~~~~~ This module provides mixins and classes with an immutable interface. :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import codecs import mimetypes from itertools import repeat from werkzeug._internal import _proxy_repr, _missing, _empty_stream _locale_delim_re = re.compile(r'[_-]') def is_immutable(self): raise TypeError('%r objects are immutable' % self.__class__.__name__) def iter_multi_items(mapping): """Iterates over the items of a mapping yielding keys and values without dropping any from more complex structures. """ if isinstance(mapping, MultiDict): for item in mapping.iteritems(multi=True): yield item elif isinstance(mapping, dict): for key, value in mapping.iteritems(): if isinstance(value, (tuple, list)): for value in value: yield key, value else: yield key, value else: for item in mapping: yield item class ImmutableListMixin(object): """Makes a :class:`list` immutable. .. versionadded:: 0.5 :private: """ _hash_cache = None def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(tuple(self)) return rv def __reduce_ex__(self, protocol): return type(self), (list(self),) def __delitem__(self, key): is_immutable(self) def __delslice__(self, i, j): is_immutable(self) def __iadd__(self, other): is_immutable(self) __imul__ = __iadd__ def __setitem__(self, key, value): is_immutable(self) def __setslice__(self, i, j, value): is_immutable(self) def append(self, item): is_immutable(self) remove = append def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def reverse(self): is_immutable(self) def sort(self, cmp=None, key=None, reverse=None): is_immutable(self) class ImmutableList(ImmutableListMixin, list): """An immutable :class:`list`. .. versionadded:: 0.5 :private: """ __repr__ = _proxy_repr(list) class ImmutableDictMixin(object): """Makes a :class:`dict` immutable. .. versionadded:: 0.5 :private: """ _hash_cache = None @classmethod def fromkeys(cls, keys, value=None): instance = super(cls, cls).__new__(cls) instance.__init__(zip(keys, repeat(value))) return instance def __reduce_ex__(self, protocol): return type(self), (dict(self),) def _iter_hashitems(self): return self.iteritems() def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(frozenset(self._iter_hashitems())) return rv def setdefault(self, key, default=None): is_immutable(self) def update(self, *args, **kwargs): is_immutable(self) def pop(self, key, default=None): is_immutable(self) def popitem(self): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) def __delitem__(self, key): is_immutable(self) def clear(self): is_immutable(self) class ImmutableMultiDictMixin(ImmutableDictMixin): """Makes a :class:`MultiDict` immutable. .. versionadded:: 0.5 :private: """ def __reduce_ex__(self, protocol): return type(self), (self.items(multi=True),) def _iter_hashitems(self): return self.iteritems(multi=True) def add(self, key, value): is_immutable(self) def popitemlist(self): is_immutable(self) def poplist(self, key): is_immutable(self) def setlist(self, key, new_list): is_immutable(self) def setlistdefault(self, key, default_list=None): is_immutable(self) class UpdateDictMixin(object): """Makes dicts call `self.on_update` on modifications. .. 
versionadded:: 0.5 :private: """ on_update = None def calls_update(name): def oncall(self, *args, **kw): rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw) if self.on_update is not None: self.on_update(self) return rv oncall.__name__ = name return oncall __setitem__ = calls_update('__setitem__') __delitem__ = calls_update('__delitem__') clear = calls_update('clear') pop = calls_update('pop') popitem = calls_update('popitem') setdefault = calls_update('setdefault') update = calls_update('update') del calls_update class TypeConversionDict(dict): """Works like a regular dict but the :meth:`get` method can perform type conversions. :class:`MultiDict` and :class:`CombinedMultiDict` are subclasses of this class and provide the same feature. .. versionadded:: 0.5 """ def get(self, key, default=None, type=None): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = TypeConversionDict(foo='42', bar='blub') >>> d.get('foo', type=int) 42 >>> d.get('bar', -1, type=int) -1 :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the default value is returned. """ try: rv = self[key] if type is not None: rv = type(rv) except (KeyError, ValueError): rv = default return rv class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict): """Works like a :class:`TypeConversionDict` but does not support modifications. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return TypeConversionDict(self) def __copy__(self): return self class MultiDict(TypeConversionDict): """A :class:`MultiDict` is a dictionary subclass customized to deal with multiple values for the same key which is for example used by the parsing functions in the wrappers. This is necessary because some HTML form elements pass multiple values for the same key. :class:`MultiDict` implements all standard dictionary methods. Internally, it saves all values for a key as a list, but the standard dict access methods will only return the first value for a key. If you want to gain access to the other values, too, you have to use the `list` methods as explained below. Basic Usage: >>> d = MultiDict([('a', 'b'), ('a', 'c')]) >>> d MultiDict([('a', 'b'), ('a', 'c')]) >>> d['a'] 'b' >>> d.getlist('a') ['b', 'c'] >>> 'a' in d True It behaves like a normal dict thus all dict functions will only return the first value when multiple values for one key are found. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. A :class:`MultiDict` can be constructed from an iterable of ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2 onwards some keyword parameters. :param mapping: the initial value for the :class:`MultiDict`. Either a regular dict, an iterable of ``(key, value)`` tuples or `None`. 
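A further sketch (values are illustrative): dict values that are lists are expanded into per-key value lists:

>>> d = MultiDict(dict(a=[1, 2], b=3))
>>> d['a']
1
>>> d.getlist('a')
[1, 2]
>>> d.to_dict(flat=False) == {'a': [1, 2], 'b': [3]}
True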
""" def __init__(self, mapping=None): if isinstance(mapping, MultiDict): dict.__init__(self, ((k, l[:]) for k, l in mapping.iterlists())) elif isinstance(mapping, dict): tmp = {} for key, value in mapping.iteritems(): if isinstance(value, (tuple, list)): value = list(value) else: value = [value] tmp[key] = value dict.__init__(self, tmp) else: tmp = {} for key, value in mapping or (): tmp.setdefault(key, []).append(value) dict.__init__(self, tmp) def __getstate__(self): return dict(self.lists()) def __setstate__(self, value): dict.clear(self) dict.update(self, value) def __iter__(self): return self.iterkeys() def __getitem__(self, key): """Return the first data value for this key; raises KeyError if not found. :param key: The key to be looked up. :raise KeyError: if the key does not exist. """ if key in self: return dict.__getitem__(self, key)[0] raise BadRequestKeyError(key) def __setitem__(self, key, value): """Like :meth:`add` but removes an existing key first. :param key: the key for the value. :param value: the value to set. """ dict.__setitem__(self, key, [value]) def add(self, key, value): """Adds a new value for the key. .. versionadded:: 0.6 :param key: the key for the value. :param value: the value to add. """ dict.setdefault(self, key, []).append(value) def getlist(self, key, type=None): """Return the list of items for a given key. If that key is not in the `MultiDict`, the return value will be an empty list. Just as `get` `getlist` accepts a `type` parameter. All items will be converted with the callable defined there. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. """ try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return list(rv) result = [] for item in rv: try: result.append(type(item)) except ValueError: pass return result def setlist(self, key, new_list): """Remove the old values for a key and add new ones. Note that the list you pass the values in will be shallow-copied before it is inserted in the dictionary. >>> d = MultiDict() >>> d.setlist('foo', ['1', '2']) >>> d['foo'] '1' >>> d.getlist('foo') ['1', '2'] :param key: The key for which the values are set. :param new_list: An iterable with the new values for the key. Old values are removed first. """ dict.__setitem__(self, key, list(new_list)) def setdefault(self, key, default=None): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key not in self: self[key] = default else: default = self[key] return default def setlistdefault(self, key, default_list=None): """Like `setdefault` but sets multiple values. The list returned is not a copy, but the list that is actually used internally. This means that you can put new values into the dict by appending items to the list: >>> d = MultiDict({"foo": 1}) >>> d.setlistdefault("foo").extend([2, 3]) >>> d.getlist("foo") [1, 2, 3] :param key: The key to be looked up. :param default: An iterable of default values. It is either copied (in case it was a list) or converted into a list before returned. 
:return: a :class:`list` """ if key not in self: default_list = list(default_list or ()) dict.__setitem__(self, key, default_list) else: default_list = dict.__getitem__(self, key) return default_list def items(self, multi=False): """Return a list of ``(key, value)`` pairs. :param multi: If set to `True` the list returned will have a pair for each value of each key. Otherwise it will only contain pairs for the first value of each key. :return: a :class:`list` """ return list(self.iteritems(multi)) def lists(self): """Return a list of ``(key, values)`` pairs, where values is the list of all values associated with the key. :return: a :class:`list` """ return list(self.iterlists()) def values(self): """Returns a list of the first value on every key's value list. :return: a :class:`list`. """ return [self[key] for key in self.iterkeys()] def listvalues(self): """Return a list of all values associated with a key. Zipping :meth:`keys` and this is the same as calling :meth:`lists`: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> zip(d.keys(), d.listvalues()) == d.lists() True :return: a :class:`list` """ return list(self.iterlistvalues()) def iteritems(self, multi=False): """Like :meth:`items` but returns an iterator.""" for key, values in dict.iteritems(self): if multi: for value in values: yield key, value else: yield key, values[0] def iterlists(self): """Like :meth:`items` but returns an iterator.""" for key, values in dict.iteritems(self): yield key, list(values) def itervalues(self): """Like :meth:`values` but returns an iterator.""" for values in dict.itervalues(self): yield values[0] def iterlistvalues(self): """Like :meth:`listvalues` but returns an iterator.""" return dict.itervalues(self) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first value for each key. :return: a :class:`dict` """ if flat: return dict(self.iteritems()) return dict(self.lists()) def update(self, other_dict): """update() extends rather than replaces existing key lists.""" for key, value in iter_multi_items(other_dict): MultiDict.add(self, key, value) def pop(self, key, default=_missing): """Pop the first item for a list on the dict. Afterwards the key is removed from the dict, so additional values are discarded: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> d.pop("foo") 1 >>> "foo" in d False :param key: the key to pop. :param default: if provided the value to return if the key was not in the dictionary. """ try: return dict.pop(self, key)[0] except KeyError, e: if default is not _missing: return default raise BadRequestKeyError(str(e)) def popitem(self): """Pop an item from the dict.""" try: item = dict.popitem(self) return (item[0], item[1][0]) except KeyError, e: raise BadRequestKeyError(str(e)) def poplist(self, key): """Pop the list for a key from the dict. If the key is not in the dict an empty list is returned. .. versionchanged:: 0.5 If the key does no longer exist a list is returned instead of raising an error. 
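A sketch (values are illustrative):

>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.poplist("foo")
[1, 2, 3]
>>> d.poplist("foo")
[]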
""" return dict.pop(self, key, []) def popitemlist(self): """Pop a ``(key, list)`` tuple from the dict.""" try: return dict.popitem(self) except KeyError, e: raise BadRequestKeyError(str(e)) def __copy__(self): return self.copy() def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.items(multi=True)) class _omd_bucket(object): """Wraps values in the :class:`OrderedMultiDict`. This makes it possible to keep an order over multiple different keys. It requires a lot of extra memory and slows down access a lot, but makes it possible to access elements in O(1) and iterate in O(n). """ __slots__ = ('prev', 'key', 'value', 'next') def __init__(self, omd, key, value): self.prev = omd._last_bucket self.key = key self.value = value self.next = None if omd._first_bucket is None: omd._first_bucket = self if omd._last_bucket is not None: omd._last_bucket.next = self omd._last_bucket = self def unlink(self, omd): if self.prev: self.prev.next = self.next if self.next: self.next.prev = self.prev if omd._first_bucket is self: omd._first_bucket = self.next if omd._last_bucket is self: omd._last_bucket = self.prev class OrderedMultiDict(MultiDict): """Works like a regular :class:`MultiDict` but preserves the order of the fields. To convert the ordered multi dict into a list you can use the :meth:`items` method and pass it ``multi=True``. In general an :class:`OrderedMultiDict` is an order of magnitude slower than a :class:`MultiDict`. .. admonition:: note Due to a limitation in Python you cannot convert an ordered multi dict into a regular dict by using ``dict(multidict)``. Instead you have to use the :meth:`to_dict` method, otherwise the internal bucket objects are exposed. """ def __init__(self, mapping=None): dict.__init__(self) self._first_bucket = self._last_bucket = None if mapping is not None: OrderedMultiDict.update(self, mapping) def __eq__(self, other): if not isinstance(other, MultiDict): return NotImplemented if isinstance(other, OrderedMultiDict): iter1 = self.iteritems(multi=True) iter2 = other.iteritems(multi=True) try: for k1, v1 in iter1: k2, v2 = iter2.next() if k1 != k2 or v1 != v2: return False except StopIteration: return False try: iter2.next() except StopIteration: return True return False if len(self) != len(other): return False for key, values in self.iterlists(): if other.getlist(key) != values: return False return True def __ne__(self, other): return not self.__eq__(other) def __reduce_ex__(self, protocol): return type(self), (self.items(multi=True),) def __getstate__(self): return self.items(multi=True) def __setstate__(self, values): dict.clear(self) for key, value in values: self.add(key, value) def __getitem__(self, key): if key in self: return dict.__getitem__(self, key)[0].value raise BadRequestKeyError(key) def __setitem__(self, key, value): self.poplist(key) self.add(key, value) def __delitem__(self, key): self.pop(key) def iterkeys(self): return (key for key, value in self.iteritems()) def itervalues(self): return (value for key, value in self.iteritems()) def iteritems(self, multi=False): ptr = self._first_bucket if multi: while ptr is not None: yield ptr.key, ptr.value ptr = ptr.next else: returned_keys = set() while ptr is not None: if ptr.key not in returned_keys: returned_keys.add(ptr.key) yield ptr.key, ptr.value ptr = ptr.next def iterlists(self): returned_keys = set() ptr = self._first_bucket while ptr is not None: if ptr.key not in returned_keys: yield ptr.key, self.getlist(ptr.key) returned_keys.add(ptr.key) ptr = ptr.next def iterlistvalues(self): 
for key, values in self.iterlists(): yield values def add(self, key, value): dict.setdefault(self, key, []).append(_omd_bucket(self, key, value)) def getlist(self, key, type=None): try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return [x.value for x in rv] result = [] for item in rv: try: result.append(type(item.value)) except ValueError: pass return result def setlist(self, key, new_list): self.poplist(key) for value in new_list: self.add(key, value) def setlistdefault(self, key, default_list=None): raise TypeError('setlistdefault is unsupported for ' 'ordered multi dicts') def update(self, mapping): for key, value in iter_multi_items(mapping): OrderedMultiDict.add(self, key, value) def poplist(self, key): buckets = dict.pop(self, key, ()) for bucket in buckets: bucket.unlink(self) return [x.value for x in buckets] def pop(self, key, default=_missing): try: buckets = dict.pop(self, key) except KeyError, e: if default is not _missing: return default raise BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return buckets[0].value def popitem(self): try: key, buckets = dict.popitem(self) except KeyError, e: raise BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, buckets[0].value def popitemlist(self): try: key, buckets = dict.popitem(self) except KeyError, e: raise BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, [x.value for x in buckets] def _options_header_vkw(value, kw): return dump_options_header(value, dict((k.replace('_', '-'), v) for k, v in kw.items())) class Headers(object): """An object that stores some headers. It has a dict-like interface but is ordered and can store the same keys multiple times. This data structure is useful if you want a nicer way to handle WSGI headers which are stored as tuples in a list. From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is also a subclass of the :class:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers` class, with the exception of `__getitem__`. :mod:`wsgiref` will return `None` for ``headers['missing']``, whereas :class:`Headers` will raise a :class:`KeyError`. To create a new :class:`Headers` object pass it a list or dict of headers which are used as default values. This does not reuse the list passed to the constructor for internal usage. To create a :class:`Headers` object that uses as internal storage the list or list-like object you can use the :meth:`linked` class method. :param defaults: The list of default values for the :class:`Headers`. """ def __init__(self, defaults=None, _list=None): if _list is None: _list = [] self._list = _list if defaults is not None: if isinstance(defaults, (list, Headers)): self._list.extend(defaults) else: self.extend(defaults) @classmethod def linked(cls, headerlist): """Create a new :class:`Headers` object that uses the list of headers passed as internal storage: >>> headerlist = [('Content-Length', '40')] >>> headers = Headers.linked(headerlist) >>> headers['Content-Type'] = 'text/html' >>> headerlist [('Content-Length', '40'), ('Content-Type', 'text/html')] :param headerlist: The list of headers the class is linked to. :return: new linked :class:`Headers` object. 
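A further sketch of the linkage: deleting through the :class:`Headers` view also mutates the linked list in place:

>>> del headers['Content-Length']
>>> headerlist
[('Content-Type', 'text/html')]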
""" return cls(_list=headerlist) def __getitem__(self, key, _get_mode=False): if not _get_mode: if isinstance(key, (int, long)): return self._list[key] elif isinstance(key, slice): return self.__class__(self._list[key]) ikey = key.lower() for k, v in self._list: if k.lower() == ikey: return v # micro optimization: if we are in get mode we will catch that # exception one stack level down so we can raise a standard # key error instead of our special one. if _get_mode: raise KeyError() raise BadRequestKeyError(key) def __eq__(self, other): return other.__class__ is self.__class__ and \ set(other._list) == set(self._list) def __ne__(self, other): return not self.__eq__(other) def get(self, key, default=None, type=None): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = Headers([('Content-Length', '42')]) >>> d.get('Content-Length', type=int) 42 If a headers object is bound you must not add unicode strings because no encoding takes place. :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the default value is returned. """ try: rv = self.__getitem__(key, _get_mode=True) except KeyError: return default if type is None: return rv try: return type(rv) except ValueError: return default def getlist(self, key, type=None): """Return the list of items for a given key. If that key is not in the :class:`Headers`, the return value will be an empty list. Just as :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will be converted with the callable defined there. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. """ ikey = key.lower() result = [] for k, v in self: if k.lower() == ikey: if type is not None: try: v = type(v) except ValueError: continue result.append(v) return result def get_all(self, name): """Return a list of all the values for the named field. This method is compatible with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.get_all` method. """ return self.getlist(name) def iteritems(self, lower=False): for key, value in self: if lower: key = key.lower() yield key, value def iterkeys(self, lower=False): for key, _ in self.iteritems(lower): yield key def itervalues(self): for _, value in self.iteritems(): yield value def keys(self, lower=False): return list(self.iterkeys(lower)) def values(self): return list(self.itervalues()) def items(self, lower=False): return list(self.iteritems(lower)) def extend(self, iterable): """Extend the headers with a dict or an iterable yielding keys and values. 
""" if isinstance(iterable, dict): for key, value in iterable.iteritems(): if isinstance(value, (tuple, list)): for v in value: self.add(key, v) else: self.add(key, value) else: for key, value in iterable: self.add(key, value) def __delitem__(self, key, _index_operation=True): if _index_operation and isinstance(key, (int, long, slice)): del self._list[key] return key = key.lower() new = [] for k, v in self._list: if k.lower() != key: new.append((k, v)) self._list[:] = new def remove(self, key): """Remove a key. :param key: The key to be removed. """ return self.__delitem__(key, _index_operation=False) def pop(self, key=None, default=_missing): """Removes and returns a key or index. :param key: The key to be popped. If this is an integer the item at that position is removed, if it's a string the value for that key is. If the key is omitted or `None` the last item is removed. :return: an item. """ if key is None: return self._list.pop() if isinstance(key, (int, long)): return self._list.pop(key) try: rv = self[key] self.remove(key) except KeyError: if default is not _missing: return default raise return rv def popitem(self): """Removes a key or index and returns a (key, value) item.""" return self.pop() def __contains__(self, key): """Check if a key is present.""" try: self.__getitem__(key, _get_mode=True) except KeyError: return False return True has_key = __contains__ def __iter__(self): """Yield ``(key, value)`` tuples.""" return iter(self._list) def __len__(self): return len(self._list) def add(self, _key, _value, **kw): """Add a new header tuple to the list. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes:: >>> d = Headers() >>> d.add('Content-Type', 'text/plain') >>> d.add('Content-Disposition', 'attachment', filename='foo.png') The keyword argument dumping uses :func:`dump_options_header` behind the scenes. .. versionadded:: 0.4.1 keyword arguments were added for :mod:`wsgiref` compatibility. """ if kw: _value = _options_header_vkw(_value, kw) self._validate_value(_value) self._list.append((_key, _value)) def _validate_value(self, value): if isinstance(value, basestring) and ('\n' in value or '\r' in value): raise ValueError('Detected newline in header value. This is ' 'a potential security problem') def add_header(self, _key, _value, **_kw): """Add a new header tuple to the list. An alias for :meth:`add` for compatibility with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.add_header` method. """ self.add(_key, _value, **_kw) def clear(self): """Clears all headers.""" del self._list[:] def set(self, _key, _value, **kw): """Remove all header tuples for `key` and add a new one. The newly added key either appears at the end of the list if there was no entry or replaces the first one. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes. See :meth:`add` for more information. .. versionchanged:: 0.6.1 :meth:`set` now accepts the same arguments as :meth:`add`. :param key: The key to be inserted. :param value: The value to be inserted. 
""" if kw: _value = _options_header_vkw(_value, kw) self._validate_value(_value) if not self._list: self._list.append((_key, _value)) return listiter = iter(self._list) ikey = _key.lower() for idx, (old_key, old_value) in enumerate(listiter): if old_key.lower() == ikey: # replace first ocurrence self._list[idx] = (_key, _value) break else: self._list.append((_key, _value)) return self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey] def setdefault(self, key, value): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key in self: return self[key] self.set(key, value) return value def __setitem__(self, key, value): """Like :meth:`set` but also supports index/slice based setting.""" if isinstance(key, (slice, int, long)): self._validate_value(value) self._list[key] = value else: self.set(key, value) def to_list(self, charset='iso-8859-1'): """Convert the headers into a list and converts the unicode header items to the specified charset. :return: list """ return [(k, isinstance(v, unicode) and v.encode(charset) or str(v)) for k, v in self] def copy(self): return self.__class__(self._list) def __copy__(self): return self.copy() def __str__(self, charset='iso-8859-1'): """Returns formatted headers suitable for HTTP transmission.""" strs = [] for key, value in self.to_list(charset): strs.append('%s: %s' % (key, value)) strs.append('\r\n') return '\r\n'.join(strs) def __repr__(self): return '%s(%r)' % ( self.__class__.__name__, list(self) ) class ImmutableHeadersMixin(object): """Makes a :class:`Headers` immutable. We do not mark them as hashable though since the only usecase for this datastructure in Werkzeug is a view on a mutable structure. .. versionadded:: 0.5 :private: """ def __delitem__(self, key): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) set = __setitem__ def add(self, item): is_immutable(self) remove = add_header = add def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def popitem(self): is_immutable(self) def setdefault(self, key, default): is_immutable(self) class EnvironHeaders(ImmutableHeadersMixin, Headers): """Read only version of the headers from a WSGI environment. This provides the same interface as `Headers` and is constructed from a WSGI environment. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. """ def __init__(self, environ): self.environ = environ @classmethod def linked(cls, environ): raise TypeError('%r object is always linked to environment, ' 'no separate initializer' % cls.__name__) def __eq__(self, other): return self.environ is other.environ def __getitem__(self, key, _get_mode=False): # _get_mode is a no-op for this class as there is no index but # used because get() calls it. key = key.upper().replace('-', '_') if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): return self.environ[key] return self.environ['HTTP_' + key] def __len__(self): # the iter is necessary because otherwise list calls our # len which would call list again and so forth. 
return len(list(iter(self))) def __iter__(self): for key, value in self.environ.iteritems(): if key.startswith('HTTP_') and key not in \ ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): yield key[5:].replace('_', '-').title(), value elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): yield key.replace('_', '-').title(), value def copy(self): raise TypeError('cannot create %r copies' % self.__class__.__name__) class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict): """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict` instances as sequence and it will combine the return values of all wrapped dicts: >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict >>> post = MultiDict([('foo', 'bar')]) >>> get = MultiDict([('blub', 'blah')]) >>> combined = CombinedMultiDict([get, post]) >>> combined['foo'] 'bar' >>> combined['blub'] 'blah' This works for all read operations and will raise a `TypeError` for methods that usually change data which isn't possible. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. """ def __reduce_ex__(self, protocol): return type(self), (self.dicts,) def __init__(self, dicts=None): self.dicts = dicts or [] @classmethod def fromkeys(cls): raise TypeError('cannot create %r instances by fromkeys' % cls.__name__) def __getitem__(self, key): for d in self.dicts: if key in d: return d[key] raise BadRequestKeyError(key) def get(self, key, default=None, type=None): for d in self.dicts: if key in d: if type is not None: try: return type(d[key]) except ValueError: continue return d[key] return default def getlist(self, key, type=None): rv = [] for d in self.dicts: rv.extend(d.getlist(key, type)) return rv def keys(self): rv = set() for d in self.dicts: rv.update(d.keys()) return list(rv) def iteritems(self, multi=False): found = set() for d in self.dicts: for key, value in d.iteritems(multi): if multi: yield key, value elif key not in found: found.add(key) yield key, value def itervalues(self): for key, value in self.iteritems(): yield value def values(self): return list(self.itervalues()) def items(self, multi=False): return list(self.iteritems(multi)) def iterlists(self): rv = {} for d in self.dicts: for key, values in d.iterlists(): rv.setdefault(key, []).extend(values) return rv.iteritems() def lists(self): return list(self.iterlists()) def iterlistvalues(self): return (x[0] for x in self.lists()) def listvalues(self): return list(self.iterlistvalues()) def iterkeys(self): return iter(self.keys()) __iter__ = iterkeys def copy(self): """Return a shallow copy of this object.""" return self.__class__(self.dicts[:]) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first item for each key. 
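A sketch of the precedence (values are illustrative): for duplicated keys the first wrapped dict wins, matching normal item lookup:

>>> d1 = MultiDict([('foo', 'first')])
>>> d2 = MultiDict([('foo', 'second')])
>>> CombinedMultiDict([d1, d2]).to_dict()
{'foo': 'first'}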
:return: a :class:`dict` """ rv = {} for d in reversed(self.dicts): rv.update(d.to_dict(flat)) return rv def __len__(self): return len(self.keys()) def __contains__(self, key): for d in self.dicts: if key in d: return True return False has_key = __contains__ def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.dicts) class FileMultiDict(MultiDict): """A special :class:`MultiDict` that has convenience methods to add files to it. This is used for :class:`EnvironBuilder` and generally useful for unittesting. .. versionadded:: 0.5 """ def add_file(self, name, file, filename=None, content_type=None): """Adds a new file to the dict. `file` can be a file name or a :class:`file`-like or a :class:`FileStorage` object. :param name: the name of the field. :param file: a filename or :class:`file`-like object :param filename: an optional filename :param content_type: an optional content type """ if isinstance(file, FileStorage): value = file else: if isinstance(file, basestring): if filename is None: filename = file file = open(file, 'rb') if filename and content_type is None: content_type = mimetypes.guess_type(filename)[0] or \ 'application/octet-stream' value = FileStorage(file, filename, name, content_type) self.add(name, value) class ImmutableDict(ImmutableDictMixin, dict): """An immutable :class:`dict`. .. versionadded:: 0.5 """ __repr__ = _proxy_repr(dict) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return dict(self) def __copy__(self): return self class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict): """An immutable :class:`MultiDict`. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return MultiDict(self) def __copy__(self): return self class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict): """An immutable :class:`OrderedMultiDict`. .. versionadded:: 0.6 """ def _iter_hashitems(self): return enumerate(self.iteritems(multi=True)) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return OrderedMultiDict(self) def __copy__(self): return self class Accept(ImmutableList): """An :class:`Accept` object is just a list subclass for lists of ``(value, quality)`` tuples. It is automatically sorted by quality. All :class:`Accept` objects work similar to a list but provide extra functionality for working with the data. Containment checks are normalized to the rules of that header: >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)]) >>> a.best 'ISO-8859-1' >>> 'iso-8859-1' in a True >>> 'UTF8' in a True >>> 'utf7' in a False To get the quality for an item you can use normal item lookup: >>> print a['utf-8'] 0.7 >>> a['utf7'] 0 .. versionchanged:: 0.5 :class:`Accept` objects are forced immutable now. 
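A further sketch of quality-based matching (values are illustrative):

>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.quality('UTF8')
0.7
>>> a.best_match(['utf-8', 'iso-8859-1'])
'iso-8859-1'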
""" def __init__(self, values=()): if values is None: list.__init__(self) self.provided = False elif isinstance(values, Accept): self.provided = values.provided list.__init__(self, values) else: self.provided = True values = [(a, b) for b, a in values] values.sort() values.reverse() list.__init__(self, [(a, b) for b, a in values]) def _value_matches(self, value, item): """Check if a value matches a given accept item.""" return item == '*' or item.lower() == value.lower() def __getitem__(self, key): """Besides index lookup (getting item n) you can also pass it a string to get the quality for the item. If the item is not in the list, the returned quality is ``0``. """ if isinstance(key, basestring): return self.quality(key) return list.__getitem__(self, key) def quality(self, key): """Returns the quality of the key. .. versionadded:: 0.6 In previous versions you had to use the item-lookup syntax (eg: ``obj[key]`` instead of ``obj.quality(key)``) """ for item, quality in self: if self._value_matches(key, item): return quality return 0 def __contains__(self, value): for item, quality in self: if self._value_matches(value, item): return True return False def __repr__(self): return '%s([%s])' % ( self.__class__.__name__, ', '.join('(%r, %s)' % (x, y) for x, y in self) ) def index(self, key): """Get the position of an entry or raise :exc:`ValueError`. :param key: The key to be looked up. .. versionchanged:: 0.5 This used to raise :exc:`IndexError`, which was inconsistent with the list API. """ if isinstance(key, basestring): for idx, (item, quality) in enumerate(self): if self._value_matches(key, item): return idx raise ValueError(key) return list.index(self, key) def find(self, key): """Get the position of an entry or return -1. :param key: The key to be looked up. """ try: return self.index(key) except ValueError: return -1 def values(self): """Return a list of the values, not the qualities.""" return list(self.itervalues()) def itervalues(self): """Iterate over all values.""" for item in self: yield item[0] def to_header(self): """Convert the header set into an HTTP header string.""" result = [] for value, quality in self: if quality != 1: value = '%s;q=%s' % (value, quality) result.append(value) return ','.join(result) def __str__(self): return self.to_header() def best_match(self, matches, default=None): """Returns the best match from a list of possible matches based on the quality of the client. If two items have the same quality, the one is returned that comes first. :param matches: a list of matches to check for :param default: the value that is returned if none match """ best_quality = -1 result = default for server_item in matches: for client_item, quality in self: if quality <= best_quality: break if self._value_matches(server_item, client_item): best_quality = quality result = server_item return result @property def best(self): """The best match as value.""" if self: return self[0][0] class MIMEAccept(Accept): """Like :class:`Accept` but with special methods and behavior for mimetypes. """ def _value_matches(self, value, item): def _normalize(x): x = x.lower() return x == '*' and ('*', '*') or x.split('/', 1) # this is from the application which is trusted. 
to avoid developer # frustration we actually check these for valid values if '/' not in value: raise ValueError('invalid mimetype %r' % value) value_type, value_subtype = _normalize(value) if value_type == '*' and value_subtype != '*': raise ValueError('invalid mimetype %r' % value) if '/' not in item: return False item_type, item_subtype = _normalize(item) if item_type == '*' and item_subtype != '*': return False return ( (item_type == item_subtype == '*' or value_type == value_subtype == '*') or (item_type == value_type and (item_subtype == '*' or value_subtype == '*' or item_subtype == value_subtype)) ) @property def accept_html(self): """True if this object accepts HTML.""" return ( 'text/html' in self or 'application/xhtml+xml' in self or self.accept_xhtml ) @property def accept_xhtml(self): """True if this object accepts XHTML.""" return ( 'application/xhtml+xml' in self or 'application/xml' in self ) @property def accept_json(self): """True if this object accepts JSON.""" return 'application/json' in self class LanguageAccept(Accept): """Like :class:`Accept` but with normalization for languages.""" def _value_matches(self, value, item): def _normalize(language): return _locale_delim_re.split(language.lower()) return item == '*' or _normalize(value) == _normalize(item) class CharsetAccept(Accept): """Like :class:`Accept` but with normalization for charsets.""" def _value_matches(self, value, item): def _normalize(name): try: return codecs.lookup(name).name except LookupError: return name.lower() return item == '*' or _normalize(value) == _normalize(item) def cache_property(key, empty, type): """Return a new property object for a cache header. Useful if you want to add support for a cache extension in a subclass.""" return property(lambda x: x._get_cache_value(key, empty, type), lambda x, v: x._set_cache_value(key, v, type), lambda x: x._del_cache_value(key), 'accessor for %r' % key) class _CacheControl(UpdateDictMixin, dict): """Subclass of a dict that stores values for a Cache-Control header. It has accessors for all the cache-control directives specified in RFC 2616. The class does not differentiate between request and response directives. Because the cache-control directives in the HTTP header use dashes the python descriptors use underscores for that. To get a header of the :class:`CacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionchanged:: 0.4 Setting `no_cache` or `private` to boolean `True` will set the implicit none-value which is ``*``: >>> cc = ResponseCacheControl() >>> cc.no_cache = True >>> cc <ResponseCacheControl 'no-cache'> >>> cc.no_cache '*' >>> cc.no_cache = None >>> cc <ResponseCacheControl ''> In versions before 0.5 the behavior documented here affected the now no longer existing `CacheControl` class. 
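A further sketch (values are illustrative; ``to_header`` relies on the module's late-bound ``dump_header`` helper, as in the released library):

>>> cc = ResponseCacheControl()
>>> cc.max_age = 300
>>> cc.max_age
300
>>> cc.to_header()
'max-age=300'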
""" no_cache = cache_property('no-cache', '*', None) no_store = cache_property('no-store', None, bool) max_age = cache_property('max-age', -1, int) no_transform = cache_property('no-transform', None, None) def __init__(self, values=(), on_update=None): dict.__init__(self, values or ()) self.on_update = on_update self.provided = values is not None def _get_cache_value(self, key, empty, type): """Used internally by the accessor properties.""" if type is bool: return key in self if key in self: value = self[key] if value is None: return empty elif type is not None: try: value = type(value) except ValueError: pass return value def _set_cache_value(self, key, value, type): """Used internally by the accessor properties.""" if type is bool: if value: self[key] = None else: self.pop(key, None) else: if value is None: self.pop(key) elif value is True: self[key] = None else: self[key] = value def _del_cache_value(self, key): """Used internally by the accessor properties.""" if key in self: del self[key] def to_header(self): """Convert the stored values into a cache control header.""" return dump_header(self) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.to_header() ) class RequestCacheControl(ImmutableDictMixin, _CacheControl): """A cache control for requests. This is immutable and gives access to all the request-relevant cache control headers. To get a header of the :class:`RequestCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. """ max_stale = cache_property('max-stale', '*', int) min_fresh = cache_property('min-fresh', '*', int) no_transform = cache_property('no-transform', None, None) only_if_cached = cache_property('only-if-cached', None, bool) class ResponseCacheControl(_CacheControl): """A cache control for responses. Unlike :class:`RequestCacheControl` this is mutable and gives access to response-relevant cache control headers. To get a header of the :class:`ResponseCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. """ public = cache_property('public', None, bool) private = cache_property('private', '*', None) must_revalidate = cache_property('must-revalidate', None, bool) proxy_revalidate = cache_property('proxy-revalidate', None, bool) s_maxage = cache_property('s-maxage', None, None) # attach cache_property to the _CacheControl as staticmethod # so that others can reuse it. _CacheControl.cache_property = staticmethod(cache_property) class CallbackDict(UpdateDictMixin, dict): """A dict that calls a function passed every time something is changed. The function is passed the dict instance. """ def __init__(self, initial=None, on_update=None): dict.__init__(self, initial or ()) self.on_update = on_update def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, dict.__repr__(self) ) class HeaderSet(object): """Similar to the :class:`ETags` class this implements a set-like structure. Unlike :class:`ETags` this is case insensitive and used for vary, allow, and content-language headers. 
If not constructed using the :func:`parse_set_header` function the instantiation works like this: >>> hs = HeaderSet(['foo', 'bar', 'baz']) >>> hs HeaderSet(['foo', 'bar', 'baz']) """ def __init__(self, headers=None, on_update=None): self._headers = list(headers or ()) self._set = set([x.lower() for x in self._headers]) self.on_update = on_update def add(self, header): """Add a new header to the set.""" self.update((header,)) def remove(self, header): """Remove a header from the set. This raises an :exc:`KeyError` if the header is not in the set. .. versionchanged:: 0.5 In older versions a :exc:`IndexError` was raised instead of a :exc:`KeyError` if the object was missing. :param header: the header to be removed. """ key = header.lower() if key not in self._set: raise KeyError(header) self._set.remove(key) for idx, key in enumerate(self._headers): if key.lower() == header: del self._headers[idx] break if self.on_update is not None: self.on_update(self) def update(self, iterable): """Add all the headers from the iterable to the set. :param iterable: updates the set with the items from the iterable. """ inserted_any = False for header in iterable: key = header.lower() if key not in self._set: self._headers.append(header) self._set.add(key) inserted_any = True if inserted_any and self.on_update is not None: self.on_update(self) def discard(self, header): """Like :meth:`remove` but ignores errors. :param header: the header to be discarded. """ try: return self.remove(header) except KeyError: pass def find(self, header): """Return the index of the header in the set or return -1 if not found. :param header: the header to be looked up. """ header = header.lower() for idx, item in enumerate(self._headers): if item.lower() == header: return idx return -1 def index(self, header): """Return the index of the header in the set or raise an :exc:`IndexError`. :param header: the header to be looked up. """ rv = self.find(header) if rv < 0: raise IndexError(header) return rv def clear(self): """Clear the set.""" self._set.clear() del self._headers[:] if self.on_update is not None: self.on_update(self) def as_set(self, preserve_casing=False): """Return the set as real python set type. When calling this, all the items are converted to lowercase and the ordering is lost. :param preserve_casing: if set to `True` the items in the set returned will have the original case like in the :class:`HeaderSet`, otherwise they will be lowercase. """ if preserve_casing: return set(self._headers) return set(self._set) def to_header(self): """Convert the header set into an HTTP header string.""" return ', '.join(map(quote_header_value, self._headers)) def __getitem__(self, idx): return self._headers[idx] def __delitem__(self, idx): rv = self._headers.pop(idx) self._set.remove(rv.lower()) if self.on_update is not None: self.on_update(self) def __setitem__(self, idx, value): old = self._headers[idx] self._set.remove(old.lower()) self._headers[idx] = value self._set.add(value.lower()) if self.on_update is not None: self.on_update(self) def __contains__(self, header): return header.lower() in self._set def __len__(self): return len(self._set) def __iter__(self): return iter(self._headers) def __nonzero__(self): return bool(self._set) def __str__(self): return self.to_header() def __repr__(self): return '%s(%r)' % ( self.__class__.__name__, self._headers ) class ETags(object): """A set that can be used to check if one etag is present in a collection of etags. 
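A sketch (tag values are arbitrary):

>>> etags = ETags(['abc'], weak_etags=['def'])
>>> 'abc' in etags
True
>>> etags.contains_weak('def')
True
>>> etags.to_header()
'"abc", w/"def"'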
""" def __init__(self, strong_etags=None, weak_etags=None, star_tag=False): self._strong = frozenset(not star_tag and strong_etags or ()) self._weak = frozenset(weak_etags or ()) self.star_tag = star_tag def as_set(self, include_weak=False): """Convert the `ETags` object into a python set. Per default all the weak etags are not part of this set.""" rv = set(self._strong) if include_weak: rv.update(self._weak) return rv def is_weak(self, etag): """Check if an etag is weak.""" return etag in self._weak def contains_weak(self, etag): """Check if an etag is part of the set including weak and strong tags.""" return self.is_weak(etag) or self.contains(etag) def contains(self, etag): """Check if an etag is part of the set ignoring weak tags. It is also possible to use the ``in`` operator. """ if self.star_tag: return True return etag in self._strong def contains_raw(self, etag): """When passed a quoted tag it will check if this tag is part of the set. If the tag is weak it is checked against weak and strong tags, otherwise strong only.""" etag, weak = unquote_etag(etag) if weak: return self.contains_weak(etag) return self.contains(etag) def to_header(self): """Convert the etags set into a HTTP header string.""" if self.star_tag: return '*' return ', '.join( ['"%s"' % x for x in self._strong] + ['w/"%s"' % x for x in self._weak] ) def __call__(self, etag=None, data=None, include_weak=False): if [etag, data].count(None) != 1: raise TypeError('either tag or data required, but at least one') if etag is None: etag = generate_etag(data) if include_weak: if etag in self._weak: return True return etag in self._strong def __nonzero__(self): return bool(self.star_tag or self._strong) def __str__(self): return self.to_header() def __iter__(self): return iter(self._strong) def __contains__(self, etag): return self.contains(etag) def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class IfRange(object): """Very simple object that represents the `If-Range` header in parsed form. It will either have neither a etag or date or one of either but never both. .. versionadded:: 0.7 """ def __init__(self, etag=None, date=None): #: The etag parsed and unquoted. Ranges always operate on strong #: etags so the weakness information is not necessary. self.etag = etag #: The date in parsed format or `None`. self.date = date def to_header(self): """Converts the object back into an HTTP header.""" if self.date is not None: return http_date(self.date) if self.etag is not None: return quote_etag(self.etag) return '' def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class Range(object): """Represents a range header. All the methods are only supporting bytes as unit. It does store multiple ranges but :meth:`range_for_length` will only work if only one range is provided. .. versionadded:: 0.7 """ def __init__(self, units, ranges): #: The units of this range. Usually "bytes". self.units = units #: A list of ``(begin, end)`` tuples for the range header provided. #: The ranges are non-inclusive. self.ranges = ranges def range_for_length(self, length): """If the range is for bytes, the length is not None and there is exactly one range and it is satisfiable it returns a ``(start, stop)`` tuple, otherwise `None`. 
""" if self.units != 'bytes' or length is None or len(self.ranges) != 1: return None start, end = self.ranges[0] if end is None: end = length if start < 0: start += length if is_byte_range_valid(start, end, length): return start, min(end, length) def make_content_range(self, length): """Creates a :class:`~werkzeug.datastructures.ContentRange` object from the current range and given content length. """ rng = self.range_for_length(length) if rng is not None: return ContentRange(self.units, rng[0], rng[1], length) def to_header(self): """Converts the object back into an HTTP header.""" ranges = [] for begin, end in self.ranges: if end is None: ranges.append(begin >= 0 and '%s-' % begin or str(begin)) else: ranges.append('%s-%s' % (begin, end - 1)) return '%s=%s' % (self.units, ','.join(ranges)) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class ContentRange(object): """Represents the content range header. .. versionadded:: 0.7 """ def __init__(self, units, start, stop, length=None, on_update=None): assert is_byte_range_valid(start, stop, length), \ 'Bad range provided' self.on_update = on_update self.set(start, stop, length, units) def _callback_property(name): def fget(self): return getattr(self, name) def fset(self, value): setattr(self, name, value) if self.on_update is not None: self.on_update(self) return property(fget, fset) #: The units to use, usually "bytes" units = _callback_property('_units') #: The start point of the range or `None`. start = _callback_property('_start') #: The stop point of the range (non-inclusive) or `None`. Can only be #: `None` if also start is `None`. stop = _callback_property('_stop') #: The length of the range or `None`. length = _callback_property('_length') def set(self, start, stop, length=None, units='bytes'): """Simple method to update the ranges.""" assert is_byte_range_valid(start, stop, length), \ 'Bad range provided' self._units = units self._start = start self._stop = stop self._length = length if self.on_update is not None: self.on_update(self) def unset(self): """Sets the units to `None` which indicates that the header should no longer be used. """ self.set(None, None, units=None) def to_header(self): if self.units is None: return '' if self.length is None: length = '*' else: length = self.length if self.start is None: return '%s */%s' % (self.units, length) return '%s %s-%s/%s' % ( self.units, self.start, self.stop - 1, length ) def __nonzero__(self): return self.units is not None def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class Authorization(ImmutableDictMixin, dict): """Represents an `Authorization` header sent by the client. You should not create this kind of object yourself but use it when it's returned by the `parse_authorization_header` function. This object is a dict subclass and can be altered by setting dict items but it should be considered immutable as it's returned by the client and not meant for modifications. .. versionchanged:: 0.5 This object became immutable. """ def __init__(self, auth_type, data=None): dict.__init__(self, data or {}) self.type = auth_type username = property(lambda x: x.get('username'), doc=''' The username transmitted. 
This is set for both basic and digest auth all the time.''') password = property(lambda x: x.get('password'), doc=''' When the authentication type is basic this is the password transmitted by the client, else `None`.''') realm = property(lambda x: x.get('realm'), doc=''' This is the server realm sent back for HTTP digest auth.''') nonce = property(lambda x: x.get('nonce'), doc=''' The nonce the server sent for digest auth, sent back by the client. A nonce should be unique for every 401 response for HTTP digest auth.''') uri = property(lambda x: x.get('uri'), doc=''' The URI from Request-URI of the Request-Line; duplicated because proxies are allowed to change the Request-Line in transit. HTTP digest auth only.''') nc = property(lambda x: x.get('nc'), doc=''' The nonce count value transmitted by clients if a qop-header is also transmitted. HTTP digest auth only.''') cnonce = property(lambda x: x.get('cnonce'), doc=''' If the server sent a qop-header in the ``WWW-Authenticate`` header, the client has to provide this value for HTTP digest auth. See the RFC for more details.''') response = property(lambda x: x.get('response'), doc=''' A string of 32 hex digits computed as defined in RFC 2617, which proves that the user knows a password. Digest auth only.''') opaque = property(lambda x: x.get('opaque'), doc=''' The opaque header from the server returned unchanged by the client. It is recommended that this string be base64 or hexadecimal data. Digest auth only.''') @property def qop(self): """Indicates what "quality of protection" the client has applied to the message for HTTP digest auth.""" def on_update(header_set): if not header_set and 'qop' in self: del self['qop'] elif header_set: self['qop'] = header_set.to_header() return parse_set_header(self.get('qop'), on_update) class WWWAuthenticate(UpdateDictMixin, dict): """Provides simple access to `WWW-Authenticate` headers.""" #: list of keys that require quoting in the generated header _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm']) def __init__(self, auth_type=None, values=None, on_update=None): dict.__init__(self, values or ()) if auth_type: self['__auth_type__'] = auth_type self.on_update = on_update def set_basic(self, realm='authentication required'): """Clear the auth info and enable basic auth.""" dict.clear(self) dict.update(self, {'__auth_type__': 'basic', 'realm': realm}) if self.on_update: self.on_update(self) def set_digest(self, realm, nonce, qop=('auth',), opaque=None, algorithm=None, stale=False): """Clear the auth info and enable digest auth.""" d = { '__auth_type__': 'digest', 'realm': realm, 'nonce': nonce, 'qop': dump_header(qop) } if stale: d['stale'] = 'TRUE' if opaque is not None: d['opaque'] = opaque if algorithm is not None: d['algorithm'] = algorithm dict.clear(self) dict.update(self, d) if self.on_update: self.on_update(self) def to_header(self): """Convert the stored values into a WWW-Authenticate header.""" d = dict(self) auth_type = d.pop('__auth_type__', None) or 'basic' return '%s %s' % (auth_type.title(), ', '.join([ '%s=%s' % (key, quote_header_value(value, allow_token=key not in self._require_quoting)) for key, value in d.iteritems() ])) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.to_header() ) def auth_property(name, doc=None): """A static helper function for subclasses to add extra authentication system properties onto a class:: class FooAuthenticate(WWWAuthenticate): special_realm = auth_property('special_realm') For 
more information have a look at the sourcecode to see how the regular properties (:attr:`realm` etc.) are implemented. """ def _set_value(self, value): if value is None: self.pop(name, None) else: self[name] = str(value) return property(lambda x: x.get(name), _set_value, doc=doc) def _set_property(name, doc=None): def fget(self): def on_update(header_set): if not header_set and name in self: del self[name] elif header_set: self[name] = header_set.to_header() return parse_set_header(self.get(name), on_update) return property(fget, doc=doc) type = auth_property('__auth_type__', doc=''' The type of the auth mechanism. HTTP currently specifies `Basic` and `Digest`.''') realm = auth_property('realm', doc=''' A string to be displayed to users so they know which username and password to use. This string should contain at least the name of the host performing the authentication and might additionally indicate the collection of users who might have access.''') domain = _set_property('domain', doc=''' A list of URIs that define the protection space. If a URI is an absolute path, it is relative to the canonical root URL of the server being accessed.''') nonce = auth_property('nonce', doc=''' A server-specified data string which should be uniquely generated each time a 401 response is made. It is recommended that this string be base64 or hexadecimal data.''') opaque = auth_property('opaque', doc=''' A string of data, specified by the server, which should be returned by the client unchanged in the Authorization header of subsequent requests with URIs in the same protection space. It is recommended that this string be base64 or hexadecimal data.''') algorithm = auth_property('algorithm', doc=''' A string indicating a pair of algorithms used to produce the digest and a checksum. If this is not present it is assumed to be "MD5". If the algorithm is not understood, the challenge should be ignored (and a different one used, if there is more than one).''') qop = _set_property('qop', doc=''' A set of quality-of-privacy directives such as auth and auth-int.''') def _get_stale(self): val = self.get('stale') if val is not None: return val.lower() == 'true' def _set_stale(self, value): if value is None: self.pop('stale', None) else: self['stale'] = value and 'TRUE' or 'FALSE' stale = property(_get_stale, _set_stale, doc=''' A flag, indicating that the previous request from the client was rejected because the nonce value was stale.''') del _get_stale, _set_stale # make auth_property a staticmethod so that subclasses of # `WWWAuthenticate` can use it for new properties. auth_property = staticmethod(auth_property) del _set_property class FileStorage(object): """The :class:`FileStorage` class is a thin wrapper over incoming files. It is used by the request object to represent uploaded files. All the attributes of the wrapper stream are proxied by the file storage so it's possible to do ``storage.read()`` instead of the long form ``storage.stream.read()``. """ def __init__(self, stream=None, filename=None, name=None, content_type=None, content_length=None, headers=None): self.name = name self.stream = stream or _empty_stream # if no filename is provided we can attempt to get the filename # from the stream object passed. There we have to be careful to # skip things like <fdopen>, <stderr> etc. Python marks these # special filenames with angular brackets. 
if filename is None: filename = getattr(stream, 'name', None) if filename and filename[0] == '<' and filename[-1] == '>': filename = None self.filename = filename if headers is None: headers = Headers() self.headers = headers if content_type is not None: headers['Content-Type'] = content_type if content_length is not None: headers['Content-Length'] = str(content_length) def _parse_content_type(self): if not hasattr(self, '_parsed_content_type'): self._parsed_content_type = \ parse_options_header(self.content_type) @property def content_type(self): """The file's content type. Usually not available""" return self.headers.get('content-type') @property def content_length(self): """The file's content length. Usually not available""" return int(self.headers.get('content-length') or 0) @property def mimetype(self): """Like :attr:`content_type` but without parameters (eg, without charset, type etc.). For example if the content type is ``text/html; charset=utf-8`` the mimetype would be ``'text/html'``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[0] @property def mimetype_params(self): """The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[1] def save(self, dst, buffer_size=16384): """Save the file to a destination path or file object. If the destination is a file object you have to close it yourself after the call. The buffer size is the number of bytes held in memory during the copy process. It defaults to 16KB. For secure file saving also have a look at :func:`secure_filename`. :param dst: a filename or open file object the uploaded file is saved to. :param buffer_size: the size of the buffer. This works the same as the `length` parameter of :func:`shutil.copyfileobj`. """ from shutil import copyfileobj close_dst = False if isinstance(dst, basestring): dst = file(dst, 'wb') close_dst = True try: copyfileobj(self.stream, dst, buffer_size) finally: if close_dst: dst.close() def close(self): """Close the underlying file if possible.""" try: self.stream.close() except Exception: pass def __nonzero__(self): return bool(self.filename) def __getattr__(self, name): return getattr(self.stream, name) def __iter__(self): return iter(self.readline, '') def __repr__(self): return '<%s: %r (%r)>' % ( self.__class__.__name__, self.filename, self.content_type ) # circular dependencies from werkzeug.http import dump_options_header, dump_header, generate_etag, \ quote_header_value, parse_set_header, unquote_etag, quote_etag, \ parse_options_header, http_date, is_byte_range_valid from werkzeug.exceptions import BadRequestKeyError
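# --- Editor's sketch (not part of the original module): a small demo
# function exercising two of the classes defined above. Every name used
# below is defined in this file, so the sketch runs as-is once the module
# imports cleanly.
def _demo_datastructures():
    hs = HeaderSet(['Content-Type', 'X-Custom'])
    # Membership and lookup are case-insensitive, while the original
    # casing is preserved for iteration and serialization.
    assert 'content-type' in hs
    assert hs.find('X-CUSTOM') == 1

    # Range stores non-inclusive (begin, end) pairs; an open-ended range
    # such as "bytes=500-" is resolved against a concrete content length.
    rng = Range('bytes', [(500, None)])
    assert rng.range_for_length(1000) == (500, 1000)
    assert rng.to_header() == 'bytes=500-'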
apache-2.0
6,156,299,921,572,892,000
31.638943
83
0.568504
false
Salandora/OctoPrint
tests/util/test_counted_event.py
8
3171
# coding=utf-8
from __future__ import absolute_import, division, print_function

__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2015 The OctoPrint Project - Released under terms of the AGPLv3 License"

import unittest
import time
import threading

from octoprint.util import CountedEvent


class CountedEventTest(unittest.TestCase):

    def test_set_once(self):
        """The counter should go from 0 to 1."""
        event = CountedEvent()

        self.assertEqual(0, event._counter)
        self.assertFalse(event._event.is_set())

        event.set()

        self.assertEqual(1, event._counter)
        self.assertTrue(event._event.is_set())

    def test_set_more_than_max(self):
        """The counter should never rise above max."""
        event = CountedEvent(max=1)

        self.assertEqual(0, event._counter)
        self.assertFalse(event._event.is_set())

        event.set()

        self.assertEqual(1, event._counter)
        self.assertTrue(event._event.is_set())

        event.set()

        self.assertEqual(1, event._counter)
        self.assertTrue(event._event.is_set())

    def test_clear_once(self):
        """The counter should go from 1 to 0."""
        event = CountedEvent(1)

        self.assertEqual(1, event._counter)
        self.assertTrue(event._event.is_set())

        event.clear()

        self.assertEqual(0, event._counter)
        self.assertFalse(event._event.is_set())

    def test_clear_all(self):
        """The counter should go from 10 to 0."""
        event = CountedEvent(10)

        self.assertEqual(10, event._counter)
        self.assertTrue(event._event.is_set())

        event.clear(completely=True)

        self.assertEqual(0, event._counter)
        self.assertFalse(event._event.is_set())

    def test_clear_more_than_available(self):
        """The counter should never sink below 0."""
        event = CountedEvent(1)

        self.assertEqual(1, event._counter)
        self.assertTrue(event._event.is_set())

        event.clear()

        self.assertEqual(0, event._counter)
        self.assertFalse(event._event.is_set())

        event.clear()

        self.assertEqual(0, event._counter)
        self.assertFalse(event._event.is_set())

    def test_blocked(self):
        """Blocked should only be true if the counter is 0."""
        event = CountedEvent(0)

        self.assertTrue(event.blocked())

        event.set()

        self.assertFalse(event.blocked())

        event.clear()

        self.assertTrue(event.blocked())

    def test_wait_immediately(self):
        """Unblocked wait should immediately return."""
        event = CountedEvent(1)

        start = time.time()
        event.wait(timeout=2)
        duration = time.time() - start

        self.assertLess(duration, 1)

    def test_wait_blocking(self):
        """A set() should make a blocked wait() return immediately."""
        event = CountedEvent(0)

        def set_event():
            time.sleep(1)
            event.set()

        thread = threading.Thread(target=set_event)
        thread.daemon = True
        thread.start()

        start = time.time()
        event.wait(timeout=2)
        duration = time.time() - start

        self.assertLess(duration, 2)

    def test_wait_timeout(self):
        """Blocked should only wait until timeout."""
        event = CountedEvent(0)

        start = time.time()
        event.wait(timeout=2)
        duration = time.time() - start

        self.assertGreaterEqual(duration, 2)
        self.assertLess(duration, 3)
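# --- Editor's sketch (not part of the original test suite): the counting
# semantics exercised above in one linear walk-through. set() increments
# the counter and unblocks waiters, clear() decrements, and blocked() is
# True exactly while the counter is zero.
def _demo_counted_event():
    event = CountedEvent(0)
    assert event.blocked()         # counter == 0, wait() would block
    event.set()                    # counter 0 -> 1
    event.set()                    # counter 1 -> 2
    event.clear()                  # counter 2 -> 1, still unblocked
    assert not event.blocked()
    event.clear()                  # counter 1 -> 0
    assert event.blocked()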
agpl-3.0
-3,805,683,923,876,191,000
21.006944
103
0.698328
false
yrizk/django-blog
blogvenv/lib/python3.4/site-packages/django/template/__init__.py
57
1897
""" Django's support for templates. The django.template namespace contains two independent subsystems: 1. Multiple Template Engines: support for pluggable template backends, built-in backends and backend-independent APIs 2. Django Template Language: Django's own template engine, including its built-in loaders, context processors, tags and filters. Ideally these subsystems would be implemented in distinct packages. However keeping them together made the implementation of Multiple Template Engines less disruptive . Here's a breakdown of which modules belong to which subsystem. Multiple Template Engines: - django.template.backends.* - django.template.loader - django.template.response Django Template Language: - django.template.base - django.template.context - django.template.context_processors - django.template.loaders.* - django.template.debug - django.template.defaultfilters - django.template.defaulttags - django.template.engine - django.template.loader_tags - django.template.smartif Shared: - django.template.utils """ # Multiple Template Engines from .engine import Engine from .utils import EngineHandler engines = EngineHandler() __all__ = ('Engine', 'engines') # Django Template Language # Public exceptions from .base import (TemplateDoesNotExist, TemplateSyntaxError, # NOQA VariableDoesNotExist) from .context import ContextPopException # NOQA # Template parts from .base import (Context, Node, NodeList, RequestContext, # NOQA StringOrigin, Template, Variable) # Deprecated in Django 1.8, will be removed in Django 2.0. from .base import resolve_variable # NOQA # Library management from .base import Library # NOQA __all__ += ('Template', 'Context', 'RequestContext')
apache-2.0
1,107,270,279,513,720,000
25.347222
78
0.71165
false
miloszz/DIRAC
Resources/Storage/GFAL2_XROOTStorage.py
2
1523
""" :mod: GFAL2_XROOTStorage ================= .. module: python :synopsis: XROOT module based on the GFAL2_StorageBase class. """ # from DIRAC from DIRAC import gLogger from DIRAC.Resources.Storage.GFAL2_StorageBase import GFAL2_StorageBase class GFAL2_XROOTStorage( GFAL2_StorageBase ): """ .. class:: GFAL2_XROOTStorage Xroot interface to StorageElement using gfal2 """ def __init__( self, storageName, parameters ): """ c'tor :param self: self reference :param str storageName: SE name :param str protocol: protocol to use :param str rootdir: base path for vo files :param str host: SE host :param int port: port to use to communicate with :host: :param str spaceToken: space token :param str wspath: location of SRM on :host: """ self.log = gLogger.getSubLogger( "GFAL2_XROOTStorage", True ) # # init base class GFAL2_StorageBase.__init__( self, storageName, parameters ) # XROOT has problems with checksums at the moment. self.checksumType = None # self.log.setLevel( "DEBUG" ) self.pluginName = 'GFAL2_XROOT' self.protocol = self.protocolParameters['Protocol'] self.host = self.protocolParameters['Host'] # Aweful hack to cope for the moment with the inability of RSS to deal with something else than SRM # self.port = "" # self.wspath = "" # self.spaceToken = "" self.protocolParameters['Port'] = 0 self.protocolParameters['WSUrl'] = 0 self.protocolParameters['SpaceToken'] = 0
gpl-3.0
-940,462,014,648,341,900
28.288462
103
0.67367
false
linjoahow/cd0505
static/Brython3.1.1-20150328-091302/Lib/sre_constants.py
692
7172
# # Secret Labs' Regular Expression Engine # # various symbols used by the regular expression engine. # run this script to update the _sre include files! # # Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. # # See the sre.py file for information on usage and redistribution. # """Internal support module for sre""" # update when constants are added or removed MAGIC = 20031017 #MAXREPEAT = 2147483648 #from _sre import MAXREPEAT # SRE standard exception (access as sre.error) # should this really be here? class error(Exception): pass # operators FAILURE = "failure" SUCCESS = "success" ANY = "any" ANY_ALL = "any_all" ASSERT = "assert" ASSERT_NOT = "assert_not" AT = "at" BIGCHARSET = "bigcharset" BRANCH = "branch" CALL = "call" CATEGORY = "category" CHARSET = "charset" GROUPREF = "groupref" GROUPREF_IGNORE = "groupref_ignore" GROUPREF_EXISTS = "groupref_exists" IN = "in" IN_IGNORE = "in_ignore" INFO = "info" JUMP = "jump" LITERAL = "literal" LITERAL_IGNORE = "literal_ignore" MARK = "mark" MAX_REPEAT = "max_repeat" MAX_UNTIL = "max_until" MIN_REPEAT = "min_repeat" MIN_UNTIL = "min_until" NEGATE = "negate" NOT_LITERAL = "not_literal" NOT_LITERAL_IGNORE = "not_literal_ignore" RANGE = "range" REPEAT = "repeat" REPEAT_ONE = "repeat_one" SUBPATTERN = "subpattern" MIN_REPEAT_ONE = "min_repeat_one" # positions AT_BEGINNING = "at_beginning" AT_BEGINNING_LINE = "at_beginning_line" AT_BEGINNING_STRING = "at_beginning_string" AT_BOUNDARY = "at_boundary" AT_NON_BOUNDARY = "at_non_boundary" AT_END = "at_end" AT_END_LINE = "at_end_line" AT_END_STRING = "at_end_string" AT_LOC_BOUNDARY = "at_loc_boundary" AT_LOC_NON_BOUNDARY = "at_loc_non_boundary" AT_UNI_BOUNDARY = "at_uni_boundary" AT_UNI_NON_BOUNDARY = "at_uni_non_boundary" # categories CATEGORY_DIGIT = "category_digit" CATEGORY_NOT_DIGIT = "category_not_digit" CATEGORY_SPACE = "category_space" CATEGORY_NOT_SPACE = "category_not_space" CATEGORY_WORD = "category_word" CATEGORY_NOT_WORD = "category_not_word" CATEGORY_LINEBREAK = "category_linebreak" CATEGORY_NOT_LINEBREAK = "category_not_linebreak" CATEGORY_LOC_WORD = "category_loc_word" CATEGORY_LOC_NOT_WORD = "category_loc_not_word" CATEGORY_UNI_DIGIT = "category_uni_digit" CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit" CATEGORY_UNI_SPACE = "category_uni_space" CATEGORY_UNI_NOT_SPACE = "category_uni_not_space" CATEGORY_UNI_WORD = "category_uni_word" CATEGORY_UNI_NOT_WORD = "category_uni_not_word" CATEGORY_UNI_LINEBREAK = "category_uni_linebreak" CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak" OPCODES = [ # failure=0 success=1 (just because it looks better that way :-) FAILURE, SUCCESS, ANY, ANY_ALL, ASSERT, ASSERT_NOT, AT, BRANCH, CALL, CATEGORY, CHARSET, BIGCHARSET, GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE, IN, IN_IGNORE, INFO, JUMP, LITERAL, LITERAL_IGNORE, MARK, MAX_UNTIL, MIN_UNTIL, NOT_LITERAL, NOT_LITERAL_IGNORE, NEGATE, RANGE, REPEAT, REPEAT_ONE, SUBPATTERN, MIN_REPEAT_ONE ] ATCODES = [ AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY, AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING, AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY, AT_UNI_NON_BOUNDARY ] CHCODES = [ CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE, CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD, CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD, CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT, CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD, CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK, CATEGORY_UNI_NOT_LINEBREAK ] def 
makedict(list): d = {} i = 0 for item in list: d[item] = i i = i + 1 return d OPCODES = makedict(OPCODES) ATCODES = makedict(ATCODES) CHCODES = makedict(CHCODES) # replacement operations for "ignore case" mode OP_IGNORE = { GROUPREF: GROUPREF_IGNORE, IN: IN_IGNORE, LITERAL: LITERAL_IGNORE, NOT_LITERAL: NOT_LITERAL_IGNORE } AT_MULTILINE = { AT_BEGINNING: AT_BEGINNING_LINE, AT_END: AT_END_LINE } AT_LOCALE = { AT_BOUNDARY: AT_LOC_BOUNDARY, AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY } AT_UNICODE = { AT_BOUNDARY: AT_UNI_BOUNDARY, AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY } CH_LOCALE = { CATEGORY_DIGIT: CATEGORY_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_SPACE, CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, CATEGORY_WORD: CATEGORY_LOC_WORD, CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK } CH_UNICODE = { CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_UNI_SPACE, CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, CATEGORY_WORD: CATEGORY_UNI_WORD, CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK } # flags SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking) SRE_FLAG_IGNORECASE = 2 # case insensitive SRE_FLAG_LOCALE = 4 # honour system locale SRE_FLAG_MULTILINE = 8 # treat target as multiline string SRE_FLAG_DOTALL = 16 # treat target as a single string SRE_FLAG_UNICODE = 32 # use unicode "locale" SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments SRE_FLAG_DEBUG = 128 # debugging SRE_FLAG_ASCII = 256 # use ascii "locale" # flags for INFO primitive SRE_INFO_PREFIX = 1 # has prefix SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) SRE_INFO_CHARSET = 4 # pattern starts with character from given set if __name__ == "__main__": def dump(f, d, prefix): items = sorted(d.items(), key=lambda a: a[1]) for k, v in items: f.write("#define %s_%s %s\n" % (prefix, k.upper(), v)) f = open("sre_constants.h", "w") f.write("""\ /* * Secret Labs' Regular Expression Engine * * regular expression matching engine * * NOTE: This file is generated by sre_constants.py. If you need * to change anything in here, edit sre_constants.py and run it. * * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. * * See the _sre.c file for information on usage and redistribution. */ """) f.write("#define SRE_MAGIC %d\n" % MAGIC) dump(f, OPCODES, "SRE_OP") dump(f, ATCODES, "SRE") dump(f, CHCODES, "SRE") f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE) f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE) f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE) f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE) f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL) f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE) f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE) f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX) f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL) f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET) f.close() print("done")
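# --- Editor's sketch (not part of the original module): how the tables
# built by makedict() and the flag bit masks above are meant to be used.
def _demo_sre_constants():
    # makedict() assigns codes in list order, so FAILURE is 0, SUCCESS is 1.
    assert OPCODES[FAILURE] == 0
    assert OPCODES[SUCCESS] == 1
    # Flags are powers of two and combine into a single bit mask.
    flags = SRE_FLAG_IGNORECASE | SRE_FLAG_MULTILINE  # 2 | 8 == 10
    assert flags & SRE_FLAG_IGNORECASE
    assert not flags & SRE_FLAG_DOTALL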
agpl-3.0
5,686,801,251,172,964,000
26.584615
70
0.689766
false
LordDarkula/chess_py
tests/test_pieces/test_pawn.py
1
5706
from unittest import TestCase from chess_py.core.algebraic import notation_const from chess_py.core import Board from chess_py.core.algebraic import Location, Move from chess_py.pieces import Queen, Rook, Bishop, Knight, Pawn from chess_py import color class TestPawn(TestCase): def setUp(self): self.position = Board.init_default() self.white_pawn = Pawn(color.white, Location.from_string("e2")) self.position.place_piece_at_square(self.white_pawn, Location.from_string("e2")) self.black_pawn = Pawn(color.black, Location.from_string("a7")) self.position.place_piece_at_square(self.black_pawn, Location.from_string("a7")) def test_square_in_front(self): self.assertEqual(self.white_pawn.square_in_front(self.white_pawn.location), Location.from_string("e3")) self.assertEqual(self.black_pawn.square_in_front(self.black_pawn.location), Location.from_string("a6")) def test_two_squares_in_front(self): self.assertEqual(self.white_pawn.two_squares_in_front(self.white_pawn.location), Location.from_string("e4")) self.assertEqual(self.black_pawn.two_squares_in_front(self.black_pawn.location), Location.from_string("a5")) def test_would_move_be_promotion(self): self.assertTrue(self.white_pawn.would_move_be_promotion(Location.from_string("e7"))) self.assertTrue(self.black_pawn.would_move_be_promotion(Location.from_string("a2"))) self.assertFalse(self.white_pawn.would_move_be_promotion(Location.from_string("e2"))) self.assertFalse(self.black_pawn.would_move_be_promotion(Location.from_string("a7"))) def test_create_promotion_moves(self): self.white_pawn.location = Location.from_string("e7") moves = list(self.white_pawn.create_promotion_moves(notation_const.CAPTURE, Location.from_string("e7"))) self.assertEqual(len(list(moves)), 4) self.assertEqual(moves[0].start_loc, Location.from_string("e7")) self.assertEqual(moves[0].promoted_to_piece, Queen) self.assertEqual(moves[1].promoted_to_piece, Rook) self.assertEqual(moves[2].promoted_to_piece, Bishop) self.assertEqual(moves[3].promoted_to_piece, Knight) def test_forward_moves(self): self.white_pawn.location = Location.from_string("e2") moves = list(self.white_pawn.forward_moves(self.position)) self.assertEqual(len(moves), 2) self.assertEqual(moves[0], Move(end_loc=self.white_pawn.square_in_front(self.white_pawn.location), piece=self.white_pawn, status=notation_const.MOVEMENT, start_loc=self.white_pawn.location)) self.assertEqual(moves[1], Move(end_loc=self.white_pawn.square_in_front(self.white_pawn.square_in_front(self.white_pawn.location)), piece=self.white_pawn, status=notation_const.MOVEMENT, start_loc=self.white_pawn.location)) moves = list(self.black_pawn.forward_moves(self.position)) self.assertEqual(len(moves), 2) self.assertEqual(moves[0], Move(end_loc=self.black_pawn.square_in_front(self.black_pawn.location), piece=self.black_pawn, status=notation_const.MOVEMENT, start_loc=self.black_pawn.location)) self.assertEqual(moves[1], Move(end_loc=self.black_pawn.square_in_front(self.black_pawn.square_in_front(self.black_pawn.location)), piece=self.black_pawn, status=notation_const.MOVEMENT, start_loc=self.black_pawn.location)) def test_capture_moves(self): self.position.move_piece(Location.from_string("d7"), Location.from_string("d5")) self.position.move_piece(Location.from_string("e2"), Location.from_string("e4")) black_pawn = self.position.piece_at_square(Location.from_string("d5")) move = list(self.white_pawn.capture_moves(self.position)) self.assertEqual(len(move), 1) self.assertEqual(move[0], Move(end_loc=black_pawn.location, piece=self.white_pawn, 
status=notation_const.CAPTURE, start_loc=self.white_pawn.location)) def test_en_passant_moves(self): self.position.move_piece(Location.from_string("d7"), Location.from_string("d4")) self.position.move_piece(Location.from_string("e2"), Location.from_string("e4")) black_pawn = self.position.piece_at_square(Location.from_string("d4")) self.position.piece_at_square(Location.from_string("e4")).just_moved_two_steps = True move = list(black_pawn.en_passant_moves(self.position)) self.assertEqual(len(move), 1) self.assertEqual(move[0], Move(end_loc=black_pawn.square_in_front(black_pawn.location.shift_right()), piece=black_pawn, status=notation_const.EN_PASSANT, start_loc=black_pawn.location)) def test_possible_moves(self): self.assertEqual(len(list(self.white_pawn.possible_moves(self.position))), 2) self.position.move_piece(Location.from_string("e2"), Location.from_string("e3")) self.assertEqual(len(list(self.white_pawn.possible_moves(self.position))), 1)
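# --- Editor's sketch (not part of the suite): a condensed version of the
# setup used above, querying a pawn's moves on the default position.
def _demo_pawn_moves():
    position = Board.init_default()
    pawn = position.piece_at_square(Location.from_string("e2"))
    # From its home square a pawn has exactly the one- and two-step pushes.
    return len(list(pawn.possible_moves(position)))  # == 2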
mit
-5,759,204,416,806,994,000
52.327103
139
0.615142
false
malmiron/incubator-airflow
airflow/contrib/operators/sagemaker_base_operator.py
5
3284
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json from airflow.contrib.hooks.sagemaker_hook import SageMakerHook from airflow.models import BaseOperator from airflow.utils.decorators import apply_defaults class SageMakerBaseOperator(BaseOperator): """ This is the base operator for all SageMaker operators. :param config: The configuration necessary to start a training job (templated) :type config: dict :param aws_conn_id: The AWS connection ID to use. :type aws_conn_id: str """ template_fields = ['config'] template_ext = () ui_color = '#ededed' integer_fields = [] @apply_defaults def __init__(self, config, aws_conn_id='aws_default', *args, **kwargs): super(SageMakerBaseOperator, self).__init__(*args, **kwargs) self.aws_conn_id = aws_conn_id self.config = config self.hook = None def parse_integer(self, config, field): if len(field) == 1: if isinstance(config, list): for sub_config in config: self.parse_integer(sub_config, field) return head = field[0] if head in config: config[head] = int(config[head]) return if isinstance(config, list): for sub_config in config: self.parse_integer(sub_config, field) return head, tail = field[0], field[1:] if head in config: self.parse_integer(config[head], tail) return def parse_config_integers(self): # Parse the integer fields of training config to integers # in case the config is rendered by Jinja and all fields are str for field in self.integer_fields: self.parse_integer(self.config, field) def expand_role(self): pass def preprocess_config(self): self.log.info( 'Preprocessing the config and doing required s3_operations' ) self.hook = SageMakerHook(aws_conn_id=self.aws_conn_id) self.hook.configure_s3_resources(self.config) self.parse_config_integers() self.expand_role() self.log.info( 'After preprocessing the config is:\n {}'.format( json.dumps(self.config, sort_keys=True, indent=4, separators=(',', ': '))) ) def execute(self, context): raise NotImplementedError('Please implement execute() in sub class!')
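# --- Editor's sketch (illustrative only; the field names below are invented
# to show the mechanism): how integer_fields drives parse_config_integers().
# Jinja templating renders every config value as str; each path listed in
# integer_fields is walked through nested dicts (and fanned out over lists)
# so that the leaf values are cast back to int.
#
#   op.integer_fields = [['ResourceConfig', 'InstanceCount']]
#   op.config = {'ResourceConfig': {'InstanceCount': '2'}}
#   op.parse_config_integers()
#   assert op.config['ResourceConfig']['InstanceCount'] == 2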
apache-2.0
-858,266,645,397,153,700
31.84
90
0.633983
false
moble/sympy
sympy/core/tests/test_basic.py
39
5776
"""This tests sympy/core/basic.py with (ideally) no reference to subclasses of Basic or Atom.""" from sympy.core.basic import Basic, Atom, preorder_traversal from sympy.core.singleton import S, Singleton from sympy.core.symbol import symbols from sympy.core.compatibility import default_sort_key, with_metaclass from sympy import sin, Lambda, Q from sympy.utilities.pytest import raises b1 = Basic() b2 = Basic(b1) b3 = Basic(b2) b21 = Basic(b2, b1) def test_structure(): assert b21.args == (b2, b1) assert b21.func(*b21.args) == b21 assert bool(b1) def test_equality(): instances = [b1, b2, b3, b21, Basic(b1, b1, b1), Basic] for i, b_i in enumerate(instances): for j, b_j in enumerate(instances): assert (b_i == b_j) == (i == j) assert (b_i != b_j) == (i != j) assert Basic() != [] assert not(Basic() == []) assert Basic() != 0 assert not(Basic() == 0) def test_matches_basic(): instances = [Basic(b1, b1, b2), Basic(b1, b2, b1), Basic(b2, b1, b1), Basic(b1, b2), Basic(b2, b1), b2, b1] for i, b_i in enumerate(instances): for j, b_j in enumerate(instances): if i == j: assert b_i.matches(b_j) == {} else: assert b_i.matches(b_j) is None assert b1.match(b1) == {} def test_has(): assert b21.has(b1) assert b21.has(b3, b1) assert b21.has(Basic) assert not b1.has(b21, b3) assert not b21.has() def test_subs(): assert b21.subs(b2, b1) == Basic(b1, b1) assert b21.subs(b2, b21) == Basic(b21, b1) assert b3.subs(b2, b1) == b2 assert b21.subs([(b2, b1), (b1, b2)]) == Basic(b2, b2) assert b21.subs({b1: b2, b2: b1}) == Basic(b2, b2) raises(ValueError, lambda: b21.subs('bad arg')) raises(ValueError, lambda: b21.subs(b1, b2, b3)) def test_atoms(): assert b21.atoms() == set() def test_free_symbols_empty(): assert b21.free_symbols == set() def test_doit(): assert b21.doit() == b21 assert b21.doit(deep=False) == b21 def test_S(): assert repr(S) == 'S' def test_xreplace(): assert b21.xreplace({b2: b1}) == Basic(b1, b1) assert b21.xreplace({b2: b21}) == Basic(b21, b1) assert b3.xreplace({b2: b1}) == b2 assert Basic(b1, b2).xreplace({b1: b2, b2: b1}) == Basic(b2, b1) assert Atom(b1).xreplace({b1: b2}) == Atom(b1) assert Atom(b1).xreplace({Atom(b1): b2}) == b2 raises(TypeError, lambda: b1.xreplace()) raises(TypeError, lambda: b1.xreplace([b1, b2])) def test_Singleton(): global instantiated instantiated = 0 class MySingleton(with_metaclass(Singleton, Basic)): def __new__(cls): global instantiated instantiated += 1 return Basic.__new__(cls) assert instantiated == 0 MySingleton() # force instantiation assert instantiated == 1 assert MySingleton() is not Basic() assert MySingleton() is MySingleton() assert S.MySingleton is MySingleton() assert instantiated == 1 class MySingleton_sub(MySingleton): pass assert instantiated == 1 MySingleton_sub() assert instantiated == 2 assert MySingleton_sub() is not MySingleton() assert MySingleton_sub() is MySingleton_sub() def test_preorder_traversal(): expr = Basic(b21, b3) assert list( preorder_traversal(expr)) == [expr, b21, b2, b1, b1, b3, b2, b1] assert list(preorder_traversal(('abc', ('d', 'ef')))) == [ ('abc', ('d', 'ef')), 'abc', ('d', 'ef'), 'd', 'ef'] result = [] pt = preorder_traversal(expr) for i in pt: result.append(i) if i == b2: pt.skip() assert result == [expr, b21, b2, b1, b3, b2] w, x, y, z = symbols('w:z') expr = z + w*(x + y) assert list(preorder_traversal([expr], keys=default_sort_key)) == \ [[w*(x + y) + z], w*(x + y) + z, z, w*(x + y), w, x + y, x, y] assert list(preorder_traversal((x + y)*z, keys=True)) == \ [z*(x + y), z, x + y, x, y] def test_sorted_args(): x = 
symbols('x')
    assert b21._sorted_args == b21.args
    raises(AttributeError, lambda: x._sorted_args)

def test_call():
    x, y = symbols('x y')
    # See the long history of this in issues 5026 and 5105.

    raises(TypeError, lambda: sin(x)({ x : 1, sin(x) : 2}))
    raises(TypeError, lambda: sin(x)(1))

    # No effect as there are no callables
    assert sin(x).rcall(1) == sin(x)
    assert (1 + sin(x)).rcall(1) == 1 + sin(x)

    # Effect in the presence of callables
    l = Lambda(x, 2*x)
    assert (l + x).rcall(y) == 2*y + x
    assert (x**l).rcall(2) == x**4
    # TODO UndefinedFunction does not subclass Expr
    #f = Function('f')
    #assert (2*f)(x) == 2*f(x)

    assert (Q.real & Q.positive).rcall(x) == Q.real(x) & Q.positive(x)

def test_literal_evalf_is_number_is_zero_is_comparable():
    from sympy.integrals.integrals import Integral
    from sympy.core.symbol import symbols
    from sympy.core.function import Function
    x = symbols('x')
    f = Function('f')

    # the following should not be changed without a lot of discussion
    # `foo.is_number` should be equivalent to `not foo.free_symbols`
    # it should not attempt anything fancy; see is_zero, is_constant
    # and equals for more rigorous tests.
    assert f(1).is_number is True
    i = Integral(0, (x, x, x))
    # expressions that are symbolically 0 can be difficult to prove
    # so in case there is some easy way to know if something is 0
    # it should appear in the is_zero property for that object;
    # if is_zero is true evalf should always be able to compute that
    # zero
    assert i.n() == 0
    assert i.is_zero
    assert i.is_number is False
    assert i.evalf(2, strict=False) == 0
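# --- Editor's sketch (not part of the suite): the rebuild invariant behind
# test_structure() above, stated on its own.
def _demo_rebuild():
    expr = Basic(Basic(), Basic())
    # Every Basic instance can be reconstructed from its func and args.
    assert expr.func(*expr.args) == expr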
bsd-3-clause
6,867,853,141,326,150,000
28.319797
75
0.598511
false
dstftw/youtube-dl
youtube_dl/extractor/izlesene.py
24
4152
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse_unquote, ) from ..utils import ( determine_ext, float_or_none, get_element_by_id, int_or_none, parse_iso8601, str_to_int, ) class IzleseneIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:(?:www|m)\.)?izlesene\.com/ (?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+) ''' _TESTS = [ { 'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694', 'md5': '4384f9f0ea65086734b881085ee05ac2', 'info_dict': { 'id': '7599694', 'ext': 'mp4', 'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi', 'description': 'md5:253753e2655dde93f59f74b572454f6d', 'thumbnail': r're:^https?://.*\.jpg', 'uploader_id': 'pelikzzle', 'timestamp': int, 'upload_date': '20140702', 'duration': 95.395, 'age_limit': 0, } }, { 'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997', 'md5': '97f09b6872bffa284cb7fa4f6910cb72', 'info_dict': { 'id': '17997', 'ext': 'mp4', 'title': 'Tarkan Dortmund 2006 Konseri', 'thumbnail': r're:^https://.*\.jpg', 'uploader_id': 'parlayankiz', 'timestamp': int, 'upload_date': '20061112', 'duration': 253.666, 'age_limit': 0, } }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage('http://www.izlesene.com/video/%s' % video_id, video_id) video = self._parse_json( self._search_regex( r'videoObj\s*=\s*({.+?})\s*;\s*\n', webpage, 'streams'), video_id) title = video.get('videoTitle') or self._og_search_title(webpage) formats = [] for stream in video['media']['level']: source_url = stream.get('source') if not source_url or not isinstance(source_url, compat_str): continue ext = determine_ext(url, 'mp4') quality = stream.get('value') height = int_or_none(quality) formats.append({ 'format_id': '%sp' % quality if quality else 'sd', 'url': compat_urllib_parse_unquote(source_url), 'ext': ext, 'height': height, }) self._sort_formats(formats) description = self._og_search_description(webpage, default=None) thumbnail = video.get('posterURL') or self._proto_relative_url( self._og_search_thumbnail(webpage), scheme='http:') uploader = self._html_search_regex( r"adduserUsername\s*=\s*'([^']+)';", webpage, 'uploader', fatal=False) timestamp = parse_iso8601(self._html_search_meta( 'uploadDate', webpage, 'upload date')) duration = float_or_none(video.get('duration') or self._html_search_regex( r'videoduration["\']?\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'duration', fatal=False, group='value'), scale=1000) view_count = str_to_int(get_element_by_id('videoViewCount', webpage)) comment_count = self._html_search_regex( r'comment_count\s*=\s*\'([^\']+)\';', webpage, 'comment_count', fatal=False) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader_id': uploader, 'timestamp': timestamp, 'duration': duration, 'view_count': int_or_none(view_count), 'comment_count': int_or_none(comment_count), 'age_limit': self._family_friendly_search(webpage), 'formats': formats, }
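# --- Editor's sketch (illustrative only; kept as a comment because it
# performs network I/O, and the video slug below is a placeholder): this
# extractor is normally reached through the public API rather than
# instantiated directly.
#
#   import youtube_dl
#   with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#       info = ydl.extract_info(
#           'http://www.izlesene.com/video/some-slug/7599694',
#           download=False)
#       # info['formats'] holds the entries assembled in _real_extract().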
unlicense
5,305,120,233,698,099,000
34.42735
99
0.506152
false
toshywoshy/ansible
lib/ansible/modules/cloud/azure/azure_rm_galleryimage_info.py
13
9211
#!/usr/bin/python # # Copyright (c) 2019 Liu Qingyi, (@smile37773) # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_galleryimage_info version_added: '2.9' short_description: Get Azure SIG Image info description: - Get info of Azure SIG Image. options: resource_group: description: - The name of the resource group. type: str required: true gallery_name: description: - The name of the shared image gallery from which the image definitions are to be retrieved. type: str required: true name: description: - Resource name. type: str extends_documentation_fragment: - azure author: - Liu Qingyi (@smile37773) ''' EXAMPLES = ''' - name: List gallery images in a gallery. azure_rm_galleryimage_info: resource_group: myResourceGroup gallery_name: myGallery - name: Get a gallery image. azure_rm_galleryimage_info: resource_group: myResourceGroup gallery_name: myGallery name: myImage ''' RETURN = ''' images: description: - A list of dict results where the key is the name of the image and the values are the info for that image. returned: always type: complex contains: id: description: - Resource ID. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup /providers/Microsoft.Compute/galleries/myGallery/images/myImage" name: description: - Resource name. returned: always type: str sample: myImage location: description: - Resource location. returned: always type: str sample: "eastus" tags: description: - Resource tags. returned: always type: dict sample: { "tag": "value" } os_state: description: - The allowed values for OS State are C(generalized). type: OperatingSystemStateTypes sample: "Generalized" os_type: description: - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image. type: OperatingSystemTypes sample: "linux/windows" identifier: description: - This is the gallery image definition identifier. type: dict contains: offer: description: - The name of the gallery image definition offer. type: str sample: "myOfferName" publisher: description: - The name of the gallery image definition publisher. type: str sample: "myPublisherName" sku: description: - The name of the gallery image definition sku. 
type: str sample: "mySkuName" ''' import time import json from ansible.module_utils.azure_rm_common import AzureRMModuleBase from ansible.module_utils.azure_rm_common_rest import GenericRestClient from copy import deepcopy try: from msrestazure.azure_exceptions import CloudError except Exception: # handled in azure_rm_common pass class AzureRMGalleryImagesInfo(AzureRMModuleBase): def __init__(self): self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), gallery_name=dict( type='str', required=True ), name=dict( type='str' ) ) self.resource_group = None self.gallery_name = None self.name = None self.results = dict(changed=False) self.mgmt_client = None self.state = None self.url = None self.status_code = [200] self.query_parameters = {} self.query_parameters['api-version'] = '2019-03-01' self.header_parameters = {} self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' self.mgmt_client = None super(AzureRMGalleryImagesInfo, self).__init__(self.module_arg_spec, supports_tags=False) def exec_module(self, **kwargs): for key in self.module_arg_spec: setattr(self, key, kwargs[key]) self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager) if (self.resource_group is not None and self.gallery_name is not None and self.name is not None): # self.results['gallery_images'] = self.format_item(self.get()) self.results['images'] = self.get() elif (self.resource_group is not None and self.gallery_name is not None): # self.results['gallery_images'] = self.format_item(self.listbygallery()) self.results['images'] = self.listbygallery() return self.results def get(self): response = None results = {} # prepare url self.url = ('/subscriptions' + '/{{ subscription_id }}' + '/resourceGroups' + '/{{ resource_group }}' + '/providers' + '/Microsoft.Compute' + '/galleries' + '/{{ gallery_name }}' + '/images' + '/{{ image_name }}') self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) self.url = self.url.replace('{{ resource_group }}', self.resource_group) self.url = self.url.replace('{{ gallery_name }}', self.gallery_name) self.url = self.url.replace('{{ image_name }}', self.name) try: response = self.mgmt_client.query(self.url, 'GET', self.query_parameters, self.header_parameters, None, self.status_code, 600, 30) results = json.loads(response.text) # self.log('Response : {0}'.format(response)) except CloudError as e: self.log('Could not get info for @(Model.ModuleOperationNameUpper).') return self.format_item(results) def listbygallery(self): response = None results = {} # prepare url self.url = ('/subscriptions' + '/{{ subscription_id }}' + '/resourceGroups' + '/{{ resource_group }}' + '/providers' + '/Microsoft.Compute' + '/galleries' + '/{{ gallery_name }}' + '/images') self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) self.url = self.url.replace('{{ resource_group }}', self.resource_group) self.url = self.url.replace('{{ gallery_name }}', self.gallery_name) try: response = self.mgmt_client.query(self.url, 'GET', self.query_parameters, self.header_parameters, None, self.status_code, 600, 30) results = json.loads(response.text) # self.log('Response : {0}'.format(response)) except CloudError as e: self.log('Could not get info for @(Model.ModuleOperationNameUpper).') return [self.format_item(x) for x in results['value']] if results['value'] else [] def format_item(self, item): d = { 'id': item['id'], 'name': item['name'], 'location': item['location'], 'tags': 
item.get('tags'), 'os_state': item['properties']['osState'], 'os_type': item['properties']['osType'], 'identifier': item['properties']['identifier'] } return d def main(): AzureRMGalleryImagesInfo() if __name__ == '__main__': main()
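# --- Editor's sketch (illustrative only; the response body is invented):
# what format_item() produces from one raw REST result.
#
#   raw = {'id': '/subscriptions/.../galleries/myGallery/images/myImage',
#          'name': 'myImage', 'location': 'eastus', 'tags': {'env': 'dev'},
#          'properties': {'osState': 'Generalized', 'osType': 'Linux',
#                         'identifier': {'publisher': 'pub', 'offer': 'off',
#                                        'sku': 'sku'}}}
#   # format_item() flattens the nested 'properties' keys into the
#   # top-level dict returned under results['images']:
#   # -> {'id': ..., 'name': 'myImage', 'location': 'eastus',
#   #     'tags': {'env': 'dev'}, 'os_state': 'Generalized',
#   #     'os_type': 'Linux', 'identifier': {...}}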
gpl-3.0
-6,688,099,203,825,484,000
32.616788
142
0.503528
false
Adtoma/amphtml
validator/validator_gen.py
112
11182
# # Copyright 2015 The AMP HTML Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the license. # """Generates validator-generated.js. This script reads validator.protoascii and reflects over its contents to generate Javascript. This Javascript consists of Closure-style classes and enums, as well as a createRules function which instantiates the data structures specified in validator.protoascii - the validator rules. From a Javascript perspective, this approach looks elaborate - you may wonder why we're not just writing Javascript directly, or why we're not encoding our rules in JSON or YAML or even, gasp, XML? Besides the additional type safety that we gain from our approach, it allows us to share the rule specifications, error codes, etc. between multiple validator implemenations, including an implementation in C++. This makes it much easier to keep otherwise likely divergent behavior in sync. """ import os def UnderscoreToCamelCase(under_score): """Helper function which converts under_score names to camelCase. In proto buffers, fields have under_scores. In Javascript, fields have camelCase. Args: under_score: A name, segmented by under_scores. Returns: A name, segmented as camelCase. """ segments = under_score.split('_') return '%s%s' % (segments[0], ''.join([s.title() for s in segments[1:]])) def FindDescriptors(validator_pb2, msg_desc_by_name, enum_desc_by_name): """Finds the message and enum descriptors in the file. This method finds the message and enum descriptors from a file descriptor; it will visit the top-level messages, and within those the enums. Args: validator_pb2: The proto2 Python module generated from validator.proto. msg_desc_by_name: A map of message descriptors, keyed by full_name. enum_desc_by_name: A map of enum descriptors, keyed by full name. """ for msg_type in validator_pb2.DESCRIPTOR.message_types_by_name.values(): msg_desc_by_name[msg_type.full_name] = msg_type for enum_type in msg_type.enum_types: enum_desc_by_name[enum_type.full_name] = enum_type def FieldTypeFor(descriptor, field_desc): """Returns the Javascript type for a given field descriptor. Args: descriptor: The descriptor module from the protobuf package, e.g. google.protobuf.descriptor. field_desc: A field descriptor for a particular field in a message. Returns: The Javascript type for the given field descriptor. """ element_type = { descriptor.FieldDescriptor.TYPE_DOUBLE: lambda: 'number', descriptor.FieldDescriptor.TYPE_INT32: lambda: 'number', descriptor.FieldDescriptor.TYPE_BOOL: lambda: 'boolean', descriptor.FieldDescriptor.TYPE_STRING: lambda: 'string', descriptor.FieldDescriptor.TYPE_ENUM: ( lambda: field_desc.enum_type.full_name), descriptor.FieldDescriptor.TYPE_MESSAGE: ( lambda: field_desc.message_type.full_name), }[field_desc.type]() if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED: return '!Array<!%s>' % element_type else: return element_type def NonRepeatedValueToString(descriptor, field_desc, value): """For a non-repeated field, renders the value as a Javascript literal. 
Helper function for ValueToString. Args: descriptor: The descriptor module from the protobuf package, e.g. google.protobuf.descriptor. field_desc: The type descriptor for the field value to be rendered. value: The value of the non-repeated field to be rendered. Returns: A Javascript literal for the provided non-repeated value. """ if field_desc.type == descriptor.FieldDescriptor.TYPE_STRING: escaped = ('' + value).encode('unicode-escape') return "'%s'" % escaped.replace("'", "\\'") if field_desc.type == descriptor.FieldDescriptor.TYPE_BOOL: if value: return 'true' return 'false' if field_desc.type == descriptor.FieldDescriptor.TYPE_ENUM: enum_value_name = field_desc.enum_type.values_by_number[value].name return '%s.%s' % (field_desc.enum_type.full_name, enum_value_name) if value is None: return 'null' return str(value) def ValueToString(descriptor, field_desc, value): """Renders a field value as a Javascript literal. Args: descriptor: The descriptor module from the protobuf package, e.g. google.protobuf.descriptor. field_desc: The type descriptor for the field value to be rendered. value: The value of the field to be rendered. Returns: A Javascript literal for the provided value. """ if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED: if value: return '[%s]' % ', '.join([NonRepeatedValueToString(descriptor, field_desc, s) for s in value]) return '[]' return NonRepeatedValueToString(descriptor, field_desc, value) def PrintClassFor(descriptor, msg_desc, out): """Prints a Javascript class for the given proto message. This method emits a Javascript class (Closure-style) for the given proto message to sys.stdout. Args: descriptor: The descriptor module from the protobuf package, e.g. google.protobuf.descriptor. msg_desc: The descriptor for a particular message type. out: a list of lines to output (without the newline characters), to which this function will append. """ # TODO(johannes): Should we provide access to the default values? # Those are given in field.default_value for each field. out.append('/**') out.append(' * @constructor') if (msg_desc.name == 'ValidationResult' or msg_desc.name == 'ValidationError'): out.append(' * @export') out.append(' */') out.append('%s = function() {' % msg_desc.full_name) for field in msg_desc.fields: if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: out.append(' /** @export {%s} */' % FieldTypeFor(descriptor, field)) out.append(' this.%s = [];' % UnderscoreToCamelCase(field.name)) else: out.append(' /** @export {?%s} */' % FieldTypeFor(descriptor, field)) out.append(' this.%s = null;' % UnderscoreToCamelCase(field.name)) out.append('};') out.append('') def PrintEnumFor(enum_desc, out): """Prints a Javascript enum for the given enum descriptor. Args: enum_desc: The descriptor for a particular enum type. out: a list of lines to output (without the newline characters), to which this function will append. """ out.append('/**') out.append(' * @enum {string}') out.append(' */') out.append('%s = {' % enum_desc.full_name) out.append(',\n'.join([" %s: '%s'" % (v.name, v.name) for v in enum_desc.values])) out.append('};') out.append('') def PrintObject(descriptor, msg, this_id, out): """Prints an object, by recursively constructing it. This routine emits Javascript which will construct an object modeling the provided message (in practice the ValidatorRules message). It references the classes and enums enitted by PrintClassFor and PrintEnumFor. Args: descriptor: The descriptor module from the protobuf package, e.g. google.protobuf.descriptor. 
msg: A protocol message instance. this_id: The id for the object being printed (all variables have the form o_${num} with ${num} being increasing integers out: a list of lines to output (without the newline characters), to which this function will append. Returns: The next object id, that is, next variable available for creating objects. """ out.append(' var o_%d = new %s();' % (this_id, msg.DESCRIPTOR.full_name)) next_id = this_id + 1 for (field_desc, field_val) in msg.ListFields(): if field_desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE: if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED: for val in field_val: field_id = next_id next_id = PrintObject(descriptor, val, field_id, out) out.append(' o_%d.%s.push(o_%d);' % ( this_id, UnderscoreToCamelCase(field_desc.name), field_id)) else: field_id = next_id next_id = PrintObject(descriptor, field_val, field_id, out) out.append(' o_%d.%s = o_%d;' % ( this_id, UnderscoreToCamelCase(field_desc.name), field_id)) else: out.append(' o_%d.%s = %s;' % ( this_id, UnderscoreToCamelCase(field_desc.name), ValueToString(descriptor, field_desc, field_val))) return next_id def GenerateValidatorGeneratedJs(specfile, validator_pb2, text_format, descriptor, out): """Main method for the code generator. This method reads the specfile and emits Javascript to sys.stdout. Args: specfile: Path to validator.protoascii, the specfile to generate Javascript from. validator_pb2: The proto2 Python module generated from validator.proto. text_format: The text_format module from the protobuf package, e.g. google.protobuf.text_format. descriptor: The descriptor module from the protobuf package, e.g. google.protobuf.descriptor. out: a list of lines to output (without the newline characters), to which this function will append. """ # First, find the descriptors and enums and generate Javascript # classes and enums. msg_desc_by_name = {} enum_desc_by_name = {} FindDescriptors(validator_pb2, msg_desc_by_name, enum_desc_by_name) rules_obj = '%s.RULES' % validator_pb2.DESCRIPTOR.package all_names = [rules_obj] + msg_desc_by_name.keys() + enum_desc_by_name.keys() all_names.sort() out.append('//') out.append('// Generated by %s - do not edit.' % os.path.basename(__file__)) out.append('//') out.append('') for name in all_names: out.append("goog.provide('%s');" % name) out.append('') for name in all_names: if name in msg_desc_by_name: PrintClassFor(descriptor, msg_desc_by_name[name], out) elif name in enum_desc_by_name: PrintEnumFor(enum_desc_by_name[name], out) # Read the rules file, validator.protoascii by parsing it as a text # message of type ValidatorRules. rules = validator_pb2.ValidatorRules() text_format.Merge(open(specfile).read(), rules) out.append('/**') out.append(' * @return {!%s}' % rules.DESCRIPTOR.full_name) out.append(' */') out.append('function createRules() {') PrintObject(descriptor, rules, 0, out) out.append(' return o_0;') out.append('}') out.append('') out.append('/**') out.append(' * @type {!%s}' % rules.DESCRIPTOR.full_name) out.append(' */') out.append('%s = createRules();' % rules_obj)
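# --- Editor's sketch (not part of the generator): quick checks of the name
# conversion helper defined at the top of this file.
def _demo_underscore_to_camel_case():
  assert UnderscoreToCamelCase('max_bytes_per_page') == 'maxBytesPerPage'
  assert UnderscoreToCamelCase('status') == 'status'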
apache-2.0
-4,921,581,281,538,616,000
37.558621
80
0.686103
false
manastech/de-bee
gdata/tlslite/utils/OpenSSL_TripleDES.py
359
1666
"""OpenSSL/M2Crypto 3DES implementation.""" from cryptomath import * from TripleDES import * if m2cryptoLoaded: def new(key, mode, IV): return OpenSSL_TripleDES(key, mode, IV) class OpenSSL_TripleDES(TripleDES): def __init__(self, key, mode, IV): TripleDES.__init__(self, key, mode, IV, "openssl") self.key = key self.IV = IV def _createContext(self, encrypt): context = m2.cipher_ctx_new() cipherType = m2.des_ede3_cbc() m2.cipher_init(context, cipherType, self.key, self.IV, encrypt) return context def encrypt(self, plaintext): TripleDES.encrypt(self, plaintext) context = self._createContext(1) ciphertext = m2.cipher_update(context, plaintext) m2.cipher_ctx_free(context) self.IV = ciphertext[-self.block_size:] return ciphertext def decrypt(self, ciphertext): TripleDES.decrypt(self, ciphertext) context = self._createContext(0) #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in. #To work around this, we append sixteen zeros to the string, below: plaintext = m2.cipher_update(context, ciphertext+('\0'*16)) #If this bug is ever fixed, then plaintext will end up having a garbage #plaintext block on the end. That's okay - the below code will ignore it. plaintext = plaintext[:len(ciphertext)] m2.cipher_ctx_free(context) self.IV = ciphertext[-self.block_size:] return plaintext
mit
17,559,265,038,457,636
36.886364
98
0.59964
false
leogregianin/pychess
utilities/blunders.py
2
5552
#!/usr/bin/python # -*- coding: utf-8 -*- ''' PyChess blunder finder script. This scripts allows you to analyze a played pgn file for blunders, using the engine of your choice. PYTHONPATH=lib/ python blunders.py game.pgn ''' ############################################################################### # Set up important things from gi.repository import GLib from gi.repository import GObject GObject.threads_init() mainloop = GLib.MainLoop() ############################################################################### # Do the rest of the imports import atexit import sys from queue import Queue from pychess.Players.engineNest import discoverer from pychess.Players.Player import Player, TurnInterrupt, PlayerIsDead from pychess.System.protoopen import protoopen from pychess.System import SubProcess from pychess.Utils.GameModel import GameModel from pychess.Utils.const import * from pychess.Utils.Move import listToSan, toSAN from pychess.Savers import pgn ############################################################################### # Ask the user for details def queryGameno(path): pgnfile = pgn.load(protoopen(path)) print("Selected file %s" % path) if len(pgnfile) == 0: print("The file is empty.") sys.exit() print() print("The file contains the following games:") for i in range(len(pgnfile)): name1, name2 = pgnfile.get_player_names(i) print("[%d] %s vs. %s" % (i, name1, name2)) print() if len(pgnfile) == 1: print("Autoselecting game 0.") gameno = 0 else: gameno = int(input("Select game number to be analyzed. [n]: ")) print() return pgnfile, gameno def queryAnalyzer(analyzers): print("PyChess found the following analyzers on your system:") for i, engine in enumerate(analyzers): print("[%d] %s" % (i, discoverer.getName(engine))) print() n = int(input("What engine should be your analyzer? [n] ")) print() return analyzers[n] def queryTime(): secs = int(input("Enter how many seconds we should use for each move [n]: ")) print() return secs class DummyPlayer (Player): def __init__(self): Player.__init__(self) self.Q = Queue() self.__type__ = LOCAL def makeMove (self, board1, move, board2): r = self.Q.get() if r == "del": raise PlayerIsDead if r == "int": raise TurnInterrupt def undoMoves (self, moves, gamemodel): self.Q.put('int') def end (self, status, reason): self.Q.put('del') def kill (self, reason): self.Q.put('del') def pause (self): pass def resume (self): pass def offer (self, offer): self.emit('accept', offer) def start(discoverer): atexit.register(SubProcess.finishAllSubprocesses) pgnfile, gameno = queryGameno(sys.argv[1]) analyzer = queryAnalyzer(discoverer.getAnalyzers()) secs = queryTime() name1, name2 = pgnfile.get_player_names(gameno) print("%s will now analyze the game between %s and %s with %d seconds per move." 
% \ (discoverer.getName(analyzer), name1, name2, secs)) print() global game, values values = {} game = GameModel() game.setPlayers([DummyPlayer(), DummyPlayer()]) analyzer = discoverer.initAnalyzerEngine(analyzer, ANALYZING, game.variant) analyzer.connect('analyze', onAnalyze) game.spectators[HINT] = analyzer game.loadAndStart(sys.argv[1], pgn, gameno, -1) def cb(): if game.ply == game.lowply: on_finish() return False check_blund() return True GLib.timeout_add_seconds(secs, cb) def on_finish(): print("Finish") mainloop.quit() def check_blund(): print() if game.ply+1 in values and game.ply in values: color = game.ply % 2 oldmoves, oldscore = values[game.ply] moves, score = values[game.ply+1] dif = score-oldscore if dif < -100 and color == WHITE: print("White blunder", dif) print("Should have done:", ", ".join(listToSan(game.getBoardAtPly(game.ply),oldmoves))) print() elif dif > 100 and color == BLACK: print("Black blunder", dif) print("Should have done:", ", ".join(listToSan(game.getBoardAtPly(game.ply),oldmoves))) print() movename = toSAN(game.getBoardAtPly(game.ply-1),game.getMoveAtPly(game.ply-1)) if game.ply % 2 == 1: move_suffix = "" else: move_suffix = "..." print("Considering %d%s %s " % ((game.ply+1)//2, move_suffix, movename,), end=' ') game.undoMoves(1) def onAnalyze(analyzer, analysis): global values if analysis: pv, score, depth = analysis[0] sys.stdout.write('.') sys.stdout.flush() if score != None: values[game.ply] = (pv, score*(-1)**game.ply) ############################################################################### # Slightly validate arguments if len(sys.argv) != 2 or sys.argv[1] == "--help": print("Usage: python blunders.py FILENAME Analyze the specified pgn file") print(" python blunders.py --help Display this help and exit") print("Note: You'll probably need to run the scripts with your PYTHONPATH set") print(" like 'PYTHONPATH=../lib/ python blunders...'") sys.exit() ############################################################################### # Push onto the mainloop and start it discoverer.connect('all_engines_discovered', start) discoverer.discover() mainloop.run()
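The core of check_blund() above is a sign-aware centipawn comparison; restated standalone (the +/-100 threshold, i.e. one pawn of evaluation, comes straight from the code):

def is_blunder(old_score, new_score, white_to_move):
    # Scores are centipawns from White's point of view (an assumption implied
    # by the score * (-1)**ply flip in onAnalyze above).
    dif = new_score - old_score
    return dif < -100 if white_to_move else dif > 100

print(is_blunder(35, -120, True))    # True: White's eval dropped by more than a pawn
print(is_blunder(35, -120, False))   # False: the same swing favors Black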
gpl-3.0
-5,182,629,710,192,038,000
32.245509
99
0.591318
false
ZhuangER/robot_path_planning
gui/pyqtgraph/canvas/CanvasManager.py
3
2214
# -*- coding: utf-8 -*- from pyqtgraph.Qt import QtCore, QtGui if not hasattr(QtCore, 'Signal'): QtCore.Signal = QtCore.pyqtSignal import weakref class CanvasManager(QtCore.QObject): SINGLETON = None sigCanvasListChanged = QtCore.Signal() def __init__(self): if CanvasManager.SINGLETON is not None: raise Exception("Can only create one canvas manager.") CanvasManager.SINGLETON = self QtCore.QObject.__init__(self) self.canvases = weakref.WeakValueDictionary() @classmethod def instance(cls): return CanvasManager.SINGLETON def registerCanvas(self, canvas, name): n2 = name i = 0 while n2 in self.canvases: n2 = "%s_%03d" % (name, i) i += 1 self.canvases[n2] = canvas self.sigCanvasListChanged.emit() return n2 def unregisterCanvas(self, name): c = self.canvases[name] del self.canvases[name] self.sigCanvasListChanged.emit() def listCanvases(self): return list(self.canvases.keys()) def getCanvas(self, name): return self.canvases[name] manager = CanvasManager() class CanvasCombo(QtGui.QComboBox): def __init__(self, parent=None): QtGui.QComboBox.__init__(self, parent) man = CanvasManager.instance() man.sigCanvasListChanged.connect(self.updateCanvasList) self.hostName = None self.updateCanvasList() def updateCanvasList(self): canvases = CanvasManager.instance().listCanvases() canvases.insert(0, "") if self.hostName in canvases: canvases.remove(self.hostName) sel = self.currentText() if sel in canvases: self.blockSignals(True) ## change does not affect current selection; block signals during update self.clear() for i in canvases: self.addItem(i) if i == sel: self.setCurrentIndex(self.count()) self.blockSignals(False) def setHostName(self, name): self.hostName = name self.updateCanvasList()
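registerCanvas() guarantees unique names by appending _000, _001, ... suffixes until the name is free; the loop, restated as a standalone function:

def unique_name(name, taken):
    n2, i = name, 0
    while n2 in taken:
        n2 = "%s_%03d" % (name, i)
        i += 1
    return n2

print(unique_name("plot", set()))                  # plot
print(unique_name("plot", {"plot", "plot_000"}))   # plot_001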
mit
-5,423,719,733,613,903,000
28.131579
109
0.593044
false
GladeRom/android_external_chromium_org
tools/deep_memory_profiler/lib/bucket.py
64
5564
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os from lib.symbol import FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS, TYPEINFO_SYMBOLS LOGGER = logging.getLogger('dmprof') class Bucket(object): """Represents a bucket, which is a unit of memory block classification.""" def __init__(self, stacktrace, allocator_type, typeinfo, typeinfo_name): self._stacktrace = stacktrace self._allocator_type = allocator_type self._typeinfo = typeinfo self._typeinfo_name = typeinfo_name self._symbolized_stackfunction = stacktrace self._symbolized_joined_stackfunction = '' self._symbolized_stacksourcefile = stacktrace self._symbolized_joined_stacksourcefile = '' self._symbolized_typeinfo = typeinfo_name self.component_cache = '' def __str__(self): result = [] result.append(self._allocator_type) if self._symbolized_typeinfo == 'no typeinfo': result.append('tno_typeinfo') else: result.append('t' + self._symbolized_typeinfo) result.append('n' + self._typeinfo_name) result.extend(['%s(@%s)' % (function, sourcefile) for function, sourcefile in zip(self._symbolized_stackfunction, self._symbolized_stacksourcefile)]) return ' '.join(result) def symbolize(self, symbol_mapping_cache): """Makes a symbolized stacktrace and typeinfo with |symbol_mapping_cache|. Args: symbol_mapping_cache: A SymbolMappingCache object. """ # TODO(dmikurube): Fill explicitly with numbers if symbol not found. self._symbolized_stackfunction = [ symbol_mapping_cache.lookup(FUNCTION_SYMBOLS, address) for address in self._stacktrace] self._symbolized_joined_stackfunction = ' '.join( self._symbolized_stackfunction) self._symbolized_stacksourcefile = [ symbol_mapping_cache.lookup(SOURCEFILE_SYMBOLS, address) for address in self._stacktrace] self._symbolized_joined_stacksourcefile = ' '.join( self._symbolized_stacksourcefile) if not self._typeinfo: self._symbolized_typeinfo = 'no typeinfo' else: self._symbolized_typeinfo = symbol_mapping_cache.lookup( TYPEINFO_SYMBOLS, self._typeinfo) if not self._symbolized_typeinfo: self._symbolized_typeinfo = 'no typeinfo' def clear_component_cache(self): self.component_cache = '' @property def stacktrace(self): return self._stacktrace @property def allocator_type(self): return self._allocator_type @property def typeinfo(self): return self._typeinfo @property def typeinfo_name(self): return self._typeinfo_name @property def symbolized_stackfunction(self): return self._symbolized_stackfunction @property def symbolized_joined_stackfunction(self): return self._symbolized_joined_stackfunction @property def symbolized_stacksourcefile(self): return self._symbolized_stacksourcefile @property def symbolized_joined_stacksourcefile(self): return self._symbolized_joined_stacksourcefile @property def symbolized_typeinfo(self): return self._symbolized_typeinfo class BucketSet(object): """Represents a set of bucket.""" def __init__(self): self._buckets = {} self._code_addresses = set() self._typeinfo_addresses = set() def load(self, prefix): """Loads all related bucket files. Args: prefix: A prefix string for bucket file names. 
""" LOGGER.info('Loading bucket files.') n = 0 skipped = 0 while True: path = '%s.%04d.buckets' % (prefix, n) if not os.path.exists(path) or not os.stat(path).st_size: if skipped > 10: break n += 1 skipped += 1 continue LOGGER.info(' %s' % path) with open(path, 'r') as f: self._load_file(f) n += 1 skipped = 0 def _load_file(self, bucket_f): for line in bucket_f: words = line.split() typeinfo = None typeinfo_name = '' stacktrace_begin = 2 for index, word in enumerate(words): if index < 2: continue if word[0] == 't': typeinfo = int(word[1:], 16) self._typeinfo_addresses.add(typeinfo) elif word[0] == 'n': typeinfo_name = word[1:] else: stacktrace_begin = index break stacktrace = [int(address, 16) for address in words[stacktrace_begin:]] for frame in stacktrace: self._code_addresses.add(frame) self._buckets[int(words[0])] = Bucket( stacktrace, words[1], typeinfo, typeinfo_name) def __iter__(self): for bucket_id, bucket_content in self._buckets.iteritems(): yield bucket_id, bucket_content def __getitem__(self, bucket_id): return self._buckets[bucket_id] def get(self, bucket_id): return self._buckets.get(bucket_id) def symbolize(self, symbol_mapping_cache): for bucket_content in self._buckets.itervalues(): bucket_content.symbolize(symbol_mapping_cache) def clear_component_cache(self): for bucket_content in self._buckets.itervalues(): bucket_content.clear_component_cache() def iter_addresses(self, symbol_type): if symbol_type in [FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS]: for function in self._code_addresses: yield function else: for function in self._typeinfo_addresses: yield function
bsd-3-clause
5,528,960,404,116,997,000
28.595745
78
0.654745
false
Eureka22/ASM_xf
PythonD/lib/python2.4/site-packages/display/cursing/FileSelector.py
2
4138
# # This file is part of GNU Enterprise. # # GNU Enterprise is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2, or (at your option) any later version. # # GNU Enterprise is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with program; see the file COPYING. If not, # write to the Free Software Foundation, Inc., 59 Temple Place # - Suite 330, Boston, MA 02111-1307, USA. # # Copyright 2002-2003 Free Software Foundation # # FILE: # FileSelector.py # # DESCRIPTION: # # NOTES: # #from gnue.common.apps import GDebug import os import math from constants import * from Dialog import Dialog from DirChooser import DirChooser from FileSelectPad import FileSelectPad from Progress import Progress from TextBox import TextBox from Label import Label from Button import Button class FileSelector(Dialog): """ """ def __init__(self, Root, Y, X, H,W,Title="", **properties): """ """ if H == 0: W = int(math.floor(Root.Width() * 0.9)) X = int(math.floor((Root.Width() - W) / 2)) H = int(math.floor(Root.Height() * 0.6)) Y = int(math.floor((Root.Height() - H) / 2)) apply(Dialog.__init__, (self,Root, Y, X, Y + H, X + W),properties) self.start = os.environ['PWD'] # allow arbitrary path to be sent in as 'START' if self.HasProperty("START"): self.start = self.START self.W = W self.filePad = FileSelectPad(self,3,2,H-8,W-4) self.filePad.SetMethod("ACTION",self._SetPath) self.filePad.SetMethod("ACTIVE",self._SetFileName) self.progressBar = Progress(self,'filedisppb',H-3,2,W-14,1) self.filePad.PROGRESS = self.progressBar self.AddControl(self.filePad) self.AddControl(self.progressBar) self.fileNameInp = TextBox(self,'filenameinp',H-2, 2, W - 14) self.AddControl(self.fileNameInp) self.dirChooser = DirChooser(self,1,1,W-2,self.start,'choose' ) self.dirChooser.SetMethod("CHANGED",self._newDir) self.dirChooser._DirChooser__Scan(self.start) self.AddControl(self.dirChooser) labelRow = chr(tiLTEE) + (W-2)*chr(tiHLINE) + chr(tiRTEE) self.AddControl(Label(self,'upperborder',2,0,labelRow)) self.AddControl(Label(self,'lowerborder',H-4,0,labelRow)) self.cancel_button = Button(self,'cancelbutt',H-2 ,W-11 , 10, 'Cancel') self.accept_button = Button(self,'acceptbutt',H-3 ,W-11 , 10, 'Accept') self.AddControl(self.accept_button) self.AddControl(self.cancel_button) self.cancel_button.SetMethod("CLICK",self.Cancel) self.accept_button.SetMethod("CLICK",self.Accept) def Show(self): # self.filePad.Paint(0,0,0) self.RunDialog() return self.__accept def Cancel(self,control,arg2,arg3): self.dirChooser.dir_entries = [] self.fileNameInp.SetText('') control.EXITFORM = 1 self.__accept = 0 return 0 def Accept(self,control,arg2,arg3): control.EXITFORM = 1 self.__accept = 1 return 1 def GetPath(self): if self.dirChooser.HasEntries(): erg = self.dirChooser.dir_entries[0] if erg[-1:] != '/': erg = erg + '/' return erg else: return '' def GetFileName(self): return self.fileNameInp.GetText() def GetFullPath(self): return self.GetPath() + self.GetFileName() def Refresh(self,arg1,arg2,arg3): self.filePad.Paint(None,None,None) apply(Dialog.Paint,(self,)) def _SetPath(self,Control, Path, Arg3): if self.dirChooser.AddPath(Path,None,None) : self.fileNameInp.SetText("") def 
_SetFileName(self,Control, Name, Arg3): self.fileNameInp.SetText(Name) def _newDir(self, newpath): self.filePad.Display(newpath) # # radio button group #
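The auto-sizing arithmetic in FileSelector.__init__ when H == 0 (90% of the root width, 60% of its height, centered), restated with a hypothetical 80x24 root:

import math

root_w, root_h = 80, 24                      # hypothetical Root dimensions
W = int(math.floor(root_w * 0.9))            # 72
X = int(math.floor((root_w - W) / 2))        # 4
H = int(math.floor(root_h * 0.6))            # 14
Y = int(math.floor((root_h - H) / 2))        # 5
print(W, X, H, Y)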
gpl-2.0
-4,336,221,734,775,237,600
29.112782
75
0.659497
false
ventoo-bike/Ventoo-PebbleWatchFace
pebblebike/dict2bin.py
10
1188
#!/usr/bin/python
import struct
import json
import sys

def main():
    # Check arguments; print an example of correct usage.
    if len(sys.argv) - 1 != 1:
        print("********************")
        print("Usage suggestion:")
        print("python " + sys.argv[0] + " <locale_chinese.json>")
        print("********************")
        exit()
    dict_filename = sys.argv[1]
    json_dict = json.load(open(dict_filename, 'rb'))
    hash_dict = {int(key): value for (key, value) in json_dict.iteritems() if key.isdigit()}
    # Create a binary resource loadable as a pebble dictionary.
    # Opened in 'wb' so the struct-packed bytes are written unmangled.
    with open(dict_filename.replace('.json', '.bin'), 'wb') as output_bin:
        output_bin.write(struct.pack('I', len(hash_dict)))  # count of entries
        for (key, value) in hash_dict.iteritems():
            output_bin.write(struct.pack('I', key))  # key
            output_bin.write(struct.pack('I', len(value.encode('utf-8')) + 1))  # length of string including null
            output_bin.write(value.encode('utf-8'))  # write string as c string
            output_bin.write(struct.pack('B', 0))  # null terminate string

if __name__ == '__main__':
    main()
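A sketch of the matching reader for the .bin layout written above (u32 entry count, then per entry: u32 key, u32 length including the NUL, UTF-8 bytes, NUL terminator); this assumes the same native 'I' packing the writer uses:

import struct

def read_dict(path):
    with open(path, 'rb') as f:
        (count,) = struct.unpack('I', f.read(4))
        entries = {}
        for _ in range(count):
            key, length = struct.unpack('II', f.read(8))
            raw = f.read(length)               # includes the trailing NUL
            entries[key] = raw[:-1].decode('utf-8')
        return entries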
mit
2,102,988,429,200,814,600
32
106
0.604377
false
Voskrese/archlive.archldr
src/wubi/frontends/win32/uninstallation_finish_page.py
6
1766
# Copyright (c) 2008 Agostino Russo
#
# Written by Agostino Russo <[email protected]>
#
# This file is part of Wubi the Win32 Ubuntu Installer.
#
# Wubi is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Wubi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

from winui import ui
from page import Page
import logging

log = logging.getLogger("WinuiInstallationFinishPage")


class UninstallationFinishPage(Page):

    def on_init(self):
        Page.on_init(self)
        self.set_background_color(255,255,255)
        self.insert_vertical_image("%s-vertical.bmp" % self.info.previous_distro_name)

        # navigation
        self.insert_navigation(_("Finish"), default=1)
        self.navigation.button1.on_click = self.on_finish

        # main container
        self.insert_main()
        self.main.set_background_color(255,255,255)
        self.main.title = ui.Label(self.main, 40, 20, self.main.width - 80, 60, _("Uninstallation completed"))
        self.main.title.set_font(size=20, bold=True, family="Arial")
        self.main.label = ui.Label(self.main, 40, 90, self.main.width - 80, 40, _("%s has been successfully uninstalled") % self.info.previous_distro_name)

    def on_finish(self):
        self.frontend.stop()
gpl-2.0
-5,469,931,939,932,756,000
36.574468
155
0.708381
false
asampat3090/readthedocs.org
readthedocs/core/templatetags/core_tags.py
27
3014
import urllib
import hashlib

from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils.encoding import force_bytes, force_text

from readthedocs.builds.models import Version
from readthedocs.projects.models import Project

register = template.Library()


@register.filter
def gravatar(email, size=48):
    """hacked from djangosnippets.org, but basically given an email address
    render an img tag with the hashed up bits needed for leetness
    omgwtfstillreading
    """
    url = "http://www.gravatar.com/avatar.php?%s" % urllib.urlencode({
        'gravatar_id': hashlib.md5(email).hexdigest(),
        'size': str(size)
    })
    return ('<img src="%s" width="%s" height="%s" alt="gravatar" '
            'class="gravatar" border="0" />' % (url, size, size))


@register.simple_tag(name="doc_url")
def make_document_url(project, version=None, page=None):
    if not project:
        return ""
    if project.main_language_project:
        base_url = project.get_translation_url(version, full=True)
    else:
        base_url = project.get_docs_url(version)
    if page and (page != "index") and (page != "index.html"):
        if project.documentation_type == "sphinx_htmldir":
            path = page + "/"
        elif project.documentation_type == "sphinx_singlehtml":
            path = "index.html#document-" + page
        elif page.endswith(".html"):
            path = page
        else:
            path = page + ".html"
    else:
        path = ""
    return base_url + path


@register.filter(is_safe=True)
def restructuredtext(value, short=False):
    try:
        from docutils.core import publish_parts
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError(
                "Error in 'restructuredtext' filter: "
                "The Python docutils library isn't installed."
            )
        return force_text(value)
    else:
        docutils_settings = {
            'raw_enabled': False,
            'file_insertion_enabled': False,
        }
        docutils_settings.update(getattr(settings, 'RESTRUCTUREDTEXT_FILTER_SETTINGS', {}))
        parts = publish_parts(source=force_bytes(value),
                              writer_name="html4css1",
                              settings_overrides=docutils_settings)
        out = force_text(parts["fragment"])
        try:
            if short:
                out = out.split("\n")[0]
        except IndexError:
            pass
        finally:
            return mark_safe(out)


@register.filter
def get_project(slug):
    try:
        return Project.objects.get(slug=slug)
    except:
        return None


@register.filter
def get_version(slug):
    try:
        return Version.objects.get(slug=slug)
    except:
        return None


@register.simple_tag
def url_replace(request, field, value):
    dict_ = request.GET.copy()
    dict_[field] = value
    return dict_.urlencode()


@register.filter
def key(d, key_name):
    return d[key_name]
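The page-to-path branching in make_document_url, restated as a standalone function for quick reference:

def doc_path(documentation_type, page):
    if page and page not in ("index", "index.html"):
        if documentation_type == "sphinx_htmldir":
            return page + "/"
        if documentation_type == "sphinx_singlehtml":
            return "index.html#document-" + page
        if page.endswith(".html"):
            return page
        return page + ".html"
    return ""

print(doc_path("sphinx_htmldir", "install"))     # install/
print(doc_path("sphinx_singlehtml", "install"))  # index.html#document-install
print(doc_path("sphinx", "install"))             # install.html
print(doc_path("sphinx", "index"))               # empty: index pages map to the base URL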
mit
-8,828,587,518,612,054,000
27.433962
91
0.618115
false
bloyl/mne-python
mne/preprocessing/nirs/tests/test_optical_density.py
12
1850
# Authors: Robert Luke <[email protected]>
#          Eric Larson <[email protected]>
#          Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)

import os.path as op

import pytest
import numpy as np
from numpy.testing import assert_allclose

from mne.datasets.testing import data_path
from mne.io import read_raw_nirx, BaseRaw
from mne.preprocessing.nirs import optical_density
from mne.utils import _validate_type
from mne.datasets import testing

fname_nirx = op.join(data_path(download=False),
                     'NIRx', 'nirscout', 'nirx_15_2_recording_w_short')


@testing.requires_testing_data
def test_optical_density():
    """Test return type for optical density."""
    raw = read_raw_nirx(fname_nirx, preload=False)
    assert 'fnirs_cw_amplitude' in raw
    assert 'fnirs_od' not in raw
    raw = optical_density(raw)
    _validate_type(raw, BaseRaw, 'raw')
    assert 'fnirs_cw_amplitude' not in raw
    assert 'fnirs_od' in raw


@testing.requires_testing_data
def test_optical_density_zeromean():
    """Test that optical density can process zero mean data."""
    raw = read_raw_nirx(fname_nirx, preload=True)
    raw._data[4] -= np.mean(raw._data[4])
    with pytest.warns(RuntimeWarning, match='Negative'):
        raw = optical_density(raw)
    assert 'fnirs_od' in raw


@testing.requires_testing_data
def test_optical_density_manual():
    """Test optical density on known values."""
    test_tol = 0.01
    raw = read_raw_nirx(fname_nirx, preload=True)
    # log(1) = 0
    raw._data[4] = np.ones(145)
    # log(0.5)/-1 = 0.69
    # log(1.5)/-1 = -0.40
    test_data = np.tile([0.5, 1.5], 73)[:145]
    raw._data[5] = test_data
    od = optical_density(raw)
    assert_allclose(od.get_data([4]), 0.)
    assert_allclose(od.get_data([5])[0, :2], [0.69, -0.4], atol=test_tol)
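Where the 0.69 / -0.40 constants in test_optical_density_manual come from: optical density is -log(intensity / baseline), and the baseline (the channel mean) is approximately 1.0 for the test data above:

import numpy as np

print(-np.log(0.5))   # 0.6931..., the expected OD for samples at 0.5
print(-np.log(1.5))   # -0.4055..., within atol=0.01 of the expected -0.4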
bsd-3-clause
-5,183,991,706,701,619,000
30.355932
73
0.665405
false
brianwoo/django-tutorial
build/Django/django/db/backends/dummy/base.py
94
2567
""" Dummy database backend for Django. Django uses this if the database ENGINE setting is empty (None or empty string). Each of these API functions, except connection.close(), raises ImproperlyConfigured. """ from django.core.exceptions import ImproperlyConfigured from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.base.client import BaseDatabaseClient from django.db.backends.base.creation import BaseDatabaseCreation from django.db.backends.base.features import BaseDatabaseFeatures from django.db.backends.base.introspection import BaseDatabaseIntrospection from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.base.validation import BaseDatabaseValidation def complain(*args, **kwargs): raise ImproperlyConfigured("settings.DATABASES is improperly configured. " "Please supply the ENGINE value. Check " "settings documentation for more details.") def ignore(*args, **kwargs): pass class DatabaseError(Exception): pass class IntegrityError(DatabaseError): pass class DatabaseOperations(BaseDatabaseOperations): quote_name = complain class DatabaseClient(BaseDatabaseClient): runshell = complain class DatabaseCreation(BaseDatabaseCreation): create_test_db = ignore destroy_test_db = ignore class DatabaseIntrospection(BaseDatabaseIntrospection): get_table_list = complain get_table_description = complain get_relations = complain get_indexes = complain get_key_columns = complain class DatabaseWrapper(BaseDatabaseWrapper): operators = {} # Override the base class implementations with null # implementations. Anything that tries to actually # do something raises complain; anything that tries # to rollback or undo something raises ignore. _cursor = complain ensure_connection = complain _commit = complain _rollback = ignore _close = ignore _savepoint = ignore _savepoint_commit = complain _savepoint_rollback = ignore _set_autocommit = complain def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) self.features = BaseDatabaseFeatures(self) self.ops = DatabaseOperations(self) self.client = DatabaseClient(self) self.creation = DatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = BaseDatabaseValidation(self) def is_usable(self): return True
gpl-3.0
3,380,963,628,860,388,000
28.848837
80
0.732762
false
MicroWorldwide/tweeria
controllers/creation_center.py
3
15864
# -*- coding: UTF-8 -*- import basic from functions import prettyItemBonus, formatTextareaInput import re from sets import Set from misc import miscController import time class creationCenterController(basic.defaultController): DIR = './ugc/' RE_CHECK_NAME = re.compile('^[a-zA-Z0-9\s\-\+\']+$', re.U+re.I) @basic.printpage def printPage(self, page, params): return { 'create_artwork': self.printCreatingArtwork, 'creation_center': self.printCreationCenter, 'create': self.printSelectCreationForm, 'artwork': self.printArtworkPage, 'edit_artwork': self.printArtworkEditPage, 'request': self.printRequestForm } @basic.methods def methods(self, params = {}): return { 'type_of_form': { 'cancel_sell': self.cancelSellingCraftedItem, 'cancel_spell_sell': self.cancelSellingSpellPattern, 'cancel_artwork_sell': self.cancelSellingArtwork, 'create_artwork': self.createArtwork, 'put_crafted_item_to_market': self.putCraftedItemToMarket, 'put_crafted_pattern_to_market': self.putSpellPatternToMarket, 'put_artwork_to_market': self.putArtworkToMarket, 'edit_artwork': self.editArtwork, 'delete_artwork': self.deleteArtwork, 'request_auth': self.requestAuth } } # -------------------------------------------------------------------------------------------------- # Misc def getArtwork(self, params, logged_check = False, self_item_check = False, ever_default=False): if logged_check and not self.cur_player: return {'error': 1002} artwork = False if 'id' in params and params['id']: artwork = self.model.misc.getArtworkById(params['id']) if not artwork: artwork = self.model.misc.getBuiltInArtworkByUID(params['id']) if not artwork: return {'error': 5001} else: if not (self.cur_player and ('login_admin' in self.cur_player and self.cur_player['login_admin'] or 'login_moderator' in self.cur_player and self.cur_player['login_moderator'] or self.cur_player['login_id'] == artwork['author']) ): if 'reject' in artwork and artwork['reject']: if not (self.cur_player and self.cur_player['login_id'] == artwork['author']): return {'error': 5002} elif not 'approve' in artwork or not artwork['approve']['approved']: return {'error': 5003} if self_item_check: can_edit = (artwork['author'] == self.cur_player['login_id'] and not artwork['sale_info']['active']) or 'login_admin' in self.cur_player or 'login_moderator' in self.cur_player if not can_edit: self.sbuilder.httpRedirect('../') return artwork # -------------------------------------------------------------------------------------------------- # Page methods def putCraftedItemToMarket(self, params): try: if '_id' in params and int(params['cost'])> 0: self.model.items.toMarket(params['_id'], int(params['cost'])) except Exception: pass self.sbuilder.httpRedirect(params['__page__']) def putSpellPatternToMarket(self, params): try: if '_id' in params and int(params['cost'])> 0: self.model.spells.toMarket(params['_id'], int(params['cost']) ) except Exception: pass self.sbuilder.httpRedirect(params['__page__']) def putArtworkToMarket(self, params): try: if '_id' in params and int(params['cost'])> 0: self.model.misc.artworkToMarket(params['_id'], int(params['cost'])) except Exception: pass self.sbuilder.httpRedirect(params['__page__']) def cancelSellingSpellPattern(self, params): is_spell = params['copy'] == 'True' id = params['spell_id'] if is_spell: spell = self.model.spells.getUserSpellByID(id) else: # pattern spell = self.model.spells.getSpellPatternByID(id) if spell: self.model.spells.cancelSelling(self.cur_player['login_id'], spell) else: return self.error('Item not found') 
self.sbuilder.httpRedirect(params['__page__']) def cancelSellingCraftedItem(self, params): copy_of_item = params['copy'] == 'True' id = params['item_id'] if copy_of_item: item = self.model.items.getItem(id) else: item = self.model.items.getCraftedItem(id) if item: self.model.items.cancelSelling(self.cur_player['login_id'], item) else: return self.error('Item not found') self.sbuilder.httpRedirect(params['__page__']) def cancelSellingArtwork(self, params): id = params['artwork_id'] artwork = self.model.misc.getArtworkById(id) if artwork: self.model.misc.cancelSelling(self.cur_player['login_id'], artwork) else: return self.error('Artwork not found') self.sbuilder.httpRedirect(params['__page__']) def createArtwork(self, params): if self.balance.MIN_LEVEL_TO_CREATE > self.cur_player['login_lvl']: return self.sbuilder.throwWebError(6001) rules = { 'name': {'min_length':3, 'max_length': 40, 'match': self.RE_CHECK_NAME, 'not_dublicate': {'col_artworks': 'name'}}, 'desc': {'min_length':4, 'max_length': 1000}, 'img': {'not_null': 1}, 'race': {'not_null':1}, 'class': {'gte':1, 'lte': 3, 'not_null':1}, 'cost': {'int': 1, 'gt': 0, 'lt': 1000000, 'not_null': 1} } status = self.checkParams(params, rules) if status['status']: buff = params['race'].split(':') if len(buff) == 2: faction_id = int(buff[0]) race_id = int(buff[1]) else: return False cost = int(params['cost']) if cost < 0: cost = 1 artwork = self.model.artwork({ "cost": cost, "img": params["img"], "faction": faction_id, "author": self.cur_player['login_id'], "race": race_id, "desc": formatTextareaInput(params['desc']), "class": int(params['class']), "name": params['name'].strip().title() }) artwork.data.update(self.model.misc.getImageInfo(params)) self.model.misc.addArtwork(artwork.data) self.sbuilder.httpRedirect(self.core.loaded_data['site_address']+'/u/creation_center?creation=ok&type=artwork') else: params.update({'errors': status['errors']}) def editArtwork(self, params): artwork = self.getArtwork(params, logged_check=True, self_item_check=True) rules = { 'desc': {'min_length':4, 'max_length': 1000}, 'race': {'not_null': 1}, 'class': {'gte':1, 'lte': 3, 'not_null':1}, 'cost': {'int': 1, 'gt': 0, 'lt': 1000000, 'not_null': 1} } if artwork['name'] != params['name']: rules.update({'name': {'min_length':3, 'max_length': 40, 'match': self.RE_CHECK_NAME, 'not_dublicate': {'col_artworks': 'name'}}}) status = self.checkParams(params, rules) if status['status']: buff = params['race'].split(':') if len(buff) == 2: faction_id = int(buff[0]) race_id = int(buff[1]) else: return False cost = int(params['cost']) if cost < 0: cost = 1 new_artwork_data = { "cost": cost, "faction": faction_id, "race": race_id, "desc": formatTextareaInput(params['desc']), "class": int(params['class']), "name": params['name'].strip() } if params['img'].strip(): new_artwork_data.update({"img": params["img"]}) new_artwork_data.update(self.model.misc.getImageInfo(params)) old_data = {} for key in ['name', 'desc', 'cost', 'race', 'class', 'faction', 'img']: if key in new_artwork_data and new_artwork_data[key] != artwork[key]: old_data.update({key: artwork[key]}) for key in ['link', 'name', 'email', 'twitter']: if key in new_artwork_data['img_info'] and new_artwork_data['img_info'][key] and key in artwork['img_info']: old_data.update({'Artwork: '+key: artwork['img_info'][key]}) no_need_approve = 'login_admin' in self.cur_player and self.cur_player['login_admin'] or 'login_moderator' in self.cur_player and self.cur_player['login_moderator'] if not no_need_approve: no_need_approve = 
'cost' in old_data and len(old_data) == 1 or not old_data else: self.model.misc.writeToLog(self.cur_player['login_id'], { 'action': 'artwork edit', 'artwork_id': artwork['_id'] }) new_artwork_data.update({'old_data': old_data}) self.model.misc.updateArtworkData(artwork['_id'], new_artwork_data, no_need_approve) self.sbuilder.httpRedirect('/u/artwork?id='+params['id']+'&edit=ok') self.sbuilder.httpRedirect('/u/edit_artwork?id='+params['id']+'&edit=fail') def deleteArtwork(self, params): artwork = self.getArtwork(params, logged_check=True, self_item_check=True) self.model.misc.deleteArtwork(artwork, self.cur_player['login_name']) self.sbuilder.httpRedirect('/u/creation_center?delete=artwork') def requestAuth(self, params): rules = { 'link': {'not_null': 1, 'min_length': 3}, 'fullname': {'not_null': 1, 'min_length': 3}, 'email': {'not_null': 1}, 'rules': {'not_null': 1} } status = self.checkParams(params, rules) if status['status']: data = { 'twitter_name': self.cur_player['login_name'], 'game_id': self.cur_player['login_id'], 'link': params['link'].strip(), 'fullname': params['fullname'].strip(), 'email': params['email'].strip(), 'time': time.time(), 'additional': formatTextareaInput(params['additional']) } self.model.misc.addAuthRequest(data) self.sbuilder.httpRedirect('/thx?n=ugc_request') else: params.update({'errors': status['errors']}) # -------------------------------------------------------------------------------------------------- # Print pages def printCreatingArtwork(self, fields, params): fields.update({self.title: 'Add new artwork'}) if not self.cur_player: return self.sbuilder.throwWebError(1002) response = self._printUGCDisablePage(fields) if response: return response if self.balance.MIN_LEVEL_TO_CREATE > self.cur_player['login_lvl']: return self.sbuilder.throwWebError(6001) if self.cur_player and self.cur_player['login_ugc_disabled']: return self.sbuilder.httpRedirect('/u/create') fields.update(self.balance.classes_and_races) player = self.model.players.getPlayerBy_ID(self.cur_player['login_id'], {'agree_with_rules': 1}) if not ('agree_with_rules' in player and player['agree_with_rules']): return basic.defaultController._printTemplate(self, 'rules_agree_form', fields) return basic.defaultController._printTemplate(self, 'create_artwork', fields) def printCreationCenter(self, fields, params): fields.update({self.title: 'Creation center'}) if not self.cur_player: return self.sbuilder.throwWebError(1002) items = self.model.items.getCraftedItems(self.cur_player['login_id']) spells = self.model.spells.getSpellsPattern(self.cur_player['login_id']) artworks = self.model.misc.getAllArtworksByPlayer(self.cur_player['login_id']) fields.update({ 'stat_names': self.balance.stats_name, 'can_create': self.balance.MIN_LEVEL_TO_CREATE <= self.cur_player['login_lvl'] }) if self.core.debug['create_by_invite']: fields['can_create'] = self.cur_player and self.cur_player['login_ugc_enabled'] items += spells + artworks sell_statuses = { 'sell': 'Selling', 'not_sell': 'Not Selling', 'waiting': 'Waiting', 'rejected': 'Rejected' } for item in items: if 'reject' in item: status = 'rejected' elif item['approve'] and item['approve']['approved']: if item['sale_info']['active']: status = 'sell' else: status = 'not_sell' else: status = 'waiting' # Если заклинание, то ставим соответствующий тип if 'spell_actions' in item: item['view'] = 'Spell' item['img'] += '_fit.png' # если артворк elif 'faction' in item: item['img'] += '_fit.png' item['view'] = 'Artwork' # если предмет, то сделаем красивый вывод стат 
else: item['view'] = item['view'].title() item.update(prettyItemBonus(item, self.balance.stats_name)) item['img'] = '/' + item['img'] + '_fit.png' item.update({'status': sell_statuses[status], 'raw_status': status}) fields.update({'items': items}) return basic.defaultController._printTemplate(self, 'creation_center', fields) def printSelectCreationForm(self, fields, params): fields.update({self.title: 'Create new'}) if not self.cur_player: return self.sbuilder.throwWebError(1002) response = self._printUGCDisablePage(fields) if response: return response fields.update({'can_create': self.balance.MIN_LEVEL_TO_CREATE <= self.cur_player['login_lvl']}) return basic.defaultController._printTemplate(self, 'select_create_form', fields) def printArtworkPage(self, fields, params): artwork = self.getArtwork(params) if 'error' in artwork: return self.sbuilder.throwWebError(artwork['error'], 'Artwork') if 'author' in artwork: author = self.model.players.getPlayerBy_ID(artwork['author'], {'name':1}) if author: artwork.update({'author_name': author['name']}) if 'UID' in artwork: artwork['img'] = self.core.ARTWORK_SHOP_PATH+artwork['img']+'.jpg' else: artwork['img'] += '_fit.png' artwork['img_info'] = miscController.formatArtworkInfo(artwork['img_info']) if 'reject' in artwork: try: rejecter = self.model.players.getPlayerBy_ID(artwork['reject']['rejecter_id'], {'name':1}) fields.update({'reject_name': rejecter['name']}) except Exception: fields.update({'reject_name': 'game'}) artwork.update({ 'race_name': self.balance.races[artwork['faction']][artwork['race']], 'class_name': self.balance.classes[str(artwork['class'])] }) fields.update(artwork) if self.cur_player: artworks = self.model.misc.getPlayersBuyedArtworks(self.cur_player['login_id']) for art in artworks: if art['name'] == artwork['name']: already_have = True break else: already_have = False fields.update({ 'already_have': already_have, 'player_race_name': self.balance.races[self.cur_player['login_faction']][self.cur_player['login_race']], 'player_class_name': self.balance.classes[str(self.cur_player['login_class'])] }) likes = self.model.items.getItemLikes(artwork['_id']) fields.update({ self.title: artwork['name']+' artwork page', 'likes': len(likes['people']), 'is_like': self.cur_player and self.cur_player['login_id'] in likes['people'], 'is_reported': self.cur_player and self.model.items.isReportItem(artwork['_id'], self.cur_player['login_id']), 'reasons': self.balance.getRejectReasons(self.balance.artwork_reject_reasons), 'categories': self.balance.categories }) return basic.defaultController._printTemplate(self, 'artwork_page', fields) def printArtworkEditPage(self, fields, params): artwork = self.getArtwork(params, logged_check=True, self_item_check=True) artwork['faction_race'] = str(artwork['faction'])+':'+str(artwork['race']) fields.update({self.title: 'Edit '+artwork['name']+' page'}) fields.update(artwork) return basic.defaultController._printTemplate(self, 'artwork_edit_page', fields) def printRequestForm(self, fields, params): fields.update({self.title: 'Request authorisation'}) if 'errors' in params: for error in params['errors']: fields.update({'_E_'+error['name']: True}) return basic.defaultController._printTemplate(self, 'request_auth', fields) data = { 'class': creationCenterController, 'type': ['u'], 'urls': ['create_artwork', 'request', 'creation_center', 'create', 'artwork', 'edit_artwork'] }
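The artwork-name validation regex RE_CHECK_NAME from the top of this controller, exercised standalone (the example names are hypothetical):

import re

RE_CHECK_NAME = re.compile("^[a-zA-Z0-9\\s\\-\\+']+$", re.U + re.I)

print(bool(RE_CHECK_NAME.match("Sword +1")))       # True
print(bool(RE_CHECK_NAME.match("D'Artagnan-2")))   # True
print(bool(RE_CHECK_NAME.match("Bad_Name!")))      # False: underscore and '!' are rejected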
mit
3,844,820,641,875,503,600
29.619238
179
0.627599
false
skipmodea1/repository.skipmodea1
script.module.execjs/lib/execjs/_runner_sources.py
2
2973
#!/usr/bin/env python3 # -*- coding: ascii -*- from __future__ import unicode_literals, division, with_statement Node = r"""(function(program, execJS) { execJS(program) })(function() { #{source} }, function(program) { var output; var print = function(string) { process.stdout.write('' + string + '\n'); }; try { result = program(); print('') if (typeof result == 'undefined' && result !== null) { print('["ok"]'); } else { try { print(JSON.stringify(['ok', result])); } catch (err) { print('["err"]'); } } } catch (err) { print(JSON.stringify(['err', '' + err])); } });""" JavaScriptCore = r"""(function(program, execJS) { execJS(program) })(function() { return eval(#{encoded_source}); }, function(program) { var output; try { result = program(); print(""); if (typeof result == 'undefined' && result !== null) { print('["ok"]'); } else { try { print(JSON.stringify(['ok', result])); } catch (err) { print('["err"]'); } } } catch (err) { print(JSON.stringify(['err', '' + err])); } }); """ SpiderMonkey = r"""(function(program, execJS) { execJS(program) })(function() { #{source} }, function(program) { #{json2_source} var output; try { result = program(); print(""); if (typeof result == 'undefined' && result !== null) { print('["ok"]'); } else { try { print(JSON.stringify(['ok', result])); } catch (err) { print('["err"]'); } } } catch (err) { print(JSON.stringify(['err', '' + err])); } }); """ Nashorn = SpiderMonkey JScript = r"""(function(program, execJS) { execJS(program) })(function() { return eval(#{encoded_source}); }, function(program) { #{json2_source} var output, print = function(string) { string = string.replace(/[^\x00-\x7f]/g, function(ch){ return '\\u' + ('0000' + ch.charCodeAt(0).toString(16)).slice(-4); }); WScript.Echo(string); }; try { result = program(); print("") if (typeof result == 'undefined' && result !== null) { print('["ok"]'); } else { try { print(JSON.stringify(['ok', result])); } catch (err) { print('["err"]'); } } } catch (err) { print(JSON.stringify(['err', err.name + ': ' + err.message])); } }); """ PhantomJS = r""" (function(program, execJS) { execJS(program) })(function() { return eval(#{encoded_source}); }, function(program) { var output; var print = function(string) { console.log('' + string); }; try { result = program(); print('') if (typeof result == 'undefined' && result !== null) { print('["ok"]'); } else { try { print(JSON.stringify(['ok', result])); } catch (err) { print('["err"]'); } } } catch (err) { print(JSON.stringify(['err', '' + err])); } }); phantom.exit(); """ SlimerJS = PhantomJS
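How the #{...} placeholders in these templates get filled: execjs substitutes the (JSON-encoded) program source into the template before handing the result to the runtime. A stand-in for that step (the real substitution lives elsewhere in execjs and may differ in detail):

import json

template = "return eval(#{encoded_source});"
encoded_source = json.dumps("1 + 2")                       # -> '"1 + 2"'
print(template.replace("#{encoded_source}", encoded_source))
# -> return eval("1 + 2");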
gpl-3.0
-1,539,132,010,210,717,000
21.869231
89
0.514968
false
skycucumber/restful
python/venv/lib/python2.7/site-packages/migrate/versioning/util/__init__.py
33
5339
#!/usr/bin/env python # -*- coding: utf-8 -*- """.. currentmodule:: migrate.versioning.util""" import warnings import logging from decorator import decorator from pkg_resources import EntryPoint import six from sqlalchemy import create_engine from sqlalchemy.engine import Engine from sqlalchemy.pool import StaticPool from migrate import exceptions from migrate.versioning.util.keyedinstance import KeyedInstance from migrate.versioning.util.importpath import import_path log = logging.getLogger(__name__) def load_model(dotted_name): """Import module and use module-level variable". :param dotted_name: path to model in form of string: ``some.python.module:Class`` .. versionchanged:: 0.5.4 """ if isinstance(dotted_name, six.string_types): if ':' not in dotted_name: # backwards compatibility warnings.warn('model should be in form of module.model:User ' 'and not module.model.User', exceptions.MigrateDeprecationWarning) dotted_name = ':'.join(dotted_name.rsplit('.', 1)) return EntryPoint.parse('x=%s' % dotted_name).load(False) else: # Assume it's already loaded. return dotted_name def asbool(obj): """Do everything to use object as bool""" if isinstance(obj, six.string_types): obj = obj.strip().lower() if obj in ['true', 'yes', 'on', 'y', 't', '1']: return True elif obj in ['false', 'no', 'off', 'n', 'f', '0']: return False else: raise ValueError("String is not true/false: %r" % obj) if obj in (True, False): return bool(obj) else: raise ValueError("String is not true/false: %r" % obj) def guess_obj_type(obj): """Do everything to guess object type from string Tries to convert to `int`, `bool` and finally returns if not succeded. .. versionadded: 0.5.4 """ result = None try: result = int(obj) except: pass if result is None: try: result = asbool(obj) except: pass if result is not None: return result else: return obj @decorator def catch_known_errors(f, *a, **kw): """Decorator that catches known api errors .. versionadded: 0.5.4 """ try: return f(*a, **kw) except exceptions.PathFoundError as e: raise exceptions.KnownError("The path %s already exists" % e.args[0]) def construct_engine(engine, **opts): """.. versionadded:: 0.5.4 Constructs and returns SQLAlchemy engine. Currently, there are 2 ways to pass create_engine options to :mod:`migrate.versioning.api` functions: :param engine: connection string or a existing engine :param engine_dict: python dictionary of options to pass to `create_engine` :param engine_arg_*: keyword parameters to pass to `create_engine` (evaluated with :func:`migrate.versioning.util.guess_obj_type`) :type engine_dict: dict :type engine: string or Engine instance :type engine_arg_*: string :returns: SQLAlchemy Engine .. note:: keyword parameters override ``engine_dict`` values. 
""" if isinstance(engine, Engine): return engine elif not isinstance(engine, six.string_types): raise ValueError("you need to pass either an existing engine or a database uri") # get options for create_engine if opts.get('engine_dict') and isinstance(opts['engine_dict'], dict): kwargs = opts['engine_dict'] else: kwargs = dict() # DEPRECATED: handle echo the old way echo = asbool(opts.get('echo', False)) if echo: warnings.warn('echo=True parameter is deprecated, pass ' 'engine_arg_echo=True or engine_dict={"echo": True}', exceptions.MigrateDeprecationWarning) kwargs['echo'] = echo # parse keyword arguments for key, value in six.iteritems(opts): if key.startswith('engine_arg_'): kwargs[key[11:]] = guess_obj_type(value) log.debug('Constructing engine') # TODO: return create_engine(engine, poolclass=StaticPool, **kwargs) # seems like 0.5.x branch does not work with engine.dispose and staticpool return create_engine(engine, **kwargs) @decorator def with_engine(f, *a, **kw): """Decorator for :mod:`migrate.versioning.api` functions to safely close resources after function usage. Passes engine parameters to :func:`construct_engine` and resulting parameter is available as kw['engine']. Engine is disposed after wrapped function is executed. .. versionadded: 0.6.0 """ url = a[0] engine = construct_engine(url, **kw) try: kw['engine'] = engine return f(*a, **kw) finally: if isinstance(engine, Engine) and engine is not url: log.debug('Disposing SQLAlchemy engine %s', engine) engine.dispose() class Memoize: """Memoize(fn) - an instance which acts like fn but memoizes its arguments Will only work on functions with non-mutable arguments ActiveState Code 52201 """ def __init__(self, fn): self.fn = fn self.memo = {} def __call__(self, *args): if args not in self.memo: self.memo[args] = self.fn(*args) return self.memo[args]
gpl-2.0
-4,262,876,988,429,689,300
28.661111
134
0.635138
false
dybiszb/MeanCurvatureLibrary
testing/gtest/googletest/test/gtest_env_var_test.py
343
4036
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Verifies that Google Test correctly parses environment variables.""" __author__ = '[email protected] (Zhanyong Wan)' import os import gtest_test_utils IS_WINDOWS = os.name == 'nt' IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_') environ = os.environ.copy() def AssertEq(expected, actual): if expected != actual: print('Expected: %s' % (expected,)) print(' Actual: %s' % (actual,)) raise AssertionError def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def GetFlag(flag): """Runs gtest_env_var_test_ and returns its output.""" args = [COMMAND] if flag is not None: args += [flag] return gtest_test_utils.Subprocess(args, env=environ).output def TestFlag(flag, test_val, default_val): """Verifies that the given flag is affected by the corresponding env var.""" env_var = 'GTEST_' + flag.upper() SetEnvVar(env_var, test_val) AssertEq(test_val, GetFlag(flag)) SetEnvVar(env_var, None) AssertEq(default_val, GetFlag(flag)) class GTestEnvVarTest(gtest_test_utils.TestCase): def testEnvVarAffectsFlag(self): """Tests that environment variable should affect the corresponding flag.""" TestFlag('break_on_failure', '1', '0') TestFlag('color', 'yes', 'auto') TestFlag('filter', 'FooTest.Bar', '*') SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test TestFlag('output', 'xml:tmp/foo.xml', '') TestFlag('print_time', '0', '1') TestFlag('repeat', '999', '1') TestFlag('throw_on_failure', '1', '0') TestFlag('death_test_style', 'threadsafe', 'fast') TestFlag('catch_exceptions', '0', '1') if IS_LINUX: TestFlag('death_test_use_fork', '1', '0') TestFlag('stack_trace_depth', '0', '100') def testXmlOutputFile(self): """Tests that $XML_OUTPUT_FILE affects the output flag.""" SetEnvVar('GTEST_OUTPUT', None) SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml') AssertEq('xml:tmp/bar.xml', 
GetFlag('output')) def testXmlOutputFileOverride(self): """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT""" SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml') SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml') AssertEq('xml:tmp/foo.xml', GetFlag('output')) if __name__ == '__main__': gtest_test_utils.Main()
mit
5,124,921,276,959,141,000
33.495726
79
0.703419
false
leiferikb/bitpop
src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
1
20535
# Copyright (C) 2010 Google Inc. All rights reserved. # Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged # Copyright (C) 2011 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import logging import optparse import os import sys import traceback from webkitpy.common.host import Host from webkitpy.layout_tests.controllers.manager import Manager from webkitpy.layout_tests.models import test_run_results from webkitpy.layout_tests.port import configuration_options, platform_options from webkitpy.layout_tests.views import buildbot_results from webkitpy.layout_tests.views import printing _log = logging.getLogger(__name__) def main(argv, stdout, stderr): options, args = parse_args(argv) if options.platform and 'test' in options.platform: # It's a bit lame to import mocks into real code, but this allows the user # to run tests against the test platform interactively, which is useful for # debugging test failures. from webkitpy.common.host_mock import MockHost host = MockHost() else: host = Host() if options.lint_test_files: from webkitpy.layout_tests.lint_test_expectations import lint return lint(host, options, stderr) try: port = host.port_factory.get(options.platform, options) except NotImplementedError, e: # FIXME: is this the best way to handle unsupported port names? print >> stderr, str(e) return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS try: run_details = run(port, options, args, stderr) if run_details.exit_code not in test_run_results.ERROR_CODES and not run_details.initial_results.keyboard_interrupted: bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging) bot_printer.print_results(run_details) return run_details.exit_code # We need to still handle KeyboardInterrupt, atleast for webkitpy unittest cases. 
except KeyboardInterrupt: return test_run_results.INTERRUPTED_EXIT_STATUS except test_run_results.TestRunException as e: print >> stderr, e.msg return e.code except BaseException as e: if isinstance(e, Exception): print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e)) traceback.print_exc(file=stderr) return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS def parse_args(args): option_group_definitions = [] option_group_definitions.append(("Platform options", platform_options())) option_group_definitions.append(("Configuration options", configuration_options())) option_group_definitions.append(("Printing Options", printing.print_options())) option_group_definitions.append(("Android-specific Options", [ optparse.make_option("--adb-device", action="append", default=[], help="Run Android layout tests on these devices."), # FIXME: Flip this to be off by default once we can log the device setup more cleanly. optparse.make_option("--no-android-logging", action="store_false", dest='android_logging', default=True, help="Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging"), ])) option_group_definitions.append(("Results Options", [ optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true", dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"), optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false", dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"), optparse.make_option("--results-directory", help="Location of test results"), optparse.make_option("--build-directory", help="Path to the directory under which build files are kept (should not include configuration)"), optparse.make_option("--add-platform-exceptions", action="store_true", default=False, help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"), optparse.make_option("--new-baseline", action="store_true", default=False, help="Save generated results as new baselines " "into the *most-specific-platform* directory, overwriting whatever's " "already there. Equivalent to --reset-results --add-platform-exceptions"), optparse.make_option("--reset-results", action="store_true", default=False, help="Reset expectations to the " "generated results in their existing location."), optparse.make_option("--no-new-test-results", action="store_false", dest="new_test_results", default=True, help="Don't create new baselines when no expected results exist"), #FIXME: we should support a comma separated list with --pixel-test-directory as well. optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories", help="A directory where it is allowed to execute tests as pixel tests. " "Specify multiple times to add multiple directories. " "This option implies --pixel-tests. If specified, only those tests " "will be executed as pixel tests that are located in one of the " "directories enumerated with the option. Some ports may ignore this " "option while others can have a default value that can be overridden here."), optparse.make_option("--skip-failing-tests", action="store_true", default=False, help="Skip tests that are expected to fail. 
" "Note: When using this option, you might miss new crashes " "in these tests."), optparse.make_option("--additional-drt-flag", action="append", default=[], help="Additional command line flag to pass to the driver " "Specify multiple times to add multiple flags."), optparse.make_option("--driver-name", type="string", help="Alternative driver binary to use"), optparse.make_option("--additional-platform-directory", action="append", default=[], help="Additional directory where to look for test " "baselines (will take precendence over platform baselines). " "Specify multiple times to add multiple search path entries."), optparse.make_option("--additional-expectations", action="append", default=[], help="Path to a test_expectations file that will override previous expectations. " "Specify multiple times for multiple sets of overrides."), optparse.make_option("--compare-port", action="store", default=None, help="Use the specified port's baselines first"), optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results", help="Don't launch a browser with results after the tests " "are done"), optparse.make_option("--full-results-html", action="store_true", default=False, help="Show all failures in results.html, rather than only regressions"), optparse.make_option("--clobber-old-results", action="store_true", default=False, help="Clobbers test results from previous runs."), optparse.make_option("--smoke", action="store_true", help="Run just the SmokeTests"), optparse.make_option("--no-smoke", dest="smoke", action="store_false", help="Do not run just the SmokeTests"), ])) option_group_definitions.append(("Testing Options", [ optparse.make_option("--build", dest="build", action="store_true", default=True, help="Check to ensure the build is up-to-date (default)."), optparse.make_option("--no-build", dest="build", action="store_false", help="Don't check to see if the build is up-to-date."), optparse.make_option("-n", "--dry-run", action="store_true", default=False, help="Do everything but actually run the tests or upload results."), optparse.make_option("--nocheck-sys-deps", action="store_true", default=False, help="Don't check the system dependencies (themes)"), optparse.make_option("--wrapper", help="wrapper command to insert before invocations of " "the driver; option is split on whitespace before " "running. (Example: --wrapper='valgrind --smc-check=all')"), optparse.make_option("-i", "--ignore-tests", action="append", default=[], help="directories or test to ignore (may specify multiple times)"), optparse.make_option("--ignore-flaky-tests", action="store", help=("Control whether tests that are flaky on the bots get ignored." "'very-flaky' == Ignore any tests that flaked more than once on the bot." "'maybe-flaky' == Ignore any tests that flaked once on the bot." "'unexpected' == Ignore any tests that had unexpected results on the bot.")), optparse.make_option("--ignore-builder-category", action="store", help=("The category of builders to use with the --ignore-flaky-tests " "option ('layout' or 'deps').")), optparse.make_option("--test-list", action="append", help="read list of tests to run from file", metavar="FILE"), optparse.make_option("--skipped", action="store", default=None, help=("control how tests marked SKIP are run. 
" "'default' == Skip tests unless explicitly listed on the command line, " "'ignore' == Run them anyway, " "'only' == only run the SKIP tests, " "'always' == always skip, even if listed on the command line.")), optparse.make_option("--time-out-ms", help="Set the timeout for each test"), optparse.make_option("--order", action="store", default="natural", help=("determine the order in which the test cases will be run. " "'none' == use the order in which the tests were listed either in arguments or test list, " "'natural' == use the natural order (default), " "'random-seeded' == randomize the test order using a fixed seed, " "'random' == randomize the test order.")), optparse.make_option("--run-chunk", help=("Run a specified chunk (n:l), the nth of len l, " "of the layout tests")), optparse.make_option("--run-part", help=("Run a specified part (n:m), " "the nth of m parts, of the layout tests")), optparse.make_option("--batch-size", help=("Run a the tests in batches (n), after every n tests, " "the driver is relaunched."), type="int", default=None), optparse.make_option("--run-singly", action="store_true", default=False, help="DEPRECATED, same as --batch-size=1 --verbose"), optparse.make_option("--child-processes", help="Number of drivers to run in parallel."), # FIXME: Display default number of child processes that will run. optparse.make_option("-f", "--fully-parallel", action="store_true", help="run all tests in parallel"), optparse.make_option("--exit-after-n-failures", type="int", default=None, help="Exit after the first N failures instead of running all " "tests"), optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int", default=None, help="Exit after the first N crashes instead of " "running all tests"), optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"), optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"), optparse.make_option("--retry-failures", action="store_true", help="Re-try any tests that produce unexpected results. 
Default is to not retry if an explicit list of tests is passed to run-webkit-tests."), optparse.make_option("--no-retry-failures", action="store_false", dest="retry_failures", help="Don't re-try any tests that produce unexpected results."), optparse.make_option("--max-locked-shards", type="int", default=0, help="Set the maximum number of locked shards"), optparse.make_option("--additional-env-var", type="string", action="append", default=[], help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"), optparse.make_option("--profile", action="store_true", help="Output per-test profile information."), optparse.make_option("--profiler", action="store", help="Output per-test profile information, using the specified profiler."), optparse.make_option("--driver-logging", action="store_true", help="Print detailed logging of the driver/content_shell"), optparse.make_option("--disable-breakpad", action="store_true", help="Don't use breakpad to symbolize unexpected crashes."), optparse.make_option("--use-apache", action="store_true", help="Use Apache instead of LigHTTPd (default is port-specific)."), optparse.make_option("--no-use-apache", action="store_false", dest="use_apache", help="Use LigHTTPd instead of Apache (default is port-specific)."), optparse.make_option("--enable-leak-detection", action="store_true", help="Enable the leak detection of DOM objects."), ])) option_group_definitions.append(("Miscellaneous Options", [ optparse.make_option("--lint-test-files", action="store_true", default=False, help=("Makes sure the test files parse for all " "configurations. Does not run any tests.")), ])) # FIXME: Move these into json_results_generator.py option_group_definitions.append(("Result JSON Options", [ optparse.make_option("--master-name", help="The name of the buildbot master."), optparse.make_option("--builder-name", default="", help=("The name of the builder shown on the waterfall running " "this script e.g. WebKit.")), optparse.make_option("--build-name", default="DUMMY_BUILD_NAME", help=("The name of the builder used in its path, e.g. 
" "webkit-rel.")), optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER", help=("The build number of the builder running this script.")), optparse.make_option("--test-results-server", default="", help=("If specified, upload results json files to this appengine " "server.")), ])) option_parser = optparse.OptionParser() for group_name, group_options in option_group_definitions: option_group = optparse.OptionGroup(option_parser, group_name) option_group.add_options(group_options) option_parser.add_option_group(option_group) return option_parser.parse_args(args) def _set_up_derived_options(port, options, args): """Sets the options values that depend on other options values.""" if not options.child_processes: options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES", str(port.default_child_processes())) if not options.max_locked_shards: options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS", str(port.default_max_locked_shards()))) if not options.configuration: options.configuration = port.default_configuration() if options.pixel_tests is None: options.pixel_tests = port.default_pixel_tests() if not options.time_out_ms: options.time_out_ms = str(port.default_timeout_ms()) options.slow_time_out_ms = str(5 * int(options.time_out_ms)) if options.additional_platform_directory: additional_platform_directories = [] for path in options.additional_platform_directory: additional_platform_directories.append(port.host.filesystem.abspath(path)) options.additional_platform_directory = additional_platform_directories if options.new_baseline: options.reset_results = True options.add_platform_exceptions = True if options.pixel_test_directories: options.pixel_tests = True varified_dirs = set() pixel_test_directories = options.pixel_test_directories for directory in pixel_test_directories: # FIXME: we should support specifying the directories all the ways we support it for additional # arguments specifying which tests and directories to run. We should also move the logic for that # to Port. filesystem = port.host.filesystem if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)): _log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory)) else: varified_dirs.add(directory) options.pixel_test_directories = list(varified_dirs) if options.run_singly: options.batch_size = 1 options.verbose = True if not args and not options.test_list and options.smoke is None: options.smoke = port.default_smoke_test_only() if options.smoke: if not args and not options.test_list and options.retry_failures is None: # Retry failures by default if we're doing just a smoke test (no additional tests). 
options.retry_failures = True if not options.test_list: options.test_list = [] options.test_list.append(port.host.filesystem.join(port.layout_tests_dir(), 'SmokeTests')) if not options.skipped: options.skipped = 'always' if not options.skipped: options.skipped = 'default' def run(port, options, args, logging_stream): logger = logging.getLogger() logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO) try: printer = printing.Printer(port, options, logging_stream, logger=logger) _set_up_derived_options(port, options, args) manager = Manager(port, options, printer) printer.print_config(port.results_directory()) run_details = manager.run(args) _log.debug("Testing completed, Exit status: %d" % run_details.exit_code) return run_details finally: printer.cleanup() if __name__ == '__main__': sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
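# A minimal sketch (hypothetical invocation, wrapped in a helper so nothing runs
# on import) of how parse_args() above behaves. The option names come from the
# option_group_definitions in this module; the argument strings are made up.
def _example_parse_args():
    options, args = parse_args(['--pixel-tests', '--iterations', '3', 'fast/dom'])
    assert options.pixel_tests is True   # "--pixel-tests" is a store_true option
    assert options.iterations == 3       # "--iterations" is type="int" (default 1)
    assert args == ['fast/dom']          # positional test names stay in args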
gpl-3.0
-4,866,137,247,003,173,000
53.039474
154
0.648308
false
juanc27/myfavteam
myfavteam/models.py
1
11412
from django.db import models from django.core.urlresolvers import reverse import datetime # Create your models here. class Team(models.Model): short_name = models.CharField(max_length=25) name = models.CharField(max_length=50, null=True, blank=True) my_team = models.BooleanField(default = False) city = models.CharField(max_length=50, null=True, blank=True) description = models.CharField(max_length=255, blank=True) image = models.ImageField(null=True,blank=True, max_length=255) created = models.DateTimeField(auto_now_add=True) league = models.CharField(max_length=20, choices = (('NBA', 'NBA'), ('MLB', 'MLB'), ('NFL', 'NFL'))) #conference or league conference = models.CharField(max_length=20, choices = (('Western', 'Western'), ('Eastern', 'Eastern'), ('National', 'National'), ('American', 'American'), ('NFC', 'NFC'), ('AFC', 'AFC'), )) division = models.CharField(max_length=20, choices = (('Atlantic', 'Atlantic'), ('Central', 'Central'), ('Southeast', 'Southeast'), ('Northwest', 'Northwest'), ('Pacific', 'Pacific'), ('Southwest', 'Southwest'), )) class Meta: ordering = ['created'] def __unicode__(self): return u'%s' % self.short_name def get_absolute_url(self): return reverse('myfavteam.views.index', args=[str(self.id)]) def get_news_url(self): return reverse('myfavteam.views.news', args=[str(self.id)]) def get_social_url(self): return reverse('myfavteam.views.social', args=[str(self.id)]) def get_schedule_url(self): return reverse('myfavteam.views.schedule', args=[str(self.id)]) def get_stats_url(self): return reverse('myfavteam.views.stats', args=[str(self.id)]) def get_standings_url(self): return reverse('myfavteam.views.standings', args=[str(self.id)]) def get_roster_url(self): return reverse('myfavteam.views.roster', args=[str(self.id)]) class News(models.Model): team = models.ForeignKey('Team') title = models.CharField(max_length=255) description = models.CharField(max_length=255) date = models.DateTimeField() link = models.CharField(max_length=512) author = models.CharField(max_length=100, null=True, blank=True) website = models.ForeignKey('Website') text = models.TextField() image = models.ImageField(null=True, blank=True, max_length=255) created = models.DateTimeField(auto_now_add=True) class Meta: ordering = ['-date'] unique_together = ["link", "title"] def __unicode__(self): return u'%s' % self.title def get_absolute_url(self): return u'%s' % self.link class Stadium(models.Model): name = models.CharField(max_length=100) city = models.CharField(max_length=100) created = models.DateTimeField(auto_now_add=True) class Meta: ordering = ['-created'] def __unicode__(self): return u'%s' % self.name #def get_absolute_url(self): # return reverse('myfavteam.views.team', args=[self.team_name]) class Tournament(models.Model): name = models.CharField(max_length=512) league = models.CharField(max_length=20, choices = (('NBA', 'NBA'), ('MLB', 'MLB'), ('NFL', 'NFL'))) standings_link = models.CharField(max_length=512, null=True, blank=True) created = models.DateTimeField(auto_now_add=True) start_date = models.DateField(auto_now_add=True) end_date = models.DateField(default='2025-12-01') class Meta: ordering = ['-created'] unique_together = ["name", "league"] def __unicode__(self): return u'%s' % self.name #def get_absolute_url(self): # return reverse('myfavteam.views.team', args=[self.team_name]) class Schedule(models.Model): tournament = models.ForeignKey('Tournament') team = models.ForeignKey('Team') team_against = models.ForeignKey('Team', related_name='agn+') stadium =
models.ForeignKey('Stadium') team_score = models.IntegerField(default=0) team_against_score = models.IntegerField(default=0) is_home = models.BooleanField(default=True) date = models.DateTimeField() recap_link = models.CharField(max_length=512, null=True, blank=True) class Meta: ordering = ['-date'] unique_together = ["tournament", "team", "team_against", "is_home", "date"] def __unicode__(self): if self.is_home == True : str1 = u"{} vs {}".format(self.team, self.team_against) else: str1 = u"{} vs {}".format(self.team_against, self.team) return u'%s' % str1 #def get_absolute_url(self): # return reverse('myfavteam.views.team', args=[self.team_name]) class Position(models.Model): name = models.CharField(max_length=50, null=True, blank=True) acronym = models.CharField(max_length=10) created = models.DateTimeField(auto_now_add=True) class Meta: ordering = ['-created'] unique_together = ["name", "acronym"] def __unicode__(self): return u'%s' % self.name #def get_absolute_url(self): # return reverse('myfavteam.views.team', args=[self.team_name]) class Player(models.Model): position = models.ForeignKey('Position') team = models.ForeignKey('Team') first_name = models.CharField(max_length=50) last_name = models.CharField(max_length=50) jersey_number = models.IntegerField(null=True, blank=True) birthdate = models.DateField() twitter = models.CharField(max_length=100, null=True, blank=True) facebook = models.CharField(max_length=250, null=True, blank=True) height = models.FloatField(default=0.0) weight = models.FloatField(default=0.0) created = models.DateTimeField(auto_now_add=True) image = models.ImageField(null=True, blank=True, max_length=255) salary = models.IntegerField(default=0) college = models.CharField(max_length=100, null=True, blank=True) class Meta: ordering = ['-last_name'] unique_together = ["team", "first_name", "last_name", "jersey_number"] def __unicode__(self): str1 = u"{} {}".format(self.first_name, self.last_name) return u'%s' % str1 def get_absolute_url(self): return reverse('myfavteam.views.player', args=[str(self.id)]) def age(self): return int((datetime.date.today() - self.birthdate).days / 365.25) class PlayerNews(models.Model): news = models.ForeignKey('News') player = models.ForeignKey('Player') class Meta: unique_together = ["news", "player"] def __unicode__(self): str1 = u"{} - {}".format(self.news, self.player) return u'%s' % str1 class TournamentTeam(models.Model): tournament = models.ForeignKey('Tournament') team = models.ForeignKey('Team') class Meta: unique_together = ["tournament", "team"] def __unicode__(self): str1 = u"{} - {}".format(self.tournament, self.team) return u'%s' % str1 #use in case a player belongs to different teams or divisions class Roster(models.Model): team = models.ForeignKey('Team') player = models.ForeignKey('Player') class Meta: unique_together = ["team", "player"] def __unicode__(self): str1 = u"{} - {}".format(self.team, self.player) return u'%s' % str1 class TeamPicture(models.Model): team = models.ForeignKey('Team') image = models.ImageField(max_length=255) uploaded = models.DateTimeField(auto_now_add=True) def __unicode__(self): str1 = u"{} - Pic{}".format(self.team.name, self.id) return u'%s' % str1 class Website(models.Model): name = models.CharField(max_length=50) link = models.CharField(max_length=200) image = models.ImageField(null=True, blank=True, max_length=255) def get_absolute_url(self): return u'%s' % self.link def __unicode__(self): return u'%s' % self.name class Standings(models.Model): tournament =
models.ForeignKey('Tournament') team = models.ForeignKey('Team') wins = models.IntegerField(default=0) losses = models.IntegerField(default=0) ties = models.IntegerField(default=0) conference_wins = models.IntegerField(default=0) conference_losses = models.IntegerField(default=0) conference_ties = models.IntegerField(default=0) division_wins = models.IntegerField(default=0) division_losses = models.IntegerField(default=0) division_ties = models.IntegerField(default=0) home_wins = models.IntegerField(default=0) home_losses = models.IntegerField(default=0) home_ties = models.IntegerField(default=0) road_wins = models.IntegerField(default=0) road_losses = models.IntegerField(default=0) road_ties = models.IntegerField(default=0) last10_wins = models.IntegerField(default=0) last10_losses = models.IntegerField(default=0) last10_ties = models.IntegerField(default=0) last5_wins = models.IntegerField(default=0) last5_losses = models.IntegerField(default=0) last5_ties = models.IntegerField(default=0) streak = models.CharField(max_length=5, null=True, blank=True) class Meta: unique_together = ["tournament", "team"] def __unicode__(self): str1 = u"{} - {}".format(self.tournament, self.team) return u'%s' % str1 class BasketballPlayerStats(models.Model): tournament = models.ForeignKey('Tournament') player = models.ForeignKey('Player') points_per_game = models.FloatField(default=0.0) rebounds_per_game = models.FloatField(default=0.0) assists_per_game = models.FloatField(default=0.0) minutes_per_game = models.FloatField(default=0.0) field_goals_pct = models.FloatField(default=0.0) field_goals_3pt_pct = models.FloatField(default=0.0) free_throw_pct = models.FloatField(default=0.0) steals_per_game = models.FloatField(default=0.0) turnovers_per_game = models.FloatField(default=0.0) fouls_per_game = models.FloatField(default=0.0) class Meta: unique_together = ["tournament", "player"] def __unicode__(self): str1 = u"{} - PPG: {}".format(self.player, self.points_per_game) return u'%s' % str1 class TwitterLists(models.Model): name = models.CharField(max_length=50, null=True, blank=True) type = models.CharField(max_length=20, choices = (('main', 'main'), ('players', 'players'), ('insiders', 'insiders'), ('staff', 'staff')))
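# A minimal sketch (hypothetical dates) of the arithmetic behind Player.age()
# above. Dividing the day count by 365.25 averages out leap years, and int()
# truncates, so the result is completed years of age.
def _example_age():
    import datetime
    birthdate = datetime.date(1988, 3, 23)
    today = datetime.date(2015, 3, 22)   # one day before the 27th birthday
    assert int((today - birthdate).days / 365.25) == 26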
mit
-8,755,935,665,425,799,000
36.788079
83
0.596916
false
jalavik/invenio-workflows
invenio_workflows/upgrades/workflows_2014_08_12_task_results_to_dict.py
6
3636
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2014 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Upgrade script for removing WorkflowsTaskResult class to use a dict.""" import os import cPickle import base64 from invenio.legacy.dbquery import run_sql depends_on = ["workflows_2014_08_12_initial"] def info(): """Display info.""" return "Will convert all task results to dict instead of object" def do_upgrade(): """Perform the upgrade from WorkflowsTaskResult to a simple dict.""" class WorkflowsTaskResult(object): """The class to contain the current task results.""" __module__ = os.path.splitext(os.path.basename(__file__))[0] def __init__(self, task_name, name, result): """Create a task result passing task_name, name and result.""" self.task_name = task_name self.name = name self.result = result def to_dict(self): """Return a dictionary representing a full task result.""" return { 'name': self.name, 'task_name': self.task_name, 'result': self.result } from invenio_workflows import utils utils.WorkflowsTaskResult = WorkflowsTaskResult all_data_objects = run_sql("SELECT id, _extra_data FROM bwlOBJECT") for object_id, _extra_data in all_data_objects: extra_data = cPickle.loads(base64.b64decode(_extra_data)) if "_tasks_results" in extra_data: extra_data["_tasks_results"] = convert_to_dict( extra_data["_tasks_results"] ) _extra_data = base64.b64encode(cPickle.dumps(extra_data)) run_sql("UPDATE bwlOBJECT set _extra_data=%s WHERE id=%s", (_extra_data, str(object_id))) def estimate(): """Estimate running time of upgrade in seconds (optional).""" return 1 def convert_to_dict(results): """Convert WorkflowTask object to dict.""" results_new = {} if isinstance(results, list): if len(results) == 0: return results_new else: raise RuntimeError("Cannot convert task result.") for task, res in results.iteritems(): result_list = [] for result in res: if isinstance(result, dict): result_list.append(result) elif hasattr(result, "to_dict"): new_result = result.to_dict() # Set default template new_result["template"] = map_existing_templates(task) result_list.append(new_result) results_new[task] = result_list return results_new def map_existing_templates(name): """Return a template given a task name, else return default.""" mapping = { "fulltext_download": "workflows/results/files.html", "refextract": "workflows/results/refextract.html", } return mapping.get(name, "workflows/results/default.html")
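# A minimal sketch (hypothetical task name and payload) of what convert_to_dict()
# above produces for one legacy result object exposing to_dict().
def _example_convert_to_dict():
    class FakeResult(object):
        def to_dict(self):
            return {'name': 'refextract', 'task_name': 'refextract',
                    'result': {'references': 12}}
    converted = convert_to_dict({'refextract': [FakeResult()]})
    # map_existing_templates() recognises "refextract" and fills in its template:
    assert converted['refextract'][0]['template'] == 'workflows/results/refextract.html'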
gpl-2.0
-5,781,343,559,874,006,000
32.981308
74
0.632288
false
mhrivnak/pulp_puppet
devel/pulp_puppet/devel/base_cli.py
8
2333
import copy import logging import os.path import unittest import mock import okaara from pulp.bindings.bindings import Bindings from pulp.bindings.server import PulpConnection from pulp.client.extensions.core import PulpPrompt, ClientContext, PulpCli from pulp.client.extensions.exceptions import ExceptionHandler from pulp.common.config import Config # Can be used by tests to simulate a task response. Be sure to copy this before # making any changes, or better yet, use the method in ExtensionsTests. TASK_TEMPLATE = { "exception": None, "task_group_id": 'default-group', "task_id": 'default-id', "tags": [], "reasons": [], "start_time": None, "traceback": None, "state": None, "finish_time": None, "schedule_id": None, "result": None, "progress": {}, "response": None, "call_request_group_id": 'default-group', "call_request_id": 'default-id', "call_request_tags": [], } class ExtensionTests(unittest.TestCase): """ Base unit test class for all extension unit tests. """ def setUp(self): super(ExtensionTests, self).setUp() config_filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data', 'test-override-admin.conf') self.config = Config(config_filename) self.server_mock = mock.Mock() self.pulp_connection = PulpConnection('', server_wrapper=self.server_mock) self.bindings = Bindings(self.pulp_connection) # Disabling color makes it easier to grep results since the character codes aren't there self.recorder = okaara.prompt.Recorder() self.prompt = PulpPrompt(enable_color=False, output=self.recorder, record_tags=True) self.logger = logging.getLogger('pulp') self.exception_handler = ExceptionHandler(self.prompt, self.config) self.context = ClientContext(self.bindings, self.config, self.logger, self.prompt, self.exception_handler) self.cli = PulpCli(self.context) self.context.cli = self.cli def task(self): """ :return: dict that contains all of the values needed to simulate a task coming back from the server :rtype: dict """ return copy.copy(TASK_TEMPLATE)
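# A minimal sketch (hypothetical keys/values) of why the "be sure to copy this"
# warning on TASK_TEMPLATE matters: copy.copy() is shallow, so mutable values
# such as the "tags" list stay shared with the template, and task() above
# inherits the same caveat.
def _example_task_template_copying():
    shallow = copy.copy(TASK_TEMPLATE)
    shallow['state'] = 'finished'        # rebinding a key never touches the template
    deep = copy.deepcopy(TASK_TEMPLATE)  # needed before mutating nested values
    deep['tags'].append('unit-test')     # safe: deepcopy severed the aliasing
    assert TASK_TEMPLATE['tags'] == []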
gpl-2.0
7,672,082,009,581,488,000
31.402778
96
0.64895
false
yekaylee/capstone
capstoneRepo/FacialRecognition/FinalFaceRecog_code/picam_version/config.py
3
2727
# Raspberry Pi Face Recognition Treasure Box Configuration # Copyright 2013 Tony DiCola # Edit the values below to configure the training and usage of the # face recognition box. # Pi GPIO port which is connected to the lock servo signal line. LOCK_SERVO_PIN = 18 # Pulse width value (in microseconds) for the servo at the unlocked and locked # position. Center should be a value of 1500, max left a value of 1000, and # max right a value of 2000. LOCK_SERVO_UNLOCKED = 2000 LOCK_SERVO_LOCKED = 1100 # Pi GPIO port which is connected to the button. BUTTON_PIN = 25 # Down and up values for the button. The code expects to detect a down to up # transition as an activation of the button. Therefore a normally open button # should be False (low) when down and True (high) when up. BUTTON_DOWN = False # Low signal BUTTON_UP = True # High signal # Threshold for the confidence of a recognized face before it's considered a # positive match. Confidence values below this threshold will be considered # a positive match because the lower the confidence value, or distance, the # more confident the algorithm is that the face was correctly detected. # Start with a value of 3000, but you might need to tweak this value down if # you're getting too many false positives (incorrectly recognized faces), or up # if too many false negatives (undetected faces). POSITIVE_THRESHOLD = 3000.0 # File to save and load face recognizer model. TRAINING_FILE = 'training.xml' # Directories which contain the positive and negative training image data. POSITIVE_DIR = './training/positive' NEGATIVE_DIR = './training/negative' # Value for positive and negative labels passed to face recognition model. # Can be any integer values, but must be unique from each other. # You shouldn't have to change these values. POSITIVE_LABEL = 1 NEGATIVE_LABEL = 2 # Size (in pixels) to resize images for training and prediction. # Don't change this unless you also change the size of the training images. FACE_WIDTH = 92 FACE_HEIGHT = 112 # Face detection cascade classifier configuration. # You don't need to modify this unless you know what you're doing. # See: http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html HAAR_FACES = 'haarcascade_frontalface_alt.xml' HAAR_SCALE_FACTOR = 1.3 HAAR_MIN_NEIGHBORS = 4 HAAR_MIN_SIZE = (30, 30) # Filename to use when saving the most recently captured image for debugging. DEBUG_IMAGE = 'capture.pgm' def get_camera(): # Camera to use for capturing images. # Use this code for capturing from the Pi camera: import picam return picam.OpenCVCapture() # Use this code for capturing from a webcam: # import webcam # return webcam.OpenCVCapture(device_id=0)
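# A minimal sketch of how POSITIVE_THRESHOLD above is meant to be consumed:
# lower confidence (distance) means a better match, so a positive recognition
# is a value BELOW the threshold. The (label, confidence) pair would come from
# a trained OpenCV recognizer's predict() call; the values here are hypothetical.
def _example_threshold_check(label, confidence):
    if label == POSITIVE_LABEL and confidence < POSITIVE_THRESHOLD:
        return True    # e.g. (POSITIVE_LABEL, 2500.0) -> match
    return False       # e.g. (POSITIVE_LABEL, 3500.0) -> too far away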
bsd-3-clause
-2,743,251,575,847,181,000
39.102941
79
0.757609
false
xbmc/atv2
xbmc/lib/libPython/Python/Lib/idlelib/textView.py
15
2789
"""Simple text browser for IDLE """ from Tkinter import * import tkMessageBox class TextViewer(Toplevel): """ simple text viewer dialog for idle """ def __init__(self, parent, title, fileName, data=None): """If data exists, load it into viewer, otherwise try to load file. fileName - string, should be an absoulute filename """ Toplevel.__init__(self, parent) self.configure(borderwidth=5) self.geometry("=%dx%d+%d+%d" % (625, 500, parent.winfo_rootx() + 10, parent.winfo_rooty() + 10)) #elguavas - config placeholders til config stuff completed self.bg = '#ffffff' self.fg = '#000000' self.CreateWidgets() self.title(title) self.transient(parent) self.grab_set() self.protocol("WM_DELETE_WINDOW", self.Ok) self.parent = parent self.textView.focus_set() #key bindings for this dialog self.bind('<Return>',self.Ok) #dismiss dialog self.bind('<Escape>',self.Ok) #dismiss dialog if data: self.textView.insert(0.0, data) else: self.LoadTextFile(fileName) self.textView.config(state=DISABLED) self.wait_window() def LoadTextFile(self, fileName): textFile = None try: textFile = open(fileName, 'r') except IOError: tkMessageBox.showerror(title='File Load Error', message='Unable to load file %r .' % (fileName,)) else: self.textView.insert(0.0,textFile.read()) def CreateWidgets(self): frameText = Frame(self, relief=SUNKEN, height=700) frameButtons = Frame(self) self.buttonOk = Button(frameButtons, text='Close', command=self.Ok, takefocus=FALSE) self.scrollbarView = Scrollbar(frameText, orient=VERTICAL, takefocus=FALSE, highlightthickness=0) self.textView = Text(frameText, wrap=WORD, highlightthickness=0, fg=self.fg, bg=self.bg) self.scrollbarView.config(command=self.textView.yview) self.textView.config(yscrollcommand=self.scrollbarView.set) self.buttonOk.pack() self.scrollbarView.pack(side=RIGHT,fill=Y) self.textView.pack(side=LEFT,expand=TRUE,fill=BOTH) frameButtons.pack(side=BOTTOM,fill=X) frameText.pack(side=TOP,expand=TRUE,fill=BOTH) def Ok(self, event=None): self.destroy() if __name__ == '__main__': #test the dialog root=Tk() Button(root,text='View', command=lambda:TextViewer(root,'Text','./textView.py')).pack() root.mainloop()
gpl-2.0
-1,601,551,694,093,569,500
34.75641
77
0.578702
false
axtra/ansible
v2/ansible/module_utils/database.py
401
5839
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c) 2014, Toshio Kuratomi <[email protected]> # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. class SQLParseError(Exception): pass class UnclosedQuoteError(SQLParseError): pass # maps a type of identifier to the maximum number of dot levels that are # allowed to specify that identifier. 
For example, a database column can be # specified by up to 4 levels: database.schema.table.column _PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1) _MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1) def _find_end_quote(identifier, quote_char): accumulate = 0 while True: try: quote = identifier.index(quote_char) except ValueError: raise UnclosedQuoteError accumulate = accumulate + quote try: next_char = identifier[quote+1] except IndexError: return accumulate if next_char == quote_char: try: identifier = identifier[quote+2:] accumulate = accumulate + 2 except IndexError: raise UnclosedQuoteError else: return accumulate def _identifier_parse(identifier, quote_char): if not identifier: raise SQLParseError('Identifier name unspecified or unquoted trailing dot') already_quoted = False if identifier.startswith(quote_char): already_quoted = True try: end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1 except UnclosedQuoteError: already_quoted = False else: if end_quote < len(identifier) - 1: if identifier[end_quote+1] == '.': dot = end_quote + 1 first_identifier = identifier[:dot] next_identifier = identifier[dot+1:] further_identifiers = _identifier_parse(next_identifier, quote_char) further_identifiers.insert(0, first_identifier) else: raise SQLParseError('User escaped identifiers must escape extra quotes') else: further_identifiers = [identifier] if not already_quoted: try: dot = identifier.index('.') except ValueError: identifier = identifier.replace(quote_char, quote_char*2) identifier = ''.join((quote_char, identifier, quote_char)) further_identifiers = [identifier] else: if dot == 0 or dot >= len(identifier) - 1: identifier = identifier.replace(quote_char, quote_char*2) identifier = ''.join((quote_char, identifier, quote_char)) further_identifiers = [identifier] else: first_identifier = identifier[:dot] next_identifier = identifier[dot+1:] further_identifiers = _identifier_parse(next_identifier, quote_char) first_identifier = first_identifier.replace(quote_char, quote_char*2) first_identifier = ''.join((quote_char, first_identifier, quote_char)) further_identifiers.insert(0, first_identifier) return further_identifiers def pg_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='"') if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type])) return '.'.join(identifier_fragments) def mysql_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='`') if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type])) special_cased_fragments = [] for fragment in identifier_fragments: if fragment == '`*`': special_cased_fragments.append('*') else: special_cased_fragments.append(fragment) return '.'.join(special_cased_fragments)
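# A minimal sketch (hypothetical identifiers) of the quoting behaviour
# implemented above.
def _example_quoting():
    assert pg_quote_identifier('public.customer', 'table') == '"public"."customer"'
    assert pg_quote_identifier('we"ird', 'database') == '"we""ird"'  # quotes doubled
    assert mysql_quote_identifier('mydb.*', 'table') == '`mydb`.*'   # `*` special-cased
    try:
        pg_quote_identifier('a.b.c', 'database')  # too many dots for a database name
    except SQLParseError:
        pass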
gpl-3.0
-2,989,408,661,956,599,300
44.617188
134
0.66347
false
rldhont/Quantum-GIS
tests/src/python/test_authmanager_password_postgres.py
13
5704
# -*- coding: utf-8 -*- """ Tests for auth manager Password access to postgres. This is an integration test for QGIS Desktop Auth Manager postgres provider that checks if QGIS can use a stored auth manager auth configuration to access a Password protected postgres. It uses a docker container as postgres/postgis server with certificates from tests/testdata/auth_system/certs_keys Use docker-compose -f .ci/travis/linux/docker-compose.travis.yml up postgres to start the server. TODO: - Document how to restore the server data - Document how to use docker inspect to find the IP of the docker postgres server and set a host alias (or some other smart idea to do the same) .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ import os import time import signal import stat import subprocess import tempfile from shutil import rmtree from contextlib import contextmanager from utilities import unitTestDataPath from qgis.core import ( QgsApplication, QgsAuthManager, QgsAuthMethodConfig, QgsVectorLayer, QgsDataSourceUri, QgsWkbTypes, ) from qgis.PyQt.QtNetwork import QSslCertificate from qgis.testing import ( start_app, unittest, ) __author__ = 'Alessandro Pasotti' __date__ = '25/10/2016' __copyright__ = 'Copyright 2016, The QGIS Project' qgis_app = start_app() @contextmanager def ScopedCertAuthority(username, password, sslrootcert_path=None): """ Sets up the certificate authority in the authentication manager for the lifetime of this class and removes it when the class is deleted. """ authm = QgsApplication.authManager() auth_config = QgsAuthMethodConfig("Basic") auth_config.setConfig('username', username) auth_config.setConfig('password', password) auth_config.setName('test_password_auth_config') if sslrootcert_path: sslrootcert = QSslCertificate.fromPath(sslrootcert_path) assert sslrootcert is not None authm.storeCertAuthorities(sslrootcert) authm.rebuildCaCertsCache() authm.rebuildTrustedCaCertsCache() authm.rebuildCertTrustCache() assert (authm.storeAuthenticationConfig(auth_config)[0]) assert auth_config.isValid() yield auth_config if sslrootcert_path: for cert in sslrootcert: authm.removeCertAuthority(cert) authm.rebuildCaCertsCache() authm.rebuildTrustedCaCertsCache() authm.rebuildCertTrustCache() class TestAuthManager(unittest.TestCase): @classmethod def setUpClass(cls): """Run before all tests: Creates an auth configuration""" cls.username = 'docker' cls.password = 'docker' cls.dbname = 'qgis_test' cls.hostname = 'postgres' cls.port = '5432' authm = QgsApplication.authManager() assert (authm.setMasterPassword('masterpassword', True)) cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys') cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'chains_subissuer-issuer-root_issuer2-root2.pem') def setUp(self): """Run before each test.""" pass def tearDown(self): """Run after each test.""" pass @classmethod def _getPostGISLayer(cls, type_name, layer_name=None, authcfg=None, sslmode=QgsDataSourceUri.SslVerifyFull): """ PG layer factory """ if layer_name is None: layer_name = 'pg_' + type_name uri = QgsDataSourceUri() uri.setWkbType(QgsWkbTypes.Point) uri.setConnection(cls.hostname, cls.port, cls.dbname, "", "", sslmode, authcfg) uri.setKeyColumn('pk') uri.setSrid('EPSG:4326') uri.setDataSource('qgis_test', 'someData', "geom", "", "pk") # Note: do not expand here! 
layer = QgsVectorLayer(uri.uri(False), layer_name, 'postgres') return layer def testValidAuthAccess(self): """ Access the protected layer with valid credentials """ with ScopedCertAuthority(self.username, self.password, self.sslrootcert_path) as auth_config: pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=auth_config.id()) self.assertTrue(pg_layer.isValid()) def testInvalidAuthAccess(self): """ Access the protected layer with invalid credentials """ with ScopedCertAuthority(self.username, self.password, self.sslrootcert_path) as auth_config: pg_layer = self._getPostGISLayer('testlayer_èé') self.assertFalse(pg_layer.isValid()) def testSslRequireNoCaCheck(self): """ Access the protected layer with valid credentials and ssl require but without the required cert authority. This should work. """ with ScopedCertAuthority(self.username, self.password) as auth_config: pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=auth_config.id(), sslmode=QgsDataSourceUri.SslRequire) self.assertTrue(pg_layer.isValid()) def testSslVerifyFullCaCheck(self): """ Access the protected layer with valid credentials and ssl verify full but without the required cert authority. This should not work. """ with ScopedCertAuthority(self.username, self.password) as auth_config: pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=auth_config.id()) self.assertFalse(pg_layer.isValid()) if __name__ == '__main__': unittest.main()
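# A minimal sketch of the ScopedCertAuthority pattern above used outside a test
# method; the credentials mirror the docker setup in setUpClass, and the
# connection values are hypothetical.
def _example_scoped_authority():
    with ScopedCertAuthority('docker', 'docker') as auth_config:
        uri = QgsDataSourceUri()
        uri.setConnection('postgres', '5432', 'qgis_test', '', '',
                          QgsDataSourceUri.SslRequire, auth_config.id())
        return uri.uri(False)
    # on exit, any stored cert authorities are removed and the caches rebuilt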
gpl-2.0
5,967,242,638,824,178,000
33.944785
148
0.685218
false
elpaso/QGIS
python/plugins/processing/algs/qgis/voronoi.py
11
29797
# -*- coding: utf-8 -*- """ *************************************************************************** voronoi.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' ############################################################################# # # Voronoi diagram calculator/ Delaunay triangulator # Translated to Python by Bill Simons # September, 2005 # # Additional changes by Carson Farmer added November 2010 # # Calculate Delaunay triangulation or the Voronoi polygons for a set of # 2D input points. # # Derived from code bearing the following notice: # # The author of this software is Steven Fortune. Copyright (c) 1994 by AT&T # Bell Laboratories. # Permission to use, copy, modify, and distribute this software for any # purpose without fee is hereby granted, provided that this entire notice # is included in all copies of any software which is or includes a copy # or modification of this software and in all copies of the supporting # documentation for such software. # THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED # WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR AT&T MAKE ANY # REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY # OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. # # Comments were incorporated from Shane O'Sullivan's translation of the # original code into C++ (http://mapviewer.skynet.ie/voronoi.html) # # Steve Fortune's homepage: http://netlib.bell-labs.com/cm/cs/who/sjf/index.html # ############################################################################# def usage(): # fix_print_with_import print(""" voronoi - compute Voronoi diagram or Delaunay triangulation voronoi [-t -p -d] [filename] Voronoi reads from filename (or standard input if no filename given) for a set of points in the plane and writes either the Voronoi diagram or the Delaunay triangulation to the standard output. Each input line should consist of two real numbers, separated by white space. If option -t is present, the Delaunay triangulation is produced. Each output line is a triple i j k, which are the indices of the three points in a Delaunay triangle. Points are numbered starting at 0. If option -t is not present, the Voronoi diagram is produced. There are four output record types. s a b indicates that an input point at coordinates a b was seen. l a b c indicates a line with equation ax + by = c. v a b indicates a vertex at a b. e l v1 v2 indicates a Voronoi segment which is a subsegment of line number l with endpoints numbered v1 and v2. If v1 or v2 is -1, the line extends to infinity. Other options include: d Print debugging info p Produce output suitable for input to plot (1), rather than the forms described above. On unsorted data uniformly distributed in the unit square, voronoi uses about 20n+140 bytes of storage. AUTHOR Steve J. Fortune (1987) A Sweepline Algorithm for Voronoi Diagrams, Algorithmica 2, 153-174. 
""") ############################################################################# # # For programmatic use two functions are available: # # computeVoronoiDiagram(points) # # Takes a list of point objects (which must have x and y fields). # Returns a 3-tuple of: # # (1) a list of 2-tuples, which are the x,y coordinates of the # Voronoi diagram vertices # (2) a list of 3-tuples (a,b,c) which are the equations of the # lines in the Voronoi diagram: a*x + b*y = c # (3) a list of 3-tuples, (l, v1, v2) representing edges of the # Voronoi diagram. l is the index of the line, v1 and v2 are # the indices of the vetices at the end of the edge. If # v1 or v2 is -1, the line extends to infinity. # # computeDelaunayTriangulation(points): # # Takes a list of point objects (which must have x and y fields). # Returns a list of 3-tuples: the indices of the points that form a # Delaunay triangle. # ############################################################################# import math import sys import getopt TOLERANCE = 1e-9 BIG_FLOAT = 1e38 # ------------------------------------------------------------------ class Context(object): def __init__(self): self.doPrint = 0 self.debug = 0 self.plot = 0 self.triangulate = False self.vertices = [] # list of vertex 2-tuples: (x,y) self.lines = [] # equation of line 3-tuple (a b c), for the equation of the line a*x+b*y = c self.edges = [] # edge 3-tuple: (line index, vertex 1 index, vertex 2 index) if either vertex index is -1, the edge extends to infiinity self.triangles = [] # 3-tuple of vertex indices self.polygons = {} # a dict of site:[edges] pairs def circle(self, x, y, rad): pass def clip_line(self, edge): pass def line(self, x0, y0, x1, y1): pass def outSite(self, s): if self.debug: # fix_print_with_import print("site (%d) at %f %f" % (s.sitenum, s.x, s.y)) elif(self.triangulate): pass elif self.plot: self.circle(s.x, s.y, None) # No radius? 
elif(self.doPrint): # fix_print_with_import print("s %f %f" % (s.x, s.y)) def outVertex(self, s): self.vertices.append((s.x, s.y)) if(self.debug): # fix_print_with_import print("vertex(%d) at %f %f" % (s.sitenum, s.x, s.y)) elif(self.triangulate): pass elif(self.doPrint and not self.plot): # fix_print_with_import print("v %f %f" % (s.x, s.y)) def outTriple(self, s1, s2, s3): self.triangles.append((s1.sitenum, s2.sitenum, s3.sitenum)) if(self.debug): # fix_print_with_import print("circle through left=%d right=%d bottom=%d" % (s1.sitenum, s2.sitenum, s3.sitenum)) elif(self.triangulate and self.doPrint and not self.plot): # fix_print_with_import print("%d %d %d" % (s1.sitenum, s2.sitenum, s3.sitenum)) def outBisector(self, edge): self.lines.append((edge.a, edge.b, edge.c)) if(self.debug): # fix_print_with_import print("line(%d) %gx+%gy=%g, bisecting %d %d" % (edge.edgenum, edge.a, edge.b, edge.c, edge.reg[0].sitenum, edge.reg[1].sitenum)) elif(self.triangulate): if(self.plot): self.line(edge.reg[0].x, edge.reg[0].y, edge.reg[1].x, edge.reg[1].y) elif(self.doPrint and not self.plot): # fix_print_with_import print("l %f %f %f" % (edge.a, edge.b, edge.c)) def outEdge(self, edge): sitenumL = -1 if edge.ep[Edge.LE] is not None: sitenumL = edge.ep[Edge.LE].sitenum sitenumR = -1 if edge.ep[Edge.RE] is not None: sitenumR = edge.ep[Edge.RE].sitenum if edge.reg[0].sitenum not in self.polygons: self.polygons[edge.reg[0].sitenum] = [] if edge.reg[1].sitenum not in self.polygons: self.polygons[edge.reg[1].sitenum] = [] self.polygons[edge.reg[0].sitenum].append((edge.edgenum, sitenumL, sitenumR)) self.polygons[edge.reg[1].sitenum].append((edge.edgenum, sitenumL, sitenumR)) self.edges.append((edge.edgenum, sitenumL, sitenumR)) if(not self.triangulate): if self.plot: self.clip_line(edge) elif(self.doPrint): # fix_print_with_import print("e %d %d %d" % (edge.edgenum, sitenumL, sitenumR)) # ------------------------------------------------------------------ def voronoi(siteList, context): edgeList = EdgeList(siteList.xmin, siteList.xmax, len(siteList)) priorityQ = PriorityQueue(siteList.ymin, siteList.ymax, len(siteList)) siteIter = siteList.iterator() bottomsite = next(siteIter) context.outSite(bottomsite) newsite = next(siteIter) minpt = Site(-BIG_FLOAT, -BIG_FLOAT) while True: if not priorityQ.isEmpty(): minpt = priorityQ.getMinPt() if (newsite and (priorityQ.isEmpty() or cmp(newsite, minpt) < 0)): # newsite is smallest - this is a site event context.outSite(newsite) # get first Halfedge to the LEFT and RIGHT of the new site lbnd = edgeList.leftbnd(newsite) rbnd = lbnd.right # if this halfedge has no edge, bot = bottom site (whatever that is) # create a new edge that bisects bot = lbnd.rightreg(bottomsite) edge = Edge.bisect(bot, newsite) context.outBisector(edge) # create a new Halfedge, setting its pm field to 0 and insert # this new bisector edge between the left and right vectors in # a linked list bisector = Halfedge(edge, Edge.LE) edgeList.insert(lbnd, bisector) # if the new bisector intersects with the left edge, remove # the left edge's vertex, and put in the new one p = lbnd.intersect(bisector) if p is not None: priorityQ.delete(lbnd) priorityQ.insert(lbnd, p, newsite.distance(p)) # create a new Halfedge, setting its pm field to 1 # insert the new Halfedge to the right of the original bisector lbnd = bisector bisector = Halfedge(edge, Edge.RE) edgeList.insert(lbnd, bisector) # if this new bisector intersects with the right Halfedge p = bisector.intersect(rbnd) if p is not None: # push the Halfedge 
into the ordered linked list of vertices priorityQ.insert(bisector, p, newsite.distance(p)) newsite = next(siteIter) elif not priorityQ.isEmpty(): # intersection is smallest - this is a vector (circle) event # pop the Halfedge with the lowest vector off the ordered list of # vectors. Get the Halfedge to the left and right of the above HE # and also the Halfedge to the right of the right HE lbnd = priorityQ.popMinHalfedge() llbnd = lbnd.left rbnd = lbnd.right rrbnd = rbnd.right # get the Site to the left of the left HE and to the right of # the right HE which it bisects bot = lbnd.leftreg(bottomsite) top = rbnd.rightreg(bottomsite) # output the triple of sites, stating that a circle goes through them mid = lbnd.rightreg(bottomsite) context.outTriple(bot, top, mid) # get the vertex that caused this event and set the vertex number # couldn't do this earlier since we didn't know when it would be processed v = lbnd.vertex siteList.setSiteNumber(v) context.outVertex(v) # set the endpoint of the left and right Halfedge to be this vector if lbnd.edge.setEndpoint(lbnd.pm, v): context.outEdge(lbnd.edge) if rbnd.edge.setEndpoint(rbnd.pm, v): context.outEdge(rbnd.edge) # delete the lowest HE, remove all vertex events to do with the # right HE and delete the right HE edgeList.delete(lbnd) priorityQ.delete(rbnd) edgeList.delete(rbnd) # if the site to the left of the event is higher than the Site # to the right of it, then swap them and set 'pm' to RIGHT pm = Edge.LE if bot.y > top.y: bot, top = top, bot pm = Edge.RE # Create an Edge (or line) that is between the two Sites. This # creates the formula of the line, and assigns a line number to it edge = Edge.bisect(bot, top) context.outBisector(edge) # create a HE from the edge bisector = Halfedge(edge, pm) # insert the new bisector to the right of the left HE # set one endpoint to the new edge to be the vector point 'v' # If the site to the left of this bisector is higher than the right # Site, then this endpoint is put in position 0; otherwise in pos 1 edgeList.insert(llbnd, bisector) if edge.setEndpoint(Edge.RE - pm, v): context.outEdge(edge) # if left HE and the new bisector don't intersect, then delete # the left HE, and reinsert it p = llbnd.intersect(bisector) if p is not None: priorityQ.delete(llbnd) priorityQ.insert(llbnd, p, bot.distance(p)) # if right HE and the new bisector don't intersect, then reinsert it p = bisector.intersect(rrbnd) if p is not None: priorityQ.insert(bisector, p, bot.distance(p)) else: break he = edgeList.leftend.right while he is not edgeList.rightend: context.outEdge(he.edge) he = he.right Edge.EDGE_NUM = 0 # ------------------------------------------------------------------ def isEqual(a, b, relativeError=TOLERANCE): # is nearly equal to within the allowed relative error norm = max(abs(a), abs(b)) return (norm < relativeError) or (abs(a - b) < (relativeError * norm)) # ------------------------------------------------------------------ class Site(object): def __init__(self, x=0.0, y=0.0, sitenum=0): self.x = x self.y = y self.sitenum = sitenum def dump(self): # fix_print_with_import print("Site #%d (%g, %g)" % (self.sitenum, self.x, self.y)) def __eq__(self, other): return (self.x == other.x) and (self.y == other.y) def __lt__(self, other): if self.y < other.y: return True elif self.y > other.y: return False elif self.x < other.x: return True else: return False def distance(self, other): dx = self.x - other.x dy = self.y - other.y return math.sqrt(dx * dx + dy * dy) # 
------------------------------------------------------------------ class Edge(object): LE = 0 RE = 1 EDGE_NUM = 0 DELETED = {} # marker value def __init__(self): self.a = 0.0 self.b = 0.0 self.c = 0.0 self.ep = [None, None] self.reg = [None, None] self.edgenum = 0 def dump(self): # fix_print_with_import print("(#%d a=%g, b=%g, c=%g)" % (self.edgenum, self.a, self.b, self.c)) # fix_print_with_import print("ep", self.ep) # fix_print_with_import print("reg", self.reg) def setEndpoint(self, lrFlag, site): self.ep[lrFlag] = site if self.ep[Edge.RE - lrFlag] is None: return False return True @staticmethod def bisect(s1, s2): newedge = Edge() newedge.reg[0] = s1 # store the sites that this edge is bisecting newedge.reg[1] = s2 # to begin with, there are no endpoints on the bisector - it goes to infinity # ep[0] and ep[1] are None # get the difference in x dist between the sites dx = float(s2.x - s1.x) dy = float(s2.y - s1.y) adx = abs(dx) # make sure that the difference in positive ady = abs(dy) # get the slope of the line newedge.c = float(s1.x * dx + s1.y * dy + (dx * dx + dy * dy) * 0.5) if adx > ady: # set formula of line, with x fixed to 1 newedge.a = 1.0 newedge.b = dy / dx newedge.c /= dx else: # set formula of line, with y fixed to 1 newedge.b = 1.0 newedge.a = dx / dy newedge.c /= dy newedge.edgenum = Edge.EDGE_NUM Edge.EDGE_NUM += 1 return newedge # ------------------------------------------------------------------ class Halfedge(object): def __init__(self, edge=None, pm=Edge.LE): self.left = None # left Halfedge in the edge list self.right = None # right Halfedge in the edge list self.qnext = None # priority queue linked list pointer self.edge = edge # edge list Edge self.pm = pm self.vertex = None # Site() self.ystar = BIG_FLOAT def dump(self): # fix_print_with_import print("Halfedge--------------------------") # fix_print_with_import print("left: ", self.left) # fix_print_with_import print("right: ", self.right) # fix_print_with_import print("edge: ", self.edge) # fix_print_with_import print("pm: ", self.pm) # fix_print_with_import print("vertex:") if self.vertex: self.vertex.dump() else: # fix_print_with_import print("None") # fix_print_with_import print("ystar: ", self.ystar) def __eq__(self, other): return (self.vertex.x == other.vertex.x) and (self.ystar == other.ystar) def __lt__(self, other): if self.ystar < other.ystar: return True elif self.ystar > other.ystar: return False elif self.vertex.x < other.vertex.x: return True else: return False def leftreg(self, default): if not self.edge: return default elif self.pm == Edge.LE: return self.edge.reg[Edge.LE] else: return self.edge.reg[Edge.RE] def rightreg(self, default): if not self.edge: return default elif self.pm == Edge.LE: return self.edge.reg[Edge.RE] else: return self.edge.reg[Edge.LE] # returns True if p is to right of halfedge self def isPointRightOf(self, pt): e = self.edge topsite = e.reg[1] right_of_site = pt.x > topsite.x if(right_of_site and self.pm == Edge.LE): return True if(not right_of_site and self.pm == Edge.RE): return False if(e.a == 1.0): dyp = pt.y - topsite.y dxp = pt.x - topsite.x fast = 0 if ((not right_of_site and e.b < 0.0) or (right_of_site and e.b >= 0.0)): above = dyp >= e.b * dxp fast = above else: above = pt.x + pt.y * e.b > e.c if(e.b < 0.0): above = not above if (not above): fast = 1 if (not fast): dxs = topsite.x - (e.reg[0]).x above = e.b * (dxp * dxp - dyp * dyp) < dxs * dyp * (1.0 + 2.0 * dxp / dxs + e.b * e.b) if(e.b < 0.0): above = not above else: # e.b == 1.0 yl = e.c - e.a * pt.x t1 = 
pt.y - yl t2 = pt.x - topsite.x t3 = yl - topsite.y above = t1 * t1 > t2 * t2 + t3 * t3 if(self.pm == Edge.LE): return above else: return not above # -------------------------- # create a new site where the Halfedges el1 and el2 intersect def intersect(self, other): e1 = self.edge e2 = other.edge if (e1 is None) or (e2 is None): return None # if the two edges bisect the same parent return None if e1.reg[1] is e2.reg[1]: return None d = e1.a * e2.b - e1.b * e2.a if isEqual(d, 0.0): return None xint = (e1.c * e2.b - e2.c * e1.b) / d yint = (e2.c * e1.a - e1.c * e2.a) / d if(cmp(e1.reg[1], e2.reg[1]) < 0): he = self e = e1 else: he = other e = e2 rightOfSite = xint >= e.reg[1].x if((rightOfSite and he.pm == Edge.LE) or (not rightOfSite and he.pm == Edge.RE)): return None # create a new site at the point of intersection - this is a new # vector event waiting to happen return Site(xint, yint) # ------------------------------------------------------------------ class EdgeList(object): def __init__(self, xmin, xmax, nsites): if xmin > xmax: xmin, xmax = xmax, xmin self.hashsize = int(2 * math.sqrt(nsites + 4)) self.xmin = xmin self.deltax = float(xmax - xmin) self.hash = [None] * self.hashsize self.leftend = Halfedge() self.rightend = Halfedge() self.leftend.right = self.rightend self.rightend.left = self.leftend self.hash[0] = self.leftend self.hash[-1] = self.rightend def insert(self, left, he): he.left = left he.right = left.right left.right.left = he left.right = he def delete(self, he): he.left.right = he.right he.right.left = he.left he.edge = Edge.DELETED # Get entry from hash table, pruning any deleted nodes def gethash(self, b): if(b < 0 or b >= self.hashsize): return None he = self.hash[b] if he is None or he.edge is not Edge.DELETED: return he # Hash table points to deleted half edge. Patch as necessary. 
self.hash[b] = None return None def leftbnd(self, pt): # Use hash table to get close to desired halfedge bucket = int(((pt.x - self.xmin) / self.deltax * self.hashsize)) if(bucket < 0): bucket = 0 if(bucket >= self.hashsize): bucket = self.hashsize - 1 he = self.gethash(bucket) if(he is None): i = 1 while True: he = self.gethash(bucket - i) if (he is not None): break he = self.gethash(bucket + i) if (he is not None): break i += 1 # Now search linear list of halfedges for the correct one if (he is self.leftend) or (he is not self.rightend and he.isPointRightOf(pt)): he = he.right while he is not self.rightend and he.isPointRightOf(pt): he = he.right he = he.left else: he = he.left while (he is not self.leftend and not he.isPointRightOf(pt)): he = he.left # Update hash table and reference counts if(bucket > 0 and bucket < self.hashsize - 1): self.hash[bucket] = he return he # ------------------------------------------------------------------ class PriorityQueue(object): def __init__(self, ymin, ymax, nsites): self.ymin = ymin self.deltay = ymax - ymin self.hashsize = int(4 * math.sqrt(nsites)) self.count = 0 self.minidx = 0 self.hash = [] for i in range(self.hashsize): self.hash.append(Halfedge()) def __len__(self): return self.count def isEmpty(self): return self.count == 0 def insert(self, he, site, offset): he.vertex = site he.ystar = site.y + offset last = self.hash[self.getBucket(he)] next = last.qnext while((next is not None) and cmp(he, next) > 0): last = next next = last.qnext he.qnext = last.qnext last.qnext = he self.count += 1 def delete(self, he): if (he.vertex is not None): last = self.hash[self.getBucket(he)] while last.qnext is not he: last = last.qnext last.qnext = he.qnext self.count -= 1 he.vertex = None def getBucket(self, he): bucket = int(((he.ystar - self.ymin) / self.deltay) * self.hashsize) if bucket < 0: bucket = 0 if bucket >= self.hashsize: bucket = self.hashsize - 1 if bucket < self.minidx: self.minidx = bucket return bucket def getMinPt(self): while(self.hash[self.minidx].qnext is None): self.minidx += 1 he = self.hash[self.minidx].qnext x = he.vertex.x y = he.ystar return Site(x, y) def popMinHalfedge(self): curr = self.hash[self.minidx].qnext self.hash[self.minidx].qnext = curr.qnext self.count -= 1 return curr # ------------------------------------------------------------------ class SiteList(object): def __init__(self, pointList): self.__sites = [] self.__sitenum = 0 self.__xmin = pointList[0].x self.__ymin = pointList[0].y self.__xmax = pointList[0].x self.__ymax = pointList[0].y for i, pt in enumerate(pointList): self.__sites.append(Site(pt.x, pt.y, i)) if pt.x < self.__xmin: self.__xmin = pt.x if pt.y < self.__ymin: self.__ymin = pt.y if pt.x > self.__xmax: self.__xmax = pt.x if pt.y > self.__ymax: self.__ymax = pt.y self.__sites.sort() def setSiteNumber(self, site): site.sitenum = self.__sitenum self.__sitenum += 1 class Iterator(object): def __init__(this, lst): this.generator = (s for s in lst) def __iter__(this): return this def __next__(this): try: return next(this.generator) except StopIteration: return None def iterator(self): return SiteList.Iterator(self.__sites) def __iter__(self): return SiteList.Iterator(self.__sites) def __len__(self): return len(self.__sites) def _getxmin(self): return self.__xmin def _getymin(self): return self.__ymin def _getxmax(self): return self.__xmax def _getymax(self): return self.__ymax xmin = property(_getxmin) ymin = property(_getymin) xmax = property(_getxmax) ymax = property(_getymax) # 
------------------------------------------------------------------ def computeVoronoiDiagram(points): """ Takes a list of point objects (which must have x and y fields). Returns a 3-tuple of: (1) a list of 2-tuples, which are the x,y coordinates of the Voronoi diagram vertices (2) a list of 3-tuples (a,b,c) which are the equations of the lines in the Voronoi diagram: a*x + b*y = c (3) a list of 3-tuples, (l, v1, v2) representing edges of the Voronoi diagram. l is the index of the line, v1 and v2 are the indices of the vertices at the end of the edge. If v1 or v2 is -1, the line extends to infinity. """ siteList = SiteList(points) context = Context() voronoi(siteList, context) return (context.vertices, context.lines, context.edges) # ------------------------------------------------------------------ def computeDelaunayTriangulation(points): """ Takes a list of point objects (which must have x and y fields). Returns a list of 3-tuples: the indices of the points that form a Delaunay triangle. """ siteList = SiteList(points) context = Context() context.triangulate = True voronoi(siteList, context) return context.triangles # ----------------------------------------------------------------------------- if __name__ == "__main__": try: optlist, args = getopt.getopt(sys.argv[1:], "thdp") except getopt.GetoptError: usage() sys.exit(2) doHelp = 0 c = Context() c.doPrint = 1 for opt in optlist: if opt[0] == "-d": c.debug = 1 if opt[0] == "-p": c.plot = 1 if opt[0] == "-t": c.triangulate = 1 if opt[0] == "-h": doHelp = 1 if not doHelp: pts = [] fp = sys.stdin if len(args) > 0: fp = open(args[0], 'r') for line in fp: fld = line.split() x = float(fld[0]) y = float(fld[1]) pts.append(Site(x, y)) if len(args) > 0: fp.close() if doHelp or len(pts) == 0: usage() sys.exit(2) sl = SiteList(pts) voronoi(sl, c) def cmp(a, b): """Compare the two objects a and b and return an integer according to the outcome. The return value is negative if a < b, zero if a == b and strictly positive if a > b. In Python 2 cmp() was a built-in function; in Python 3 it is gone. """ return (b < a) - (a < b)
gpl-2.0
2,008,679,989,056,150,000
32.107778
149
0.525254
false
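The Edge.bisect() routine in the file above encodes the perpendicular bisector of sites s1 and s2 as a line a*x + b*y = c, normalizing on the larger of |dx| and |dy| for numerical stability. A minimal self-contained sketch of that arithmetic with a worked check — the function name bisector is local to this sketch, not part of the file:

def bisector(x1, y1, x2, y2):
    # same coefficients Edge.bisect() computes, reproduced standalone
    dx, dy = float(x2 - x1), float(y2 - y1)
    c = x1 * dx + y1 * dy + (dx * dx + dy * dy) * 0.5
    if abs(dx) > abs(dy):
        return 1.0, dy / dx, c / dx   # x coefficient fixed to 1
    return dx / dy, 1.0, c / dy       # y coefficient fixed to 1

# sites (0, 0) and (2, 0): the bisector is the vertical line x = 1
assert bisector(0.0, 0.0, 2.0, 0.0) == (1.0, 0.0, 1.0)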
AlertMe/cerbero
test/test_cerbero_packages_wix.py
27
8020
# cerbero - a multi-platform build system for Open Source software # Copyright (C) 2012 Andoni Morales Alastruey <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Library General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Library General Public License for more details. # # You should have received a copy of the GNU Library General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. import unittest import StringIO from cerbero import hacks from cerbero.build import recipe from cerbero.config import Platform from cerbero.packages import package from cerbero.packages.wix import MergeModule from cerbero.utils import etree from test.test_build_common import create_cookbook from test.test_packages_common import create_store from test.test_common import DummyConfig class Recipe1(recipe.Recipe): name = 'recipe-test' files_misc = ['bin/test.exe', 'bin/test2.exe', 'bin/test3.exe', 'README', 'lib/libfoo.dll', 'lib/gstreamer-0.10/libgstplugins.dll'] class Package(package.Package): name = 'gstreamer-test' shortdesc = 'GStreamer Test' longdesc = 'test' version = '1.0' licences = ['LGPL'] uuid = '1' vendor = 'GStreamer Project' files = ['recipe-test:misc'] MERGE_MODULE = '''\ <?xml version="1.0" ?> <Wix xmlns="http://schemas.microsoft.com/wix/2006/wi"> <Module Id="_gstreamer_test" Language="1033" Version="1.0"> <Package Comments="test" Description="GStreamer Test" Id="1" Manufacturer="GStreamer Project"/> <Directory Id="TARGETDIR" Name="SourceDir"> <Component Guid="1" Id="_readme"> <File Id="_readme_1" Name="README" Source="z:\\\\\\test\\\\README"/> </Component> <Directory Id="_bin" Name="bin"> <Component Guid="1" Id="_test.exe"> <File Id="_testexe" Name="test.exe" Source="z:\\\\\\test\\\\bin\\\\test.exe"/> </Component> <Component Guid="1" Id="_test2.exe"> <File Id="_test2exe" Name="test2.exe" Source="z:\\\\\\test\\\\bin\\\\test2.exe"/> </Component> <Component Guid="1" Id="_test3.exe"> <File Id="_test3exe" Name="test3.exe" Source="z:\\\\\\test\\\\bin\\\\test3.exe"/> </Component> </Directory> <Directory Id="_lib" Name="lib"> <Directory Id="_gstreamer_0.10" Name="gstreamer-0.10"> <Component Guid="1" Id="_libgstplugins.dll"> <File Id="_libgstpluginsdll" Name="libgstplugins.dll" Source="z:\\\\\\test\\\\lib\\\\gstreamer-0.10\\\\libgstplugins.dll"/> </Component> </Directory> <Component Guid="1" Id="_libfoo.dll"> <File Id="_libfoodll" Name="libfoo.dll" Source="z:\\\\\\test\\\\lib\\\\libfoo.dll"/> </Component> </Directory> </Directory> </Module> </Wix> ''' class MergeModuleTest(unittest.TestCase): def setUp(self): self.config = DummyConfig() cb = create_cookbook(self.config) store = create_store(self.config) cb.add_recipe(Recipe1(self.config)) self.package = Package(self.config, store, cb) self.mergemodule = MergeModule(self.config, self.package.files_list(), self.package) def test_add_root(self): self.mergemodule._add_root() self.assertEquals( '<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi" />', etree.tostring(self.mergemodule.root)) def test_add_module(self): self.mergemodule._add_root() self.mergemodule._add_module() self.assertEquals( 
'<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">' '<Module Id="_gstreamer_test" Language="1033" Version="1.0" />' '</Wix>', etree.tostring(self.mergemodule.root)) def test_add_package(self): self.mergemodule._add_root() self.mergemodule._add_module() self.mergemodule._add_package() self.assertEquals( '<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">' '<Module Id="_gstreamer_test" Language="1033" Version="1.0">' '<Package Comments="test" Description="GStreamer Test" Id="1" ' 'Manufacturer="GStreamer Project" />' '</Module>' '</Wix>', etree.tostring(self.mergemodule.root)) def test_add_root_dir(self): self.mergemodule._add_root() self.mergemodule._add_module() self.mergemodule._add_package() self.mergemodule._add_root_dir() self.assertEquals( '<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">' '<Module Id="_gstreamer_test" Language="1033" Version="1.0">' '<Package Comments="test" Description="GStreamer Test" Id="1" ' 'Manufacturer="GStreamer Project" />' '<Directory Id="TARGETDIR" Name="SourceDir" />' '</Module>' '</Wix>', etree.tostring(self.mergemodule.root)) def test_add_directory(self): self.mergemodule._add_root() self.mergemodule._add_module() self.mergemodule._add_package() self.mergemodule._add_root_dir() self.assertEquals(len(self.mergemodule._dirnodes), 1) self.assertEquals(self.mergemodule._dirnodes[''], self.mergemodule.rdir) self.mergemodule._add_directory('lib/gstreamer-0.10') self.assertEquals(len(self.mergemodule._dirnodes), 3) self.assertTrue('lib' in self.mergemodule._dirnodes) self.assertTrue('lib/gstreamer-0.10' in self.mergemodule._dirnodes) self.mergemodule._add_directory('bin') self.assertEquals(len(self.mergemodule._dirnodes), 4) self.assertTrue('bin' in self.mergemodule._dirnodes) def test_add_file(self): self.mergemodule._add_root() self.mergemodule._add_module() self.mergemodule._add_package() self.mergemodule._add_root_dir() self.assertEquals(len(self.mergemodule._dirnodes), 1) self.assertEquals(self.mergemodule._dirnodes[''], self.mergemodule.rdir) self.mergemodule._add_file('bin/gst-inspect-0.10.exe') self.assertEquals(len(self.mergemodule._dirnodes), 2) self.assertTrue('bin' in self.mergemodule._dirnodes) self.assertTrue('gstreamer-0.10.exe' not in self.mergemodule._dirnodes) self.mergemodule._add_file('bin/gst-launch-0.10.exe') self.assertEquals(len(self.mergemodule._dirnodes), 2) self.assertTrue('bin' in self.mergemodule._dirnodes) self.assertTrue('gstreamer-0.10.exe' not in self.mergemodule._dirnodes) def test_render_xml(self): self.config.platform = Platform.WINDOWS self.mergemodule._get_uuid = lambda : '1' self.mergemodule.fill() tmp = StringIO.StringIO() self.mergemodule.write(tmp) #self._compstr(tmp.getvalue(), MERGE_MODULE) self.assertEquals(MERGE_MODULE, tmp.getvalue()) def _compstr(self, str1, str2): str1 = str1.split('\n') str2 = str2.split('\n') for i in range(len(str1)): if str1[i] != str2[i]: print str1[i] print str2[i] print "" class InstallerTest(unittest.TestCase): def setUp(self): pass def testAddRoot(self): pass def testAddProduct(self): pass def testAddPackage(self): pass def testAddInstallDir(self): pass def testAddUIProps(self): pass def testAddMedia(self): pass def testAddMergeModules(self): pass def testAddMergeModules(self): pass def testRender(self): pass
lgpl-2.1
-2,471,404,032,102,076,400
35.621005
129
0.631047
false
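The MergeModule tests above assert on serialized WiX XML built element by element. A rough standalone illustration of building the same Wix/Module/Package skeleton with the stdlib ElementTree — this is not cerbero's MergeModule API, only the shape of the tree its tests expect:

import xml.etree.ElementTree as ET

root = ET.Element('Wix', {'xmlns': 'http://schemas.microsoft.com/wix/2006/wi'})
module = ET.SubElement(root, 'Module', {'Id': '_gstreamer_test',
                                        'Language': '1033', 'Version': '1.0'})
ET.SubElement(module, 'Package', {'Comments': 'test',
                                  'Description': 'GStreamer Test', 'Id': '1',
                                  'Manufacturer': 'GStreamer Project'})
print(ET.tostring(root))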
jabl/offlineimap
offlineimap/CustomConfig.py
8
4401
# Copyright (C) 2003 John Goerzen # <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA from ConfigParser import ConfigParser from offlineimap.localeval import LocalEval import os class CustomConfigParser(ConfigParser): def getdefault(self, section, option, default, *args, **kwargs): """Same as config.get, but returns the "default" option if there is no such option specified.""" if self.has_option(section, option): return apply(self.get, [section, option] + list(args), kwargs) else: return default def getdefaultint(self, section, option, default, *args, **kwargs): if self.has_option(section, option): return apply(self.getint, [section, option] + list(args), kwargs) else: return default def getdefaultfloat(self, section, option, default, *args, **kwargs): if self.has_option(section, option): return apply(self.getfloat, [section, option] + list(args), kwargs) else: return default def getdefaultboolean(self, section, option, default, *args, **kwargs): if self.has_option(section, option): return apply(self.getboolean, [section, option] + list(args), kwargs) else: return default def getmetadatadir(self): metadatadir = os.path.expanduser(self.getdefault("general", "metadata", "~/.offlineimap")) if not os.path.exists(metadatadir): os.mkdir(metadatadir, 0700) return metadatadir def getlocaleval(self): if self.has_option("general", "pythonfile"): path = os.path.expanduser(self.get("general", "pythonfile")) else: path = None return LocalEval(path) def getsectionlist(self, key): """Returns a list of sections that start with key + " ". That is, if key is "Account", returns all section names that start with "Account ", but strips off the "Account ". For instance, for "Account Test", returns "Test".""" key = key + ' ' return [x[len(key):] for x in self.sections() \ if x.startswith(key)] def CustomConfigDefault(): """Just a sample constant that won't occur anywhere else to use for the default.""" pass class ConfigHelperMixin: def _confighelper_runner(self, option, default, defaultfunc, mainfunc): if default != CustomConfigDefault: return apply(defaultfunc, [self.getsection(), option, default]) else: return apply(mainfunc, [self.getsection(), option]) def getconf(self, option, default = CustomConfigDefault): return self._confighelper_runner(option, default, self.getconfig().getdefault, self.getconfig().get) def getconfboolean(self, option, default = CustomConfigDefault): return self._confighelper_runner(option, default, self.getconfig().getdefaultboolean, self.getconfig().getboolean) def getconfint(self, option, default = CustomConfigDefault): return self._confighelper_runner(option, default, self.getconfig().getdefaultint, self.getconfig().getint) def getconffloat(self, option, default = CustomConfigDefault): return self._confighelper_runner(option, default, self.getconfig().getdefaultfloat, self.getconfig().getfloat)
gpl-2.0
5,373,335,043,325,085,000
41.317308
98
0.613497
false
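CustomConfigParser's getdefault* methods above all follow one pattern: consult has_option() and fall back to a caller-supplied default. A compact sketch of the same pattern on the Python 3 stdlib parser — the module spelling and class name here are illustrative, not offlineimap's:

from configparser import ConfigParser

class DefaultingParser(ConfigParser):
    def getdefault(self, section, option, default):
        # same contract as CustomConfigParser.getdefault above
        if self.has_option(section, option):
            return self.get(section, option)
        return default

cp = DefaultingParser()
cp.read_string('[general]\nmetadata = ~/.offlineimap\n')
assert cp.getdefault('general', 'metadata', 'x') == '~/.offlineimap'
assert cp.getdefault('general', 'missing', 'fallback') == 'fallback'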
Mushirahmed/gnuradio
docs/sphinx/hieroglyph/test/test_nodes.py
25
12795
import unittest from hieroglyph.nodes import Node, Arg, Raises, Except, Returns, Warning, Note __author__ = 'Robert Smallshire' class NodeTests(unittest.TestCase): def test_create_default_node(self): node = Node() self.assertEqual(node.indent, 0) self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_create_with_indent(self): node = Node(indent=4) self.assertEqual(node.indent, 4) self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_create_with_lines(self): node = Node(lines= ['First', 'Second', 'Third']) self.assertEqual(node.indent, 0) self.assertEqual(node.lines, ['First', 'Second', 'Third']) self.assertIsNone(node.parent) def test_repr(self): node = Node(5, ['One', 'Two', 'Three']) actual = repr(node) expected = "Node(5, ['One', 'Two', 'Three'], children=[])" self.assertEqual(expected, actual) def test_add_one_child(self): node = Node() child = Node(parent=node) node.add_child(child) self.assertIs(node.children[0], child) def test_add_two_children(self): node = Node() child0 = Node(parent=node) child1 = Node(parent=node) node.add_child(child0) node.add_child(child1) self.assertIs(node.children[0], child0) self.assertIs(node.children[1], child1) def test_render_rst_empty(self): node = Node() rst = node.render_rst() self.assertEqual(len(rst), 0) def test_render_rst_indent(self): node = Node(indent=4) rst = node.render_rst() self.assertEqual(len(rst), 0) def test_render_rst_lines(self): node = Node(lines= ['First', 'Second', 'Third']) rst = node.render_rst() self.assertEqual(rst, ['First', 'Second', 'Third']) def test_render_rst_indented_lines(self): node = Node(indent=3, lines= ['First', 'Second', 'Third']) rst = node.render_rst() self.assertEqual(rst, [' First', ' Second', ' Third']) def test_render_rst_with_child(self): node = Node(indent=4, lines=["Parent"]) child = Node(indent=8, lines=["Child"], parent=node) node.add_child(child) rst = node.render_rst() self.assertEqual(rst, [' Parent', ' Child']) def test_render_rst_with_children(self): node = Node(indent=4, lines=["Parent"]) child_a = Node(indent=8, lines=["ChildA"], parent=node) node.add_child(child_a) child_b = Node(indent=6, lines=["ChildB"], parent=node) node.add_child(child_b) rst = node.render_rst() self.assertEqual(rst, [' Parent', ' ChildA', ' ChildB']) class ArgTests(unittest.TestCase): def test_create(self): node = Arg(5, 10, 'foo') self.assertEqual(node.indent, 5) self.assertEqual(node.child_indent, 10) self.assertEqual(node.name, 'foo') self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_set_type(self): node = Arg(5, 10, 'foo') node.type = 'str' self.assertEqual(node.type, 'str') def test_add_one_child(self): node = Arg(5, 10, 'foo') child = Node(parent=node) node.add_child(child) self.assertIs(node.children[0], child) def test_add_two_children(self): node = Arg(5, 10, 'foo') child0 = Node(parent=node) child1 = Node(parent=node) node.add_child(child0) node.add_child(child1) self.assertIs(node.children[0], child0) self.assertIs(node.children[1], child1) def test_repr(self): node = Arg(5, 10, 'foo') actual = repr(node) expected = "Arg('foo', None, children=[])" self.assertEqual(expected, actual) def test_render_rst_empty(self): node = Arg(5, 10, 'bar') rst = node.render_rst() self.assertEqual(rst, [' :param bar: ', '']) def test_render_rst_with_child(self): node = Arg(5, 10, 'bar') child = Node(indent=10, lines=["Description"], parent=node) node.add_child(child) rst = node.render_rst() self.assertEqual(rst, [' :param bar: Description', '']) def 
test_render_rst_with_children(self): node = Arg(5, 10, 'bar') child_a = Node(indent=10, lines=["ChildA"], parent=node) node.add_child(child_a) child_b = Node(indent=10, lines=["ChildB"], parent=node) node.add_child(child_b) rst = node.render_rst() self.assertEqual(rst, [' :param bar: ChildA', ' ChildB', '']) def test_render_rst_with_type(self): node = Arg(5, 10, 'bar') node.type = 'str' rst = node.render_rst() self.assertEqual(rst, [' :param bar: ', ' :type bar: str', '']) class RaisesTests(unittest.TestCase): def test_create_default_node(self): node = Raises() self.assertEqual(node.indent, 0) self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_create_with_indent(self): node = Raises(indent=4) self.assertEqual(node.indent, 4) self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_repr(self): node = Raises(5) actual = repr(node) expected = "Raises(5, children=[])" self.assertEqual(expected, actual) def test_add_one_child(self): node = Raises() child = Node(parent=node) node.add_child(child) self.assertIs(node.children[0], child) def test_add_two_children(self): node = Raises() child0 = Node(parent=node) child1 = Node(parent=node) node.add_child(child0) node.add_child(child1) self.assertIs(node.children[0], child0) self.assertIs(node.children[1], child1) def test_render_rst_empty(self): node = Raises() rst = node.render_rst() self.assertEqual(rst, [':raises:', '']) def test_render_rst_indent(self): node = Raises(indent=5) rst = node.render_rst() self.assertEqual(rst, [' :raises:', '']) def test_render_rst_with_child(self): node = Raises(5) child = Node(indent=10, lines=["Description"], parent=node) node.add_child(child) rst = node.render_rst() self.assertEqual(rst, [' :raises:', ' Description', '']) def test_render_rst_with_children(self): node = Raises(5) child_a = Node(indent=10, lines=["ChildA"], parent=node) node.add_child(child_a) child_b = Node(indent=10, lines=["ChildB"], parent=node) node.add_child(child_b) rst = node.render_rst() self.assertEqual(rst, [' :raises:', ' ChildA', ' ChildB', '']) class ExceptTests(unittest.TestCase): def test_create(self): node = Except(5, 'FooError') self.assertEqual(node.indent, 5) self.assertEqual(node.type, 'FooError') self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_add_one_child(self): node = Except(5, 'FooError') child = Node(parent=node) node.add_child(child) self.assertIs(node.children[0], child) def test_add_two_children(self): node = Except(5, 'FooError') child0 = Node(parent=node) child1 = Node(parent=node) node.add_child(child0) node.add_child(child1) self.assertIs(node.children[0], child0) self.assertIs(node.children[1], child1) def test_repr(self): node = Except(5,'FooError') actual = repr(node) expected = "Except('FooError', children=[])" self.assertEqual(expected, actual) def test_render_rst_empty(self): node = Except(5, 'FooError') rst = node.render_rst() self.assertEqual(rst, [' * FooError - ', '']) def test_render_rst_indent(self): node = Except(5, 'FooError') rst = node.render_rst() self.assertEqual(rst, [' * FooError - ', '']) def test_render_rst_with_child(self): node = Except(5, 'FooError') child = Node(indent=10, lines=["Description"], parent=node) node.add_child(child) rst = node.render_rst() self.assertEqual(rst, [' * FooError - Description', '']) def test_render_rst_with_children(self): node = Except(5, 'FooError') child_a = Node(indent=10, lines=["ChildA"], parent=node) node.add_child(child_a) child_b = Node(indent=10, lines=["ChildB"], parent=node) 
node.add_child(child_b) rst = node.render_rst() self.assertEqual(rst, [' * FooError - ChildA', ' ChildB', '']) class ReturnsTests(unittest.TestCase): def test_create(self): node = Returns(5) self.assertEqual(node.indent, 5) self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_add_one_child(self): node = Returns(5) child = Node(parent=node) node.add_child(child) self.assertIs(node.children[0], child) def test_add_two_children(self): node = Returns(5) child0 = Node(parent=node) child1 = Node(parent=node) node.add_child(child0) node.add_child(child1) self.assertIs(node.children[0], child0) self.assertIs(node.children[1], child1) def test_repr(self): node = Returns(5) actual = repr(node) expected = "Returns(5, children=[])" self.assertEqual(expected, actual) # TODO test_render_rst class WarningTests(unittest.TestCase): def test_create(self): node = Warning(5) self.assertEqual(node.indent, 5) self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_add_one_child(self): node = Warning(5) child = Node(parent=node) node.add_child(child) self.assertIs(node.children[0], child) def test_add_two_children(self): node = Warning(5) child0 = Node(parent=node) child1 = Node(parent=node) node.add_child(child0) node.add_child(child1) self.assertIs(node.children[0], child0) self.assertIs(node.children[1], child1) def test_repr(self): node = Warning(5) actual = repr(node) expected = "Warning(5, children=[])" self.assertEqual(expected, actual) # TODO test_render_rst class NoteTests(unittest.TestCase): def test_create(self): node = Note(5) self.assertEqual(node.indent, 5) self.assertEqual(node.lines, []) self.assertIsNone(node.parent) def test_add_one_child(self): node = Note(5) child = Node(parent=node) node.add_child(child) self.assertIs(node.children[0], child) def test_add_two_children(self): node = Note(5) child0 = Node(parent=node) child1 = Node(parent=node) node.add_child(child0) node.add_child(child1) self.assertIs(node.children[0], child0) self.assertIs(node.children[1], child1) def test_repr(self): node = Note(5) actual = repr(node) expected = "Note(5, children=[])" self.assertEqual(expected, actual) # TODO test_render_rst
gpl-3.0
-1,049,117,481,040,743,200
31.147668
78
0.519109
false
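The NodeTests above pin down a small contract: a Node carries an indent and a list of lines, children are appended in order, and render_rst() emits the node's own lines at its indent followed by each child's rendered output. A hypothetical minimal Node that satisfies the rendering assertions — not hieroglyph's implementation:

class Node(object):
    def __init__(self, indent=0, lines=None, parent=None):
        self.indent = indent
        self.lines = lines if lines is not None else []
        self.parent = parent
        self.children = []

    def add_child(self, child):
        self.children.append(child)

    def render_rst(self):
        # own lines at own indent, then children's output in order
        rst = [' ' * self.indent + line for line in self.lines]
        for child in self.children:
            rst.extend(child.render_rst())
        return rst

node = Node(indent=4, lines=['Parent'])
node.add_child(Node(indent=8, lines=['Child'], parent=node))
assert node.render_rst() == ['    Parent', '        Child']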
TinajaLabs/tinajagate
downloads/pyserial-2.5/serial/__init__.py
8
2268
#!/usr/bin/env python # portable serial port access with python # this is a wrapper module for different platform implementations # # (C) 2001-2010 Chris Liechti <[email protected]> # this is distributed under a free software license, see license.txt VERSION = '2.5' import sys if sys.platform == 'cli': from serialcli import * else: import os # choose an implementation, depending on os if os.name == 'nt': #sys.platform == 'win32': from serialwin32 import * elif os.name == 'posix': from serialposix import * elif os.name == 'java': from serialjava import * else: raise Exception("Sorry: no implementation for your platform ('%s') available" % os.name) def serial_for_url(url, *args, **kwargs): """Get a native, a RFC2217 or socket implementation of the Serial class, depending on port/url. The port is not opened when the keyword parameter 'do_not_open' is true; by default it is opened.""" # check and remove the extra parameter so it does not confuse the Serial class do_open = 'do_not_open' not in kwargs or not kwargs['do_not_open'] if 'do_not_open' in kwargs: del kwargs['do_not_open'] # the default is to use the native version klass = Serial # 'native' implementation # check port type and get class try: url_nocase = url.lower() except AttributeError: # it's not a string, use the default pass else: if url_nocase.startswith('rfc2217://'): import rfc2217 # late import, so that users that don't use it don't have to load it klass = rfc2217.Serial # RFC2217 implementation elif url_nocase.startswith('socket://'): import socket_connection # late import, so that users that don't use it don't have to load it klass = socket_connection.Serial elif url_nocase.startswith('loop://'): import loopback_connection # late import, so that users that don't use it don't have to load it klass = loopback_connection.Serial else: klass = Serial # 'native' implementation # instantiate and open when desired instance = klass(None, *args, **kwargs) instance.port = url if do_open: instance.open() return instance
mit
-7,845,773,645,466,870,000
36.8
108
0.64903
false
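serial_for_url() above picks an implementation class from the URL scheme and falls back to the native Serial when the port is not a string or carries no recognized prefix. A standalone sketch of that dispatch logic — the handler names are placeholders, not pyserial's actual classes:

HANDLERS = {'rfc2217://': 'rfc2217.Serial',
            'socket://': 'socket_connection.Serial',
            'loop://': 'loopback_connection.Serial'}

def pick_backend(url, default='native Serial'):
    try:
        url_nocase = url.lower()
    except AttributeError:          # not a string: use the native class
        return default
    for prefix, handler in HANDLERS.items():
        if url_nocase.startswith(prefix):
            return handler
    return default

assert pick_backend('LOOP://') == 'loopback_connection.Serial'
assert pick_backend('/dev/ttyUSB0') == 'native Serial'
assert pick_backend(0) == 'native Serial'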
heeraj123/oh-mainline
vendor/packages/Django/django/contrib/sites/managers.py
491
1985
from django.conf import settings from django.db import models from django.db.models.fields import FieldDoesNotExist class CurrentSiteManager(models.Manager): "Use this to limit objects to those associated with the current site." def __init__(self, field_name=None): super(CurrentSiteManager, self).__init__() self.__field_name = field_name self.__is_validated = False def _validate_field_name(self): field_names = self.model._meta.get_all_field_names() # If a custom name is provided, make sure the field exists on the model if self.__field_name is not None and self.__field_name not in field_names: raise ValueError("%s couldn't find a field named %s in %s." % \ (self.__class__.__name__, self.__field_name, self.model._meta.object_name)) # Otherwise, see if there is a field called either 'site' or 'sites' else: for potential_name in ['site', 'sites']: if potential_name in field_names: self.__field_name = potential_name self.__is_validated = True break # Now do a type check on the field (FK or M2M only) try: field = self.model._meta.get_field(self.__field_name) if not isinstance(field, (models.ForeignKey, models.ManyToManyField)): raise TypeError("%s must be a ForeignKey or ManyToManyField." %self.__field_name) except FieldDoesNotExist: raise ValueError("%s couldn't find a field named %s in %s." % \ (self.__class__.__name__, self.__field_name, self.model._meta.object_name)) self.__is_validated = True def get_query_set(self): if not self.__is_validated: self._validate_field_name() return super(CurrentSiteManager, self).get_query_set().filter(**{self.__field_name + '__id__exact': settings.SITE_ID})
agpl-3.0
-3,065,119,792,742,246,000
47.414634
126
0.59597
false
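CurrentSiteManager above ultimately narrows its queryset with a single keyword filter, <field_name>__id__exact=settings.SITE_ID, after validating that the field is a ForeignKey or ManyToManyField. The filter construction in isolation — SITE_ID here is a stand-in for the Django setting:

SITE_ID = 1  # stand-in for django.conf.settings.SITE_ID

def site_filter_kwargs(field_name):
    # the kwargs CurrentSiteManager.get_query_set() passes to .filter()
    return {field_name + '__id__exact': SITE_ID}

assert site_filter_kwargs('site') == {'site__id__exact': 1}
assert site_filter_kwargs('sites') == {'sites__id__exact': 1}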
sysbot/contrail-controller
src/config/api-server/vnc_cfg_ifmap.py
1
38425
# # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # """ Layer that transforms VNC config objects to ifmap representation """ from gevent import ssl, monkey monkey.patch_all() import sys import time from pprint import pformat from lxml import etree, objectify import cStringIO import StringIO import re import socket import errno import subprocess from cfgm_common.ifmap.client import client, namespaces from cfgm_common.ifmap.request import NewSessionRequest, RenewSessionRequest,\ EndSessionRequest, PublishRequest, SearchRequest, SubscribeRequest,\ PurgeRequest, PollRequest from cfgm_common.ifmap.id import IPAddress, MACAddress, Device,\ AccessRequest, Identity, CustomIdentity from cfgm_common.ifmap.operations import PublishUpdateOperation,\ PublishNotifyOperation, PublishDeleteOperation, SubscribeUpdateOperation,\ SubscribeDeleteOperation from cfgm_common.ifmap.util import attr, link_ids from cfgm_common.ifmap.response import Response, newSessionResult from cfgm_common.ifmap.metadata import Metadata import copy import json import uuid import datetime import pycassa import pycassa.util from pycassa.system_manager import * from datetime import datetime from pycassa.util import * #from cfgm_common import vnc_type_conv from provision_defaults import * import cfgm_common.imid from cfgm_common.exceptions import * from gen.vnc_ifmap_client_gen import * from gen.vnc_cassandra_client_gen import * class VncIfmapClient(VncIfmapClientGen): def __init__(self, db_client_mgr, ifmap_srv_ip, ifmap_srv_port, uname, passwd, ssl_options, ifmap_srv_loc=None): super(VncIfmapClient, self).__init__() # TODO username/passwd from right place self._CONTRAIL_XSD = "http://www.contrailsystems.com/vnc_cfg.xsd" self._IPERMS_NAME = "id-perms" self._IPERMS_FQ_NAME = "contrail:" + self._IPERMS_NAME self._SUBNETS_NAME = "contrail:subnets" self._IPAMS_NAME = "contrail:ipams" self._SG_RULE_NAME = "contrail:sg_rules" self._POLICY_ENTRY_NAME = "contrail:policy_entry" self._NAMESPACES = { 'a': 'http://www.w3.org/2003/05/soap-envelope', 'b': 'http://www.trustedcomputinggroup.org/2010/IFMAP/2', 'c': self._CONTRAIL_XSD } namespaces = { 'env': "http://www.w3.org/2003/05/soap-envelope", 'ifmap': "http://www.trustedcomputinggroup.org/2010/IFMAP/2", 'meta': "http://www.trustedcomputinggroup.org/2010/IFMAP-METADATA/2", 'contrail': self._CONTRAIL_XSD } self._db_client_mgr = db_client_mgr # launch mapserver if ifmap_srv_loc: self._launch_mapserver(ifmap_srv_ip, ifmap_srv_port, ifmap_srv_loc) mapclient = client(("%s" % (ifmap_srv_ip), "%s" % (ifmap_srv_port)), uname, passwd, namespaces, ssl_options) self._mapclient = mapclient connected = False while not connected: try: result = mapclient.call('newSession', NewSessionRequest()) connected = True except socket.error as e: time.sleep(3) mapclient.set_session_id(newSessionResult(result).get_session_id()) mapclient.set_publisher_id(newSessionResult(result).get_publisher_id()) # Initialize ifmap-id handler (alloc|convert|parse etc.)
self._imid_handler = Imid() imid = self._imid_handler # Publish init config (TODO this should come from api-server init) # config-root buf = cStringIO.StringIO() perms = Provision.defaults.perms['config-root'] perms.exportChildren(buf, level=1, pretty_print=False) id_perms_xml = buf.getvalue() buf.close() meta = str(Metadata(self._IPERMS_NAME, '', {'ifmap-cardinality': 'singleValue'}, ns_prefix='contrail', elements=id_perms_xml)) self._publish_id_self_meta("contrail:config-root:root", meta) # end __init__ def get_imid_handler(self): return self._imid_handler # end get_imid_handler # Parse ifmap-server returned search results and create list of tuples # of (ident-1, ident-2, link-attribs) def parse_result_items(self, srch_result, my_imid): xpath_expr = '/a:Envelope/a:Body/b:response/searchResult/resultItem' result_items = self._parse(srch_result, xpath_expr) return cfgm_common.imid.parse_result_items(result_items, my_imid) # end parse_result_items # In list of (ident-1, ident-2, link-attribs) tuples, return list of # ifmap-ids of other idents def get_others_in_result_list(self, result_list, my_imid): other_imid_list = [] for result_elem in result_list: ident_1, ident_2, meta = result_elem if (ident_1 is None) or (ident_2 is None): continue other_imid = None if ident_1.attrib['name'] == my_imid: other_imid = ident_2.attrib['name'] elif ident_2.attrib['name'] == my_imid: other_imid = ident_1.attrib['name'] other_imid_list.append(other_imid) return other_imid_list # end get_others_in_result_list def _ensure_port_not_listened(self, server_ip, server_port): try: s = socket.create_connection((server_ip, server_port)) s.close() print "IP %s port %s already listened on"\ % (server_ip, server_port) except Exception as err: if err.errno == errno.ECONNREFUSED: return # all is well # end _ensure_port_not_listened def _block_till_port_listened(self, server_name, server_ip, server_port): svr_running = False while not svr_running: try: s = socket.create_connection((server_ip, server_port)) s.close() svr_running = True except Exception as err: if err.errno == errno.ECONNREFUSED: print "%s not up, retrying in 2 secs" % (server_name) time.sleep(2) else: raise err # end _block_till_port_listened # launch ifmap server def _launch_mapserver(self, ifmap_srv_ip, ifmap_srv_port, ifmap_srv_loc): print 'Starting IFMAP server ...' 
self._ensure_port_not_listened(ifmap_srv_ip, ifmap_srv_port) logf_out = open('ifmap-server.out', 'w') logf_err = open('ifmap-server.err', 'w') self._mapserver = subprocess.Popen(['java', '-jar', 'build/irond.jar'], cwd=ifmap_srv_loc, stdout=logf_out, stderr=logf_err) self._block_till_port_listened( 'ifmap-server', ifmap_srv_ip, ifmap_srv_port) # end _launch_mapserver # Helper routines for IFMAP def _publish_id_self_meta(self, self_imid, meta): mapclient = self._mapclient pubreq = PublishRequest(mapclient.get_session_id(), str(PublishUpdateOperation( id1=str(Identity( name=self_imid, type="other", other_type="extended")), metadata=meta, lifetime='forever'))) result = mapclient.call('publish', pubreq) # end _publish_id_self_meta def _delete_id_self_meta(self, self_imid, meta_name): mapclient = self._mapclient pubreq = PublishRequest(mapclient.get_session_id(), str(PublishDeleteOperation( id1=str(Identity( name=self_imid, type="other", other_type="extended")), filter=meta_name))) result = mapclient.call('publish', pubreq) # end _delete_id_self_meta def _publish_id_pair_meta(self, id1, id2, metadata): mapclient = self._mapclient pubreq = PublishRequest(mapclient.get_session_id(), str(PublishUpdateOperation( id1=str(Identity(name=id1, type="other", other_type="extended")), id2=str(Identity(name=id2, type="other", other_type="extended")), metadata=metadata, lifetime='forever'))) result = mapclient.call('publish', pubreq) # end _publish_id_pair_meta def _delete_id_pair_meta(self, id1, id2, metadata): mapclient = self._mapclient pubreq = PublishRequest(mapclient.get_session_id(), str(PublishDeleteOperation( id1=str(Identity( name=id1, type="other", other_type="extended")), id2=str(Identity( name=id2, type="other", other_type="extended")), filter=metadata))) result = mapclient.call('publish', pubreq) # end _delete_id_pair_meta def _search(self, start_id, match_meta=None, result_meta=None, max_depth=1): # set ifmap search parmeters srch_params = {} srch_params['max-depth'] = str(max_depth) if match_meta is not None: srch_params['match-links'] = match_meta if result_meta is not None: # all => don't set result-filter, so server returns all id + meta if result_meta == "all": pass else: srch_params['result-filter'] = result_meta else: # default to return match_meta metadata types only srch_params['result-filter'] = match_meta mapclient = self._mapclient srch_req = SearchRequest(mapclient.get_session_id(), start_id, search_parameters=srch_params ) result = mapclient.call('search', srch_req) return result # end _search def _parse(self, srch_result, xpath_expr): soap_doc = etree.parse(StringIO.StringIO(srch_result)) result_items = soap_doc.xpath(xpath_expr, namespaces=self._NAMESPACES) return result_items # end _parse def _search_and_parse(self, start_id, xpath_expr, match_meta=None, result_meta=None, max_depth=0): result = self._search(start_id, match_meta, result_meta, max_depth) result_items = self._parse(result, xpath_expr) return result_items # end _search_and_parse def _get_id_meta_refs(self, result_items, self_type, parent_type): # Given parsed result items from search, returns # of idents + metadata # referring to this ident (incl self + parent). In addition, parent's # name and names of non-parent, non-self idents referring to this ident # are returned. 
TODO should this be moved to cfgm/common ref_cnt = 0 ref_set = set() ref_names = "" parent_imid = "" imid = self._imid_handler for r_item in result_items: if r_item.tag == 'identity': ident_name = r_item.attrib['name'] ident_type = cfgm_common.imid.ifmap_id_to_type(ident_name) # No action if already encountered if ident_name in ref_set: continue ref_cnt = ref_cnt + 1 ref_set.add(ident_name) if (ident_type == self_type): continue if (ident_type == parent_type): parent_imid = r_item.attrib['name'] continue # non-parent, non-self refs ref_names = "%s %s" % (ref_names, ident_name) elif r_item.tag == 'metadata': # TBI figure out meta only belonging to self ref_cnt = ref_cnt + 1 meta_elem = r_item.getchildren()[0] meta_name = re.sub("{.*}", "", meta_elem.tag) ref_names = "%s %s" % (ref_names, meta_name) return ref_cnt, parent_imid, ref_names # end _get_id_meta_refs def fq_name_to_ifmap_id(self, obj_type, fq_name): return cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, fq_name) # end fq_name_to_ifmap_id def ifmap_id_to_fq_name(self, ifmap_id): return cfgm_common.imid.get_fq_name_from_ifmap_id(ifmap_id) # end ifmap_id_to_fq_name # end class VncIfmapClient class Imid(ImidGen): pass # end class Imid class VncCassandraClient(VncCassandraClientGen): # Name to ID mapping keyspace + tables _UUID_KEYSPACE_NAME = 'config_db_uuid' # TODO describe layout _OBJ_UUID_CF_NAME = 'obj_uuid_table' # TODO describe layout _OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table' # has obj uuid as rowkey; ascii as column type; <fq_name>, <ifmap_id> # <obj_json> <child_cf_col_name> as column values _UUID_CF_NAME = 'uuid_table' # has type:fq_name as rowkey; ascii as column type; <obj uuid> <ifmap_id> # as column values _FQ_NAME_CF_NAME = 'fq_name_table' # has ifmap_id as rowkey; ascii as column type # <obj uuid>, <fq_name> as column values # ifmap_id itself is contrail:<type>:<fq-name delimited by ':'> _IFMAP_ID_CF_NAME = 'ifmap_id_table' # has obj uuid:<child-type> as rowkey; timeuuid column type; <child obj # uuid> as column values _CHILDREN_CF_NAME = 'children_table' _SUBNET_CF_NAME = 'subnet_bitmask_table' # Useragent datastore keyspace + tables (used by quantum plugin currently) _USERAGENT_KEYSPACE_NAME = 'useragent' _USERAGENT_KV_CF_NAME = 'useragent_keyval_table' def __init__(self, db_client_mgr, cass_srv_list, reset_config): self._db_client_mgr = db_client_mgr self._reset_config = reset_config self._cassandra_init(cass_srv_list) # end __init__ # Helper routines for cassandra def _cassandra_init(self, server_list): # 1. Ensure keyspace and schema/CFs exist # 2. 
Read in persisted data and publish to ifmap server uuid_ks_name = VncCassandraClient._UUID_KEYSPACE_NAME obj_uuid_cf_info = (VncCassandraClient._OBJ_UUID_CF_NAME, None) obj_fq_name_cf_info = (VncCassandraClient._OBJ_FQ_NAME_CF_NAME, None) uuid_cf_info = (VncCassandraClient._UUID_CF_NAME, None) fq_name_cf_info = (VncCassandraClient._FQ_NAME_CF_NAME, None) ifmap_id_cf_info = (VncCassandraClient._IFMAP_ID_CF_NAME, None) subnet_cf_info = (VncCassandraClient._SUBNET_CF_NAME, None) children_cf_info = ( VncCassandraClient._CHILDREN_CF_NAME, TIME_UUID_TYPE) self._cassandra_ensure_keyspace( server_list, uuid_ks_name, [obj_uuid_cf_info, obj_fq_name_cf_info, uuid_cf_info, fq_name_cf_info, ifmap_id_cf_info, subnet_cf_info, children_cf_info]) useragent_ks_name = VncCassandraClient._USERAGENT_KEYSPACE_NAME useragent_kv_cf_info = (VncCassandraClient._USERAGENT_KV_CF_NAME, None) self._cassandra_ensure_keyspace(server_list, useragent_ks_name, [useragent_kv_cf_info]) uuid_pool = pycassa.ConnectionPool( uuid_ks_name, server_list, max_overflow=-1, pool_timeout=300, max_retries=100, timeout=300) useragent_pool = pycassa.ConnectionPool( useragent_ks_name, server_list, max_overflow=-1, pool_timeout=300, max_retries=100, timeout=300) self._obj_uuid_cf = pycassa.ColumnFamily( uuid_pool, VncCassandraClient._OBJ_UUID_CF_NAME) self._obj_fq_name_cf = pycassa.ColumnFamily( uuid_pool, VncCassandraClient._OBJ_FQ_NAME_CF_NAME) self._useragent_kv_cf = pycassa.ColumnFamily( useragent_pool, VncCassandraClient._USERAGENT_KV_CF_NAME) self._subnet_cf = pycassa.ColumnFamily( uuid_pool, VncCassandraClient._SUBNET_CF_NAME) # end _cassandra_init def _cassandra_ensure_keyspace(self, server_list, keyspace_name, cf_info_list): # Retry till cassandra is up connected = False while not connected: try: sys_mgr = SystemManager(server_list[0]) connected = True except Exception as e: # TODO do only for # thrift.transport.TTransport.TTransportException time.sleep(3) if self._reset_config: try: sys_mgr.drop_keyspace(keyspace_name) except pycassa.cassandra.ttypes.InvalidRequestException as e: # TODO verify only EEXISTS print "Warning! " + str(e) try: # TODO replication_factor adjust? sys_mgr.create_keyspace(keyspace_name, SIMPLE_STRATEGY, {'replication_factor': '1'}) except pycassa.cassandra.ttypes.InvalidRequestException as e: # TODO verify only EEXISTS print "Warning! " + str(e) for cf_info in cf_info_list: try: (cf_name, comparator_type) = cf_info if comparator_type: sys_mgr.create_column_family( keyspace_name, cf_name, comparator_type=comparator_type) else: sys_mgr.create_column_family(keyspace_name, cf_name) except pycassa.cassandra.ttypes.InvalidRequestException as e: # TODO verify only EEXISTS print "Warning! 
" + str(e) # end _cassandra_ensure_keyspace def _create_prop(self, bch, obj_uuid, prop_name, prop_val): bch.insert(obj_uuid, {'prop:%s' % (prop_name): json.dumps(prop_val)}) # end _create_prop def _update_prop(self, bch, obj_uuid, prop_name, new_props): if new_props[prop_name] is None: bch.remove(obj_uuid, columns=['prop:' + prop_name]) else: bch.insert( obj_uuid, {'prop:' + prop_name: json.dumps(new_props[prop_name])}) # prop has been accounted for, remove so only new ones remain del new_props[prop_name] # end _update_prop def _create_child(self, bch, parent_type, parent_uuid, child_type, child_uuid): child_col = {'children:%s:%s' % (child_type, child_uuid): json.dumps(None)} bch.insert(parent_uuid, child_col) parent_col = {'parent:%s:%s' % (parent_type, parent_uuid): json.dumps(None)} bch.insert(child_uuid, parent_col) # end _create_child def _read_child(self, result, obj_uuid, child_type, child_uuid, child_tstamp): if '%ss' % (child_type) not in result: result['%ss' % (child_type)] = [] child_info = {} child_info['to'] = self.uuid_to_fq_name(child_uuid) child_info['href'] = self._db_client_mgr.generate_url( child_type, child_uuid) child_info['uuid'] = child_uuid child_info['tstamp'] = child_tstamp result['%ss' % (child_type)].append(child_info) # end _read_child def _delete_child(self, bch, parent_type, parent_uuid, child_type, child_uuid): child_col = {'children:%s:%s' % (child_type, child_uuid): json.dumps(None)} bch.remove(parent_uuid, columns=[ 'children:%s:%s' % (child_type, child_uuid)]) # end _delete_child def _create_ref(self, bch, obj_type, obj_uuid, ref_type, ref_uuid, ref_data): bch.insert( obj_uuid, {'ref:%s:%s' % (ref_type, ref_uuid): json.dumps(ref_data)}) if obj_type == ref_type: bch.insert( ref_uuid, {'ref:%s:%s' % (obj_type, obj_uuid): json.dumps(ref_data)}) else: bch.insert( ref_uuid, {'backref:%s:%s' % (obj_type, obj_uuid): json.dumps(ref_data)}) # end _create_ref def _read_ref(self, result, obj_uuid, ref_type, ref_uuid, ref_data_json): if '%s_refs' % (ref_type) not in result: result['%s_refs' % (ref_type)] = [] ref_data = json.loads(ref_data_json) try: ref_info = {} ref_info['to'] = self.uuid_to_fq_name(ref_uuid) if ref_data: try: ref_info['attr'] = ref_data['attr'] except KeyError: # TODO remove backward compat old format had attr directly ref_info['attr'] = ref_data ref_info['href'] = self._db_client_mgr.generate_url( ref_type, ref_uuid) ref_info['uuid'] = ref_uuid result['%s_refs' % (ref_type)].append(ref_info) except NoIdError as e: if not ref_data['is_weakref']: raise e # end _read_ref def _read_back_ref(self, result, obj_uuid, back_ref_type, back_ref_uuid, back_ref_data_json): if '%s_back_refs' % (back_ref_type) not in result: result['%s_back_refs' % (back_ref_type)] = [] back_ref_info = {} back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid) back_ref_data = json.loads(back_ref_data_json) if back_ref_data: try: back_ref_info['attr'] = back_ref_data['attr'] except KeyError: # TODO remove backward compat old format had attr directly back_ref_info['attr'] = back_ref_data back_ref_info['href'] = self._db_client_mgr.generate_url( back_ref_type, back_ref_uuid) back_ref_info['uuid'] = back_ref_uuid result['%s_back_refs' % (back_ref_type)].append(back_ref_info) # end _read_back_ref def _update_ref(self, bch, obj_type, obj_uuid, ref_type, old_ref_uuid, new_ref_infos): if ref_type not in new_ref_infos: # update body didn't touch this type, nop return if old_ref_uuid not in new_ref_infos[ref_type]: # remove old ref bch.remove(obj_uuid, columns=[ 'ref:%s:%s' % 
(ref_type, old_ref_uuid)]) if obj_type == ref_type: bch.remove(old_ref_uuid, columns=[ 'ref:%s:%s' % (obj_type, obj_uuid)]) else: bch.remove(old_ref_uuid, columns=[ 'backref:%s:%s' % (obj_type, obj_uuid)]) else: # retain old ref with new ref attr new_ref_data = new_ref_infos[ref_type][old_ref_uuid] bch.insert( obj_uuid, {'ref:%s:%s' % (ref_type, old_ref_uuid): json.dumps(new_ref_data)}) if obj_type == ref_type: bch.insert( old_ref_uuid, {'ref:%s:%s' % (obj_type, obj_uuid): json.dumps(new_ref_data)}) else: bch.insert( old_ref_uuid, {'backref:%s:%s' % (obj_type, obj_uuid): json.dumps(new_ref_data)}) # uuid has been accounted for, remove so only new ones remain del new_ref_infos[ref_type][old_ref_uuid] # end _update_ref def _delete_ref(self, bch, obj_type, obj_uuid, ref_type, ref_uuid): bch.remove(obj_uuid, columns=['ref:%s:%s' % (ref_type, ref_uuid)]) if obj_type == ref_type: bch.remove(ref_uuid, columns=[ 'ref:%s:%s' % (obj_type, obj_uuid)]) else: bch.remove(ref_uuid, columns=[ 'backref:%s:%s' % (obj_type, obj_uuid)]) # end _delete_ref def is_latest(self, id, tstamp): id_perms_json = self._obj_uuid_cf.get( id, columns=['prop:id_perms'])['prop:id_perms'] id_perms = json.loads(id_perms_json) if id_perms['last_modified'] == tstamp: return True else: return False # end is_latest def uuid_to_fq_name(self, id): try: fq_name_json = self._obj_uuid_cf.get( id, columns=['fq_name'])['fq_name'] except pycassa.NotFoundException: raise NoIdError(id) return json.loads(fq_name_json) # end uuid_to_fq_name def uuid_to_obj_type(self, id): try: type_json = self._obj_uuid_cf.get(id, columns=['type'])['type'] except pycassa.NotFoundException: raise NoIdError(id) return json.loads(type_json) # end uuid_to_fq_name def fq_name_to_uuid(self, obj_type, fq_name): method_name = obj_type.replace('-', '_') fq_name_str = ':'.join(fq_name) col_start = '%s:' % (fq_name_str) col_fin = '%s;' % (fq_name_str) try: col_info_iter = self._obj_fq_name_cf.xget( method_name, column_start=col_start, column_finish=col_fin) except pycassa.NotFoundException: raise NoIdError('%s %s' % (obj_type, fq_name)) col_infos = list(col_info_iter) if len(col_infos) == 0: raise NoIdError('%s %s' % (obj_type, fq_name)) for (col_name, col_val) in col_infos: obj_uuid = col_name.split(':')[-1] return obj_uuid # end fq_name_to_uuid def uuid_to_obj_dict(self, id): try: obj_cols = self._obj_uuid_cf.get(id) except pycassa.NotFoundException: raise NoIdError(id) return obj_cols # end uuid_to_obj_dict def useragent_kv_store(self, key, value): columns = {'value': value} self._useragent_kv_cf.insert(key, columns) # end useragent_kv_store def useragent_kv_retrieve(self, key): if key: try: columns = self._useragent_kv_cf.get(key) except pycassa.NotFoundException: raise NoUserAgentKey return columns['value'] else: # no key specified, return entire contents kv_list = [] for ua_key, ua_cols in self._useragent_kv_cf.get_range(): kv_list.append({'key': ua_key, 'value': ua_cols['value']}) return kv_list # end useragent_kv_retrieve def useragent_kv_delete(self, key): self._useragent_kv_cf.remove(key) # end useragent_kv_delete def subnet_store(self, name, bitmask): columns = {'bitmask': bitmask} self._subnet_cf.insert(name, columns) # end subnet_store def subnet_retrieve(self, key): try: columns = self._subnet_cf.get(key) except pycassa.NotFoundException: # ok to fail as not all subnets will have bitmask allocated return None return columns['bitmask'] # end subnet_retrieve def subnet_delete(self, key): try: self._subnet_cf.remove(key) except pycassa.NotFoundException: # ok 
to fail as not all subnets will have bitmask allocated return None # end subnet_delete def walk(self, fn): walk_results = [] for obj_uuid, _ in self._obj_uuid_cf.get_range(): obj_cols_iter = self._obj_uuid_cf.xget(obj_uuid) obj_cols = dict((k, v) for k, v in obj_cols_iter) result = fn(obj_uuid, obj_cols) if result: walk_results.append(result) return walk_results # end walk # end class VncCassandraClient class VncDbClient(object): def __init__(self, api_svr_mgr, ifmap_srv_ip, ifmap_srv_port, uname, passwd, cass_srv_list, reset_config=False, ifmap_srv_loc=None): self._api_svr_mgr = api_svr_mgr # certificate auth ssl_options = None if api_svr_mgr._args.use_certs: ssl_options = { 'keyfile': api_svr_mgr._args.keyfile, 'certfile': api_svr_mgr._args.certfile, 'ca_certs': api_svr_mgr._args.ca_certs, 'cert_reqs': ssl.CERT_REQUIRED, 'ciphers': 'ALL' } self._ifmap_db = VncIfmapClient( self, ifmap_srv_ip, ifmap_srv_port, uname, passwd, ssl_options, ifmap_srv_loc) self._cassandra_db = VncCassandraClient( self, cass_srv_list, reset_config) # end __init__ def db_resync(self): # Read contents from cassandra and publish to ifmap self._cassandra_db.walk(self._dbe_resync) # end db_resync def db_check(self): # Read contents from cassandra and report any read exceptions check_results = self._cassandra_db.walk(self._dbe_check) return check_results # end db_check def set_uuid(self, obj_dict, id): # set uuid in the perms meta msb_id = id.int >> 64 lsb_id = id.int & ((1 << 64) - 1) obj_dict['id_perms']['uuid'] = {} obj_dict['id_perms']['uuid']['uuid_mslong'] = msb_id obj_dict['id_perms']['uuid']['uuid_lslong'] = lsb_id obj_dict['uuid'] = str(id) return True # end set_uuid def _alloc_set_uuid(self, obj_dict): id = uuid.uuid4() ok = self.set_uuid(obj_dict, id) return (ok, obj_dict['uuid']) # end _alloc_set_uuid def match_uuid(self, obj_dict, obj_uuid): new_dict = {'id_perms': {}} self.set_uuid(new_dict, uuid.UUID(obj_uuid)) return new_dict['id_perms']['uuid'] == obj_dict['id_perms']['uuid'] # end def _dbe_resync(self, obj_uuid, obj_cols): obj_type = json.loads(obj_cols['type']) method = getattr(self._cassandra_db, "_cassandra_%s_read" % (obj_type)) try: (ok, obj_dict) = method(obj_uuid) except Exception as e: self.config_object_error( obj_uuid, None, obj_type, 'dbe_resync:cassandra_read', str(e)) return parent_type = obj_dict.get('parent_type', None) method = getattr(self._ifmap_db, "_ifmap_%s_alloc" % (obj_type)) try: (ok, result) = method(parent_type, obj_dict['fq_name']) (my_imid, parent_imid) = result except Exception as e: self.config_object_error( obj_uuid, None, obj_type, 'dbe_resync:ifmap_alloc', str(e)) return obj_ids = {'uuid': obj_uuid, 'imid': my_imid, 'parent_imid': parent_imid} method = getattr(self._ifmap_db, "_ifmap_%s_create" % (obj_type)) try: (ok, result) = method(obj_ids, obj_dict) except Exception as e: self.config_object_error( obj_uuid, None, obj_type, 'dbe_resync:ifmap_create', str(e)) return # end _dbe_resync def _dbe_check(self, obj_uuid, obj_cols): obj_type = json.loads(obj_cols['type']) method = getattr(self._cassandra_db, "_cassandra_%s_read" % (obj_type)) try: (ok, obj_dict) = method(obj_uuid) except Exception as e: return {'uuid': obj_uuid, 'type': obj_type, 'error': str(e)} # end _dbe_check # Public Methods # Returns created ifmap_id def dbe_alloc(self, obj_type, obj_dict, uuid_requested=None): if uuid_requested: ok = self.set_uuid(obj_dict, uuid.UUID(uuid_requested)) else: (ok, obj_uuid) = self._alloc_set_uuid(obj_dict) parent_type = obj_dict.get('parent_type', None) method_name = 
obj_type.replace('-', '_') method = getattr(self._ifmap_db, "_ifmap_%s_alloc" % (method_name)) (ok, result) = method(parent_type, obj_dict['fq_name']) if not ok: return False, result (my_imid, parent_imid) = result obj_ids = { 'uuid': obj_dict['uuid'], 'imid': my_imid, 'parent_imid': parent_imid} return (True, obj_ids) # end dbe_alloc def dbe_create(self, obj_type, obj_ids, obj_dict): #self._cassandra_db.uuid_create(obj_type, obj_ids, obj_dict) method_name = obj_type.replace('-', '_') method = getattr( self._cassandra_db, "_cassandra_%s_create" % (method_name)) (ok, result) = method(obj_ids, obj_dict) # publish to ifmap method_name = obj_type.replace('-', '_') method = getattr(self._ifmap_db, "_ifmap_%s_create" % (method_name)) (ok, result) = method(obj_ids, obj_dict) return (ok, result) # end dbe_create # input id is ifmap-id + uuid def dbe_read(self, obj_type, obj_ids, obj_fields=None): method_name = obj_type.replace('-', '_') method = getattr( self._cassandra_db, "_cassandra_%s_read" % (method_name)) try: (ok, cassandra_result) = method(obj_ids['uuid'], obj_fields) except NoIdError as e: return (False, str(e)) return (ok, cassandra_result) # end dbe_read def dbe_is_latest(self, obj_ids, tstamp): try: is_latest = self._cassandra_db.is_latest(obj_ids['uuid'], tstamp) return (True, is_latest) except Exception as e: return (False, str(e)) # end dbe_is_latest def dbe_update(self, obj_type, obj_ids, new_obj_dict): method_name = obj_type.replace('-', '_') # read old value to get diff for ifmap method = getattr( self._cassandra_db, "_cassandra_%s_read" % (method_name)) try: (ok, old_obj_dict) = method(obj_ids['uuid']) except NoIdError as e: return (False, str(e)) method = getattr( self._cassandra_db, "_cassandra_%s_update" % (method_name)) (ok, cassandra_result) = method(obj_ids['uuid'], None, new_obj_dict) # publish to ifmap method = getattr(self._ifmap_db, "_ifmap_%s_update" % (method_name)) fq_name = self._cassandra_db.uuid_to_fq_name(obj_ids['uuid']) ifmap_id = self._ifmap_db.fq_name_to_ifmap_id(obj_type, fq_name) (ok, ifmap_result) = method(ifmap_id, old_obj_dict, new_obj_dict) return (ok, cassandra_result) # end dbe_update def dbe_list(self, obj_type, parent_uuid=None): method_name = obj_type.replace('-', '_') method = getattr( self._cassandra_db, "_cassandra_%s_list" % (method_name)) (ok, cassandra_result) = method(parent_uuid) return (ok, cassandra_result) # end dbe_list def dbe_delete(self, obj_type, obj_ids): fq_name = self._cassandra_db.uuid_to_fq_name(obj_ids['uuid']) method_name = obj_type.replace('-', '_') method = getattr( self._cassandra_db, "_cassandra_%s_delete" % (method_name)) (ok, cassandra_result) = method(obj_ids['uuid']) # publish to ifmap method = getattr(self._ifmap_db, "_ifmap_%s_delete" % (method_name)) (ok, ifmap_result) = method(obj_ids) if not ok: return ok, ifmap_result return ok, ifmap_result # end dbe_delete def useragent_kv_store(self, key, value): self._cassandra_db.useragent_kv_store(key, value) # end useragent_kv_store def useragent_kv_retrieve(self, key): return self._cassandra_db.useragent_kv_retrieve(key) # end useragent_kv_retrieve def useragent_kv_delete(self, key): return self._cassandra_db.useragent_kv_delete(key) # end useragent_kv_delete def subnet_store(self, name, bitmask): self._cassandra_db.subnet_store(name, bitmask) # end subnet_store def subnet_retrieve(self, key): return self._cassandra_db.subnet_retrieve(key) # end subnet_retrieve def subnet_delete(self, key): self._cassandra_db.subnet_delete(key) # end subnet_delete def uuid_vnlist(self): 
return self._cassandra_db.uuid_vnlist() # end uuid_vnlist def uuid_to_ifmap_id(self, id): return self._cassandra_db.uuid_to_ifmap_id(id) # end uuid_to_ifmap_id def fq_name_to_uuid(self, obj_type, fq_name): return self._cassandra_db.fq_name_to_uuid(obj_type, fq_name) # end fq_name_to_uuid def uuid_to_fq_name(self, obj_uuid): return self._cassandra_db.uuid_to_fq_name(obj_uuid) # end uuid_to_fq_name def uuid_to_obj_type(self, obj_uuid): return self._cassandra_db.uuid_to_obj_type(obj_uuid) # end uuid_to_obj_type def ifmap_id_to_fq_name(self, ifmap_id): return self._ifmap_db.ifmap_id_to_fq_name(ifmap_id) # end ifmap_id_to_fq_name # def ifmap_id_to_uuid(self, ifmap_id): # return self._cassandra_db.fq_name_to_uuid(fq_name) # end ifmap_id_to_uuid def uuid_to_obj_dict(self, obj_uuid): return self._cassandra_db.uuid_to_obj_dict(obj_uuid) # end uuid_to_obj_dict # Helper routines for REST def generate_url(self, obj_type, obj_uuid): return self._api_svr_mgr.generate_url(obj_type, obj_uuid) # end generate_url def config_object_error(self, id, fq_name_str, obj_type, operation, err_str): self._api_svr_mgr.config_object_error( id, fq_name_str, obj_type, operation, err_str) # end config_object_error # end class VncDbClient
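# A minimal sketch of the getattr-based dispatch pattern the dbe_* methods
# above rely on: the hyphenated object type is turned into an underscored
# method-name fragment and resolved at runtime. FakeBackend and dispatch_read
# are hypothetical names for illustration, not part of the original API.
class FakeBackend(object):
    def _cassandra_virtual_network_read(self, obj_uuid, obj_fields=None):
        # Pretend read: just echo back the uuid and requested fields.
        return (True, {'uuid': obj_uuid, 'fields': obj_fields})

def dispatch_read(backend, obj_type, obj_uuid, obj_fields=None):
    # 'virtual-network' becomes '_cassandra_virtual_network_read'
    method = getattr(backend, "_cassandra_%s_read" % obj_type.replace('-', '_'))
    return method(obj_uuid, obj_fields)

# Usage sketch:
# ok, result = dispatch_read(FakeBackend(), 'virtual-network', 'uuid-1')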
apache-2.0
-562,423,781,886,344,640
36.378405
79
0.552661
false
tinloaf/home-assistant
tests/components/image_processing/test_openalpr_cloud.py
6
7163
"""The tests for the openalpr cloud platform.""" import asyncio from unittest.mock import patch, PropertyMock from homeassistant.core import callback from homeassistant.setup import setup_component from homeassistant.components import camera, image_processing as ip from homeassistant.components.image_processing.openalpr_cloud import ( OPENALPR_API_URL) from tests.common import ( get_test_home_assistant, assert_setup_component, load_fixture, mock_coro) from tests.components.image_processing import common class TestOpenAlprCloudSetup: """Test class for image processing.""" def setup_method(self): """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() def teardown_method(self): """Stop everything that was started.""" self.hass.stop() def test_setup_platform(self): """Set up platform with one entity.""" config = { ip.DOMAIN: { 'platform': 'openalpr_cloud', 'source': { 'entity_id': 'camera.demo_camera' }, 'region': 'eu', 'api_key': 'sk_abcxyz123456', }, 'camera': { 'platform': 'demo' }, } with assert_setup_component(1, ip.DOMAIN): setup_component(self.hass, ip.DOMAIN, config) assert self.hass.states.get('image_processing.openalpr_demo_camera') def test_setup_platform_name(self): """Set up platform with one entity and set name.""" config = { ip.DOMAIN: { 'platform': 'openalpr_cloud', 'source': { 'entity_id': 'camera.demo_camera', 'name': 'test local' }, 'region': 'eu', 'api_key': 'sk_abcxyz123456', }, 'camera': { 'platform': 'demo' }, } with assert_setup_component(1, ip.DOMAIN): setup_component(self.hass, ip.DOMAIN, config) assert self.hass.states.get('image_processing.test_local') def test_setup_platform_without_api_key(self): """Set up platform with one entity without api_key.""" config = { ip.DOMAIN: { 'platform': 'openalpr_cloud', 'source': { 'entity_id': 'camera.demo_camera' }, 'region': 'eu', }, 'camera': { 'platform': 'demo' }, } with assert_setup_component(0, ip.DOMAIN): setup_component(self.hass, ip.DOMAIN, config) def test_setup_platform_without_region(self): """Set up platform with one entity without region.""" config = { ip.DOMAIN: { 'platform': 'openalpr_cloud', 'source': { 'entity_id': 'camera.demo_camera' }, 'api_key': 'sk_abcxyz123456', }, 'camera': { 'platform': 'demo' }, } with assert_setup_component(0, ip.DOMAIN): setup_component(self.hass, ip.DOMAIN, config) class TestOpenAlprCloud: """Test class for image processing.""" def setup_method(self): """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() config = { ip.DOMAIN: { 'platform': 'openalpr_cloud', 'source': { 'entity_id': 'camera.demo_camera', 'name': 'test local' }, 'region': 'eu', 'api_key': 'sk_abcxyz123456', }, 'camera': { 'platform': 'demo' }, } with patch('homeassistant.components.image_processing.openalpr_cloud.' 
'OpenAlprCloudEntity.should_poll', new_callable=PropertyMock(return_value=False)): setup_component(self.hass, ip.DOMAIN, config) self.alpr_events = [] @callback def mock_alpr_event(event): """Mock event.""" self.alpr_events.append(event) self.hass.bus.listen('image_processing.found_plate', mock_alpr_event) self.params = { 'secret_key': "sk_abcxyz123456", 'tasks': "plate", 'return_image': 0, 'country': 'eu' } def teardown_method(self): """Stop everything that was started.""" self.hass.stop() def test_openalpr_process_image(self, aioclient_mock): """Set up and scan a picture and test plates from event.""" aioclient_mock.post( OPENALPR_API_URL, params=self.params, text=load_fixture('alpr_cloud.json'), status=200 ) with patch('homeassistant.components.camera.async_get_image', return_value=mock_coro( camera.Image('image/jpeg', b'image'))): common.scan(self.hass, entity_id='image_processing.test_local') self.hass.block_till_done() state = self.hass.states.get('image_processing.test_local') assert len(aioclient_mock.mock_calls) == 1 assert len(self.alpr_events) == 5 assert state.attributes.get('vehicles') == 1 assert state.state == 'H786P0J' event_data = [event.data for event in self.alpr_events if event.data.get('plate') == 'H786P0J'] assert len(event_data) == 1 assert event_data[0]['plate'] == 'H786P0J' assert event_data[0]['confidence'] == float(90.436699) assert event_data[0]['entity_id'] == \ 'image_processing.test_local' def test_openalpr_process_image_api_error(self, aioclient_mock): """Set up and scan a picture and test api error.""" aioclient_mock.post( OPENALPR_API_URL, params=self.params, text="{'error': 'error message'}", status=400 ) with patch('homeassistant.components.camera.async_get_image', return_value=mock_coro( camera.Image('image/jpeg', b'image'))): common.scan(self.hass, entity_id='image_processing.test_local') self.hass.block_till_done() assert len(aioclient_mock.mock_calls) == 1 assert len(self.alpr_events) == 0 def test_openalpr_process_image_api_timeout(self, aioclient_mock): """Set up and scan a picture and test api error.""" aioclient_mock.post( OPENALPR_API_URL, params=self.params, exc=asyncio.TimeoutError() ) with patch('homeassistant.components.camera.async_get_image', return_value=mock_coro( camera.Image('image/jpeg', b'image'))): common.scan(self.hass, entity_id='image_processing.test_local') self.hass.block_till_done() assert len(aioclient_mock.mock_calls) == 1 assert len(self.alpr_events) == 0
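# A rough sketch of the request shape these tests mock via aioclient_mock,
# written with plain aiohttp; the component's actual internals may differ,
# and query_openalpr is a hypothetical helper name. The params mirror
# self.params in the test class above.
import aiohttp

async def query_openalpr(image_bytes):
    params = {
        'secret_key': 'sk_abcxyz123456',  # test key from the config above
        'tasks': 'plate',
        'return_image': 0,
        'country': 'eu',
    }
    async with aiohttp.ClientSession() as session:
        # OPENALPR_API_URL is imported at the top of this test module
        async with session.post(OPENALPR_API_URL, params=params,
                                data=image_bytes) as resp:
            return await resp.json()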
apache-2.0
7,926,611,749,753,851,000
32.787736
78
0.535669
false
aniketpuranik/pynet_test
ANSIBLE/library/eos_staticroute.py
8
18931
#!/usr/bin/python # # Copyright (c) 2015, Arista Networks, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of Arista Networks nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN # IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # DOCUMENTATION = """ --- module: eos_staticroute short_description: Configure static routes in EOS description: - The eos_staticroute module manages static route configuration options on Arista EOS nodes. version_added: 1.2.0 category: Route Policy author: Arista EOS+ CS requirements: - Arista EOS 4.13.7M or later with command API enabled - Python Client for eAPI 0.4.0 or later notes: - All configuration is idempotent unless otherwise specified - Supports eos metaparameters for using the eAPI transport - Supports stateful resource configuration. options: ip_dest: description: - Destination IP address required: true default: null choices: [] version_added: 1.2.0 next_hop_ip: description: - IP address of the next router. 
Only valid when next_hop is an egress interface required: false default: null choices: [] version_added: 1.2.0 next_hop: description: - Next hop IP address or egress interface required: true default: null choices: [] version_added: 1.2.0 distance: description: - Distance designated for this route required: false default: 1 choices: [] version_added: 1.2.0 route_name: description: - Descriptive name for the route required: false default: null choices: [] version_added: 1.2.0 tag: description: - Tag assigned for the route required: false default: null choices: [] version_added: 1.2.0 """ EXAMPLES = """ - eos_staticroute: ip_dest=1.1.1.0/24 next_hop=Ethernet1 next_hop_ip=1.1.1.1 distance=1 tag=15 route_name=route1 """ #<<EOS_COMMON_MODULE_START>> import syslog import collections from ansible.module_utils.basic import * try: import pyeapi PYEAPI_AVAILABLE = True except ImportError: PYEAPI_AVAILABLE = False DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE DEFAULT_CONNECTION = 'localhost' TRANSPORTS = ['socket', 'http', 'https', 'http_local'] class EosConnection(object): __attributes__ = ['username', 'password', 'host', 'transport', 'port'] def __init__(self, **kwargs): self.connection = kwargs['connection'] self.transport = kwargs.get('transport') self.username = kwargs.get('username') self.password = kwargs.get('password') self.host = kwargs.get('host') self.port = kwargs.get('port') self.config = kwargs.get('config') def connect(self): if self.config is not None: pyeapi.load_config(self.config) config = dict() if self.connection is not None: config = pyeapi.config_for(self.connection) if not config: msg = 'Connection name "{}" not found'.format(self.connection) for key in self.__attributes__: if getattr(self, key) is not None: config[key] = getattr(self, key) if 'transport' not in config: raise ValueError('Connection must define a transport') connection = pyeapi.client.make_connection(**config) node = pyeapi.client.Node(connection, **config) try: node.enable('show version') except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError): raise ValueError('unable to connect to {}'.format(node)) return node class EosAnsibleModule(AnsibleModule): meta_args = { 'config': dict(), 'username': dict(), 'password': dict(), 'host': dict(), 'connection': dict(default=DEFAULT_CONNECTION), 'transport': dict(choices=TRANSPORTS), 'port': dict(), 'debug': dict(type='bool', default='false'), 'logging': dict(type='bool', default='true') } stateful_args = { 'state': dict(default='present', choices=['present', 'absent']), } def __init__(self, stateful=True, autorefresh=False, *args, **kwargs): kwargs['argument_spec'].update(self.meta_args) self._stateful = stateful if stateful: kwargs['argument_spec'].update(self.stateful_args) ## Ok, so in Ansible 2.0, ## AnsibleModule.__init__() sets self.params and then ## calls self.log() ## (through self._log_invocation()) ## ## However, self.log() (overridden in EosAnsibleModule) ## references self._logging ## and self._logging (defined in EosAnsibleModule) ## references self.params. ## ## So ... I'm defining self._logging without "or self.params['logging']" ## *before* AnsibleModule.__init__() to avoid a "ref before def". ## ## I verified that this works with Ansible 1.9.4 and 2.0.0.2. ## The only caveat is that the first log message in ## AnsibleModule.__init__() won't be subject to the value of ## self.params['logging'].
self._logging = kwargs.get('logging') super(EosAnsibleModule, self).__init__(*args, **kwargs) self.result = dict(changed=False, changes=dict()) self._debug = kwargs.get('debug') or self.boolean(self.params['debug']) self._logging = kwargs.get('logging') or self.params['logging'] self.log('DEBUG flag is %s' % self._debug) self.debug('pyeapi_version', self.check_pyeapi()) self.debug('stateful', self._stateful) self.debug('params', self.params) self._attributes = self.map_argument_spec() self.validate() self._autorefresh = autorefresh self._node = EosConnection(**self.params) self._node.connect() self._node = self.connect() self._instance = None self.desired_state = self.params['state'] if self._stateful else None self.exit_after_flush = kwargs.get('exit_after_flush') @property def instance(self): if self._instance: return self._instance func = self.func('instance') if not func: self.fail('Module does not support "instance"') try: self._instance = func(self) except Exception as exc: self.fail('instance[error]: %s' % exc.message) self.log("called instance: %s" % self._instance) return self._instance @property def attributes(self): return self._attributes @property def node(self): return self._node def check_pyeapi(self): if not PYEAPI_AVAILABLE: self.fail('Unable to import pyeapi, is it installed?') return pyeapi.__version__ def map_argument_spec(self): """map_argument_spec maps only the module argument spec to attrs This method will map the argumentspec minus the meta_args to attrs and return the attrs. This returns a dict object that includes only the original argspec plus the stateful_args (if self._stateful=True) Returns: dict: Returns a dict object that includes the original argument_spec plus stateful_args with values minus meta_args """ keys = set(self.params).difference(self.meta_args) attrs = dict() attrs = dict([(k, self.params[k]) for k in self.params if k in keys]) if 'CHECKMODE' in attrs: del attrs['CHECKMODE'] return attrs def validate(self): for key, value in self.attributes.iteritems(): func = self.func('validate_%s' % key) if func: self.attributes[key] = func(value) def create(self): if not self.check_mode: func = self.func('create') if not func: self.fail('Module must define "create" function') return self.invoke(func, self) def remove(self): if not self.check_mode: func = self.func('remove') if not func: self.fail('Module must define "remove" function') return self.invoke(func, self) def flush(self, exit_after_flush=False): self.exit_after_flush = exit_after_flush if self.desired_state == 'present' or not self._stateful: if self.instance.get('state') == 'absent': changed = self.create() self.result['changed'] = changed or True self.refresh() # After a create command, flush the running-config # so we get the latest for any other attributes self._node._running_config = None changeset = self.attributes.viewitems() - self.instance.viewitems() if self._debug: self.debug('desired_state', self.attributes) self.debug('current_state', self.instance) changes = self.update(changeset) if changes: self.result['changes'] = changes self.result['changed'] = True self._attributes.update(changes) flush = self.func('flush') if flush: self.invoke(flush, self) elif self.desired_state == 'absent' and self._stateful: if self.instance.get('state') == 'present': changed = self.remove() self.result['changed'] = changed or True elif self._stateful: if self.desired_state != self.instance.get('state'): func = self.func(self.desired_state) changed = self.invoke(func, self) self.result['changed'] =
changed or True self.refresh() # By calling self.instance here we trigger another show running-config # all which causes delay. Only if debug is enabled do we call this # since it will display the latest state of the object. if self._debug: self.result['instance'] = self.instance if self.exit_after_flush: self.exit() def update(self, changeset): changes = dict() for key, value in changeset: if value is not None: changes[key] = value func = self.func('set_%s' % key) if func and not self.check_mode: try: self.invoke(func, self) except Exception as exc: self.fail(exc.message) return changes def connect(self): if self.params['config']: pyeapi.load_config(self.params['config']) config = dict() if self.params['connection']: config = pyeapi.config_for(self.params['connection']) if not config: msg = 'Connection name "%s" not found' % self.params['connection'] self.fail(msg) if self.params['username']: config['username'] = self.params['username'] if self.params['password']: config['password'] = self.params['password'] if self.params['transport']: config['transport'] = self.params['transport'] if self.params['port']: config['port'] = self.params['port'] if self.params['host']: config['host'] = self.params['host'] if 'transport' not in config: self.fail('Connection must define a transport') connection = pyeapi.client.make_connection(**config) self.log('Creating connection with autorefresh=%s' % self._autorefresh) node = pyeapi.client.Node(connection, autorefresh=self._autorefresh, **config) try: resp = node.enable('show version') self.debug('eos_version', resp[0]['result']['version']) self.debug('eos_model', resp[0]['result']['modelName']) except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError): self.fail('unable to connect to %s' % node) else: self.log('Connected to node %s' % node) self.debug('node', str(node)) return node def config(self, commands): self.result['changed'] = True if not self.check_mode: self.node.config(commands) def api(self, module): return self.node.api(module) def func(self, name): return globals().get(name) def invoke(self, func, *args, **kwargs): try: return func(*args, **kwargs) except Exception as exc: self.fail(exc.message) def invoke_function(self, name, *args, **kwargs): func = self.func(name) if func: return self.invoke(func, *args, **kwargs) def fail(self, msg): self.invoke_function('on_fail', self) self.log('ERROR: %s' % msg, syslog.LOG_ERR) self.fail_json(msg=msg) def exit(self): self.invoke_function('on_exit', self) self.log('Module completed successfully') self.exit_json(**self.result) def refresh(self): self._instance = None def debug(self, key, value): if self._debug: if 'debug' not in self.result: self.result['debug'] = dict() self.result['debug'][key] = value def log(self, message, log_args=None, priority=None): if self._logging: syslog.openlog('ansible-eos') priority = priority or DEFAULT_SYSLOG_PRIORITY syslog.syslog(priority, str(message)) @classmethod def add_state(cls, name): cls.stateful_args['state']['choices'].append(name) #<<EOS_COMMON_MODULE_END>> def instance(module): """ Returns an instance of StaticRoute """ ip_dest = module.attributes['ip_dest'] next_hop = module.attributes['next_hop'] next_hop_ip = module.attributes['next_hop_ip'] distance = module.attributes['distance'] _instance = dict(ip_dest=ip_dest, next_hop=next_hop, next_hop_ip=next_hop_ip, distance=distance, state='absent') try: result = module.node.api('staticroute').\ get(ip_dest)[next_hop][next_hop_ip][distance] module.log(result) except: result = None if result: 
_instance['state'] = 'present' _instance['route_name'] = result['route_name'] _instance['tag'] = result['tag'] return _instance def create(module): """ Creates a new instance of a static route on the node """ ip_dest = module.attributes['ip_dest'] next_hop = module.attributes['next_hop'] next_hop_ip = module.attributes['next_hop_ip'] distance = module.attributes['distance'] tag = module.attributes['tag'] route_name = module.attributes['route_name'] module.node.api('staticroute').create(ip_dest, next_hop, next_hop_ip=next_hop_ip, distance=distance, tag=tag, route_name=route_name) def remove(module): """ Removes an instance of a static route on the node """ ip_dest = module.attributes['ip_dest'] next_hop = module.attributes['next_hop'] next_hop_ip = module.attributes['next_hop_ip'] distance = module.attributes['distance'] tag = module.attributes['tag'] route_name = module.attributes['route_name'] module.node.api('staticroute').delete(ip_dest, next_hop, next_hop_ip=next_hop_ip, distance=distance, tag=tag, route_name=route_name) def set_tag(module): """ Modifies the tag for an existing route """ ip_dest = module.attributes['ip_dest'] next_hop = module.attributes['next_hop'] next_hop_ip = module.attributes['next_hop_ip'] distance = module.attributes['distance'] tag = module.attributes['tag'] route_name = module.attributes['route_name'] module.node.api('staticroute').set_tag(ip_dest, next_hop, next_hop_ip=next_hop_ip, distance=distance, tag=tag, route_name=route_name) def set_route_name(module): """ Modifies the route name for an existing route """ ip_dest = module.attributes['ip_dest'] next_hop = module.attributes['next_hop'] next_hop_ip = module.attributes['next_hop_ip'] distance = module.attributes['distance'] tag = module.attributes['tag'] route_name = module.attributes['route_name'] module.node.api('staticroute').set_route_name(ip_dest, next_hop, next_hop_ip=next_hop_ip, distance=distance, tag=tag, route_name=route_name) def main(): """ The main module routine called when the module is run by Ansible """ argument_spec = dict( ip_dest=dict(required=True), next_hop=dict(required=True), next_hop_ip=dict(default=None), distance=dict(type='int', default=1), route_name=dict(default=None), tag=dict(type='int', default=0) ) module = EosAnsibleModule(argument_spec=argument_spec, supports_check_mode=True) module.flush(True) main()
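# A minimal sketch of the pyeapi call chain this module drives, assuming a
# node profile named 'veos01' exists in ~/.eapi.conf (a hypothetical name);
# the create() arguments mirror the EXAMPLES block above.
def create_route_directly():
    import pyeapi  # same dependency the module checks for above
    node = pyeapi.connect_to('veos01')
    node.api('staticroute').create('1.1.1.0/24', 'Ethernet1',
                                   next_hop_ip='1.1.1.1', distance=1,
                                   tag=15, route_name='route1')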
apache-2.0
-3,473,176,559,020,257,300
32.389771
82
0.591728
false
cswiercz/sympy
sympy/physics/quantum/tests/test_matrixutils.py
98
4111
from random import randint from sympy import Matrix, zeros, ones, Integer from sympy.physics.quantum.matrixutils import ( to_sympy, to_numpy, to_scipy_sparse, matrix_tensor_product, matrix_to_zero, matrix_zeros, numpy_ndarray, scipy_sparse_matrix ) from sympy.core.compatibility import range from sympy.external import import_module from sympy.utilities.pytest import skip m = Matrix([[1, 2], [3, 4]]) def test_sympy_to_sympy(): assert to_sympy(m) == m def test_matrix_to_zero(): assert matrix_to_zero(m) == m assert matrix_to_zero(Matrix([[0, 0], [0, 0]])) == Integer(0) np = import_module('numpy') def test_to_numpy(): if not np: skip("numpy not installed.") result = np.matrix([[1, 2], [3, 4]], dtype='complex') assert (to_numpy(m) == result).all() def test_matrix_tensor_product(): if not np: skip("numpy not installed.") l1 = zeros(4) for i in range(16): l1[i] = 2**i l2 = zeros(4) for i in range(16): l2[i] = i l3 = zeros(2) for i in range(4): l3[i] = i vec = Matrix([1, 2, 3]) #test for Matrix known 4x4 matrices numpyl1 = np.matrix(l1.tolist()) numpyl2 = np.matrix(l2.tolist()) numpy_product = np.kron(numpyl1, numpyl2) args = [l1, l2] sympy_product = matrix_tensor_product(*args) assert numpy_product.tolist() == sympy_product.tolist() numpy_product = np.kron(numpyl2, numpyl1) args = [l2, l1] sympy_product = matrix_tensor_product(*args) assert numpy_product.tolist() == sympy_product.tolist() #test for other known matrix of different dimensions numpyl2 = np.matrix(l3.tolist()) numpy_product = np.kron(numpyl1, numpyl2) args = [l1, l3] sympy_product = matrix_tensor_product(*args) assert numpy_product.tolist() == sympy_product.tolist() numpy_product = np.kron(numpyl2, numpyl1) args = [l3, l1] sympy_product = matrix_tensor_product(*args) assert numpy_product.tolist() == sympy_product.tolist() #test for non square matrix numpyl2 = np.matrix(vec.tolist()) numpy_product = np.kron(numpyl1, numpyl2) args = [l1, vec] sympy_product = matrix_tensor_product(*args) assert numpy_product.tolist() == sympy_product.tolist() numpy_product = np.kron(numpyl2, numpyl1) args = [vec, l1] sympy_product = matrix_tensor_product(*args) assert numpy_product.tolist() == sympy_product.tolist() #test for random matrix with random values that are floats random_matrix1 = np.random.rand(randint(1, 5), randint(1, 5)) random_matrix2 = np.random.rand(randint(1, 5), randint(1, 5)) numpy_product = np.kron(random_matrix1, random_matrix2) args = [Matrix(random_matrix1.tolist()), Matrix(random_matrix2.tolist())] sympy_product = matrix_tensor_product(*args) assert not (sympy_product - Matrix(numpy_product.tolist())).tolist() > \ (ones(sympy_product.rows, sympy_product.cols)*epsilon).tolist() #test for three matrix kronecker sympy_product = matrix_tensor_product(l1, vec, l2) numpy_product = np.kron(l1, np.kron(vec, l2)) assert numpy_product.tolist() == sympy_product.tolist() scipy = import_module('scipy', __import__kwargs={'fromlist': ['sparse']}) def test_to_scipy_sparse(): if not np: skip("numpy not installed.") if not scipy: skip("scipy not installed.") else: sparse = scipy.sparse result = sparse.csr_matrix([[1, 2], [3, 4]], dtype='complex') assert np.linalg.norm((to_scipy_sparse(m) - result).todense()) == 0.0 epsilon = .000001 def test_matrix_zeros_sympy(): sym = matrix_zeros(4, 4, format='sympy') assert isinstance(sym, Matrix) def test_matrix_zeros_numpy(): if not np: skip("numpy not installed.") num = matrix_zeros(4, 4, format='numpy') assert isinstance(num, numpy_ndarray) def test_matrix_zeros_scipy(): if not np: skip("numpy not
installed.") if not scipy: skip("scipy not installed.") sci = matrix_zeros(4, 4, format='scipy.sparse') assert isinstance(sci, scipy_sparse_matrix)
bsd-3-clause
-6,833,396,716,264,097,000
29.227941
77
0.646315
false
Tehsmash/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/drivers/phy_asa.py
2
5011
# Copyright 2016 Cisco Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from networking_cisco.apps.saf.common import dfa_logger as logging from networking_cisco.apps.saf.server.services.firewall.native import ( fabric_setup_base as FP) from networking_cisco.apps.saf.server.services.firewall.native.drivers import ( asa_rest as asa) from networking_cisco.apps.saf.server.services.firewall.native.drivers import ( base) LOG = logging.getLogger(__name__) class PhyAsa(base.BaseDriver, FP.FabricApi): """Physical ASA Driver. """ def __init__(self): LOG.info("Initializing physical ASA") super(PhyAsa, self).__init__() def initialize(self, cfg_dict): self.mgmt_ip_addr = cfg_dict.get('mgmt_ip_addr').strip() self.user = cfg_dict.get('user').strip() self.pwd = cfg_dict.get('pwd').strip() self.interface_in = cfg_dict.get('interface_in').strip() self.interface_out = cfg_dict.get('interface_out').strip() LOG.info("ASA with mgmt %s getting initialized", self.mgmt_ip_addr) self.asa5585 = asa.Asa5585(self.mgmt_ip_addr, self.user, self.pwd) def populate_event_que(self, que_obj): LOG.info("Populate Event for PhyAsa") def populate_dcnm_obj(self, dcnm_obj): LOG.info("Populate Event for DCNM obj") def network_create_notif(self, tenant_id, tenant_name, cidr): """Network Create Notification. """ LOG.info("Nwk Create Notif PhyAsa") def network_delete_notif(self, tenant_id, tenant_name, network_id): """Network Delete Notification. 
""" LOG.info("Nwk Delete Notif PhyAsa") def is_device_virtual(self): return False def get_name(self): return 'phy_asa' def get_max_quota(self): return self.asa5585.get_quota() def create_fw(self, tenant_id, data): LOG.info("In creating phy ASA FW data is %s", data) tenant_name = data.get('tenant_name') in_ip_dict = self.get_in_ip_addr(tenant_id) in_gw = in_ip_dict.get('gateway') in_sec_gw = in_ip_dict.get('sec_gateway') in_serv_node = self.get_in_srvc_node_ip_addr(tenant_id) out_ip_dict = self.get_out_ip_addr(tenant_id) out_ip_gw = out_ip_dict.get('gateway') out_sec_gw = out_ip_dict.get('sec_gateway') out_serv_node = self.get_out_srvc_node_ip_addr(tenant_id) in_seg, in_vlan = self.get_in_seg_vlan(tenant_id) out_seg, out_vlan = self.get_out_seg_vlan(tenant_id) kw = {'params': {'tenant_name': tenant_name, 'in_vlan': in_vlan, 'out_vlan': out_vlan, 'in_ip': in_serv_node, 'in_mask': '255.255.255.0', 'in_gw': in_gw, 'in_sec_gw': in_sec_gw, 'out_ip': out_serv_node, 'out_mask': '255.255.255.0', 'out_gw': out_ip_gw, 'out_sec_gw': out_sec_gw, 'intf_in': self.interface_in, 'intf_out': self.interface_out}} status = self.asa5585.setup(**kw) if status is False: LOG.error("Physical FW instance creation failure for " "tenant %s", tenant_name) return False status = self.asa5585.apply_policy(data) if status is False: LOG.error("Applying FW policy failure for tenant %s", tenant_name) return status def delete_fw(self, tenant_id, data): LOG.info("In Delete fw data is %s", data) tenant_name = data.get('tenant_name') in_serv_node = self.get_in_srvc_node_ip_addr(tenant_id) out_serv_node = self.get_out_srvc_node_ip_addr(tenant_id) in_seg, in_vlan = self.get_in_seg_vlan(tenant_id) out_seg, out_vlan = self.get_out_seg_vlan(tenant_id) kw = dict(params=dict(tenant_name=tenant_name, in_vlan=in_vlan, out_vlan=out_vlan, in_ip=in_serv_node, in_mask='255.255.255.0', out_ip=out_serv_node, out_mask='255.255.255.0', intf_in=self.interface_in, intf_out=self.interface_out)) status = self.asa5585.cleanup(**kw) return status def modify_fw(self, tenant_id, data): LOG.info("In Modify fw data is %s", data) return self.asa5585.apply_policy(data)
apache-2.0
922,873,644,558,718,700
39.739837
79
0.595889
false
teochenglim/ansible-modules-extras
monitoring/logentries.py
153
4566
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Ivan Vanderbyl <[email protected]> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: logentries author: "Ivan Vanderbyl (@ivanvanderbyl)" short_description: Module for tracking logs via logentries.com description: - Sends logs to LogEntries in realtime version_added: "1.6" options: path: description: - path to a log file required: true state: description: - following state of the log choices: [ 'present', 'absent' ] required: false default: present name: description: - name of the log required: false logtype: description: - type of the log required: false notes: - Requires the LogEntries agent which can be installed following the instructions at logentries.com ''' EXAMPLES = ''' - logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log - logentries: path=/var/log/nginx/error.log state=absent ''' def query_log_status(module, le_path, path, state="present"): """ Returns whether a log is followed or not. """ if state == "present": rc, out, err = module.run_command("%s followed %s" % (le_path, path)) if rc == 0: return True return False def follow_log(module, le_path, logs, name=None, logtype=None): """ Follows one or more logs if not already followed. """ followed_count = 0 for log in logs: if query_log_status(module, le_path, log): continue if module.check_mode: module.exit_json(changed=True) cmd = [le_path, 'follow', log] if name: cmd.extend(['--name',name]) if logtype: cmd.extend(['--type',logtype]) rc, out, err = module.run_command(' '.join(cmd)) if not query_log_status(module, le_path, log): module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) followed_count += 1 if followed_count > 0: module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,)) module.exit_json(changed=False, msg="log(s) already followed") def unfollow_log(module, le_path, logs): """ Unfollows one or more logs if followed. """ removed_count = 0 # Using a for loop in case of error, we can report the log that failed for log in logs: # Query the log first, to see if we even need to remove.
if not query_log_status(module, le_path, log): continue if module.check_mode: module.exit_json(changed=True) rc, out, err = module.run_command([le_path, 'rm', log]) if query_log_status(module, le_path, log): module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip())) removed_count += 1 if removed_count > 0: module.exit_json(changed=True, msg="removed %d log(s)" % removed_count) module.exit_json(changed=False, msg="log(s) already unfollowed") def main(): module = AnsibleModule( argument_spec = dict( path = dict(required=True), state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), name = dict(required=False, default=None, type='str'), logtype = dict(required=False, default=None, type='str', aliases=['type']) ), supports_check_mode=True ) le_path = module.get_bin_path('le', True, ['/usr/local/bin']) p = module.params # Handle multiple log files logs = p["path"].split(",") logs = filter(None, logs) if p["state"] in ["present", "followed"]: follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) elif p["state"] in ["absent", "unfollowed"]: unfollow_log(module, le_path, logs) # import module snippets from ansible.module_utils.basic import * main()
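# The module wraps the LogEntries agent CLI; a rough sketch of the
# equivalent manual invocations it builds above (paths and names are
# illustrative, and follow_manually is a hypothetical helper).
import subprocess

def follow_manually(path, name=None, logtype=None):
    cmd = ['le', 'follow', path]
    if name:
        cmd.extend(['--name', name])
    if logtype:
        cmd.extend(['--type', logtype])
    return subprocess.call(cmd) == 0

# e.g. follow_manually('/var/log/nginx/access.log', name='nginx-access-log')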
gpl-3.0
-6,479,124,863,782,169,000
30.061224
103
0.621332
false
chrisxue815/leetcode_python
problems/test_0457_floyd.py
1
1491
import unittest from typing import List import utils # O(n) time. O(1) space. Floyd's tortoise and hare cycle detection algorithm. class Solution: def circularArrayLoop(self, nums: List[int]) -> bool: n = len(nums) for start, move in enumerate(nums): if move == 0: continue t = h = start forward = move > 0 def advance(curr): move = nums[curr] if move == 0 or (move > 0) != forward: return False, curr nxt = (curr + move) % n return nxt != curr, nxt while True: ok, h = advance(h) if not ok: break ok, h = advance(h) if not ok: break ok, t = advance(t) if t == h: return True t = start while True: ok, nxt = advance(t) if not ok: break nums[t] = 0 t = nxt return False class Test(unittest.TestCase): def test(self): cases = utils.load_test_json(__file__).test_cases for case in cases: args = str(case.args) actual = Solution().circularArrayLoop(**case.args.__dict__) self.assertEqual(case.expected, actual, msg=args) if __name__ == '__main__': unittest.main()
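# A short worked example of the algorithm above, using the classic cases from
# the problem statement: in [2, -1, 1, 2, 2] the indices 0 -> 2 -> 3 -> 0 form
# a forward-moving loop, while [-1, 2] only yields a single-element "loop",
# which the nxt != curr check rejects.
def _cycle_demo():
    assert Solution().circularArrayLoop([2, -1, 1, 2, 2]) is True
    assert Solution().circularArrayLoop([-1, 2]) is False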
unlicense
-3,434,594,329,990,518,000
22.296875
77
0.441985
false
emijrp/robobrowser
robobrowser/cache.py
5
2908
""" Caching utilities for robotic browsers. Credit to https://github.com/Lukasa/httpcache """ import logging import datetime from requests.adapters import HTTPAdapter from robobrowser.compat import OrderedDict, iteritems logger = logging.getLogger(__name__) # Modified from https://github.com/Lukasa/httpcache/blob/master/httpcache/cache.py # RoboBrowser should only cache GET requests; HEAD and OPTIONS not exposed CACHE_VERBS = ['GET'] CACHE_CODES = [200, 203, 300, 301, 410] class RoboCache(object): def __init__(self, max_age=None, max_count=None): self.data = OrderedDict() self.max_age = max_age self.max_count = max_count def _reduce_age(self, now): """Reduce size of cache by date. :param datetime.datetime now: Current time """ if self.max_age: keys = [ key for key, value in iteritems(self.data) if now - value['date'] > self.max_age ] for key in keys: del self.data[key] def _reduce_count(self): """Reduce size of cache by count. """ if self.max_count: while len(self.data) > self.max_count: self.data.popitem(last=False) def store(self, response): """Store response in cache, skipping if code is forbidden. :param requests.Response response: HTTP response """ if response.status_code not in CACHE_CODES: return now = datetime.datetime.now() self.data[response.url] = { 'date': now, 'response': response, } logger.info('Stored response in cache') self._reduce_age(now) self._reduce_count() def retrieve(self, request): """Look up request in cache, skipping if verb is forbidden. :param requests.Request request: HTTP request """ if request.method not in CACHE_VERBS: return try: response = self.data[request.url]['response'] logger.info('Retrieved response from cache') return response except KeyError: return None def clear(self): "Clear cache." self.data = OrderedDict() class RoboHTTPAdapter(HTTPAdapter): def __init__(self, max_age=None, max_count=None, **kwargs): super(RoboHTTPAdapter, self).__init__(**kwargs) self.cache = RoboCache(max_age=max_age, max_count=max_count) def send(self, request, **kwargs): cached_resp = self.cache.retrieve(request) if cached_resp is not None: return cached_resp else: return super(RoboHTTPAdapter, self).send(request, **kwargs) def build_response(self, request, response): resp = super(RoboHTTPAdapter, self).build_response(request, response) self.cache.store(resp) return resp
bsd-3-clause
4,901,131,204,689,519,000
28.08
82
0.600757
false
hkariti/ansible
lib/ansible/module_utils/facts/system/apparmor.py
232
1311
# Collect facts related to apparmor # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ansible.module_utils.facts.collector import BaseFactCollector class ApparmorFactCollector(BaseFactCollector): name = 'apparmor' _fact_ids = set() def collect(self, module=None, collected_facts=None): facts_dict = {} apparmor_facts = {} if os.path.exists('/sys/kernel/security/apparmor'): apparmor_facts['status'] = 'enabled' else: apparmor_facts['status'] = 'disabled' facts_dict['apparmor'] = apparmor_facts return facts_dict
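# A standalone sketch of the same probe the collector performs above:
# AppArmor is reported enabled exactly when its securityfs directory exists.
def apparmor_status():
    return 'enabled' if os.path.exists('/sys/kernel/security/apparmor') else 'disabled'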
gpl-3.0
4,839,400,280,262,603,000
32.615385
70
0.707094
false
MiLk/ansible
test/units/modules/network/junos/test_junos_rpc.py
43
3370
# (c) 2017 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type try: from lxml.etree import tostring except ImportError: from xml.etree.ElementTree import tostring from ansible.compat.tests.mock import patch from ansible.modules.network.junos import junos_rpc from .junos_module import TestJunosModule, load_fixture, set_module_args RPC_CLI_MAP = { 'get-software-information': 'show version', 'get-interface-information': 'show interfaces details', 'get-system-memory-information': 'show system memory', 'get-chassis-inventory': 'show chassis hardware', 'get-system-storage': 'show system storage' } class TestJunosCommandModule(TestJunosModule): module = junos_rpc def setUp(self): self.mock_send_request = patch('ansible.modules.network.junos.junos_rpc.send_request') self.send_request = self.mock_send_request.start() def tearDown(self): self.mock_send_request.stop() def load_fixtures(self, commands=None, format='text', changed=False): def load_from_file(*args, **kwargs): module, element = args if element.text: path = str(element.text) else: tag = str(element.tag) if tag.startswith('{'): tag = tag.split('}', 1)[1] path = RPC_CLI_MAP[tag] filename = path.replace(' ', '_') filename = '%s_%s.txt' % (filename, format) return load_fixture(filename) self.send_request.side_effect = load_from_file def test_junos_rpc_xml(self): set_module_args(dict(rpc='get-chassis-inventory')) result = self.execute_module(format='xml') self.assertTrue(result['xml'].find('<chassis-inventory>\n')) def test_junos_rpc_text(self): set_module_args(dict(rpc='get-software-information', output='text')) result = self.execute_module(format='text') self.assertTrue(result['output_lines'][0].startswith('Hostname: vsrx01')) def test_junos_rpc_json(self): set_module_args(dict(rpc='get-software-information', output='json')) result = self.execute_module(format='json') self.assertTrue('software-information' in result['output']) def test_junos_rpc_args(self): set_module_args(dict(rpc='get-software-information', args={'interface': 'em0', 'media': True})) result = self.execute_module(format='xml') args, kwargs = self.send_request.call_args reply = tostring(args[1]).decode() self.assertTrue(reply.find('<interface>em0</interface><media /></get-software-information>'))
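# A small sketch of the fixture-name derivation in load_fixtures() above: the
# RPC tag maps through RPC_CLI_MAP to a CLI string, whose spaces become
# underscores before the format suffix is appended.
def fixture_name(rpc_tag, fmt='text'):
    path = RPC_CLI_MAP[rpc_tag]              # e.g. 'show version'
    return '%s_%s.txt' % (path.replace(' ', '_'), fmt)

# fixture_name('get-software-information') == 'show_version_text.txt'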
gpl-3.0
1,790,549,385,918,700,800
36.444444
103
0.664985
false
palladius/gcloud
packages/gsutil/boto/boto/gs/key.py
51
31785
# Copyright 2010 Google Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import base64 import binascii import os import re import StringIO from boto.exception import BotoClientError from boto.s3.key import Key as S3Key from boto.s3.keyfile import KeyFile class Key(S3Key): """ Represents a key (object) in a GS bucket. :ivar bucket: The parent :class:`boto.gs.bucket.Bucket`. :ivar name: The name of this Key object. :ivar metadata: A dictionary containing user metadata that you wish to store with the object or that has been retrieved from an existing object. :ivar cache_control: The value of the `Cache-Control` HTTP header. :ivar content_type: The value of the `Content-Type` HTTP header. :ivar content_encoding: The value of the `Content-Encoding` HTTP header. :ivar content_disposition: The value of the `Content-Disposition` HTTP header. :ivar content_language: The value of the `Content-Language` HTTP header. :ivar etag: The `etag` associated with this object. :ivar last_modified: The string timestamp representing the last time this object was modified in GS. :ivar owner: The ID of the owner of this object. :ivar storage_class: The storage class of the object. Currently, one of: STANDARD | DURABLE_REDUCED_AVAILABILITY. :ivar md5: The MD5 hash of the contents of the object. :ivar size: The size, in bytes, of the object. :ivar generation: The generation number of the object. :ivar meta_generation: The generation number of the object metadata. :ivar encrypted: Whether the object is encrypted while at rest on the server. 
""" generation = None meta_generation = None def __repr__(self): if self.generation and self.meta_generation: ver_str = '#%s.%s' % (self.generation, self.meta_generation) else: ver_str = '' if self.bucket: return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str) else: return '<Key: None,%s%s>' % (self.name, ver_str) def endElement(self, name, value, connection): if name == 'Key': self.name = value elif name == 'ETag': self.etag = value elif name == 'IsLatest': if value == 'true': self.is_latest = True else: self.is_latest = False elif name == 'LastModified': self.last_modified = value elif name == 'Size': self.size = int(value) elif name == 'StorageClass': self.storage_class = value elif name == 'Owner': pass elif name == 'VersionId': self.version_id = value elif name == 'Generation': self.generation = value elif name == 'MetaGeneration': self.meta_generation = value else: setattr(self, name, value) def handle_version_headers(self, resp, force=False): self.meta_generation = resp.getheader('x-goog-metageneration', None) self.generation = resp.getheader('x-goog-generation', None) def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False, version_id=None, override_num_retries=None, response_headers=None): query_args = None if self.generation: query_args = ['generation=%s' % self.generation] self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb, override_num_retries=override_num_retries, response_headers=response_headers, query_args=query_args) def delete(self): return self.bucket.delete_key(self.name, version_id=self.version_id, generation=self.generation) def add_email_grant(self, permission, email_address): """ Convenience method that provides a quick way to add an email grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google account to which you are granting the permission. """ acl = self.get_acl() acl.add_email_grant(permission, email_address) self.set_acl(acl) def add_user_grant(self, permission, user_id): """ Convenience method that provides a quick way to add a canonical user grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type user_id: string :param user_id: The canonical user id associated with the GS account to which you are granting the permission. """ acl = self.get_acl() acl.add_user_grant(permission, user_id) self.set_acl(acl) def add_group_email_grant(self, permission, email_address, headers=None): """ Convenience method that provides a quick way to add an email group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. 
Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google Group to which you are granting the permission. """ acl = self.get_acl(headers=headers) acl.add_group_email_grant(permission, email_address) self.set_acl(acl, headers=headers) def add_group_grant(self, permission, group_id): """ Convenience method that provides a quick way to add a canonical group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type group_id: string :param group_id: The canonical group id associated with the Google Groups account you are granting the permission to. """ acl = self.get_acl() acl.add_group_grant(permission, group_id) self.set_acl(acl) def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, res_upload_handler=None, size=None, rewind=False, if_generation=None): """ Store an object in GS using the name of the Key object as the key in GS and the contents of the file pointed to by 'fp' as the contents. :type fp: file :param fp: the file whose contents are to be uploaded :type headers: dict :param headers: additional HTTP headers to be sent with the PUT request. :type replace: bool :param replace: If this parameter is False, the method will first check to see if an object exists in the bucket with the same key. If it does, it won't overwrite it. The default value is True which will overwrite the object. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the total number of bytes that need to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter, this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type res_upload_handler: ResumableUploadHandler :param res_upload_handler: If provided, this handler will perform the upload. :type size: int :param size: (optional) The Maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where you are splitting the file up into different ranges to be uploaded. If not specified, the default behaviour is to read all bytes from the file pointer. Less bytes may be available. Notes: 1. 
The "size" parameter currently cannot be used when a resumable upload handler is given but is still useful for uploading part of a file as implemented by the parent class. 2. At present Google Cloud Storage does not support multipart uploads. :type rewind: bool :param rewind: (optional) If True, the file pointer (fp) will be rewound to the start before any bytes are read from it. The default behaviour is False which reads from the current position of the file pointer (fp). :type if_generation: int :param if_generation: (optional) If set to a generation number, the object will only be written to if its current generation number is this value. If set to the value 0, the object will only be written if it doesn't already exist. :rtype: int :return: The number of bytes written to the key. TODO: At some point we should refactor the Bucket and Key classes, to move functionality common to all providers into a parent class, and provider-specific functionality into subclasses (rather than just overriding/sharing code the way it currently works). """ provider = self.bucket.connection.provider if res_upload_handler and size: # could use size instead of file_length if provided but... raise BotoClientError('"size" param not supported for resumable uploads.') headers = headers or {} if policy: headers[provider.acl_header] = policy if rewind: # caller requests reading from beginning of fp. fp.seek(0, os.SEEK_SET) else: # The following seek/tell/seek logic is intended # to detect applications using the older interface to # set_contents_from_file(), which automatically rewound the # file each time the Key was reused. This changed with commit # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads # split into multiple parts and uploaded in parallel, and at # the time of that commit this check was added because otherwise # older programs would get a success status and upload an empty # object. Unfortuantely, it's very inefficient for fp's implemented # by KeyFile (used, for example, by gsutil when copying between # providers). So, we skip the check for the KeyFile case. # TODO: At some point consider removing this seek/tell/seek # logic, after enough time has passed that it's unlikely any # programs remain that assume the older auto-rewind interface. if not isinstance(fp, KeyFile): spos = fp.tell() fp.seek(0, os.SEEK_END) if fp.tell() == spos: fp.seek(0, os.SEEK_SET) if fp.tell() != spos: # Raise an exception as this is likely a programming # error whereby there is data before the fp but nothing # after it. fp.seek(spos) raise AttributeError('fp is at EOF. Use rewind option ' 'or seek() to data start.') # seek back to the correct position. fp.seek(spos) if hasattr(fp, 'name'): self.path = fp.name if self.bucket != None: if isinstance(fp, KeyFile): # Avoid EOF seek for KeyFile case as it's very inefficient. key = fp.getkey() size = key.size - fp.tell() self.size = size # At present both GCS and S3 use MD5 for the etag for # non-multipart-uploaded objects. If the etag is 32 hex # chars use it as an MD5, to avoid having to read the file # twice while transferring. 
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)): etag = key.etag.strip('"') md5 = (etag, base64.b64encode(binascii.unhexlify(etag))) if size: self.size = size else: # If md5 is provided, still need to size so # calculate based on bytes to end of content spos = fp.tell() fp.seek(0, os.SEEK_END) self.size = fp.tell() - spos fp.seek(spos) size = self.size if md5 == None: md5 = self.compute_md5(fp, size) self.md5 = md5[0] self.base64md5 = md5[1] if self.name == None: self.name = self.md5 if not replace: if self.bucket.lookup(self.name): return if if_generation is not None: headers['x-goog-if-generation-match'] = str(if_generation) if res_upload_handler: res_upload_handler.send_file(self, fp, headers, cb, num_cb) else: # Not a resumable transfer so use basic send_file mechanism. self.send_file(fp, headers, cb, num_cb, size=size) def set_contents_from_filename(self, filename, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, reduced_redundancy=None, res_upload_handler=None, if_generation=None): """ Store an object in GS using the name of the Key object as the key in GS and the contents of the file named by 'filename'. See set_contents_from_file method for details about the parameters. :type filename: string :param filename: The name of the file that you want to put onto GS :type headers: dict :param headers: Additional headers to pass along with the request to GS. :type replace: bool :param replace: If True, replaces the contents of the file if it already exists. :type cb: function :param cb: (optional) a callback function that will be called to report progress on the download. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted from GS and the second representing the total number of bytes that need to be transmitted. :type cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type res_upload_handler: ResumableUploadHandler :param res_upload_handler: If provided, this handler will perform the upload. :type if_generation: int :param if_generation: (optional) If set to a generation number, the object will only be written to if its current generation number is this value. If set to the value 0, the object will only be written if it doesn't already exist. """ # Clear out any previously computed md5 hashes, since we are setting the content. 
self.md5 = None self.base64md5 = None fp = open(filename, 'rb') self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy, md5, res_upload_handler, if_generation=if_generation) fp.close() def set_contents_from_string(self, s, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, if_generation=None): """ Store an object in S3 using the name of the Key object as the key in S3 and the string 's' as the contents. See set_contents_from_file method for details about the parameters. :type headers: dict :param headers: Additional headers to pass along with the request to AWS. :type replace: bool :param replace: If True, replaces the contents of the file if it already exists. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to S3 and the second representing the size of the to be transmitted object. :type cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.s3.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in S3. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type if_generation: int :param if_generation: (optional) If set to a generation number, the object will only be written to if its current generation number is this value. If set to the value 0, the object will only be written if it doesn't already exist. """ # Clear out any previously computed md5 hashes, since we are setting the content. self.md5 = None self.base64md5 = None if isinstance(s, unicode): s = s.encode("utf-8") fp = StringIO.StringIO(s) r = self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy, md5, if_generation=if_generation) fp.close() return r def set_contents_from_stream(self, *args, **kwargs): """ Store an object using the name of the Key object as the key in cloud and the contents of the data stream pointed to by 'fp' as the contents. The stream object is not seekable and total size is not known. This has the implication that we can't specify the Content-Size and Content-MD5 in the header. So for huge uploads, the delay in calculating MD5 is avoided but with a penalty of inability to verify the integrity of the uploaded data. :type fp: file :param fp: the file whose contents are to be uploaded :type headers: dict :param headers: additional HTTP headers to be sent with the PUT request. :type replace: bool :param replace: If this parameter is False, the method will first check to see if an object exists in the bucket with the same key. If it does, it won't overwrite it. The default value is True which will overwrite the object. :type cb: function :param cb: a callback function that will be called to report progress on the upload. 
The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the total number of bytes that need to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter, this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type reduced_redundancy: bool :param reduced_redundancy: If True, this will set the storage class of the new Key to be REDUCED_REDUNDANCY. The Reduced Redundancy Storage (RRS) feature of S3, provides lower redundancy at lower storage cost. :type size: int :param size: (optional) The Maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where you are splitting the file up into different ranges to be uploaded. If not specified, the default behaviour is to read all bytes from the file pointer. Less bytes may be available. :type if_generation: int :param if_generation: (optional) If set to a generation number, the object will only be written to if its current generation number is this value. If set to the value 0, the object will only be written if it doesn't already exist. """ if_generation = kwargs.pop('if_generation', None) if if_generation is not None: headers = kwargs.get('headers', {}) headers['x-goog-if-generation-match'] = str(if_generation) kwargs['headers'] = headers super(Key, self).set_contents_from_stream(*args, **kwargs) def set_acl(self, acl_or_str, headers=None, generation=None, if_generation=None, if_metageneration=None): """Sets the ACL for this object. :type acl_or_str: string or :class:`boto.gs.acl.ACL` :param acl_or_str: A canned ACL string (see :data:`~.gs.acl.CannedACLStrings`) or an ACL object. :type headers: dict :param headers: Additional headers to set during the request. :type generation: int :param generation: If specified, sets the ACL for a specific generation of a versioned object. If not specified, the current version is modified. :type if_generation: int :param if_generation: (optional) If set to a generation number, the acl will only be updated if its current generation number is this value. :type if_metageneration: int :param if_metageneration: (optional) If set to a metageneration number, the acl will only be updated if its current metageneration number is this value. """ if self.bucket != None: self.bucket.set_acl(acl_or_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration) def get_acl(self, headers=None, generation=None): """Returns the ACL of this object. :param dict headers: Additional headers to set during the request. :param int generation: If specified, gets the ACL for a specific generation of a versioned object. If not specified, the current version is returned. :rtype: :class:`.gs.acl.ACL` """ if self.bucket != None: return self.bucket.get_acl(self.name, headers=headers, generation=generation) def get_xml_acl(self, headers=None, generation=None): """Returns the ACL string of this object. :param dict headers: Additional headers to set during the request. :param int generation: If specified, gets the ACL for a specific generation of a versioned object. If not specified, the current version is returned. 
:rtype: str """ if self.bucket != None: return self.bucket.get_xml_acl(self.name, headers=headers, generation=generation) def set_xml_acl(self, acl_str, headers=None, generation=None, if_generation=None, if_metageneration=None): """Sets this object's ACL to an XML string. :type acl_str: string :param acl_str: A string containing the ACL XML. :type headers: dict :param headers: Additional headers to set during the request. :type generation: int :param generation: If specified, sets the ACL for a specific generation of a versioned object. If not specified, the current version is modified. :type if_generation: int :param if_generation: (optional) If set to a generation number, the acl will only be updated if its current generation number is this value. :type if_metageneration: int :param if_metageneration: (optional) If set to a metageneration number, the acl will only be updated if its current metageneration number is this value. """ if self.bucket != None: return self.bucket.set_xml_acl(acl_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration) def set_canned_acl(self, acl_str, headers=None, generation=None, if_generation=None, if_metageneration=None): """Sets this object's ACL using a predefined (canned) value. :type acl_str: string :param acl_str: A canned ACL string. See :data:`~.gs.acl.CannedACLStrings`. :type headers: dict :param headers: Additional headers to set during the request. :type generation: int :param generation: If specified, sets the ACL for a specific generation of a versioned object. If not specified, the current version is modified. :type if_generation: int :param if_generation: (optional) If set to a generation number, the acl will only be updated if its current generation number is this value. :type if_metageneration: int :param if_metageneration: (optional) If set to a metageneration number, the acl will only be updated if its current metageneration number is this value. """ if self.bucket != None: return self.bucket.set_canned_acl( acl_str, self.name, headers=headers, generation=generation, if_generation=if_generation, if_metageneration=if_metageneration )
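A minimal, hedged usage sketch of the GS Key API above: the connection helper, bucket name, object name, and local path are illustrative assumptions and not part of this file.

# Hedged sketch: exercises set_contents_from_filename and set_acl as defined
# above. 'boto.connect_gs' is assumed to be configured with GS credentials;
# bucket/key names and the local path are hypothetical.
import boto

conn = boto.connect_gs()                      # assumes GS credentials are set up
bucket = conn.get_bucket('example-bucket')    # hypothetical bucket
key = bucket.new_key('reports/report.txt')    # hypothetical object name

# Per the docstring above, if_generation=0 means "write only if the object
# does not already exist"; the server rejects the write otherwise.
key.set_contents_from_filename('/tmp/report.txt', if_generation=0)

# Apply a canned ACL string to the freshly written object.
key.set_acl('public-read')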
gpl-3.0
7,293,693,749,808,680,000
44.149148
91
0.607016
false
eReuse/DeviceHub
ereuse_devicehub/security/request_auth.py
1
2169
import requests
from flask import current_app
from requests.auth import AuthBase


class Auth(AuthBase):
    """
    Handles the authorization method. If there is no available token for us, it logs in and stores the token.

    Appends the token to the header accordingly.
    """

    def __init__(self, domain: str, email: str, password: str, login_path: str = 'login',
                 auth_header_title='Basic'):
        self.domain = domain
        self.email = email
        self.password = password
        self.token = None
        self.login_path = login_path
        self.auth_header_title = auth_header_title

    def __call__(self, r):
        if self.token is None:
            self.token = self.login()
        r.headers['Authorization'] = '{} {}'.format(self.auth_header_title, self.token)
        return r

    def login(self):
        account = {
            'email': self.email,
            'password': self.password
        }
        r = requests.post('{}/{}'.format(self.domain, self.login_path), json=account)
        data = r.json()
        return data['token']


class AgentAuth(Auth):
    """
    Handles authorization for agents whose credentials are stored in the 'AGENT_ACCOUNTS' config dict.

    The 'AGENT_ACCOUNTS' dict has the following structure:
    { base_url|'self': (email_of_agent_account, password_of_agent_account)}

    Like in the following example:
    {'self': ('[email protected]', '12345'), 'https://example.com': ('[email protected]', '123')}

    Note that 'self' is a reserved keyword that is interpreted as the own DeviceHub.
    """

    def __init__(self, base_url: str, **kwargs):
        if current_app.config['BASE_URL_FOR_AGENTS'] in base_url:
            base_url = 'self'
        # To use this in another thread we should remove 'current_app' and get the configuration file through
        # another way
        email, password = current_app.config['AGENT_ACCOUNTS'][base_url]
        login_path = kwargs.get('login_path', 'login')
        auth_header_title = kwargs.get('auth_header_title', 'Basic')
        super().__init__(base_url, email, password, login_path, auth_header_title)
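A short, hedged sketch of how the Auth class above plugs into requests; the DeviceHub URL and credentials are placeholders, and the server is assumed (per login() above) to expose POST <domain>/login returning {'token': ...}.

# Hedged sketch: Auth subclasses requests' AuthBase, so it can be passed via
# the 'auth' keyword. It logs in lazily on the first request and caches the token.
import requests

auth = Auth(domain='https://devicehub.example.com',  # placeholder URL
            email='[email protected]',                  # placeholder credentials
            password='secret')

# The first call triggers Auth.login(); every call then gets the
# 'Authorization: Basic <token>' header set by Auth.__call__.
r = requests.get('https://devicehub.example.com/devices', auth=auth)
print(r.status_code)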
agpl-3.0
-6,317,574,629,146,352,000
35.15
120
0.616413
false
lancezlin/ml_template_py
lib/python2.7/site-packages/prompt_toolkit/key_binding/vi_state.py
20
1734
from __future__ import unicode_literals

__all__ = (
    'InputMode',
    'CharacterFind',
    'ViState',
)


class InputMode(object):
    INSERT = 'vi-insert'
    INSERT_MULTIPLE = 'vi-insert-multiple'
    NAVIGATION = 'vi-navigation'
    REPLACE = 'vi-replace'


class CharacterFind(object):
    def __init__(self, character, backwards=False):
        self.character = character
        self.backwards = backwards


class ViState(object):
    """
    Mutable class to hold the state of the Vi navigation.
    """
    def __init__(self):
        #: None or CharacterFind instance. (This is used to repeat the last
        #: search in Vi mode, by pressing the 'n' or 'N' in navigation mode.)
        self.last_character_find = None

        # When an operator is given and we are waiting for text object,
        # -- e.g. in the case of 'dw', after the 'd' --, an operator callback
        # is set here.
        self.operator_func = None
        self.operator_arg = None

        #: Named registers. Maps register name (e.g. 'a') to
        #: :class:`ClipboardData` instances.
        self.named_registers = {}

        #: The Vi mode we're currently in.
        self.input_mode = InputMode.INSERT

        #: Waiting for digraph.
        self.waiting_for_digraph = False
        self.digraph_symbol1 = None  # (None or a symbol.)

        #: When true, make ~ act as an operator.
        self.tilde_operator = False

    def reset(self, mode=InputMode.INSERT):
        """
        Reset state, go back to the given mode. INSERT by default.
        """
        # Go back to insert mode.
        self.input_mode = mode

        self.waiting_for_digraph = False
        self.operator_func = None
        self.operator_arg = None
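A hedged sketch of how a key-binding handler might drive ViState; the surrounding handler is illustrative, and only ViState, InputMode, and CharacterFind come from the module above.

# Hedged sketch: simulate an Escape into navigation mode, record an 'f x'
# motion for later repetition, then reset back to insert mode.
state = ViState()

# Entering navigation mode (e.g. after pressing Escape).
state.input_mode = InputMode.NAVIGATION

# Remember a forward character find so a repeat key can replay it.
state.last_character_find = CharacterFind('x', backwards=False)

# Back to insert mode, clearing any pending operator/digraph state.
state.reset(InputMode.INSERT)
assert state.input_mode == InputMode.INSERT and state.operator_func is None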
mit
-7,611,011,695,079,861,000
27.42623
77
0.599193
false
mtford90/silk
silk/management/commands/silk_clear_request_log.py
2
1832
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection

import silk.models


class Command(BaseCommand):
    help = "Clears silk's log of requests."

    @staticmethod
    def delete_model(model):
        engine = settings.DATABASES['default']['ENGINE']
        table = model._meta.db_table
        if 'mysql' in engine or 'postgresql' in engine:
            # Use "TRUNCATE" on the table
            with connection.cursor() as cursor:
                if 'mysql' in engine:
                    cursor.execute("SET FOREIGN_KEY_CHECKS=0;")
                    cursor.execute("TRUNCATE TABLE {0}".format(table))
                    cursor.execute("SET FOREIGN_KEY_CHECKS=1;")
                elif 'postgres' in engine:
                    cursor.execute("ALTER TABLE {0} DISABLE TRIGGER USER;".format(table))
                    cursor.execute("TRUNCATE TABLE {0} CASCADE".format(table))
                    cursor.execute("ALTER TABLE {0} ENABLE TRIGGER USER;".format(table))
            return

        # Manually delete rows because sqlite does not support TRUNCATE and
        # oracle doesn't provide good support for disabling foreign key checks
        while True:
            items_to_delete = list(
                model.objects.values_list('pk', flat=True).all()[:1000])
            if not items_to_delete:
                break
            model.objects.filter(pk__in=items_to_delete).delete()

    def handle(self, *args, **options):
        # Django takes a long time to traverse foreign key relations,
        # so delete in the order that makes it easy.
        Command.delete_model(silk.models.Profile)
        Command.delete_model(silk.models.SQLQuery)
        Command.delete_model(silk.models.Response)
        Command.delete_model(silk.models.Request)
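A brief, hedged usage note: Django discovers the command from its module name under management/commands/, so it can be invoked from code as well as from the CLI; a project with silk installed and migrated is assumed.

# Hedged sketch: equivalent to running 'python manage.py silk_clear_request_log'.
from django.core.management import call_command

call_command('silk_clear_request_log')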
mit
9,048,619,164,179,852,000
41.604651
89
0.614629
false
buguen/pylayers
pylayers/location/algebraic/crlb.py
1
17260
import os
from numpy import *
from scipy import *
from scipy import optimize
from string import *
from numpy.linalg import *


class CRBLocation(object):
    """
    A CRBLocation contains:
    1- a set of RadioNodes (RN) with associated position accuracies (RNQoS),
    2- a set of measurements (RSS, TOA, TDOA) with associated accuracies.

    This class manages the CRB techniques.

    MEMBERS:

        RN    : An Array that defines the Radio nodes implied in
                localization (coordinates in meters)
                : shape(RN)= (2 or 3,RNnum)
        RNQoS : An Array that defines the precision of positions of RN
                (std in meters)
                : shape(RNQoS)= (2 or 3, RNnum)
        param : a set of parameters depending on the type of measurement [list].
    """

    def __init__(self, RN):
        self.RN = RN

    def info(self):
        """ Display scenario information
        """
        print "Reference Radio Nodes:\n", self.RN
        print "parameters:\n", self.param

    def FIM_RSS(self, P, RN_RSS, RSSnp, RSSStd):
        """ FIM in P of RSS positioning
        """
        shP = shape(P)
        shRN = shape(RN_RSS)
        RNnum = shRN[1]
        S = (log(10)/10)*RSSStd/RSSnp
        RNmP = RN_RSS - outer(P, ones(RNnum))
        mRNmP = sqrt(diag(dot(RNmP.T, RNmP)))
        j11 = sum(((1+S[:,0]**2)*RNmP[0,:]**2)/((S[:,0]**2)*mRNmP**4), axis=0)
        j22 = sum(((1+S[:,0]**2)*RNmP[1,:]**2)/((S[:,0]**2)*mRNmP**4), axis=0)
        j12 = j21 = sum(((1+S[:,0]**2)*(RNmP.prod(axis=0)))/((S[:,0]**2)*mRNmP**4), axis=0)
        FIM = array([[j11, j12], [j21, j22]])
        return FIM

    def FIM_TOA(self, P, RN_TOA, TOAStd):
        """ Compute the FIM in P for the given scenario
        """
        c = 3e08
        shP = shape(P)
        shRN = shape(RN_TOA)
        RNnum = shRN[1]
        RoAStd = c*TOAStd
        num = sum(1/(RoAStd**2), axis=0)[0]  # the numerator of the CRLB
        RNmP = RN_TOA - outer(P, ones(RNnum))
        mRNmP = sqrt(diag(dot(RNmP.T, RNmP)))
        j11 = sum(RNmP[0,:]**2/((RoAStd[:,0]**2)*mRNmP**2), axis=0)
        j22 = sum(RNmP[1,:]**2/((RoAStd[:,0]**2)*mRNmP**2), axis=0)
        j12 = j21 = sum((RNmP.prod(axis=0))/((RoAStd[:,0]**2)*mRNmP**2), axis=0)
        FIM = array([[j11, j12], [j21, j22]])
        return FIM

    def FIM_TDOA(self, P, RN1_TDOA, RN2_TDOA, TDOAStd):
        """ Compute the FIM in P for the given scenario

        Parameters
        ----------
        P
        RN1_TDOA
        RN2_TDOA
        TDOAStd
        """
        c = 0.3
        shP = shape(P)
        shRN = shape(RN1_TDOA)
        RNnum = shRN[1]
        RDoAStd = c*TDOAStd
        RN1mP = outer(P, ones(RNnum)) - RN1_TDOA
        mRN1mP = (sqrt(diag(dot(RN1mP.T, RN1mP)))).reshape(RNnum, 1)
        RN2mP = outer(P, ones(RNnum)) - RN2_TDOA
        mRN2mP = (sqrt(diag(dot(RN2mP.T, RN2mP)))).reshape(RNnum, 1)
        j11 = sum((1/RDoAStd[:,0]**2)*(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0])**2, axis=1)[0]
        j22 = sum((1/RDoAStd[:,0]**2)*(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0])**2, axis=1)[1]
        j12 = j21 = sum(prod((1/RDoAStd[:,0]**2)*(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0]), axis=0), axis=0)
        j12a = j21a = sum(prod((1/RDoAStd[:,0]**2)*(-RN1mP/mRN1mP[:,0]+RN2mP/mRN2mP[:,0]), axis=0), axis=0)
        FIM = array([[j11, j12], [j21, j22]])
        return FIM

    def CRB_RSS_fim(self, P, RN_RSS, RSSnp, RSSStd):
        """ computes CRB of RSS positioning as the trace of inv of fim

        Parameters
        ----------
        P
        RN_RSS
        RSSnp
        RSSStd
        """
        FIM = self.FIM_RSS(P, RN_RSS, RSSnp, RSSStd)
        return trace(inv(FIM))

    def CRB_TOA_fim(self, P, RN_TOA, TOAStd):
        """ compute the CRB in P for the given scenario as the trace of inv of fim
        """
        FIM = self.FIM_TOA(P, RN_TOA, TOAStd)
        return trace(inv(FIM))

    def CRB_TDOA_fim(self, P, RN1_TDOA, RN2_TDOA, TDOAStd):
        """ compute the CRB in P for the given scenario as the trace of inv of fim
        """
        FIM = self.FIM_TDOA(P, RN1_TDOA, RN2_TDOA, TDOAStd)
        return trace(inv(FIM))

    def CRB_RSS_TOA_fim(self, P, RN_RSS, RN_TOA, RSSnp, RSSStd, TOAStd):
        """ compute CRB of RSS/TOA positioning as the trace of inv of fim
        """
        FIM = self.FIM_RSS(P, RN_RSS, RSSnp, RSSStd) + self.FIM_TOA(P, RN_TOA, TOAStd)
        return trace(inv(FIM))

    def CRB_RSS_TDOA_fim(self, P, RN_RSS, RN1_TDOA, RN2_TDOA, RSSnp, RSSStd, TDOAStd):
        """ computes CRB of RSS/TDOA positioning as the trace of inv of fim
        """
        FIM = self.FIM_RSS(P, RN_RSS, RSSnp, RSSStd) + self.FIM_TDOA(P, RN1_TDOA, RN2_TDOA, TDOAStd)
        return trace(inv(FIM))

    def CRB_TOA_TDOA_fim(self, P, RN_TOA, RN1_TDOA, RN2_TDOA, TOAStd, TDOAStd):
        """ computes CRB of TOA/TDOA positioning as the trace of inv of fim
        """
        FIM = self.FIM_TOA(P, RN_TOA, TOAStd) + self.FIM_TDOA(P, RN1_TDOA, RN2_TDOA, TDOAStd)
        return trace(inv(FIM))

    def CRB_RSS_TOA_TDOA_fim(self, P, RN_RSS, RN_TOA, RN1_TDOA, RN2_TDOA, RSSnp, RSSStd, TOAStd, TDOAStd):
        """ computes CRB of RSS/TOA/TDOA positioning as the trace of inv of fim
        """
        FIM = self.FIM_RSS(P, RN_RSS, RSSnp, RSSStd) + self.FIM_TOA(P, RN_TOA, TOAStd) + self.FIM_TDOA(P, RN1_TDOA, RN2_TDOA, TDOAStd)
        return trace(inv(FIM))

    def MCRB_RSS_fim(self, L, RN_RSS, RSSnp, RSSStd):
        """ This computes mean CRB of RSS positioning over the area of length L
        """
        delta = L/50.0
        CRB = []
        for x in arange(0.001, L+0.1, delta):
            for y in arange(0.001, L+0.1, delta):
                P = array([[x], [y]])
                f1 = self.CRB_RSS_fim(P, RN_RSS, RSSnp, RSSStd)
                if not isnan(f1):
                    CRB.append(f1)
        moy = mean(CRB)
        return moy

    def MCRB_TOA_fim(self, L, RN_TOA, TOAStd):
        """ computes mean CRB of TOA positioning over the area of length L
        """
        delta = L/50.0
        CRB = []
        for x in arange(0.001, L+0.1, delta):
            for y in arange(0.001, L+0.1, delta):
                P = array([[x], [y]])
                f1 = self.CRB_TOA_fim(P, RN_TOA, TOAStd)
                if not isnan(f1):
                    CRB.append(f1)
        moy = mean(CRB)
        return moy

    def MCRB_TDOA_fim(self, L, RN1_TDOA, RN2_TDOA, TDOAStd):
        """ computes mean CRB of TDOA positioning over the area of length L
        """
        delta = L/50.0
        CRB = []
        for x in arange(0.001, L+0.1, delta):
            for y in arange(0.001, L+0.1, delta):
                P = array([[x], [y]])
                f1 = self.CRB_TDOA_fim(P, RN1_TDOA, RN2_TDOA, TDOAStd)
                if not isnan(f1):
                    CRB.append(f1)
        moy = mean(CRB)
        return moy

    def MCRB_RSS_TOA_fim(self, L, RN_RSS, RN_TOA, RSSnp, RSSStd, TOAStd):
        """ computes mean CRB of RSS/TOA positioning over the area of length L
        """
        delta = L/50.0
        CRB = []
        for x in arange(0.001, L+0.1, delta):
            for y in arange(0.001, L+0.1, delta):
                P = array([[x], [y]])
                f1 = self.CRB_RSS_TOA_fim(P, RN_RSS, RN_TOA, RSSnp, RSSStd, TOAStd)
                if not isnan(f1):
                    CRB.append(f1)
        moy = mean(CRB)
        return moy

    def MCRB_RSS_TDOA_fim(self, L, RN_RSS, RN1_TDOA, RN2_TDOA, RSSnp, RSSStd, TDOAStd):
        """ This computes mean CRB of RSS/TDOA positioning over the area of length L
        """
        delta = L/50.0
        CRB = []
        for x in arange(0.001, L+0.1, delta):
            for y in arange(0.001, L+0.1, delta):
                P = array([[x], [y]])
                f1 = self.CRB_RSS_TDOA_fim(P, RN_RSS, RN1_TDOA, RN2_TDOA, RSSnp, RSSStd, TDOAStd)
                if not isnan(f1):
                    CRB.append(f1)
        moy = mean(CRB)
        return moy

    def MCRB_TOA_TDOA_fim(self, L, RN_TOA, RN1_TDOA, RN2_TDOA, TOAStd, TDOAStd):
        """ This computes mean CRB of TOA/TDOA positioning over the area of length L
        """
        delta = L/50.0
        CRB = []
        for x in arange(0.001, L+0.1, delta):
            for y in arange(0.001, L+0.1, delta):
                P = array([[x], [y]])
                f1 = self.CRB_TOA_TDOA_fim(P, RN_TOA, RN1_TDOA, RN2_TDOA, TOAStd, TDOAStd)
                if not isnan(f1):
                    CRB.append(f1)
        moy = mean(CRB)
        return moy

    def MCRB_RSS_TOA_TDOA_fim(self, L, RN_RSS, RN_TOA, RN1_TDOA, RN2_TDOA, RSSnp, RSSStd, TOAStd, TDOAStd):
        """ This computes mean CRB of RSS/TOA/TDOA positioning over the area of length L
        """
        delta = L/50.0
        CRB = []
        for x in arange(0.001, L+0.1, delta):
            for y in arange(0.001, L+0.1, delta):
                P = array([[x], [y]])
                f1 = self.CRB_RSS_TOA_TDOA_fim(P, RN_RSS, RN_TOA, RN1_TDOA, RN2_TDOA, RSSnp, RSSStd, TOAStd, TDOAStd)
                if not isnan(f1):
                    CRB.append(f1)
        moy = mean(CRB)
        return moy

    def CRB_RSS(self, P, RN_RSS, RSSnp, RSSStd):
        """ This computes CRB of RSS positioning
        """
        shP = shape(P)
        shRN = shape(RN_RSS)
        RNnum = shRN[1]
        S = (log(10)/10)*RSSStd/RSSnp
        RNmP = RN_RSS - outer(P, ones(RNnum))
        mRNmP = sqrt(diag(dot(RNmP.T, RNmP)))
        num = sum((1+S[:,0]**2)/((S[:,0]**2)*mRNmP**2), axis=0)  # the numerator of the CRLB
        div1 = sum(((1+S[:,0]**2)*RNmP**2)/((S[:,0]**2)*mRNmP**4), axis=1).reshape(shP)
        don1 = div1.prod(axis=0)  # [0] first term of the denominator
        div2 = sum(((1+S[:,0]**2)*(RNmP.prod(axis=0)))/((S[:,0]**2)*mRNmP**4), axis=0)
        don2 = div2**2  # second term of the denominator
        CRB = num/(don1-don2)  # the CRB
        return CRB

    def CRB_TOA(self, P, RN_TOA, TOAStd):
        """ Compute the CRB in P for the given scenario
        """
        c = 3e08
        shP = shape(P)
        shRN = shape(RN_TOA)
        RNnum = shRN[1]
        RoAStd = c*TOAStd
        num = sum(1/(RoAStd**2), axis=0)[0]  # the numerator of the CRLB
        RNmP = RN_TOA - outer(P, ones(RNnum))
        mRNmP = sqrt(diag(dot(RNmP.T, RNmP)))
        div1 = sum(RNmP**2/((RoAStd[:,0]**2)*mRNmP**2), axis=1).reshape(shP)
        don1 = div1.prod(axis=0)[0]  # first term of the denominator
        div2 = sum((RNmP.prod(axis=0))/((RoAStd[:,0]**2)*mRNmP**2), axis=0)
        don2 = div2**2  # second term of the denominator
        CRB = num/(don1-don2)  # the CRB
        return CRB

    def CRB_TDOA(self, P, RN1_TDOA, RN2_TDOA, TDOAStd):
        """ Compute the CRB in P for the given scenario
        """
        c = 3e08
        shP = shape(P)
        shRN = shape(RN1_TDOA)
        RNnum = shRN[1]
        RDoAStd = c*TDOAStd
        RN1mP = outer(P, ones(RNnum)) - RN1_TDOA
        mRN1mP = (sqrt(diag(dot(RN1mP.T, RN1mP)))).reshape(RNnum, 1)
        RN2mP = outer(P, ones(RNnum)) - RN2_TDOA
        mRN2mP = (sqrt(diag(dot(RN2mP.T, RN2mP)))).reshape(RNnum, 1)
        num = sum(2/(RDoAStd[:,0]**2)*(1-sum((RN1mP/mRN1mP[:,0])*(RN2mP/mRN2mP[:,0]), axis=0)), axis=0)  # the numerator of the CRLB
        div1 = sum((1/RDoAStd[:,0]**2)*(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0])**2, axis=1).reshape(shP)
        don1 = div1.prod(axis=0)[0]  # first term of the denominator
        div2 = sum(prod((1/RDoAStd[:,0]**2)*(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0]), axis=0), axis=0)
        don2 = div2**2  # second term of the denominator
        CRB = num/(don1-don2)  # the CRB
        return CRB

    def Angle_RSS(self, P, RN_RSS, RSSnp, RSSStd):
        """ This computes CRB of RSS positioning
        """
        shP = shape(P)
        shRN = shape(RN_RSS)
        RNnum = shRN[1]
        angle = 0.0
        S = (log(10)/10)*RSSStd/RSSnp
        RNmP = RN_RSS - outer(P, ones(RNnum))
        mRNmP = sqrt(diag(dot(RNmP.T, RNmP)))
        num = sum((1+S[:,0]**2)/((S[:,0]**2)*mRNmP**2), axis=0)  # the numerator of the CRLB
        for i in range(shRN[1]):
            for j in range(shRN[1]):
                #angle=angle+((RNmP[0,i]*RNmP[1,j])/(mRNmP[i]*mRNmP[j])-(RNmP[0,j]*RNmP[1,i])/(mRNmP[j]*mRNmP[i]))**2
                angle = angle+(((RNmP[0,i]*RNmP[1,j])/(mRNmP[i]*mRNmP[j])-(RNmP[0,j]*RNmP[1,i])/(mRNmP[j]*mRNmP[i]))**2)*(((1+S[i,:]**2)*(1+S[j,:]**2))/(2*(S[i,:]*S[j,:]*mRNmP[j]*mRNmP[i])**2))
        return num/angle

    def Angle_TOA(self, P, RN_TOA, TOAStd):
        """ This computes CRB of TOA positioning
        """
        c = 3e08
        shP = shape(P)
        shRN = shape(RN_TOA)
        RNnum = shRN[1]
        angle = 0.0
        RNmP = RN_TOA - outer(P, ones(RNnum))
        mRNmP = sqrt(diag(dot(RNmP.T, RNmP)))
        RoAStd = c*TOAStd
        num = sum(1/(RoAStd**2), axis=0)[0]
        for i in range(shRN[1]):
            for j in range(shRN[1]):
                #angle=angle+((RNmP[0,i]*RNmP[1,j])/(mRNmP[i]*mRNmP[j])-(RNmP[0,j]*RNmP[1,i])/(mRNmP[j]*mRNmP[i]))**2
                angle = angle+(((RNmP[0,i]*RNmP[1,j])/(mRNmP[i]*mRNmP[j])-(RNmP[0,j]*RNmP[1,i])/(mRNmP[j]*mRNmP[i]))**2)/(2*(RoAStd[i,:]*RoAStd[j,:])**2)
        return num/angle

    def Angle_TDOA(self, P, RN1_TDOA, RN2_TDOA, TDOAStd):
        """ Compute the CRB in P for the given scenario
        """
        c = 3e08
        shP = shape(P)
        shRN = shape(RN1_TDOA)
        RNnum = shRN[1]
        RDoAStd = c*TDOAStd
        angle = 0.0
        RN1mP = outer(P, ones(RNnum)) - RN1_TDOA
        mRN1mP = (sqrt(diag(dot(RN1mP.T, RN1mP)))).reshape(RNnum, 1)
        RN2mP = outer(P, ones(RNnum)) - RN2_TDOA
        mRN2mP = (sqrt(diag(dot(RN2mP.T, RN2mP)))).reshape(RNnum, 1)
        num = 0.0
        for i in range(shRN[1]):
            num = num+(1-(RN1mP[0,i]*RN2mP[0,i])/(mRN1mP[i]*mRN2mP[i])-(RN1mP[1,i]*RN2mP[1,i])/(mRN1mP[i]*mRN2mP[i]))/RDoAStd[i,0]**2
        div1 = sum((1/RDoAStd[:,0]**2)*(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0])**2, axis=1).reshape(shP)
        don1 = div1.prod(axis=0)[0]  # first term of the denominator
        div2 = sum(prod((1/RDoAStd[:,0]**2)*(-RN1mP/mRN1mP[:,0]+RN2mP/mRN2mP[:,0]), axis=0), axis=0)
        don2 = div2**2
        for i in range(shRN[1]):
            for j in range(shRN[1]):
                anglei1 = (RN1mP[0,i]*RN2mP[1,i])/(mRN1mP[i]*mRN2mP[i])-(RN2mP[0,i]*RN1mP[1,i])/(mRN1mP[i]*mRN2mP[i])
                anglej1 = (RN1mP[0,j]*RN2mP[1,j])/(mRN1mP[j]*mRN2mP[j])-(RN2mP[0,j]*RN1mP[1,j])/(mRN1mP[j]*mRN2mP[j])
                angleji = (RN1mP[0,i]*RN1mP[1,j])/(mRN1mP[i]*mRN1mP[j])-(RN1mP[0,j]*RN1mP[1,i])/(mRN1mP[i]*mRN1mP[j])
                angle = angle+((angleji-anglej1+anglei1)**2)/(RDoAStd[i,0]*RDoAStd[j,0])**2
        '''print RN1_TDOA
        print RN2_TDOA
        print 2*num/(don1-don2)
        print (2*num)/(angle*0.5)
        print self.CRB_TDOA_fim(P, RN1_TDOA, RN2_TDOA, TDOAStd)'''
        return num/angle

    def P_CRB_RSS(self, P0, RN_RSS, RSSnp, RSSStd):
        """ This uses RSS CRB to compute the best position of additional reference node
        """
        #P0=array([[0.0],[0.0]])
        P = optimize.fmin(self.CRB_RSS, P0, args=(RN_RSS, RSSnp, RSSStd), xtol=1e-10, ftol=1e-10)
        return P

    def P_CRB_TOA(self, P0, RN_TOA, TOAStd):
        """ This uses TOA CRB to compute the best position of additional reference node
        """
        #P0=array([[0.0],[0.0]])
        P = optimize.fmin(self.CRB_TOA, P0, args=(RN_TOA, TOAStd), xtol=1e-10, ftol=1e-10)
        return P
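A hedged sketch of the class above in use: it evaluates the TOA CRB at one point of a toy 2D scenario. The square anchor layout and the 1 ns timing std are illustrative values only, not from the source.

# Hedged sketch: four anchors at the corners of a 10 m square, CRB evaluated
# at the center. TOAStd has shape (RNnum, 1) as the methods above expect.
from numpy import array, ones

RN_TOA = array([[0.0, 10.0, 0.0, 10.0],
                [0.0, 0.0, 10.0, 10.0]])      # shape (2, RNnum)
TOAStd = 1e-9 * ones((4, 1))                  # per-anchor TOA std in seconds
P = array([[5.0], [5.0]])                     # evaluation point, shape (2, 1)

crb = CRBLocation(RN_TOA)
print crb.CRB_TOA_fim(P, RN_TOA, TOAStd)      # trace of the inverse FIM
print crb.MCRB_TOA_fim(10.0, RN_TOA, TOAStd)  # mean CRB over a 10 m x 10 m area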
lgpl-3.0
5,342,399,385,796,055,000
36.278618
203
0.472943
false
Juzley/golfstats
lib/werkzeug/datastructures.py
122
87447
# -*- coding: utf-8 -*- """ werkzeug.datastructures ~~~~~~~~~~~~~~~~~~~~~~~ This module provides mixins and classes with an immutable interface. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import codecs import mimetypes from copy import deepcopy from itertools import repeat from werkzeug._internal import _missing, _empty_stream from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \ PY2, text_type, integer_types, string_types, make_literal_wrapper, \ to_native from werkzeug.filesystem import get_filesystem_encoding _locale_delim_re = re.compile(r'[_-]') def is_immutable(self): raise TypeError('%r objects are immutable' % self.__class__.__name__) def iter_multi_items(mapping): """Iterates over the items of a mapping yielding keys and values without dropping any from more complex structures. """ if isinstance(mapping, MultiDict): for item in iteritems(mapping, multi=True): yield item elif isinstance(mapping, dict): for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): for value in value: yield key, value else: yield key, value else: for item in mapping: yield item def native_itermethods(names): if not PY2: return lambda x: x def setmethod(cls, name): itermethod = getattr(cls, name) setattr(cls, 'iter%s' % name, itermethod) listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw)) listmethod.__doc__ = \ 'Like :py:meth:`iter%s`, but returns a list.' % name setattr(cls, name, listmethod) def wrap(cls): for name in names: setmethod(cls, name) return cls return wrap class ImmutableListMixin(object): """Makes a :class:`list` immutable. .. versionadded:: 0.5 :private: """ _hash_cache = None def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(tuple(self)) return rv def __reduce_ex__(self, protocol): return type(self), (list(self),) def __delitem__(self, key): is_immutable(self) def __delslice__(self, i, j): is_immutable(self) def __iadd__(self, other): is_immutable(self) __imul__ = __iadd__ def __setitem__(self, key, value): is_immutable(self) def __setslice__(self, i, j, value): is_immutable(self) def append(self, item): is_immutable(self) remove = append def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def reverse(self): is_immutable(self) def sort(self, cmp=None, key=None, reverse=None): is_immutable(self) class ImmutableList(ImmutableListMixin, list): """An immutable :class:`list`. .. versionadded:: 0.5 :private: """ def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, list.__repr__(self), ) class ImmutableDictMixin(object): """Makes a :class:`dict` immutable. .. 
versionadded:: 0.5 :private: """ _hash_cache = None @classmethod def fromkeys(cls, keys, value=None): instance = super(cls, cls).__new__(cls) instance.__init__(zip(keys, repeat(value))) return instance def __reduce_ex__(self, protocol): return type(self), (dict(self),) def _iter_hashitems(self): return iteritems(self) def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(frozenset(self._iter_hashitems())) return rv def setdefault(self, key, default=None): is_immutable(self) def update(self, *args, **kwargs): is_immutable(self) def pop(self, key, default=None): is_immutable(self) def popitem(self): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) def __delitem__(self, key): is_immutable(self) def clear(self): is_immutable(self) class ImmutableMultiDictMixin(ImmutableDictMixin): """Makes a :class:`MultiDict` immutable. .. versionadded:: 0.5 :private: """ def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def _iter_hashitems(self): return iteritems(self, multi=True) def add(self, key, value): is_immutable(self) def popitemlist(self): is_immutable(self) def poplist(self, key): is_immutable(self) def setlist(self, key, new_list): is_immutable(self) def setlistdefault(self, key, default_list=None): is_immutable(self) class UpdateDictMixin(object): """Makes dicts call `self.on_update` on modifications. .. versionadded:: 0.5 :private: """ on_update = None def calls_update(name): def oncall(self, *args, **kw): rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw) if self.on_update is not None: self.on_update(self) return rv oncall.__name__ = name return oncall def setdefault(self, key, default=None): modified = key not in self rv = super(UpdateDictMixin, self).setdefault(key, default) if modified and self.on_update is not None: self.on_update(self) return rv def pop(self, key, default=_missing): modified = key in self if default is _missing: rv = super(UpdateDictMixin, self).pop(key) else: rv = super(UpdateDictMixin, self).pop(key, default) if modified and self.on_update is not None: self.on_update(self) return rv __setitem__ = calls_update('__setitem__') __delitem__ = calls_update('__delitem__') clear = calls_update('clear') popitem = calls_update('popitem') update = calls_update('update') del calls_update class TypeConversionDict(dict): """Works like a regular dict but the :meth:`get` method can perform type conversions. :class:`MultiDict` and :class:`CombinedMultiDict` are subclasses of this class and provide the same feature. .. versionadded:: 0.5 """ def get(self, key, default=None, type=None): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = TypeConversionDict(foo='42', bar='blub') >>> d.get('foo', type=int) 42 >>> d.get('bar', -1, type=int) -1 :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the default value is returned. 
""" try: rv = self[key] if type is not None: rv = type(rv) except (KeyError, ValueError): rv = default return rv class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict): """Works like a :class:`TypeConversionDict` but does not support modifications. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return TypeConversionDict(self) def __copy__(self): return self @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class MultiDict(TypeConversionDict): """A :class:`MultiDict` is a dictionary subclass customized to deal with multiple values for the same key which is for example used by the parsing functions in the wrappers. This is necessary because some HTML form elements pass multiple values for the same key. :class:`MultiDict` implements all standard dictionary methods. Internally, it saves all values for a key as a list, but the standard dict access methods will only return the first value for a key. If you want to gain access to the other values, too, you have to use the `list` methods as explained below. Basic Usage: >>> d = MultiDict([('a', 'b'), ('a', 'c')]) >>> d MultiDict([('a', 'b'), ('a', 'c')]) >>> d['a'] 'b' >>> d.getlist('a') ['b', 'c'] >>> 'a' in d True It behaves like a normal dict thus all dict functions will only return the first value when multiple values for one key are found. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. A :class:`MultiDict` can be constructed from an iterable of ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2 onwards some keyword parameters. :param mapping: the initial value for the :class:`MultiDict`. Either a regular dict, an iterable of ``(key, value)`` tuples or `None`. """ def __init__(self, mapping=None): if isinstance(mapping, MultiDict): dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping))) elif isinstance(mapping, dict): tmp = {} for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): value = list(value) else: value = [value] tmp[key] = value dict.__init__(self, tmp) else: tmp = {} for key, value in mapping or (): tmp.setdefault(key, []).append(value) dict.__init__(self, tmp) def __getstate__(self): return dict(self.lists()) def __setstate__(self, value): dict.clear(self) dict.update(self, value) def __getitem__(self, key): """Return the first data value for this key; raises KeyError if not found. :param key: The key to be looked up. :raise KeyError: if the key does not exist. """ if key in self: return dict.__getitem__(self, key)[0] raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): """Like :meth:`add` but removes an existing key first. :param key: the key for the value. :param value: the value to set. """ dict.__setitem__(self, key, [value]) def add(self, key, value): """Adds a new value for the key. .. versionadded:: 0.6 :param key: the key for the value. :param value: the value to add. """ dict.setdefault(self, key, []).append(value) def getlist(self, key, type=None): """Return the list of items for a given key. If that key is not in the `MultiDict`, the return value will be an empty list. Just as `get` `getlist` accepts a `type` parameter. 
All items will be converted with the callable defined there. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. """ try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return list(rv) result = [] for item in rv: try: result.append(type(item)) except ValueError: pass return result def setlist(self, key, new_list): """Remove the old values for a key and add new ones. Note that the list you pass the values in will be shallow-copied before it is inserted in the dictionary. >>> d = MultiDict() >>> d.setlist('foo', ['1', '2']) >>> d['foo'] '1' >>> d.getlist('foo') ['1', '2'] :param key: The key for which the values are set. :param new_list: An iterable with the new values for the key. Old values are removed first. """ dict.__setitem__(self, key, list(new_list)) def setdefault(self, key, default=None): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key not in self: self[key] = default else: default = self[key] return default def setlistdefault(self, key, default_list=None): """Like `setdefault` but sets multiple values. The list returned is not a copy, but the list that is actually used internally. This means that you can put new values into the dict by appending items to the list: >>> d = MultiDict({"foo": 1}) >>> d.setlistdefault("foo").extend([2, 3]) >>> d.getlist("foo") [1, 2, 3] :param key: The key to be looked up. :param default: An iterable of default values. It is either copied (in case it was a list) or converted into a list before returned. :return: a :class:`list` """ if key not in self: default_list = list(default_list or ()) dict.__setitem__(self, key, default_list) else: default_list = dict.__getitem__(self, key) return default_list def items(self, multi=False): """Return an iterator of ``(key, value)`` pairs. :param multi: If set to `True` the iterator returned will have a pair for each value of each key. Otherwise it will only contain pairs for the first value of each key. """ for key, values in iteritems(dict, self): if multi: for value in values: yield key, value else: yield key, values[0] def lists(self): """Return a list of ``(key, values)`` pairs, where values is the list of all values associated with the key.""" for key, values in iteritems(dict, self): yield key, list(values) def keys(self): return iterkeys(dict, self) __iter__ = keys def values(self): """Returns an iterator of the first value on every key's value list.""" for values in itervalues(dict, self): yield values[0] def listvalues(self): """Return an iterator of all values associated with a key. Zipping :meth:`keys` and this is the same as calling :meth:`lists`: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> zip(d.keys(), d.listvalues()) == d.lists() True """ return itervalues(dict, self) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self) def deepcopy(self, memo=None): """Return a deep copy of this object.""" return self.__class__(deepcopy(self.to_dict(flat=False), memo)) def to_dict(self, flat=True): """Return the contents as regular dict. 
If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first value for each key. :return: a :class:`dict` """ if flat: return dict(iteritems(self)) return dict(self.lists()) def update(self, other_dict): """update() extends rather than replaces existing key lists: >>> a = MultiDict({'x': 1}) >>> b = MultiDict({'x': 2, 'y': 3}) >>> a.update(b) >>> a MultiDict([('y', 3), ('x', 1), ('x', 2)]) If the value list for a key in ``other_dict`` is empty, no new values will be added to the dict and the key will not be created: >>> x = {'empty_list': []} >>> y = MultiDict() >>> y.update(x) >>> y MultiDict([]) """ for key, value in iter_multi_items(other_dict): MultiDict.add(self, key, value) def pop(self, key, default=_missing): """Pop the first item for a list on the dict. Afterwards the key is removed from the dict, so additional values are discarded: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> d.pop("foo") 1 >>> "foo" in d False :param key: the key to pop. :param default: if provided the value to return if the key was not in the dictionary. """ try: return dict.pop(self, key)[0] except KeyError as e: if default is not _missing: return default raise exceptions.BadRequestKeyError(str(e)) def popitem(self): """Pop an item from the dict.""" try: item = dict.popitem(self) return (item[0], item[1][0]) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) def poplist(self, key): """Pop the list for a key from the dict. If the key is not in the dict an empty list is returned. .. versionchanged:: 0.5 If the key does no longer exist a list is returned instead of raising an error. """ return dict.pop(self, key, []) def popitemlist(self): """Pop a ``(key, list)`` tuple from the dict.""" try: return dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.deepcopy(memo=memo) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True))) class _omd_bucket(object): """Wraps values in the :class:`OrderedMultiDict`. This makes it possible to keep an order over multiple different keys. It requires a lot of extra memory and slows down access a lot, but makes it possible to access elements in O(1) and iterate in O(n). """ __slots__ = ('prev', 'key', 'value', 'next') def __init__(self, omd, key, value): self.prev = omd._last_bucket self.key = key self.value = value self.next = None if omd._first_bucket is None: omd._first_bucket = self if omd._last_bucket is not None: omd._last_bucket.next = self omd._last_bucket = self def unlink(self, omd): if self.prev: self.prev.next = self.next if self.next: self.next.prev = self.prev if omd._first_bucket is self: omd._first_bucket = self.next if omd._last_bucket is self: omd._last_bucket = self.prev @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class OrderedMultiDict(MultiDict): """Works like a regular :class:`MultiDict` but preserves the order of the fields. To convert the ordered multi dict into a list you can use the :meth:`items` method and pass it ``multi=True``. In general an :class:`OrderedMultiDict` is an order of magnitude slower than a :class:`MultiDict`. .. 
admonition:: note Due to a limitation in Python you cannot convert an ordered multi dict into a regular dict by using ``dict(multidict)``. Instead you have to use the :meth:`to_dict` method, otherwise the internal bucket objects are exposed. """ def __init__(self, mapping=None): dict.__init__(self) self._first_bucket = self._last_bucket = None if mapping is not None: OrderedMultiDict.update(self, mapping) def __eq__(self, other): if not isinstance(other, MultiDict): return NotImplemented if isinstance(other, OrderedMultiDict): iter1 = iteritems(self, multi=True) iter2 = iteritems(other, multi=True) try: for k1, v1 in iter1: k2, v2 = next(iter2) if k1 != k2 or v1 != v2: return False except StopIteration: return False try: next(iter2) except StopIteration: return True return False if len(self) != len(other): return False for key, values in iterlists(self): if other.getlist(key) != values: return False return True def __ne__(self, other): return not self.__eq__(other) def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def __getstate__(self): return list(iteritems(self, multi=True)) def __setstate__(self, values): dict.clear(self) for key, value in values: self.add(key, value) def __getitem__(self, key): if key in self: return dict.__getitem__(self, key)[0].value raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): self.poplist(key) self.add(key, value) def __delitem__(self, key): self.pop(key) def keys(self): return (key for key, value in iteritems(self)) __iter__ = keys def values(self): return (value for key, value in iteritems(self)) def items(self, multi=False): ptr = self._first_bucket if multi: while ptr is not None: yield ptr.key, ptr.value ptr = ptr.next else: returned_keys = set() while ptr is not None: if ptr.key not in returned_keys: returned_keys.add(ptr.key) yield ptr.key, ptr.value ptr = ptr.next def lists(self): returned_keys = set() ptr = self._first_bucket while ptr is not None: if ptr.key not in returned_keys: yield ptr.key, self.getlist(ptr.key) returned_keys.add(ptr.key) ptr = ptr.next def listvalues(self): for key, values in iterlists(self): yield values def add(self, key, value): dict.setdefault(self, key, []).append(_omd_bucket(self, key, value)) def getlist(self, key, type=None): try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return [x.value for x in rv] result = [] for item in rv: try: result.append(type(item.value)) except ValueError: pass return result def setlist(self, key, new_list): self.poplist(key) for value in new_list: self.add(key, value) def setlistdefault(self, key, default_list=None): raise TypeError('setlistdefault is unsupported for ' 'ordered multi dicts') def update(self, mapping): for key, value in iter_multi_items(mapping): OrderedMultiDict.add(self, key, value) def poplist(self, key): buckets = dict.pop(self, key, ()) for bucket in buckets: bucket.unlink(self) return [x.value for x in buckets] def pop(self, key, default=_missing): try: buckets = dict.pop(self, key) except KeyError as e: if default is not _missing: return default raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return buckets[0].value def popitem(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, buckets[0].value def popitemlist(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise 
exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, [x.value for x in buckets] def _options_header_vkw(value, kw): return dump_options_header(value, dict((k.replace('_', '-'), v) for k, v in kw.items())) def _unicodify_header_value(value): if isinstance(value, bytes): value = value.decode('latin-1') if not isinstance(value, text_type): value = text_type(value) return value @native_itermethods(['keys', 'values', 'items']) class Headers(object): """An object that stores some headers. It has a dict-like interface but is ordered and can store the same keys multiple times. This data structure is useful if you want a nicer way to handle WSGI headers which are stored as tuples in a list. From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is also a subclass of the :class:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers` class, with the exception of `__getitem__`. :mod:`wsgiref` will return `None` for ``headers['missing']``, whereas :class:`Headers` will raise a :class:`KeyError`. To create a new :class:`Headers` object pass it a list or dict of headers which are used as default values. This does not reuse the list passed to the constructor for internal usage. :param defaults: The list of default values for the :class:`Headers`. .. versionchanged:: 0.9 This data structure now stores unicode values similar to how the multi dicts do it. The main difference is that bytes can be set as well which will automatically be latin1 decoded. .. versionchanged:: 0.9 The :meth:`linked` function was removed without replacement as it was an API that does not support the changes to the encoding model. """ def __init__(self, defaults=None): self._list = [] if defaults is not None: if isinstance(defaults, (list, Headers)): self._list.extend(defaults) else: self.extend(defaults) def __getitem__(self, key, _get_mode=False): if not _get_mode: if isinstance(key, integer_types): return self._list[key] elif isinstance(key, slice): return self.__class__(self._list[key]) if not isinstance(key, string_types): raise exceptions.BadRequestKeyError(key) ikey = key.lower() for k, v in self._list: if k.lower() == ikey: return v # micro optimization: if we are in get mode we will catch that # exception one stack level down so we can raise a standard # key error instead of our special one. if _get_mode: raise KeyError() raise exceptions.BadRequestKeyError(key) def __eq__(self, other): return other.__class__ is self.__class__ and \ set(other._list) == set(self._list) def __ne__(self, other): return not self.__eq__(other) def get(self, key, default=None, type=None, as_bytes=False): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = Headers([('Content-Length', '42')]) >>> d.get('Content-Length', type=int) 42 If a headers object is bound you must not add unicode strings because no encoding takes place. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. 
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`.  If a :exc:`ValueError` is raised
                     by this callable the default value is returned.
        :param as_bytes: return bytes instead of unicode strings.
        """
        try:
            rv = self.__getitem__(key, _get_mode=True)
        except KeyError:
            return default
        if as_bytes:
            rv = rv.encode('latin1')
        if type is None:
            return rv
        try:
            return type(rv)
        except ValueError:
            return default

    def getlist(self, key, type=None, as_bytes=False):
        """Return the list of items for a given key.  If that key is not in the
        :class:`Headers`, the return value will be an empty list.  Just as
        :meth:`get` :meth:`getlist` accepts a `type` parameter.  All items will
        be converted with the callable defined there.

        .. versionadded:: 0.9
           Added support for `as_bytes`.

        :param key: The key to be looked up.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`.  If a :exc:`ValueError` is raised
                     by this callable the value will be removed from the list.
        :return: a :class:`list` of all the values for the key.
        :param as_bytes: return bytes instead of unicode strings.
        """
        ikey = key.lower()
        result = []
        for k, v in self:
            if k.lower() == ikey:
                if as_bytes:
                    v = v.encode('latin1')
                if type is not None:
                    try:
                        v = type(v)
                    except ValueError:
                        continue
                result.append(v)
        return result

    def get_all(self, name):
        """Return a list of all the values for the named field.

        This method is compatible with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.get_all` method.
        """
        return self.getlist(name)

    def items(self, lower=False):
        for key, value in self:
            if lower:
                key = key.lower()
            yield key, value

    def keys(self, lower=False):
        for key, _ in iteritems(self, lower):
            yield key

    def values(self):
        for _, value in iteritems(self):
            yield value

    def extend(self, iterable):
        """Extend the headers with a dict or an iterable yielding keys and
        values.
        """
        if isinstance(iterable, dict):
            for key, value in iteritems(iterable):
                if isinstance(value, (tuple, list)):
                    for v in value:
                        self.add(key, v)
                else:
                    self.add(key, value)
        else:
            for key, value in iterable:
                self.add(key, value)

    def __delitem__(self, key, _index_operation=True):
        if _index_operation and isinstance(key, (integer_types, slice)):
            del self._list[key]
            return
        key = key.lower()
        new = []
        for k, v in self._list:
            if k.lower() != key:
                new.append((k, v))
        self._list[:] = new

    def remove(self, key):
        """Remove a key.

        :param key: The key to be removed.
        """
        return self.__delitem__(key, _index_operation=False)

    def pop(self, key=None, default=_missing):
        """Removes and returns a key or index.

        :param key: The key to be popped.  If this is an integer the item at
                    that position is removed, if it's a string the value for
                    that key is.  If the key is omitted or `None` the last
                    item is removed.
        :return: an item.
        """
        if key is None:
            return self._list.pop()
        if isinstance(key, integer_types):
            return self._list.pop(key)
        try:
            rv = self[key]
            self.remove(key)
        except KeyError:
            if default is not _missing:
                return default
            raise
        return rv

    def popitem(self):
        """Removes a key or index and returns a (key, value) item."""
        return self.pop()

    def __contains__(self, key):
        """Check if a key is present."""
        try:
            self.__getitem__(key, _get_mode=True)
        except KeyError:
            return False
        return True

    has_key = __contains__

    def __iter__(self):
        """Yield ``(key, value)`` tuples."""
        return iter(self._list)

    def __len__(self):
        return len(self._list)

    def add(self, _key, _value, **kw):
        """Add a new header tuple to the list.

        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes::

        >>> d = Headers()
        >>> d.add('Content-Type', 'text/plain')
        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')

        The keyword argument dumping uses :func:`dump_options_header`
        behind the scenes.

        .. versionadded:: 0.4.1
            keyword arguments were added for :mod:`wsgiref` compatibility.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _value = _unicodify_header_value(_value)
        self._validate_value(_value)
        self._list.append((_key, _value))

    def _validate_value(self, value):
        if not isinstance(value, text_type):
            raise TypeError('Value should be unicode.')
        if u'\n' in value or u'\r' in value:
            raise ValueError('Detected newline in header value.  This is '
                             'a potential security problem')

    def add_header(self, _key, _value, **_kw):
        """Add a new header tuple to the list.

        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.add_header` method.
        """
        self.add(_key, _value, **_kw)

    def clear(self):
        """Clears all headers."""
        del self._list[:]

    def set(self, _key, _value, **kw):
        """Remove all header tuples for `key` and add a new one.  The newly
        added key either appears at the end of the list if there was no
        entry or replaces the first one.

        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes.  See :meth:`add` for
        more information.

        .. versionchanged:: 0.6.1
           :meth:`set` now accepts the same arguments as :meth:`add`.

        :param key: The key to be inserted.
        :param value: The value to be inserted.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _value = _unicodify_header_value(_value)
        self._validate_value(_value)
        if not self._list:
            self._list.append((_key, _value))
            return
        listiter = iter(self._list)
        ikey = _key.lower()
        for idx, (old_key, old_value) in enumerate(listiter):
            if old_key.lower() == ikey:
                # replace first occurrence
                self._list[idx] = (_key, _value)
                break
        else:
            self._list.append((_key, _value))
            return
        self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]

    def setdefault(self, key, value):
        """Returns the value for the key if it is in the dict, otherwise it
        returns `default` and sets that value for `key`.

        :param key: The key to be looked up.
        :param default: The default value to be returned if the key is not
                        in the dict.  If not further specified it's `None`.
        """
        if key in self:
            return self[key]
        self.set(key, value)
        return value

    def __setitem__(self, key, value):
        """Like :meth:`set` but also supports index/slice based setting."""
        if isinstance(key, (slice, integer_types)):
            if isinstance(key, integer_types):
                value = [value]
            value = [(k, _unicodify_header_value(v)) for (k, v) in value]
            [self._validate_value(v) for (k, v) in value]
            if isinstance(key, integer_types):
                self._list[key] = value[0]
            else:
                self._list[key] = value
        else:
            self.set(key, value)

    def to_list(self, charset='iso-8859-1'):
        """Convert the headers into a list suitable for WSGI."""
        from warnings import warn
        warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
             stacklevel=2)
        return self.to_wsgi_list()

    def to_wsgi_list(self):
        """Convert the headers into a list suitable for WSGI.

        The values are byte strings in Python 2 converted to latin1 and unicode
        strings in Python 3 for the WSGI server to encode.

        :return: list
        """
        if PY2:
            return [(to_native(k), v.encode('latin1')) for k, v in self]
        return list(self)

    def copy(self):
        return self.__class__(self._list)

    def __copy__(self):
        return self.copy()

    def __str__(self):
        """Returns formatted headers suitable for HTTP transmission."""
        strs = []
        for key, value in self.to_wsgi_list():
            strs.append('%s: %s' % (key, value))
        strs.append('\r\n')
        return '\r\n'.join(strs)

    def __repr__(self):
        return '%s(%r)' % (
            self.__class__.__name__,
            list(self)
        )


class ImmutableHeadersMixin(object):
    """Makes a :class:`Headers` immutable.  We do not mark them as hashable
    though since the only usecase for this datastructure in Werkzeug is a view
    on a mutable structure.

    .. versionadded:: 0.5

    :private:
    """

    def __delitem__(self, key):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)
    set = __setitem__

    def add(self, item):
        is_immutable(self)
    remove = add_header = add

    def extend(self, iterable):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, index=-1):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def setdefault(self, key, default):
        is_immutable(self)


class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Read only version of the headers from a WSGI environment.  This
    provides the same interface as `Headers` and is constructed from
    a WSGI environment.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
    HTTP exceptions.
    """

    def __init__(self, environ):
        self.environ = environ

    def __eq__(self, other):
        return self.environ is other.environ

    def __getitem__(self, key, _get_mode=False):
        # _get_mode is a no-op for this class as there is no index but
        # used because get() calls it.
        key = key.upper().replace('-', '_')
        if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            return _unicodify_header_value(self.environ[key])
        return _unicodify_header_value(self.environ['HTTP_' + key])

    def __len__(self):
        # the iter is necessary because otherwise list calls our
        # len which would call list again and so forth.
        return len(list(iter(self)))

    def __iter__(self):
        for key, value in iteritems(self.environ):
            if key.startswith('HTTP_') and key not in \
               ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                yield (key[5:].replace('_', '-').title(),
                       _unicodify_header_value(value))
            elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
                yield (key.replace('_', '-').title(),
                       _unicodify_header_value(value))

    def copy(self):
        raise TypeError('cannot create %r copies' % self.__class__.__name__)


@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
    """A read only :class:`MultiDict` that you can pass multiple
    :class:`MultiDict` instances as sequence and it will combine the
    return values of all wrapped dicts:

    >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
    >>> post = MultiDict([('foo', 'bar')])
    >>> get = MultiDict([('blub', 'blah')])
    >>> combined = CombinedMultiDict([get, post])
    >>> combined['foo']
    'bar'
    >>> combined['blub']
    'blah'

    This works for all read operations and will raise a `TypeError` for
    methods that usually change data which isn't possible.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    """

    def __reduce_ex__(self, protocol):
        return type(self), (self.dicts,)

    def __init__(self, dicts=None):
        self.dicts = dicts or []

    @classmethod
    def fromkeys(cls):
        raise TypeError('cannot create %r instances by fromkeys' %
                        cls.__name__)

    def __getitem__(self, key):
        for d in self.dicts:
            if key in d:
                return d[key]
        raise exceptions.BadRequestKeyError(key)

    def get(self, key, default=None, type=None):
        for d in self.dicts:
            if key in d:
                if type is not None:
                    try:
                        return type(d[key])
                    except ValueError:
                        continue
                return d[key]
        return default

    def getlist(self, key, type=None):
        rv = []
        for d in self.dicts:
            rv.extend(d.getlist(key, type))
        return rv

    def _keys_impl(self):
        """This function exists so __len__ can be implemented more efficiently,
        saving one list creation from an iterator.

        Using this for Python 2's ``dict.keys`` behavior would be useless since
        `dict.keys` in Python 2 returns a list, while we have a set here.
        """
        rv = set()
        for d in self.dicts:
            rv.update(iterkeys(d))
        return rv

    def keys(self):
        return iter(self._keys_impl())

    __iter__ = keys

    def items(self, multi=False):
        found = set()
        for d in self.dicts:
            for key, value in iteritems(d, multi):
                if multi:
                    yield key, value
                elif key not in found:
                    found.add(key)
                    yield key, value

    def values(self):
        for key, value in iteritems(self):
            yield value

    def lists(self):
        rv = {}
        for d in self.dicts:
            for key, values in iterlists(d):
                rv.setdefault(key, []).extend(values)
        return iteritems(rv)

    def listvalues(self):
        return (x[1] for x in self.lists())

    def copy(self):
        """Return a shallow copy of this object."""
        return self.__class__(self.dicts[:])

    def to_dict(self, flat=True):
        """Return the contents as regular dict.  If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.

        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it.  Otherwise it will only
                     contain the first item for each key.
        :return: a :class:`dict`
        """
        rv = {}
        for d in reversed(self.dicts):
            rv.update(d.to_dict(flat))
        return rv

    def __len__(self):
        return len(self._keys_impl())

    def __contains__(self, key):
        for d in self.dicts:
            if key in d:
                return True
        return False

    has_key = __contains__

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.dicts)


class FileMultiDict(MultiDict):
    """A special :class:`MultiDict` that has convenience methods to add
    files to it.  This is used for :class:`EnvironBuilder` and generally
    useful for unittesting.

    .. versionadded:: 0.5
    """

    def add_file(self, name, file, filename=None, content_type=None):
        """Adds a new file to the dict.  `file` can be a file name or
        a :class:`file`-like or a :class:`FileStorage` object.

        :param name: the name of the field.
        :param file: a filename or :class:`file`-like object
        :param filename: an optional filename
        :param content_type: an optional content type
        """
        if isinstance(file, FileStorage):
            value = file
        else:
            if isinstance(file, string_types):
                if filename is None:
                    filename = file
                file = open(file, 'rb')
            if filename and content_type is None:
                content_type = mimetypes.guess_type(filename)[0] or \
                    'application/octet-stream'
            value = FileStorage(file, filename, name, content_type)

        self.add(name, value)


class ImmutableDict(ImmutableDictMixin, dict):
    """An immutable :class:`dict`.

    .. versionadded:: 0.5
    """

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            dict.__repr__(self),
        )

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return dict(self)

    def __copy__(self):
        return self


class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
    """An immutable :class:`MultiDict`.

    .. versionadded:: 0.5
    """

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return MultiDict(self)

    def __copy__(self):
        return self


class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
    """An immutable :class:`OrderedMultiDict`.

    .. versionadded:: 0.6
    """

    def _iter_hashitems(self):
        return enumerate(iteritems(self, multi=True))

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return OrderedMultiDict(self)

    def __copy__(self):
        return self


@native_itermethods(['values'])
class Accept(ImmutableList):
    """An :class:`Accept` object is just a list subclass for lists of
    ``(value, quality)`` tuples.  It is automatically sorted by quality.

    All :class:`Accept` objects work similar to a list but provide extra
    functionality for working with the data.  Containment checks are
    normalized to the rules of that header:

    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
    >>> a.best
    'ISO-8859-1'
    >>> 'iso-8859-1' in a
    True
    >>> 'UTF8' in a
    True
    >>> 'utf7' in a
    False

    To get the quality for an item you can use normal item lookup:

    >>> print a['utf-8']
    0.7
    >>> a['utf7']
    0

    .. versionchanged:: 0.5
       :class:`Accept` objects are forced immutable now.
    """

    def __init__(self, values=()):
        if values is None:
            list.__init__(self)
            self.provided = False
        elif isinstance(values, Accept):
            self.provided = values.provided
            list.__init__(self, values)
        else:
            self.provided = True
            values = [(a, b) for b, a in values]
            values.sort()
            values.reverse()
            list.__init__(self, [(a, b) for b, a in values])

    def _value_matches(self, value, item):
        """Check if a value matches a given accept item."""
        return item == '*' or item.lower() == value.lower()

    def __getitem__(self, key):
        """Besides index lookup (getting item n) you can also pass it a string
        to get the quality for the item.  If the item is not in the list, the
        returned quality is ``0``.
        """
        if isinstance(key, string_types):
            return self.quality(key)
        return list.__getitem__(self, key)

    def quality(self, key):
        """Returns the quality of the key.

        .. versionadded:: 0.6
           In previous versions you had to use the item-lookup syntax
           (eg: ``obj[key]`` instead of ``obj.quality(key)``)
        """
        for item, quality in self:
            if self._value_matches(key, item):
                return quality
        return 0

    def __contains__(self, value):
        for item, quality in self:
            if self._value_matches(value, item):
                return True
        return False

    def __repr__(self):
        return '%s([%s])' % (
            self.__class__.__name__,
            ', '.join('(%r, %s)' % (x, y) for x, y in self)
        )

    def index(self, key):
        """Get the position of an entry or raise :exc:`ValueError`.

        :param key: The key to be looked up.

        .. versionchanged:: 0.5
           This used to raise :exc:`IndexError`, which was inconsistent
           with the list API.
        """
        if isinstance(key, string_types):
            for idx, (item, quality) in enumerate(self):
                if self._value_matches(key, item):
                    return idx
            raise ValueError(key)
        return list.index(self, key)

    def find(self, key):
        """Get the position of an entry or return -1.

        :param key: The key to be looked up.
        """
        try:
            return self.index(key)
        except ValueError:
            return -1

    def values(self):
        """Iterate over all values."""
        for item in self:
            yield item[0]

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        result = []
        for value, quality in self:
            if quality != 1:
                value = '%s;q=%s' % (value, quality)
            result.append(value)
        return ','.join(result)

    def __str__(self):
        return self.to_header()

    def best_match(self, matches, default=None):
        """Returns the best match from a list of possible matches based
        on the quality of the client.  If two items have the same quality,
        the one is returned that comes first.

        :param matches: a list of matches to check for
        :param default: the value that is returned if none match
        """
        best_quality = -1
        result = default
        for server_item in matches:
            for client_item, quality in self:
                if quality <= best_quality:
                    break
                if self._value_matches(server_item, client_item) \
                   and quality > 0:
                    best_quality = quality
                    result = server_item
        return result

    @property
    def best(self):
        """The best match as value."""
        if self:
            return self[0][0]


class MIMEAccept(Accept):
    """Like :class:`Accept` but with special methods and behavior for
    mimetypes.
    """

    def _value_matches(self, value, item):
        def _normalize(x):
            x = x.lower()
            return x == '*' and ('*', '*') or x.split('/', 1)

        # this is from the application which is trusted.  to avoid developer
        # frustration we actually check these for valid values
        if '/' not in value:
            raise ValueError('invalid mimetype %r' % value)
        value_type, value_subtype = _normalize(value)
        if value_type == '*' and value_subtype != '*':
            raise ValueError('invalid mimetype %r' % value)

        if '/' not in item:
            return False
        item_type, item_subtype = _normalize(item)
        if item_type == '*' and item_subtype != '*':
            return False
        return (
            (item_type == item_subtype == '*' or
             value_type == value_subtype == '*') or
            (item_type == value_type and (item_subtype == '*' or
                                          value_subtype == '*' or
                                          item_subtype == value_subtype))
        )

    @property
    def accept_html(self):
        """True if this object accepts HTML."""
        return (
            'text/html' in self or
            'application/xhtml+xml' in self or
            self.accept_xhtml
        )

    @property
    def accept_xhtml(self):
        """True if this object accepts XHTML."""
        return (
            'application/xhtml+xml' in self or
            'application/xml' in self
        )

    @property
    def accept_json(self):
        """True if this object accepts JSON."""
        return 'application/json' in self


class LanguageAccept(Accept):
    """Like :class:`Accept` but with normalization for languages."""

    def _value_matches(self, value, item):
        def _normalize(language):
            return _locale_delim_re.split(language.lower())
        return item == '*' or _normalize(value) == _normalize(item)


class CharsetAccept(Accept):
    """Like :class:`Accept` but with normalization for charsets."""

    def _value_matches(self, value, item):
        def _normalize(name):
            try:
                return codecs.lookup(name).name
            except LookupError:
                return name.lower()
        return item == '*' or _normalize(value) == _normalize(item)


def cache_property(key, empty, type):
    """Return a new property object for a cache header.  Useful if you
    want to add support for a cache extension in a subclass."""
    return property(lambda x: x._get_cache_value(key, empty, type),
                    lambda x, v: x._set_cache_value(key, v, type),
                    lambda x: x._del_cache_value(key),
                    'accessor for %r' % key)


class _CacheControl(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Cache-Control header.  It
    has accessors for all the cache-control directives specified in RFC 2616.
    The class does not differentiate between request and response directives.

    Because the cache-control directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`CacheControl` object again you can convert
    the object into a string or call the :meth:`to_header` method.  If you plan
    to subclass it and add your own items have a look at the sourcecode for
    that class.

    .. versionchanged:: 0.4

       Setting `no_cache` or `private` to boolean `True` will set the implicit
       none-value which is ``*``:

       >>> cc = ResponseCacheControl()
       >>> cc.no_cache = True
       >>> cc
       <ResponseCacheControl 'no-cache'>
       >>> cc.no_cache
       '*'
       >>> cc.no_cache = None
       >>> cc
       <ResponseCacheControl ''>

       In versions before 0.5 the behavior documented here affected the now
       no longer existing `CacheControl` class.
    """

    no_cache = cache_property('no-cache', '*', None)
    no_store = cache_property('no-store', None, bool)
    max_age = cache_property('max-age', -1, int)
    no_transform = cache_property('no-transform', None, None)

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        self.provided = values is not None

    def _get_cache_value(self, key, empty, type):
        """Used internally by the accessor properties."""
        if type is bool:
            return key in self
        if key in self:
            value = self[key]
            if value is None:
                return empty
            elif type is not None:
                try:
                    value = type(value)
                except ValueError:
                    pass
            return value

    def _set_cache_value(self, key, value, type):
        """Used internally by the accessor properties."""
        if type is bool:
            if value:
                self[key] = None
            else:
                self.pop(key, None)
        else:
            if value is None:
                self.pop(key)
            elif value is True:
                self[key] = None
            else:
                self[key] = value

    def _del_cache_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self):
        """Convert the stored values into a cache control header."""
        return dump_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            " ".join(
                "%s=%r" % (k, v) for k, v in sorted(self.items())
            ),
        )


class RequestCacheControl(ImmutableDictMixin, _CacheControl):
    """A cache control for requests.  This is immutable and gives access
    to all the request-relevant cache control headers.

    To get a header of the :class:`RequestCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the
    sourcecode for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """

    max_stale = cache_property('max-stale', '*', int)
    min_fresh = cache_property('min-fresh', '*', int)
    no_transform = cache_property('no-transform', None, None)
    only_if_cached = cache_property('only-if-cached', None, bool)


class ResponseCacheControl(_CacheControl):
    """A cache control for responses.  Unlike :class:`RequestCacheControl`
    this is mutable and gives access to response-relevant cache control
    headers.

    To get a header of the :class:`ResponseCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the
    sourcecode for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """

    public = cache_property('public', None, bool)
    private = cache_property('private', '*', None)
    must_revalidate = cache_property('must-revalidate', None, bool)
    proxy_revalidate = cache_property('proxy-revalidate', None, bool)
    s_maxage = cache_property('s-maxage', None, None)


# attach cache_property to the _CacheControl as staticmethod
# so that others can reuse it.
_CacheControl.cache_property = staticmethod(cache_property)


class CallbackDict(UpdateDictMixin, dict):
    """A dict that calls a function passed every time something is changed.
    The function is passed the dict instance.
    """

    def __init__(self, initial=None, on_update=None):
        dict.__init__(self, initial or ())
        self.on_update = on_update

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            dict.__repr__(self)
        )


class HeaderSet(object):
    """Similar to the :class:`ETags` class this implements a set-like
    structure.  Unlike :class:`ETags` this is case insensitive and used for
    vary, allow, and content-language headers.

    If not constructed using the :func:`parse_set_header` function the
    instantiation works like this:

    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
    >>> hs
    HeaderSet(['foo', 'bar', 'baz'])
    """

    def __init__(self, headers=None, on_update=None):
        self._headers = list(headers or ())
        self._set = set([x.lower() for x in self._headers])
        self.on_update = on_update

    def add(self, header):
        """Add a new header to the set."""
        self.update((header,))

    def remove(self, header):
        """Remove a header from the set.  This raises an :exc:`KeyError` if the
        header is not in the set.

        .. versionchanged:: 0.5
            In older versions a :exc:`IndexError` was raised instead of a
            :exc:`KeyError` if the object was missing.

        :param header: the header to be removed.
        """
        key = header.lower()
        if key not in self._set:
            raise KeyError(header)
        self._set.remove(key)
        for idx, item in enumerate(self._headers):
            if item.lower() == key:
                del self._headers[idx]
                break
        if self.on_update is not None:
            self.on_update(self)

    def update(self, iterable):
        """Add all the headers from the iterable to the set.

        :param iterable: updates the set with the items from the iterable.
        """
        inserted_any = False
        for header in iterable:
            key = header.lower()
            if key not in self._set:
                self._headers.append(header)
                self._set.add(key)
                inserted_any = True
        if inserted_any and self.on_update is not None:
            self.on_update(self)

    def discard(self, header):
        """Like :meth:`remove` but ignores errors.

        :param header: the header to be discarded.
        """
        try:
            return self.remove(header)
        except KeyError:
            pass

    def find(self, header):
        """Return the index of the header in the set or return -1 if not found.

        :param header: the header to be looked up.
        """
        header = header.lower()
        for idx, item in enumerate(self._headers):
            if item.lower() == header:
                return idx
        return -1

    def index(self, header):
        """Return the index of the header in the set or raise an
        :exc:`IndexError`.

        :param header: the header to be looked up.
        """
        rv = self.find(header)
        if rv < 0:
            raise IndexError(header)
        return rv

    def clear(self):
        """Clear the set."""
        self._set.clear()
        del self._headers[:]
        if self.on_update is not None:
            self.on_update(self)

    def as_set(self, preserve_casing=False):
        """Return the set as real python set type.  When calling this, all
        the items are converted to lowercase and the ordering is lost.

        :param preserve_casing: if set to `True` the items in the set returned
                                will have the original case like in the
                                :class:`HeaderSet`, otherwise they will
                                be lowercase.
        """
        if preserve_casing:
            return set(self._headers)
        return set(self._set)

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        return ', '.join(map(quote_header_value, self._headers))

    def __getitem__(self, idx):
        return self._headers[idx]

    def __delitem__(self, idx):
        rv = self._headers.pop(idx)
        self._set.remove(rv.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __setitem__(self, idx, value):
        old = self._headers[idx]
        self._set.remove(old.lower())
        self._headers[idx] = value
        self._set.add(value.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __contains__(self, header):
        return header.lower() in self._set

    def __len__(self):
        return len(self._set)

    def __iter__(self):
        return iter(self._headers)

    def __nonzero__(self):
        return bool(self._set)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '%s(%r)' % (
            self.__class__.__name__,
            self._headers
        )


class ETags(object):
    """A set that can be used to check if one etag is present in a collection
    of etags.
    """

    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
        self._strong = frozenset(not star_tag and strong_etags or ())
        self._weak = frozenset(weak_etags or ())
        self.star_tag = star_tag

    def as_set(self, include_weak=False):
        """Convert the `ETags` object into a python set.  Per default all the
        weak etags are not part of this set."""
        rv = set(self._strong)
        if include_weak:
            rv.update(self._weak)
        return rv

    def is_weak(self, etag):
        """Check if an etag is weak."""
        return etag in self._weak

    def contains_weak(self, etag):
        """Check if an etag is part of the set including weak and strong tags."""
        return self.is_weak(etag) or self.contains(etag)

    def contains(self, etag):
        """Check if an etag is part of the set ignoring weak tags.
        It is also possible to use the ``in`` operator.
        """
        if self.star_tag:
            return True
        return etag in self._strong

    def contains_raw(self, etag):
        """When passed a quoted tag it will check if this tag is part of the
        set.  If the tag is weak it is checked against weak and strong tags,
        otherwise strong only."""
        etag, weak = unquote_etag(etag)
        if weak:
            return self.contains_weak(etag)
        return self.contains(etag)

    def to_header(self):
        """Convert the etags set into a HTTP header string."""
        if self.star_tag:
            return '*'
        return ', '.join(
            ['"%s"' % x for x in self._strong] +
            ['W/"%s"' % x for x in self._weak]
        )

    def __call__(self, etag=None, data=None, include_weak=False):
        if [etag, data].count(None) != 1:
            raise TypeError('exactly one of etag or data is required')
        if etag is None:
            etag = generate_etag(data)
        if include_weak:
            if etag in self._weak:
                return True
        return etag in self._strong

    def __bool__(self):
        return bool(self.star_tag or self._strong or self._weak)

    __nonzero__ = __bool__

    def __str__(self):
        return self.to_header()

    def __iter__(self):
        return iter(self._strong)

    def __contains__(self, etag):
        return self.contains(etag)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))


class IfRange(object):
    """Very simple object that represents the `If-Range` header in parsed
    form.  It will have an etag or a date, possibly neither, but never both.

    .. versionadded:: 0.7
    """

    def __init__(self, etag=None, date=None):
        #: The etag parsed and unquoted.  Ranges always operate on strong
        #: etags so the weakness information is not necessary.
        self.etag = etag
        #: The date in parsed format or `None`.
        self.date = date

    def to_header(self):
        """Converts the object back into an HTTP header."""
        if self.date is not None:
            return http_date(self.date)
        if self.etag is not None:
            return quote_etag(self.etag)
        return ''

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))


class Range(object):
    """Represents a range header.  All the methods are only supporting bytes
    as unit.  It does store multiple ranges but :meth:`range_for_length` will
    only work if only one range is provided.

    .. versionadded:: 0.7
    """

    def __init__(self, units, ranges):
        #: The units of this range.  Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges

    def range_for_length(self, length):
        """If the range is for bytes, the length is not None and there is
        exactly one range and it is satisfiable it returns a ``(start, stop)``
        tuple, otherwise `None`.
        """
        if self.units != 'bytes' or length is None or len(self.ranges) != 1:
            return None
        start, end = self.ranges[0]
        if end is None:
            end = length
            if start < 0:
                start += length
        if is_byte_range_valid(start, end, length):
            return start, min(end, length)

    def make_content_range(self, length):
        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
        from the current range and given content length.
        """
        rng = self.range_for_length(length)
        if rng is not None:
            return ContentRange(self.units, rng[0], rng[1], length)

    def to_header(self):
        """Converts the object back into an HTTP header."""
        ranges = []
        for begin, end in self.ranges:
            if end is None:
                ranges.append(begin >= 0 and '%s-' % begin or str(begin))
            else:
                ranges.append('%s-%s' % (begin, end - 1))
        return '%s=%s' % (self.units, ','.join(ranges))

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))


class ContentRange(object):
    """Represents the content range header.

    .. versionadded:: 0.7
    """

    def __init__(self, units, start, stop, length=None, on_update=None):
        assert is_byte_range_valid(start, stop, length), \
            'Bad range provided'
        self.on_update = on_update
        self.set(start, stop, length, units)

    def _callback_property(name):
        def fget(self):
            return getattr(self, name)

        def fset(self, value):
            setattr(self, name, value)
            if self.on_update is not None:
                self.on_update(self)
        return property(fget, fset)

    #: The units to use, usually "bytes"
    units = _callback_property('_units')
    #: The start point of the range or `None`.
    start = _callback_property('_start')
    #: The stop point of the range (non-inclusive) or `None`.  Can only be
    #: `None` if also start is `None`.
    stop = _callback_property('_stop')
    #: The length of the range or `None`.
    length = _callback_property('_length')

    def set(self, start, stop, length=None, units='bytes'):
        """Simple method to update the ranges."""
        assert is_byte_range_valid(start, stop, length), \
            'Bad range provided'
        self._units = units
        self._start = start
        self._stop = stop
        self._length = length
        if self.on_update is not None:
            self.on_update(self)

    def unset(self):
        """Sets the units to `None` which indicates that the header should
        no longer be used.
        """
        self.set(None, None, units=None)

    def to_header(self):
        if self.units is None:
            return ''
        if self.length is None:
            length = '*'
        else:
            length = self.length
        if self.start is None:
            return '%s */%s' % (self.units, length)
        return '%s %s-%s/%s' % (
            self.units,
            self.start,
            self.stop - 1,
            length
        )

    def __nonzero__(self):
        return self.units is not None

    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))


class Authorization(ImmutableDictMixin, dict):
    """Represents an `Authorization` header sent by the client.  You should
    not create this kind of object yourself but use it when it's returned by
    the `parse_authorization_header` function.

    This object is a dict subclass and can be altered by setting dict items
    but it should be considered immutable as it's returned by the client and
    not meant for modifications.

    .. versionchanged:: 0.5
       This object became immutable.
    """

    def __init__(self, auth_type, data=None):
        dict.__init__(self, data or {})
        self.type = auth_type

    username = property(lambda x: x.get('username'), doc='''
        The username transmitted.  This is set for both basic and digest
        auth all the time.''')
    password = property(lambda x: x.get('password'), doc='''
        When the authentication type is basic this is the password
        transmitted by the client, else `None`.''')
    realm = property(lambda x: x.get('realm'), doc='''
        This is the server realm sent back for HTTP digest auth.''')
    nonce = property(lambda x: x.get('nonce'), doc='''
        The nonce the server sent for digest auth, sent back by the client.
        A nonce should be unique for every 401 response for HTTP digest
        auth.''')
    uri = property(lambda x: x.get('uri'), doc='''
        The URI from Request-URI of the Request-Line; duplicated because
        proxies are allowed to change the Request-Line in transit.  HTTP
        digest auth only.''')
    nc = property(lambda x: x.get('nc'), doc='''
        The nonce count value transmitted by clients if a qop-header is
        also transmitted.  HTTP digest auth only.''')
    cnonce = property(lambda x: x.get('cnonce'), doc='''
        If the server sent a qop-header in the ``WWW-Authenticate``
        header, the client has to provide this value for HTTP digest auth.
        See the RFC for more details.''')
    response = property(lambda x: x.get('response'), doc='''
        A string of 32 hex digits computed as defined in RFC 2617, which
        proves that the user knows a password.  Digest auth only.''')
    opaque = property(lambda x: x.get('opaque'), doc='''
        The opaque header from the server returned unchanged by the client.
        It is recommended that this string be base64 or hexadecimal data.
        Digest auth only.''')

    @property
    def qop(self):
        """Indicates what "quality of protection" the client has applied to
        the message for HTTP digest auth."""
        def on_update(header_set):
            if not header_set and 'qop' in self:
                del self['qop']
            elif header_set:
                self['qop'] = header_set.to_header()
        return parse_set_header(self.get('qop'), on_update)


class WWWAuthenticate(UpdateDictMixin, dict):
    """Provides simple access to `WWW-Authenticate` headers."""

    #: list of keys that require quoting in the generated header
    _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm', 'qop'])

    def __init__(self, auth_type=None, values=None, on_update=None):
        dict.__init__(self, values or ())
        if auth_type:
            self['__auth_type__'] = auth_type
        self.on_update = on_update

    def set_basic(self, realm='authentication required'):
        """Clear the auth info and enable basic auth."""
        dict.clear(self)
        dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
        if self.on_update:
            self.on_update(self)

    def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
                   algorithm=None, stale=False):
        """Clear the auth info and enable digest auth."""
        d = {
            '__auth_type__': 'digest',
            'realm': realm,
            'nonce': nonce,
            'qop': dump_header(qop)
        }
        if stale:
            d['stale'] = 'TRUE'
        if opaque is not None:
            d['opaque'] = opaque
        if algorithm is not None:
            d['algorithm'] = algorithm
        dict.clear(self)
        dict.update(self, d)
        if self.on_update:
            self.on_update(self)

    def to_header(self):
        """Convert the stored values into a WWW-Authenticate header."""
        d = dict(self)
        auth_type = d.pop('__auth_type__', None) or 'basic'
        return '%s %s' % (auth_type.title(), ', '.join([
            '%s=%s' % (key, quote_header_value(value,
                                               allow_token=key not in self._require_quoting))
            for key, value in iteritems(d)
        ]))

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.to_header()
        )

    def auth_property(name, doc=None):
        """A static helper function for subclasses to add extra authentication
        system properties onto a class::

            class FooAuthenticate(WWWAuthenticate):
                special_realm = auth_property('special_realm')

        For more information have a look at the sourcecode to see how the
        regular properties (:attr:`realm` etc.) are implemented.
        """
        def _set_value(self, value):
            if value is None:
                self.pop(name, None)
            else:
                self[name] = str(value)
        return property(lambda x: x.get(name), _set_value, doc=doc)

    def _set_property(name, doc=None):
        def fget(self):
            def on_update(header_set):
                if not header_set and name in self:
                    del self[name]
                elif header_set:
                    self[name] = header_set.to_header()
            return parse_set_header(self.get(name), on_update)
        return property(fget, doc=doc)

    type = auth_property('__auth_type__', doc='''
        The type of the auth mechanism.  HTTP currently specifies
        `Basic` and `Digest`.''')
    realm = auth_property('realm', doc='''
        A string to be displayed to users so they know which username and
        password to use.  This string should contain at least the name of
        the host performing the authentication and might additionally
        indicate the collection of users who might have access.''')
    domain = _set_property('domain', doc='''
        A list of URIs that define the protection space.  If a URI is an
        absolute path, it is relative to the canonical root URL of the
        server being accessed.''')
    nonce = auth_property('nonce', doc='''
        A server-specified data string which should be uniquely generated
        each time a 401 response is made.  It is recommended that this
        string be base64 or hexadecimal data.''')
    opaque = auth_property('opaque', doc='''
        A string of data, specified by the server, which should be returned
        by the client unchanged in the Authorization header of subsequent
        requests with URIs in the same protection space.  It is recommended
        that this string be base64 or hexadecimal data.''')
    algorithm = auth_property('algorithm', doc='''
        A string indicating a pair of algorithms used to produce the digest
        and a checksum.  If this is not present it is assumed to be "MD5".
        If the algorithm is not understood, the challenge should be ignored
        (and a different one used, if there is more than one).''')
    qop = _set_property('qop', doc='''
        A set of quality-of-privacy directives such as auth and
        auth-int.''')

    def _get_stale(self):
        val = self.get('stale')
        if val is not None:
            return val.lower() == 'true'

    def _set_stale(self, value):
        if value is None:
            self.pop('stale', None)
        else:
            self['stale'] = value and 'TRUE' or 'FALSE'

    stale = property(_get_stale, _set_stale, doc='''
        A flag, indicating that the previous request from the client was
        rejected because the nonce value was stale.''')
    del _get_stale, _set_stale

    # make auth_property a staticmethod so that subclasses of
    # `WWWAuthenticate` can use it for new properties.
    auth_property = staticmethod(auth_property)
    del _set_property


class FileStorage(object):
    """The :class:`FileStorage` class is a thin wrapper over incoming files.
    It is used by the request object to represent uploaded files.  All the
    attributes of the wrapper stream are proxied by the file storage so
    it's possible to do ``storage.read()`` instead of the long form
    ``storage.stream.read()``.
    """

    def __init__(self, stream=None, filename=None, name=None,
                 content_type=None, content_length=None, headers=None):
        self.name = name
        self.stream = stream or _empty_stream

        # if no filename is provided we can attempt to get the filename
        # from the stream object passed.  There we have to be careful to
        # skip things like <fdopen>, <stderr> etc.  Python marks these
        # special filenames with angular brackets.
        if filename is None:
            filename = getattr(stream, 'name', None)
            s = make_literal_wrapper(filename)
            if filename and filename[0] == s('<') and filename[-1] == s('>'):
                filename = None

            # On Python 3 we want to make sure the filename is always unicode.
            # This might not be if the name attribute is bytes due to the
            # file being opened from the bytes API.
            if not PY2 and isinstance(filename, bytes):
                filename = filename.decode(get_filesystem_encoding(),
                                           'replace')

        self.filename = filename
        if headers is None:
            headers = Headers()
        self.headers = headers
        if content_type is not None:
            headers['Content-Type'] = content_type
        if content_length is not None:
            headers['Content-Length'] = str(content_length)

    def _parse_content_type(self):
        if not hasattr(self, '_parsed_content_type'):
            self._parsed_content_type = \
                parse_options_header(self.content_type)

    @property
    def content_type(self):
        """The content-type sent in the header.  Usually not available"""
        return self.headers.get('content-type')

    @property
    def content_length(self):
        """The content-length sent in the header.  Usually not available"""
        return int(self.headers.get('content-length') or 0)

    @property
    def mimetype(self):
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase.  For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict.  For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    def save(self, dst, buffer_size=16384):
        """Save the file to a destination path or file object.  If the
        destination is a file object you have to close it yourself after the
        call.  The buffer size is the number of bytes held in memory during
        the copy process.  It defaults to 16KB.

        For secure file saving also have a look at :func:`secure_filename`.

        :param dst: a filename or open file object the uploaded file
                    is saved to.
        :param buffer_size: the size of the buffer.  This works the same as
                            the `length` parameter of
                            :func:`shutil.copyfileobj`.
        """
        from shutil import copyfileobj
        close_dst = False
        if isinstance(dst, string_types):
            dst = open(dst, 'wb')
            close_dst = True
        try:
            copyfileobj(self.stream, dst, buffer_size)
        finally:
            if close_dst:
                dst.close()

    def close(self):
        """Close the underlying file if possible."""
        try:
            self.stream.close()
        except Exception:
            pass

    def __nonzero__(self):
        return bool(self.filename)
    __bool__ = __nonzero__

    def __getattr__(self, name):
        return getattr(self.stream, name)

    def __iter__(self):
        return iter(self.readline, '')

    def __repr__(self):
        return '<%s: %r (%r)>' % (
            self.__class__.__name__,
            self.filename,
            self.content_type
        )


# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
    quote_header_value, parse_set_header, unquote_etag, quote_etag, \
    parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
mit
-1,074,565,960,800,933,600
31.508178
93
0.567944
false
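A quick sketch of how the Headers and MIMEAccept classes from the row above behave in practice, assuming a Werkzeug 0.x install where both are importable from werkzeug.datastructures; the header names and values are illustrative only:

    from werkzeug.datastructures import Headers, MIMEAccept

    h = Headers()
    h.add('Content-Type', 'text/plain')
    h.add('X-Retries', '3')
    # `type` casts the raw string; on ValueError the default is returned
    print(h.get('X-Retries', type=int))              # 3
    h.set('Content-Disposition', 'attachment', filename='foo.png')
    print(h['Content-Disposition'])                  # attachment; filename="foo.png"
    print(h.to_wsgi_list())

    # Accept headers are (value, quality) lists sorted by quality
    accept = MIMEAccept([('text/html', 1), ('application/json', 0.8)])
    print(accept.best_match(['application/json', 'text/plain']))  # application/json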
jtovar2/demo_app
backend/resources/org_user_relationships.py
1
4196
from flask import Flask, request, abort
import json
import ndb_util
from model import User
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import app_identity
from google.appengine.api import mail
from flask_restful import Resource
from google.appengine.runtime import apiproxy_errors

base_url = 'http://demolisherapp.appspot.com/'
app_name = 'DemolisherApp'
new_user_message = '<p> Create a new account at {app_name} today</p>' \
                   '<form action="{path}" method="get">'\
                   '<input type="submit" value="Sign Up">' \
                   '</form>'

user_message = '<p>Join the team and start demoing today</p>' \
               '<form action="{path}" method="get">' \
               '<input type="submit" value="Join">' \
               '</form>'


class InviteUserToOrg(Resource):
    def get(self, org_id, user_email):
        print "we in here"
        client_id = users.get_current_user().user_id()
        user_email = str(user_email)
        print user_email
        org_id = str(org_id)
        if org_id != client_id:
            abort(401)
        org_key = ndb.Key('Organization', org_id)
        org = org_key.get()
        sender = '[email protected]'
        sender = '{}@appspot.gserviceaccount.com'.format(
                 app_identity.get_application_id())
        subject = 'Welcome to the ' + org.name + ' Team!'
        body = '<h3>{org_name} has invited you to join their team</h3>' \
               '<hr>'
        body = body.format(org_name=org.name)

        query = User.query()
        query = query.filter(User.email == user_email)
        query_results = query.fetch()
        if len(query_results) == 0:
            add_new_user_path = base_url + 'signup?referral=' + org_id
            print add_new_user_path
            body = body + new_user_message.format(path=add_new_user_path,
                                                  app_name=app_name)
        else:
            user = query_results[0]
            user_id = query_results[0].key.id()
            add_user_path = base_url + 'signup?referral=' + org_id
            print add_user_path
            body = body + user_message.format(path=add_user_path)

        response = mail.send_mail(sender=sender,
                                  to=user_email,
                                  subject=subject,
                                  body="",
                                  html=body)
        return response


class AddUserToOrg(Resource):
    def get(self, org_id, user_id):
        client_id = users.get_current_user().user_id()
        if client_id != user_id:
            abort(401)
        org_key = ndb.Key('Organization', org_id)
        org = org_key.get()
        user_key = ndb.Key('User', user_id)
        user = user_key.get()
        if user_key in org.workers or org_key in user.works_for_organizations:
            abort(403)
        user.add_organization(org_key)
        org.add_worker(user_key)
        return user.to_json()


class RemoveUserFromOrg(Resource):
    def delete(self, org_id, user_id):
        client_id = users.get_current_user().user_id()
        if client_id != org_id:
            abort(401)
        org_key = ndb.Key('Organization', org_id)
        org = org_key.get()
        user_key = ndb.Key('User', user_id)
        user = user_key.get()
        user.delete_organization(org_key)
        org.remove_worker(user_key)
        return 'OK'


class GetAllWorkersForOrg(Resource):
    def get(self, org_id):
        client_id = users.get_current_user().user_id()
        if client_id != org_id:
            abort(401)
        org_key = ndb.Key('Organization', org_id)
        org = org_key.get()
        workers_entities = ndb.get_multi(org.workers)
        workers_json = []
        for entity in workers_entities:
            workers_json.append(entity.to_json())
        return {"workers": workers_json}


class GetAllOrgsForWorker(Resource):
    def get(self, user_id):
        client_id = users.get_current_user().user_id()
        if client_id != user_id:
            abort(401)
        user_key = ndb.Key('User', user_id)
        user = user_key.get()
        orgs_entities = ndb.get_multi(user.works_for_organizations)
        orgs_json = []
        for entity in orgs_entities:
            orgs_json.append(entity.to_json())
        return {'organizations': orgs_json}
mit
8,948,874,900,414,760,000
32.568
100
0.583174
false
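The resources in the row above are plain Flask-RESTful classes; a hypothetical wiring sketch follows (the URL rules below are illustrative assumptions, not taken from the repo):

    from flask import Flask
    from flask_restful import Api
    from resources.org_user_relationships import (
        InviteUserToOrg, AddUserToOrg, RemoveUserFromOrg,
        GetAllWorkersForOrg, GetAllOrgsForWorker)

    app = Flask(__name__)
    api = Api(app)
    # route shapes are guesses; each resource reads its ids from the URL
    api.add_resource(InviteUserToOrg, '/org/<org_id>/invite/<user_email>')
    api.add_resource(AddUserToOrg, '/org/<org_id>/workers/<user_id>')
    api.add_resource(RemoveUserFromOrg, '/org/<org_id>/workers/<user_id>/remove')
    api.add_resource(GetAllWorkersForOrg, '/org/<org_id>/workers')
    api.add_resource(GetAllOrgsForWorker, '/user/<user_id>/orgs')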
Wallacoloo/qemu-kinetis
scripts/tracetool.py
94
4095
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Command-line wrapper for the tracetool machinery.
"""

__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"

__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"


import sys
import getopt

from tracetool import error_write, out
import tracetool.backend
import tracetool.format


_SCRIPT = ""

def error_opt(msg = None):
    if msg is not None:
        error_write("Error: " + msg + "\n")

    backend_descr = "\n".join([ "  %-15s %s" % (n, d)
                                for n,d in tracetool.backend.get_list() ])
    format_descr = "\n".join([ "  %-15s %s" % (n, d)
                               for n,d in tracetool.format.get_list() ])
    error_write("""\
Usage: %(script)s --format=<format> --backends=<backends> [<options>]

Backends:
%(backends)s

Formats:
%(formats)s

Options:
    --help                   This help message.
    --list-backends          Print list of available backends.
    --check-backends         Check if the given backend is valid.
    --binary <path>          Full path to QEMU binary.
    --target-type <type>     QEMU emulator target type ('system' or 'user').
    --target-name <name>     QEMU emulator target name.
    --probe-prefix <prefix>  Prefix for dtrace probe names
                             (default: qemu-<target-type>-<target-name>).\
""" % {
            "script" : _SCRIPT,
            "backends" : backend_descr,
            "formats" : format_descr,
        })

    if msg is None:
        sys.exit(0)
    else:
        sys.exit(1)

def main(args):
    global _SCRIPT
    _SCRIPT = args[0]

    long_opts = ["backends=", "format=", "help", "list-backends",
                 "check-backends"]
    long_opts += ["binary=", "target-type=", "target-name=", "probe-prefix="]

    try:
        opts, args = getopt.getopt(args[1:], "", long_opts)
    except getopt.GetoptError, err:
        error_opt(str(err))

    check_backends = False
    arg_backends = []
    arg_format = ""
    binary = None
    target_type = None
    target_name = None
    probe_prefix = None
    for opt, arg in opts:
        if opt == "--help":
            error_opt()

        elif opt == "--backends":
            arg_backends = arg.split(",")
        elif opt == "--format":
            arg_format = arg

        elif opt == "--list-backends":
            public_backends = tracetool.backend.get_list(only_public = True)
            out(", ".join([ b for b,_ in public_backends ]))
            sys.exit(0)
        elif opt == "--check-backends":
            check_backends = True

        elif opt == "--binary":
            binary = arg
        elif opt == '--target-type':
            target_type = arg
        elif opt == '--target-name':
            target_name = arg
        elif opt == '--probe-prefix':
            probe_prefix = arg

        else:
            error_opt("unhandled option: %s" % opt)

    if len(arg_backends) == 0:
        error_opt("no backends specified")

    if check_backends:
        for backend in arg_backends:
            if not tracetool.backend.exists(backend):
                sys.exit(1)
        sys.exit(0)

    if arg_format == "stap":
        if binary is None:
            error_opt("--binary is required for SystemTAP tapset generator")
        if probe_prefix is None and target_type is None:
            error_opt("--target-type is required for SystemTAP tapset generator")
        if probe_prefix is None and target_name is None:
            error_opt("--target-name is required for SystemTAP tapset generator")

        if probe_prefix is None:
            probe_prefix = ".".join(["qemu", target_type, target_name])

    try:
        tracetool.generate(sys.stdin, arg_format, arg_backends,
                           binary=binary, probe_prefix=probe_prefix)
    except tracetool.TracetoolError, e:
        error_opt(str(e))

if __name__ == "__main__":
    main(sys.argv)
gpl-2.0
-6,875,961,456,145,663,000
28.446043
81
0.550941
false
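The wrapper above is Python 2 only (note the comma-style except clauses) and reads the trace-events list on stdin while writing generated code to stdout; a sketch of driving it the way a build system would (the format and backend names are examples from QEMU's tree, not guaranteed here):

    import subprocess

    # equivalent to: python2 scripts/tracetool.py --format=h --backends=nop \
    #                < trace-events > trace.h
    with open('trace-events') as src, open('trace.h', 'w') as dst:
        subprocess.check_call(
            ['python2', 'scripts/tracetool.py', '--format=h', '--backends=nop'],
            stdin=src, stdout=dst)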
jostmey/rwa
length_problem_100/rwa_model/dataplumbing.py
5
1694
#!/usr/bin/env python3
##########################################################################################
# Author: Jared L. Ostmeyer
# Date Started: 2017-01-01
# Purpose: Load dataset and create interfaces for piping the data to the model
##########################################################################################

##########################################################################################
# Libraries
##########################################################################################

import numpy as np

##########################################################################################
# Class definitions
##########################################################################################

# Defines interface between the data and model
#
class Dataset:
    def __init__(self, xs, ls, ys):
        self.xs = xs    # Store the features
        self.ls = ls    # Store the length of each sequence
        self.ys = ys    # Store the labels
        self.num_samples = len(ys)
        self.num_features = len(xs[0,0,:])
        self.max_length = len(xs[0,:,0])
        self.num_classes = 1
    def batch(self, batch_size):
        js = np.random.randint(0, self.num_samples, batch_size)
        return self.xs[js,:,:], self.ls[js], self.ys[js]

##########################################################################################
# Import dataset
##########################################################################################

# Load data
#
import sys
sys.path.append('../dataset')
import input_data

# Create split of data
#
train = Dataset(input_data.xs_train, input_data.ls_train, input_data.ys_train)
test = Dataset(input_data.xs_test, input_data.ls_test, input_data.ys_test)
bsd-3-clause
1,537,229,458,021,423,400
35.042553
90
0.397285
false
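Because the module above imports the real dataset at load time, a standalone exercise of the Dataset wrapper needs stand-in arrays; with the class pasted in, the shapes below are illustrative and match the (samples, steps, features) layout it expects:

    import numpy as np

    num_samples, max_length, num_features = 32, 100, 1
    xs = np.random.rand(num_samples, max_length, num_features)
    ls = np.random.randint(1, max_length + 1, num_samples)  # true sequence lengths
    ys = np.random.randint(0, 2, num_samples).astype(np.float32)

    data = Dataset(xs, ls, ys)           # the class defined in the row above
    bx, bl, by = data.batch(8)           # samples drawn with replacement
    print(bx.shape, bl.shape, by.shape)  # (8, 100, 1) (8,) (8,)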
laribee/mochachino
node_modules/js-yaml/support/pyyaml-src/tokens.py
985
2573
class Token(object):
    def __init__(self, start_mark, end_mark):
        self.start_mark = start_mark
        self.end_mark = end_mark
    def __repr__(self):
        attributes = [key for key in self.__dict__
                      if not key.endswith('_mark')]
        attributes.sort()
        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
                               for key in attributes])
        return '%s(%s)' % (self.__class__.__name__, arguments)

#class BOMToken(Token):
#    id = '<byte order mark>'

class DirectiveToken(Token):
    id = '<directive>'
    def __init__(self, name, value, start_mark, end_mark):
        self.name = name
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class DocumentStartToken(Token):
    id = '<document start>'

class DocumentEndToken(Token):
    id = '<document end>'

class StreamStartToken(Token):
    id = '<stream start>'
    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.encoding = encoding

class StreamEndToken(Token):
    id = '<stream end>'

class BlockSequenceStartToken(Token):
    id = '<block sequence start>'

class BlockMappingStartToken(Token):
    id = '<block mapping start>'

class BlockEndToken(Token):
    id = '<block end>'

class FlowSequenceStartToken(Token):
    id = '['

class FlowMappingStartToken(Token):
    id = '{'

class FlowSequenceEndToken(Token):
    id = ']'

class FlowMappingEndToken(Token):
    id = '}'

class KeyToken(Token):
    id = '?'

class ValueToken(Token):
    id = ':'

class BlockEntryToken(Token):
    id = '-'

class FlowEntryToken(Token):
    id = ','

class AliasToken(Token):
    id = '<alias>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class AnchorToken(Token):
    id = '<anchor>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class TagToken(Token):
    id = '<tag>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class ScalarToken(Token):
    id = '<scalar>'
    def __init__(self, value, plain, start_mark, end_mark, style=None):
        self.value = value
        self.plain = plain
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style
mit
1,572,538,475,633,698,800
23.740385
71
0.593082
false
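The `__repr__` in the Token base class above hides every attribute ending in `_mark`, which keeps token dumps compact; a tiny demonstration with the classes from the row (marks stubbed with None, since nothing in Token itself dereferences them):

    tok = ScalarToken('hello', True, None, None, style=None)
    print(tok.id)     # <scalar>
    print(repr(tok))  # ScalarToken(plain=True, style=None, value='hello')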
ufcg-lsd/python-hpOneView
examples/scripts/get-providers.py
1
3951
#!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys

PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
    if PYTHON_VERSION < (2, 7, 9):
        raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
    raise Exception('Must use Python 3.4 or later')

import hpOneView as hpov
from pprint import pprint


def acceptEULA(con):
    # See if we need to accept the EULA before we try to log in
    con.get_eula_status()
    try:
        if con.get_eula_status() is True:
            print("EULA display needed")
            con.set_eula('no')
    except Exception as e:
        print('EXCEPTION:')
        print(e)


def login(con, credential):
    # Login with given credentials
    try:
        con.login(credential)
    except:
        print('Login failed')


def getproviders(fcs):
    ret = fcs.get_providers()
    pprint(ret)


def main():
    parser = argparse.ArgumentParser(add_help=True,
                                     formatter_class=argparse.RawTextHelpFormatter,
                                     description='''
    Display Providers

    Usage: ''')
    parser.add_argument('-a', dest='host', required=True,
                        help='''
    HP OneView Appliance hostname or IP address''')
    parser.add_argument('-u', dest='user', required=False,
                        default='Administrator',
                        help='''
    HP OneView Username''')
    parser.add_argument('-p', dest='passwd', required=True,
                        help='''
    HP OneView Password''')
    parser.add_argument('-c', dest='cert', required=False,
                        help='''
    Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
    parser.add_argument('-y', dest='proxy', required=False,
                        help='''
    Proxy (host:port format)''')
    parser.add_argument('-j', dest='domain', required=False,
                        default='Local',
                        help='''
    HP OneView Authorized Login Domain''')

    args = parser.parse_args()
    credential = {'authLoginDomain': args.domain.upper(),
                  'userName': args.user,
                  'password': args.passwd}

    con = hpov.connection(args.host)
    fcs = hpov.fcsans(con)

    if args.proxy:
        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
    if args.cert:
        con.set_trusted_ssl_bundle(args.cert)

    login(con, credential)
    acceptEULA(con)

    getproviders(fcs)

if __name__ == '__main__':
    import sys
    import argparse
    sys.exit(main())

# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
mit
-6,812,556,689,504,105,000
32.201681
105
0.648697
false
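Stripped of argument parsing, the script above reduces to a few hpOneView calls; a minimal sketch using only the API names the script itself relies on (host and credentials below are placeholders):

    import hpOneView as hpov

    con = hpov.connection('oneview.example.com')  # placeholder appliance host
    con.login({'authLoginDomain': 'LOCAL',
               'userName': 'Administrator',
               'password': 'secret'})             # placeholder credentials
    fcs = hpov.fcsans(con)
    print(fcs.get_providers())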
petrus-v/odoo
addons/payment_adyen/models/adyen.py
33
7845
# -*- coding: utf-8 -*-

import base64
try:
    import simplejson as json
except ImportError:
    import json
from hashlib import sha1
import hmac
import logging
import urlparse

from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round

_logger = logging.getLogger(__name__)


class AcquirerAdyen(osv.Model):
    _inherit = 'payment.acquirer'

    def _get_adyen_urls(self, cr, uid, environment, context=None):
        """ Adyen URLs
         - yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
        """
        return {
            'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment),
        }

    def _get_providers(self, cr, uid, context=None):
        providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
        providers.append(['adyen', 'Adyen'])
        return providers

    _columns = {
        'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen', groups='base.group_user'),
        'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen', groups='base.group_user'),
        'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen', groups='base.group_user'),
    }

    def _adyen_generate_merchant_sig(self, acquirer, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        :param browse acquirer: the payment.acquirer browse record. It should
                                have a shakey in and a shakey out
        :param string inout: 'in' (openerp contacting adyen) or 'out' (adyen
                             contacting openerp). In this last case only some
                             fields should be contained (see e-Commerce basic)
        :param dict values: transaction values

        :return string: shasign
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'adyen'

        if inout == 'in':
            keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
        else:
            keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()

        def get_value(key):
            if values.get(key):
                return values[key]
            return ''

        sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
        key = acquirer.adyen_skin_hmac_key.encode('ascii')
        return base64.b64encode(hmac.new(key, sign, sha1).digest())

    def adyen_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)

        # tmp
        import datetime
        from dateutil import relativedelta
        tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)

        adyen_tx_values = dict(tx_values)
        adyen_tx_values.update({
            'merchantReference': tx_values['reference'],
            'paymentAmount': '%d' % int(float_round(tx_values['amount'], 2) * 100),
            'currencyCode': tx_values['currency'] and tx_values['currency'].name or '',
            'shipBeforeDate': tmp_date,
            'skinCode': acquirer.adyen_skin_code,
            'merchantAccount': acquirer.adyen_merchant_account,
            'shopperLocale': partner_values['lang'],
            'sessionValidity': tmp_date,
            'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
        })
        if adyen_tx_values.get('return_url'):
            adyen_tx_values['merchantReturnData'] = json.dumps({'return_url': '%s' % adyen_tx_values.pop('return_url')})
        adyen_tx_values['merchantSig'] = self._adyen_generate_merchant_sig(acquirer, 'in', adyen_tx_values)

        return partner_values, adyen_tx_values

    def adyen_get_form_action_url(self, cr, uid, id, context=None):
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url']


class TxAdyen(osv.Model):
    _inherit = 'payment.transaction'

    _columns = {
        'adyen_psp_reference': fields.char('Adyen PSP Reference'),
    }

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
        reference, pspReference = data.get('merchantReference'), data.get('pspReference')
        if not reference or not pspReference:
            error_msg = 'Adyen: received data with missing reference (%s) or missing pspReference (%s)' % (reference, pspReference)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        # find tx -> @TDENOTE use pspReference ?
        tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Adyen: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple orders found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)

        # verify shasign
        shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
        if shasign_check != data.get('merchantSig'):
            error_msg = 'Adyen: invalid merchantSig, received %s, computed %s' % (data.get('merchantSig'), shasign_check)
            _logger.warning(error_msg)
            raise ValidationError(error_msg)

        return tx

    def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        invalid_parameters = []

        # reference at acquirer: pspReference
        if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
            invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
        # seller
        if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
            invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
        # result
        if not data.get('authResult'):
            invalid_parameters.append(('authResult', data.get('authResult'), 'something'))

        return invalid_parameters

    def _adyen_form_validate(self, cr, uid, tx, data, context=None):
        status = data.get('authResult', 'PENDING')
        if status == 'AUTHORISED':
            tx.write({
                'state': 'done',
                'adyen_psp_reference': data.get('pspReference'),
                # 'date_validate': data.get('payment_date', fields.datetime.now()),
                # 'paypal_txn_type': data.get('express_checkout')
            })
            return True
        elif status == 'PENDING':
            tx.write({
                'state': 'pending',
                'adyen_psp_reference': data.get('pspReference'),
            })
            return True
        else:
            error = 'Adyen: feedback error'
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error
            })
            return False
agpl-3.0
662,589,893,016,907,400
42.583333
289
0.607266
false
tvalacarta/tvalacarta
python/main-classic/lib/youtube_dl/extractor/expotv.py
64
2913
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unified_strdate,
)


class ExpoTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
    _TEST = {
        'url': 'http://www.expotv.com/videos/reviews/3/40/NYX-Butter-lipstick/667916',
        'md5': 'fe1d728c3a813ff78f595bc8b7a707a8',
        'info_dict': {
            'id': '667916',
            'ext': 'mp4',
            'title': 'NYX Butter Lipstick Little Susie',
            'description': 'Goes on like butter, but looks better!',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Stephanie S.',
            'upload_date': '20150520',
            'view_count': int,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        player_key = self._search_regex(
            r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
        config = self._download_json(
            'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key),
            video_id, 'Downloading video configuration')

        formats = []
        for fcfg in config['sources']:
            media_url = fcfg.get('file')
            if not media_url:
                continue
            if fcfg.get('type') == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    media_url, video_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls'))
            else:
                formats.append({
                    'url': media_url,
                    'height': int_or_none(fcfg.get('height')),
                    'format_id': fcfg.get('label'),
                    'ext': self._search_regex(
                        r'filename=.*\.([a-z0-9_A-Z]+)&', media_url,
                        'file extension', default=None) or fcfg.get('type'),
                })
        self._sort_formats(formats)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = config.get('image')
        view_count = int_or_none(self._search_regex(
            r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts'))
        uploader = self._search_regex(
            r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader',
            fatal=False)
        upload_date = unified_strdate(self._search_regex(
            r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date',
            fatal=False), day_first=False)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'view_count': view_count,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
        }
gpl-3.0
-5,408,609,859,273,183,000
36.831169
93
0.505664
false
NicolasDorier/bitcoin
qa/rpc-tests/p2p-acceptblock.py
10
11968
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.

Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).

Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test. We have one NodeConn connection to each, test_node and white_node
respectively.

The test:
1. Generate one block on each node, to leave IBD.

2. Mine a new block on each tip, and deliver to each node from node's peer.
   The tip should advance.

3. Mine a block that forks the previous block, and deliver to each node from
   corresponding peer.
   Node0 should not process this block (just accept the header), because it is
   unrequested and doesn't have more work than the tip.
   Node1 should process because this is coming from a whitelisted peer.

4. Send another block that builds on the forking block.
   Node0 should process this block but be stuck on the shorter chain, because
   it's missing an intermediate block.
   Node1 should reorg to this longer chain.

4b. Send 288 more blocks on the longer chain.
    Node0 should process all but the last block (too far ahead in height).
    Send all headers to Node1, and then send the last block in that chain.
    Node1 should accept the block because it's coming from a whitelisted peer.

5. Send a duplicate of the block in #3 to Node0.
   Node0 should not process the block because it is unrequested, and stay on
   the shorter chain.

6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that Node0 has the missing height 2 block and send a
   getdata.

7. Send Node0 the missing block again.
   Node0 should process and the tip should advance.
"""

from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase

# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()

    def add_connection(self, conn):
        self.connection = conn

    # Track the last getdata message we receive (used in the test)
    def on_getdata(self, conn, message):
        self.last_getdata = message

    # Spin until verack message is received from the node.
    # We use this to signal that our test can begin. This
    # is called from the testing thread, so it needs to acquire
    # the global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    # Sync up with the node after delivery of a block
    def sync_with_ping(self, timeout=30):
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        received_pong = False
        sleep_time = 0.05
        while not received_pong and timeout > 0:
            time.sleep(sleep_time)
            timeout -= sleep_time
            with mininode_lock:
                if self.last_pong.nonce == self.ping_counter:
                    received_pong = True
        self.ping_counter += 1
        return received_pong

class AcceptBlockTest(BitcoinTestFramework):
    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to test")

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 2

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir,
                                     binary=self.options.testbinary))
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-whitelist=127.0.0.1"],
                                     binary=self.options.testbinary))

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        self.log.info("First height 2 block accepted by both nodes")

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        self.log.info("Second height 2 block accepted only from whitelisted peer")

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        self.nodes[0].getblock(blocks_h3[0].hash)
        self.log.info("Unrequested more-work block accepted from non-whitelisted peer")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        self.log.info("Successfully reorged to length 3 chain from whitelisted peer")

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0. Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []  # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
                next_block.solve()
                if j == 0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

        headers_message.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        white_node.sync_with_ping()
        self.nodes[1].getblock(tips[1].hash)
        self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")

        # 5. Test handling of unrequested block on the node that didn't process.
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

        [ c.disconnect_node() for c in connections ]

if __name__ == '__main__':
    AcceptBlockTest().main()
mit
-4,566,284,284,909,739,500
42.205776
107
0.64004
false
jalexvig/tensorflow
tensorflow/python/training/basic_session_run_hooks_test.py
14
58484
# pylint: disable=g-bad-file-header # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for basic_session_run_hooks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path import shutil import tempfile import threading import time from tensorflow.contrib.framework.python.framework import checkpoint_utils from tensorflow.contrib.framework.python.ops import variables from tensorflow.contrib.testing.python.framework import fake_summary_writer from tensorflow.python.client import session as session_lib from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.python.summary import summary as summary_lib from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import monitored_session from tensorflow.python.training import session_run_hook from tensorflow.python.training import training_util class MockCheckpointSaverListener( basic_session_run_hooks.CheckpointSaverListener): def __init__(self): self.begin_count = 0 self.before_save_count = 0 self.after_save_count = 0 self.end_count = 0 self.ask_for_stop = False def begin(self): self.begin_count += 1 def before_save(self, session, global_step): self.before_save_count += 1 def after_save(self, session, global_step): self.after_save_count += 1 if self.ask_for_stop: return True def end(self, session, global_step): self.end_count += 1 def get_counts(self): return { 'begin': self.begin_count, 'before_save': self.before_save_count, 'after_save': self.after_save_count, 'end': self.end_count } class SecondOrStepTimerTest(test.TestCase): def test_raise_in_both_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.SecondOrStepTimer(every_secs=2.0, every_steps=10) def test_raise_in_none_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.SecondOrStepTimer() def test_every_secs(self): timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=1.0) self.assertTrue(timer.should_trigger_for_step(1)) timer.update_last_triggered_step(1) self.assertFalse(timer.should_trigger_for_step(1)) 
self.assertFalse(timer.should_trigger_for_step(2)) time.sleep(1.0) self.assertFalse(timer.should_trigger_for_step(1)) self.assertTrue(timer.should_trigger_for_step(2)) def test_every_steps(self): timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=3) self.assertTrue(timer.should_trigger_for_step(1)) timer.update_last_triggered_step(1) self.assertFalse(timer.should_trigger_for_step(1)) self.assertFalse(timer.should_trigger_for_step(2)) self.assertFalse(timer.should_trigger_for_step(3)) self.assertTrue(timer.should_trigger_for_step(4)) def test_update_last_triggered_step(self): timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=1) elapsed_secs, elapsed_steps = timer.update_last_triggered_step(1) self.assertEqual(None, elapsed_secs) self.assertEqual(None, elapsed_steps) elapsed_secs, elapsed_steps = timer.update_last_triggered_step(5) self.assertLess(0, elapsed_secs) self.assertEqual(4, elapsed_steps) elapsed_secs, elapsed_steps = timer.update_last_triggered_step(7) self.assertLess(0, elapsed_secs) self.assertEqual(2, elapsed_steps) class StopAtStepTest(test.TestCase): def test_raise_in_both_last_step_and_num_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.StopAtStepHook(num_steps=10, last_step=20) def test_stop_based_on_last_step(self): h = basic_session_run_hooks.StopAtStepHook(last_step=10) with ops.Graph().as_default(): global_step = variables.get_or_create_global_step() no_op = control_flow_ops.no_op() h.begin() with session_lib.Session() as sess: mon_sess = monitored_session._HookedSession(sess, [h]) sess.run(state_ops.assign(global_step, 5)) h.after_create_session(sess, None) mon_sess.run(no_op) self.assertFalse(mon_sess.should_stop()) sess.run(state_ops.assign(global_step, 9)) mon_sess.run(no_op) self.assertFalse(mon_sess.should_stop()) sess.run(state_ops.assign(global_step, 10)) mon_sess.run(no_op) self.assertTrue(mon_sess.should_stop()) sess.run(state_ops.assign(global_step, 11)) mon_sess._should_stop = False mon_sess.run(no_op) self.assertTrue(mon_sess.should_stop()) def test_stop_based_on_num_step(self): h = basic_session_run_hooks.StopAtStepHook(num_steps=10) with ops.Graph().as_default(): global_step = variables.get_or_create_global_step() no_op = control_flow_ops.no_op() h.begin() with session_lib.Session() as sess: mon_sess = monitored_session._HookedSession(sess, [h]) sess.run(state_ops.assign(global_step, 5)) h.after_create_session(sess, None) mon_sess.run(no_op) self.assertFalse(mon_sess.should_stop()) sess.run(state_ops.assign(global_step, 13)) mon_sess.run(no_op) self.assertFalse(mon_sess.should_stop()) sess.run(state_ops.assign(global_step, 14)) mon_sess.run(no_op) self.assertFalse(mon_sess.should_stop()) sess.run(state_ops.assign(global_step, 15)) mon_sess.run(no_op) self.assertTrue(mon_sess.should_stop()) sess.run(state_ops.assign(global_step, 16)) mon_sess._should_stop = False mon_sess.run(no_op) self.assertTrue(mon_sess.should_stop()) def test_stop_based_with_multiple_steps(self): h = basic_session_run_hooks.StopAtStepHook(num_steps=10) with ops.Graph().as_default(): global_step = variables.get_or_create_global_step() no_op = control_flow_ops.no_op() h.begin() with session_lib.Session() as sess: mon_sess = monitored_session._HookedSession(sess, [h]) sess.run(state_ops.assign(global_step, 5)) h.after_create_session(sess, None) mon_sess.run(no_op) self.assertFalse(mon_sess.should_stop()) sess.run(state_ops.assign(global_step, 15)) mon_sess.run(no_op) self.assertTrue(mon_sess.should_stop()) class 
LoggingTensorHookTest(test.TestCase): def setUp(self): # Mock out logging calls so we can verify whether correct tensors are being # monitored. self._actual_log = tf_logging.info self.logged_message = None def mock_log(*args, **kwargs): self.logged_message = args self._actual_log(*args, **kwargs) tf_logging.info = mock_log def tearDown(self): tf_logging.info = self._actual_log def test_illegal_args(self): with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'): basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=0) with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'): basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=-10) with self.assertRaisesRegexp(ValueError, 'xactly one of'): basic_session_run_hooks.LoggingTensorHook( tensors=['t'], every_n_iter=5, every_n_secs=5) with self.assertRaisesRegexp(ValueError, 'xactly one of'): basic_session_run_hooks.LoggingTensorHook(tensors=['t']) def test_print_at_end_only(self): with ops.Graph().as_default(), session_lib.Session() as sess: t = constant_op.constant(42.0, name='foo') train_op = constant_op.constant(3) hook = basic_session_run_hooks.LoggingTensorHook( tensors=[t.name], at_end=True) hook.begin() mon_sess = monitored_session._HookedSession(sess, [hook]) sess.run(variables_lib.global_variables_initializer()) self.logged_message = '' for _ in range(3): mon_sess.run(train_op) # assertNotRegexpMatches is not supported by python 3.1 and later self.assertEqual(str(self.logged_message).find(t.name), -1) hook.end(sess) self.assertRegexpMatches(str(self.logged_message), t.name) def _validate_print_every_n_steps(self, sess, at_end): t = constant_op.constant(42.0, name='foo') train_op = constant_op.constant(3) hook = basic_session_run_hooks.LoggingTensorHook( tensors=[t.name], every_n_iter=10, at_end=at_end) hook.begin() mon_sess = monitored_session._HookedSession(sess, [hook]) sess.run(variables_lib.global_variables_initializer()) mon_sess.run(train_op) self.assertRegexpMatches(str(self.logged_message), t.name) for _ in range(3): self.logged_message = '' for _ in range(9): mon_sess.run(train_op) # assertNotRegexpMatches is not supported by python 3.1 and later self.assertEqual(str(self.logged_message).find(t.name), -1) mon_sess.run(train_op) self.assertRegexpMatches(str(self.logged_message), t.name) # Add additional run to verify proper reset when called multiple times. self.logged_message = '' mon_sess.run(train_op) # assertNotRegexpMatches is not supported by python 3.1 and later self.assertEqual(str(self.logged_message).find(t.name), -1) self.logged_message = '' hook.end(sess) if at_end: self.assertRegexpMatches(str(self.logged_message), t.name) else: # assertNotRegexpMatches is not supported by python 3.1 and later self.assertEqual(str(self.logged_message).find(t.name), -1) def test_print_every_n_steps(self): with ops.Graph().as_default(), session_lib.Session() as sess: self._validate_print_every_n_steps(sess, at_end=False) # Verify proper reset. self._validate_print_every_n_steps(sess, at_end=False) def test_print_every_n_steps_and_end(self): with ops.Graph().as_default(), session_lib.Session() as sess: self._validate_print_every_n_steps(sess, at_end=True) # Verify proper reset. self._validate_print_every_n_steps(sess, at_end=True) def test_print_first_step(self): # if it runs every iteration, first iteration has None duration. 
with ops.Graph().as_default(), session_lib.Session() as sess: t = constant_op.constant(42.0, name='foo') train_op = constant_op.constant(3) hook = basic_session_run_hooks.LoggingTensorHook( tensors={'foo': t}, every_n_iter=1) hook.begin() mon_sess = monitored_session._HookedSession(sess, [hook]) sess.run(variables_lib.global_variables_initializer()) mon_sess.run(train_op) self.assertRegexpMatches(str(self.logged_message), 'foo') # in first run, elapsed time is None. self.assertEqual(str(self.logged_message).find('sec'), -1) def _validate_print_every_n_secs(self, sess, at_end): t = constant_op.constant(42.0, name='foo') train_op = constant_op.constant(3) hook = basic_session_run_hooks.LoggingTensorHook( tensors=[t.name], every_n_secs=1.0, at_end=at_end) hook.begin() mon_sess = monitored_session._HookedSession(sess, [hook]) sess.run(variables_lib.global_variables_initializer()) mon_sess.run(train_op) self.assertRegexpMatches(str(self.logged_message), t.name) # assertNotRegexpMatches is not supported by python 3.1 and later self.logged_message = '' mon_sess.run(train_op) self.assertEqual(str(self.logged_message).find(t.name), -1) time.sleep(1.0) self.logged_message = '' mon_sess.run(train_op) self.assertRegexpMatches(str(self.logged_message), t.name) self.logged_message = '' hook.end(sess) if at_end: self.assertRegexpMatches(str(self.logged_message), t.name) else: # assertNotRegexpMatches is not supported by python 3.1 and later self.assertEqual(str(self.logged_message).find(t.name), -1) def test_print_every_n_secs(self): with ops.Graph().as_default(), session_lib.Session() as sess: self._validate_print_every_n_secs(sess, at_end=False) # Verify proper reset. self._validate_print_every_n_secs(sess, at_end=False) def test_print_every_n_secs_and_end(self): with ops.Graph().as_default(), session_lib.Session() as sess: self._validate_print_every_n_secs(sess, at_end=True) # Verify proper reset. 
self._validate_print_every_n_secs(sess, at_end=True) def test_print_formatter(self): with ops.Graph().as_default(), session_lib.Session() as sess: t = constant_op.constant(42.0, name='foo') train_op = constant_op.constant(3) hook = basic_session_run_hooks.LoggingTensorHook( tensors=[t.name], every_n_iter=10, formatter=lambda items: 'qqq=%s' % items[t.name]) hook.begin() mon_sess = monitored_session._HookedSession(sess, [hook]) sess.run(variables_lib.global_variables_initializer()) mon_sess.run(train_op) self.assertEqual(self.logged_message[0], 'qqq=42.0') class CheckpointSaverHookTest(test.TestCase): def setUp(self): self.model_dir = tempfile.mkdtemp() self.graph = ops.Graph() with self.graph.as_default(): self.scaffold = monitored_session.Scaffold() self.global_step = variables.get_or_create_global_step() self.train_op = training_util._increment_global_step(1) def tearDown(self): shutil.rmtree(self.model_dir, ignore_errors=True) def test_saves_when_saver_and_scaffold_both_missing(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=1) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) self.assertEqual(1, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) def test_raise_when_saver_and_scaffold_both_present(self): with self.assertRaises(ValueError): basic_session_run_hooks.CheckpointSaverHook( self.model_dir, saver=self.scaffold.saver, scaffold=self.scaffold) def test_raise_in_both_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_secs=10, save_steps=20) def test_raise_in_none_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.CheckpointSaverHook(self.model_dir) def test_save_secs_saves_in_first_step(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_secs=2, scaffold=self.scaffold) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) self.assertEqual(1, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) def test_save_secs_calls_listeners_at_begin_and_end(self): with self.graph.as_default(): listener = MockCheckpointSaverListener() hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_secs=2, scaffold=self.scaffold, listeners=[listener]) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) # hook runs here mon_sess.run(self.train_op) # hook won't run here, so it does at end hook.end(sess) # hook runs here self.assertEqual({ 'begin': 1, 'before_save': 2, 'after_save': 2, 'end': 1 }, listener.get_counts()) def test_listener_with_monitored_session(self): with ops.Graph().as_default(): scaffold = monitored_session.Scaffold() global_step = variables.get_or_create_global_step() train_op = training_util._increment_global_step(1) listener = MockCheckpointSaverListener() hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=1, scaffold=scaffold, listeners=[listener]) with monitored_session.SingularMonitoredSession( hooks=[hook], scaffold=scaffold, checkpoint_dir=self.model_dir) as 
sess: sess.run(train_op) sess.run(train_op) global_step_val = sess.raw_session().run(global_step) listener_counts = listener.get_counts() self.assertEqual(2, global_step_val) self.assertEqual({ 'begin': 1, 'before_save': 3, 'after_save': 3, 'end': 1 }, listener_counts) def test_listener_stops_training_in_after_save(self): with ops.Graph().as_default(): scaffold = monitored_session.Scaffold() variables.get_or_create_global_step() train_op = training_util._increment_global_step(1) listener = MockCheckpointSaverListener() hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=1, scaffold=scaffold, listeners=[listener]) with monitored_session.SingularMonitoredSession( hooks=[hook], scaffold=scaffold, checkpoint_dir=self.model_dir) as sess: sess.run(train_op) self.assertFalse(sess.should_stop()) sess.run(train_op) self.assertFalse(sess.should_stop()) listener.ask_for_stop = True sess.run(train_op) self.assertTrue(sess.should_stop()) def test_listener_with_default_saver(self): with ops.Graph().as_default(): global_step = variables.get_or_create_global_step() train_op = training_util._increment_global_step(1) listener = MockCheckpointSaverListener() hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=1, listeners=[listener]) with monitored_session.SingularMonitoredSession( hooks=[hook], checkpoint_dir=self.model_dir) as sess: sess.run(train_op) sess.run(train_op) global_step_val = sess.raw_session().run(global_step) listener_counts = listener.get_counts() self.assertEqual(2, global_step_val) self.assertEqual({ 'begin': 1, 'before_save': 3, 'after_save': 3, 'end': 1 }, listener_counts) with ops.Graph().as_default(): global_step = variables.get_or_create_global_step() with monitored_session.SingularMonitoredSession( checkpoint_dir=self.model_dir) as sess2: global_step_saved_val = sess2.run(global_step) self.assertEqual(2, global_step_saved_val) def test_two_listeners_with_default_saver(self): with ops.Graph().as_default(): global_step = variables.get_or_create_global_step() train_op = training_util._increment_global_step(1) listener1 = MockCheckpointSaverListener() listener2 = MockCheckpointSaverListener() hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=1, listeners=[listener1, listener2]) with monitored_session.SingularMonitoredSession( hooks=[hook], checkpoint_dir=self.model_dir) as sess: sess.run(train_op) sess.run(train_op) global_step_val = sess.raw_session().run(global_step) listener1_counts = listener1.get_counts() listener2_counts = listener2.get_counts() self.assertEqual(2, global_step_val) self.assertEqual({ 'begin': 1, 'before_save': 3, 'after_save': 3, 'end': 1 }, listener1_counts) self.assertEqual(listener1_counts, listener2_counts) with ops.Graph().as_default(): global_step = variables.get_or_create_global_step() with monitored_session.SingularMonitoredSession( checkpoint_dir=self.model_dir) as sess2: global_step_saved_val = sess2.run(global_step) self.assertEqual(2, global_step_saved_val) @test.mock.patch.object(time, 'time') def test_save_secs_saves_periodically(self, mock_time): # Let's have a realistic start time current_time = 1484695987.209386 with self.graph.as_default(): mock_time.return_value = current_time hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_secs=2, scaffold=self.scaffold) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mock_time.return_value = 
current_time mon_sess.run(self.train_op) # Saved. mock_time.return_value = current_time + 0.5 mon_sess.run(self.train_op) # Not saved. self.assertEqual(1, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) # Simulate 2.5 seconds of sleep. mock_time.return_value = current_time + 2.5 mon_sess.run(self.train_op) # Saved. mock_time.return_value = current_time + 2.6 mon_sess.run(self.train_op) # Not saved. mock_time.return_value = current_time + 2.7 mon_sess.run(self.train_op) # Not saved. self.assertEqual(3, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) # Simulate 7.5 more seconds of sleep (10 seconds from start. mock_time.return_value = current_time + 10 mon_sess.run(self.train_op) # Saved. self.assertEqual(6, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) @test.mock.patch.object(time, 'time') def test_save_secs_calls_listeners_periodically(self, mock_time): # Let's have a realistic start time current_time = 1484695987.209386 with self.graph.as_default(): mock_time.return_value = current_time listener = MockCheckpointSaverListener() hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_secs=2, scaffold=self.scaffold, listeners=[listener]) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mock_time.return_value = current_time + 0.5 mon_sess.run(self.train_op) # hook runs here mock_time.return_value = current_time + 0.5 mon_sess.run(self.train_op) mock_time.return_value = current_time + 3.0 mon_sess.run(self.train_op) # hook runs here mock_time.return_value = current_time + 3.5 mon_sess.run(self.train_op) mock_time.return_value = current_time + 4.0 mon_sess.run(self.train_op) mock_time.return_value = current_time + 6.5 mon_sess.run(self.train_op) # hook runs here mock_time.return_value = current_time + 7.0 mon_sess.run(self.train_op) # hook won't run here, so it does at end mock_time.return_value = current_time + 7.5 hook.end(sess) # hook runs here self.assertEqual({ 'begin': 1, 'before_save': 4, 'after_save': 4, 'end': 1 }, listener.get_counts()) def test_save_steps_saves_in_first_step(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=2, scaffold=self.scaffold) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) self.assertEqual(1, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) def test_save_steps_saves_periodically(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=2, scaffold=self.scaffold) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) mon_sess.run(self.train_op) # Not saved self.assertEqual(1, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # saved self.assertEqual(3, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # Not saved self.assertEqual(3, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # saved self.assertEqual(5, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) def 
test_save_saves_at_end(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_secs=2, scaffold=self.scaffold) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) mon_sess.run(self.train_op) hook.end(sess) self.assertEqual(2, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) def test_summary_writer_defs(self): fake_summary_writer.FakeSummaryWriter.install() writer_cache.FileWriterCache.clear() summary_writer = writer_cache.FileWriterCache.get(self.model_dir) with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=2, scaffold=self.scaffold) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) hook.after_create_session(sess, None) mon_sess.run(self.train_op) summary_writer.assert_summaries( test_case=self, expected_logdir=self.model_dir, expected_added_meta_graphs=[ meta_graph.create_meta_graph_def( graph_def=self.graph.as_graph_def(add_shapes=True), saver_def=self.scaffold.saver.saver_def) ]) fake_summary_writer.FakeSummaryWriter.uninstall() def test_save_checkpoint_before_first_train_step(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=2, scaffold=self.scaffold) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: mon_sess = monitored_session._HookedSession(sess, [hook]) sess.run(self.scaffold.init_op) hook.after_create_session(sess, None) # Verifies that checkpoint is saved at step 0. self.assertEqual(0, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) # Verifies that no checkpoint is saved after one training step. mon_sess.run(self.train_op) self.assertEqual(0, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) # Verifies that checkpoint is saved after save_steps. 
mon_sess.run(self.train_op) self.assertEqual(2, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) class CheckpointSaverHookMultiStepTest(test.TestCase): def setUp(self): self.model_dir = tempfile.mkdtemp() self.graph = ops.Graph() self.steps_per_run = 5 with self.graph.as_default(): self.scaffold = monitored_session.Scaffold() self.global_step = variables.get_or_create_global_step() self.train_op = training_util._increment_global_step(self.steps_per_run) def tearDown(self): shutil.rmtree(self.model_dir, ignore_errors=True) def test_save_steps_saves_in_first_step(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=2*self.steps_per_run, scaffold=self.scaffold) hook._set_steps_per_run(self.steps_per_run) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) self.assertEqual(5, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) def test_save_steps_saves_periodically(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=2*self.steps_per_run, scaffold=self.scaffold) hook._set_steps_per_run(self.steps_per_run) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) # Saved (step=5) self.assertEqual(5, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # Not saved (step=10) self.assertEqual(5, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # Saved (step=15) self.assertEqual(15, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # Not saved (step=20) self.assertEqual(15, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # Saved (step=25) self.assertEqual(25, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) def test_save_steps_saves_at_end(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=2*self.steps_per_run, scaffold=self.scaffold) hook._set_steps_per_run(self.steps_per_run) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) mon_sess.run(self.train_op) hook.end(sess) self.assertEqual(10, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) class ResourceCheckpointSaverHookTest(test.TestCase): def setUp(self): self.model_dir = tempfile.mkdtemp() self.graph = ops.Graph() with self.graph.as_default(): self.scaffold = monitored_session.Scaffold() with variable_scope.variable_scope('foo', use_resource=True): self.global_step = training_util.get_or_create_global_step() self.train_op = training_util._increment_global_step(1) def test_save_steps_saves_periodically(self): with self.graph.as_default(): hook = basic_session_run_hooks.CheckpointSaverHook( self.model_dir, save_steps=2, scaffold=self.scaffold) hook.begin() self.scaffold.finalize() with session_lib.Session() as sess: sess.run(self.scaffold.init_op) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(self.train_op) mon_sess.run(self.train_op) # Not saved 
self.assertEqual(1, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # saved self.assertEqual(3, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # Not saved self.assertEqual(3, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) mon_sess.run(self.train_op) # saved self.assertEqual(5, checkpoint_utils.load_variable(self.model_dir, self.global_step.name)) class StepCounterHookTest(test.TestCase): def setUp(self): self.log_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.log_dir, ignore_errors=True) def test_step_counter_every_n_steps(self): with ops.Graph().as_default() as g, session_lib.Session() as sess: variables.get_or_create_global_step() train_op = training_util._increment_global_step(1) summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g) hook = basic_session_run_hooks.StepCounterHook( summary_writer=summary_writer, every_n_steps=10) hook.begin() sess.run(variables_lib.global_variables_initializer()) mon_sess = monitored_session._HookedSession(sess, [hook]) with test.mock.patch.object(tf_logging, 'warning') as mock_log: for _ in range(30): time.sleep(0.01) mon_sess.run(train_op) # logging.warning should not be called. self.assertIsNone(mock_log.call_args) hook.end(sess) summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_graph=g, expected_summaries={}) self.assertItemsEqual([11, 21], summary_writer.summaries.keys()) for step in [11, 21]: summary_value = summary_writer.summaries[step][0].value[0] self.assertEqual('global_step/sec', summary_value.tag) self.assertGreater(summary_value.simple_value, 0) def test_step_counter_every_n_secs(self): with ops.Graph().as_default() as g, session_lib.Session() as sess: variables.get_or_create_global_step() train_op = training_util._increment_global_step(1) summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g) hook = basic_session_run_hooks.StepCounterHook( summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1) hook.begin() sess.run(variables_lib.global_variables_initializer()) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(train_op) time.sleep(0.2) mon_sess.run(train_op) time.sleep(0.2) mon_sess.run(train_op) hook.end(sess) summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_graph=g, expected_summaries={}) self.assertTrue(summary_writer.summaries, 'No summaries were created.') self.assertItemsEqual([2, 3], summary_writer.summaries.keys()) for summary in summary_writer.summaries.values(): summary_value = summary[0].value[0] self.assertEqual('global_step/sec', summary_value.tag) self.assertGreater(summary_value.simple_value, 0) def test_global_step_name(self): with ops.Graph().as_default() as g, session_lib.Session() as sess: with variable_scope.variable_scope('bar'): variable_scope.get_variable( 'foo', initializer=0, trainable=False, collections=[ ops.GraphKeys.GLOBAL_STEP, ops.GraphKeys.GLOBAL_VARIABLES ]) train_op = training_util._increment_global_step(1) summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g) hook = basic_session_run_hooks.StepCounterHook( summary_writer=summary_writer, every_n_steps=1, every_n_secs=None) hook.begin() sess.run(variables_lib.global_variables_initializer()) mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(train_op) mon_sess.run(train_op) hook.end(sess) summary_writer.assert_summaries( test_case=self, 
expected_logdir=self.log_dir, expected_graph=g, expected_summaries={}) self.assertTrue(summary_writer.summaries, 'No summaries were created.') self.assertItemsEqual([2], summary_writer.summaries.keys()) summary_value = summary_writer.summaries[2][0].value[0] self.assertEqual('bar/foo/sec', summary_value.tag) def test_log_warning_if_global_step_not_increased(self): with ops.Graph().as_default(), session_lib.Session() as sess: variables.get_or_create_global_step() train_op = training_util._increment_global_step(0) # keep same. sess.run(variables_lib.global_variables_initializer()) hook = basic_session_run_hooks.StepCounterHook( every_n_steps=1, every_n_secs=None) hook.begin() mon_sess = monitored_session._HookedSession(sess, [hook]) mon_sess.run(train_op) # Run one step to record global step. with test.mock.patch.object(tf_logging, 'warning') as mock_log: for _ in range(30): mon_sess.run(train_op) self.assertRegexpMatches( str(mock_log.call_args), 'global step.*has not been increased') hook.end(sess) def _setup_steps_per_run_test(self, every_n_steps, steps_per_run, graph, sess): variables.get_or_create_global_step() self.train_op = training_util._increment_global_step(steps_per_run) self.summary_writer = fake_summary_writer.FakeSummaryWriter( self.log_dir, graph) self.hook = basic_session_run_hooks.StepCounterHook( summary_writer=self.summary_writer, every_n_steps=every_n_steps) self.hook._set_steps_per_run(steps_per_run) self.hook.begin() sess.run(variables_lib.global_variables_initializer()) self.mon_sess = monitored_session._HookedSession(sess, [self.hook]) def test_steps_per_run_less_than_every_n_steps(self): with ops.Graph().as_default() as g, session_lib.Session() as sess: self._setup_steps_per_run_test(10, 5, g, sess) # Logs at 15, 25 for _ in range(5): time.sleep(0.01) self.mon_sess.run(self.train_op) self.hook.end(sess) self.summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_graph=g, expected_summaries={}) self.assertItemsEqual([15, 25], self.summary_writer.summaries.keys()) for step in [15, 25]: summary_value = self.summary_writer.summaries[step][0].value[0] self.assertEqual('global_step/sec', summary_value.tag) self.assertGreater(summary_value.simple_value, 0) def test_steps_per_run_equal_every_n_steps(self): with ops.Graph().as_default() as g, session_lib.Session() as sess: self._setup_steps_per_run_test(5, 5, g, sess) # Logs at 10, 15, 20, 25 for _ in range(5): time.sleep(0.01) self.mon_sess.run(self.train_op) self.hook.end(sess) self.summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_graph=g, expected_summaries={}) self.assertItemsEqual([10, 15, 20, 25], self.summary_writer.summaries.keys()) for step in [10, 15, 20, 25]: summary_value = self.summary_writer.summaries[step][0].value[0] self.assertEqual('global_step/sec', summary_value.tag) self.assertGreater(summary_value.simple_value, 0) def test_steps_per_run_greater_than_every_n_steps(self): with ops.Graph().as_default() as g, session_lib.Session() as sess: self._setup_steps_per_run_test(5, 10, g, sess) # Logs at 20, 30, 40, 50 for _ in range(5): time.sleep(0.01) self.mon_sess.run(self.train_op) self.hook.end(sess) self.summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_graph=g, expected_summaries={}) self.assertItemsEqual([20, 30, 40, 50], self.summary_writer.summaries.keys()) for step in [20, 30, 40, 50]: summary_value = self.summary_writer.summaries[step][0].value[0] self.assertEqual('global_step/sec', 
summary_value.tag) self.assertGreater(summary_value.simple_value, 0) class SummarySaverHookTest(test.TestCase): def setUp(self): test.TestCase.setUp(self) self.log_dir = 'log/dir' self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir) var = variables_lib.Variable(0.0) tensor = state_ops.assign_add(var, 1.0) tensor2 = tensor * 2 self.summary_op = summary_lib.scalar('my_summary', tensor) self.summary_op2 = summary_lib.scalar('my_summary2', tensor2) variables.get_or_create_global_step() self.train_op = training_util._increment_global_step(1) def test_raise_when_scaffold_and_summary_op_both_missing(self): with self.assertRaises(ValueError): basic_session_run_hooks.SummarySaverHook() def test_raise_when_scaffold_and_summary_op_both_present(self): with self.assertRaises(ValueError): basic_session_run_hooks.SummarySaverHook( scaffold=monitored_session.Scaffold(), summary_op=self.summary_op) def test_raise_in_both_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.SummarySaverHook( save_secs=10, save_steps=20, summary_writer=self.summary_writer) def test_raise_in_none_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.SummarySaverHook( save_secs=None, save_steps=None, summary_writer=self.summary_writer) def test_save_steps(self): hook = basic_session_run_hooks.SummarySaverHook( save_steps=8, summary_writer=self.summary_writer, summary_op=self.summary_op) with self.test_session() as sess: hook.begin() sess.run(variables_lib.global_variables_initializer()) mon_sess = monitored_session._HookedSession(sess, [hook]) for _ in range(30): mon_sess.run(self.train_op) hook.end(sess) self.summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_summaries={ 1: { 'my_summary': 1.0 }, 9: { 'my_summary': 2.0 }, 17: { 'my_summary': 3.0 }, 25: { 'my_summary': 4.0 }, }) def test_multiple_summaries(self): hook = basic_session_run_hooks.SummarySaverHook( save_steps=8, summary_writer=self.summary_writer, summary_op=[self.summary_op, self.summary_op2]) with self.test_session() as sess: hook.begin() sess.run(variables_lib.global_variables_initializer()) mon_sess = monitored_session._HookedSession(sess, [hook]) for _ in range(10): mon_sess.run(self.train_op) hook.end(sess) self.summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_summaries={ 1: { 'my_summary': 1.0, 'my_summary2': 2.0 }, 9: { 'my_summary': 2.0, 'my_summary2': 4.0 }, }) def test_save_secs_saving_once_every_step(self): hook = basic_session_run_hooks.SummarySaverHook( save_secs=0.5, summary_writer=self.summary_writer, summary_op=self.summary_op) with self.test_session() as sess: hook.begin() sess.run(variables_lib.global_variables_initializer()) mon_sess = monitored_session._HookedSession(sess, [hook]) for _ in range(4): mon_sess.run(self.train_op) time.sleep(0.5) hook.end(sess) self.summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_summaries={ 1: { 'my_summary': 1.0 }, 2: { 'my_summary': 2.0 }, 3: { 'my_summary': 3.0 }, 4: { 'my_summary': 4.0 }, }) @test.mock.patch.object(time, 'time') def test_save_secs_saving_once_every_three_steps(self, mock_time): mock_time.return_value = 1484695987.209386 hook = basic_session_run_hooks.SummarySaverHook( save_secs=9., summary_writer=self.summary_writer, summary_op=self.summary_op) with self.test_session() as sess: hook.begin() sess.run(variables_lib.global_variables_initializer()) mon_sess = monitored_session._HookedSession(sess, [hook]) 
for _ in range(8): mon_sess.run(self.train_op) mock_time.return_value += 3.1 hook.end(sess) # 24.8 seconds passed (3.1*8), it saves every 9 seconds starting from first: self.summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_summaries={ 1: { 'my_summary': 1.0 }, 4: { 'my_summary': 2.0 }, 7: { 'my_summary': 3.0 }, }) class GlobalStepWaiterHookTest(test.TestCase): def test_not_wait_for_step_zero(self): with ops.Graph().as_default(): variables.get_or_create_global_step() hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=0) hook.begin() with session_lib.Session() as sess: # Before run should return without waiting gstep increment. hook.before_run( session_run_hook.SessionRunContext( original_args=None, session=sess)) def test_wait_for_step(self): with ops.Graph().as_default(): gstep = variables.get_or_create_global_step() hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=1000) hook.begin() with session_lib.Session() as sess: sess.run(variables_lib.global_variables_initializer()) waiter = threading.Thread( target=hook.before_run, args=(session_run_hook.SessionRunContext( original_args=None, session=sess),)) waiter.daemon = True waiter.start() time.sleep(1.0) self.assertTrue(waiter.is_alive()) sess.run(state_ops.assign(gstep, 500)) time.sleep(1.0) self.assertTrue(waiter.is_alive()) sess.run(state_ops.assign(gstep, 1100)) time.sleep(1.2) self.assertFalse(waiter.is_alive()) class FinalOpsHookTest(test.TestCase): def test_final_ops_is_scalar_tensor(self): with ops.Graph().as_default(): expected_value = 4 final_ops = constant_op.constant(expected_value) hook = basic_session_run_hooks.FinalOpsHook(final_ops) hook.begin() with session_lib.Session() as session: hook.end(session) self.assertEqual(expected_value, hook.final_ops_values) def test_final_ops_is_tensor(self): with ops.Graph().as_default(): expected_values = [1, 6, 3, 5, 2, 4] final_ops = constant_op.constant(expected_values) hook = basic_session_run_hooks.FinalOpsHook(final_ops) hook.begin() with session_lib.Session() as session: hook.end(session) self.assertListEqual(expected_values, hook.final_ops_values.tolist()) def test_final_ops_triggers_out_of_range_error(self): with ops.Graph().as_default(): dataset = dataset_ops.Dataset.range(1) iterator = dataset.make_one_shot_iterator() read_ops = iterator.get_next() final_ops = read_ops hook = basic_session_run_hooks.FinalOpsHook(final_ops) hook.begin() with session_lib.Session() as session: session.run(read_ops) with test.mock.patch.object(tf_logging, 'warning') as mock_log: with self.assertRaisesRegexp(errors.OutOfRangeError, 'End of sequence'): hook.end(session) self.assertRegexpMatches( str(mock_log.call_args), 'dependency back to some input source') def test_final_ops_with_dictionary(self): with ops.Graph().as_default(): expected_values = [4, -3] final_ops = array_ops.placeholder(dtype=dtypes.float32) final_ops_feed_dict = {final_ops: expected_values} hook = basic_session_run_hooks.FinalOpsHook( final_ops, final_ops_feed_dict) hook.begin() with session_lib.Session() as session: hook.end(session) self.assertListEqual(expected_values, hook.final_ops_values.tolist()) class ResourceSummarySaverHookTest(test.TestCase): def setUp(self): test.TestCase.setUp(self) self.log_dir = 'log/dir' self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir) var = variable_scope.get_variable('var', initializer=0.0, use_resource=True) tensor = state_ops.assign_add(var, 1.0) self.summary_op = summary_lib.scalar('my_summary', 
tensor) with variable_scope.variable_scope('foo', use_resource=True): variables.create_global_step() self.train_op = training_util._increment_global_step(1) def test_save_steps(self): hook = basic_session_run_hooks.SummarySaverHook( save_steps=8, summary_writer=self.summary_writer, summary_op=self.summary_op) with self.test_session() as sess: hook.begin() sess.run(variables_lib.global_variables_initializer()) mon_sess = monitored_session._HookedSession(sess, [hook]) for _ in range(30): mon_sess.run(self.train_op) hook.end(sess) self.summary_writer.assert_summaries( test_case=self, expected_logdir=self.log_dir, expected_summaries={ 1: { 'my_summary': 1.0 }, 9: { 'my_summary': 2.0 }, 17: { 'my_summary': 3.0 }, 25: { 'my_summary': 4.0 }, }) class FeedFnHookTest(test.TestCase): def test_feeding_placeholder(self): with ops.Graph().as_default(), session_lib.Session() as sess: x = array_ops.placeholder(dtype=dtypes.float32) y = x + 1 hook = basic_session_run_hooks.FeedFnHook( feed_fn=lambda: {x: 1.0}) hook.begin() mon_sess = monitored_session._HookedSession(sess, [hook]) self.assertEqual(mon_sess.run(y), 2) class ProfilerHookTest(test.TestCase): def setUp(self): super(ProfilerHookTest, self).setUp() self.output_dir = tempfile.mkdtemp() self.graph = ops.Graph() self.filepattern = os.path.join(self.output_dir, 'timeline-*.json') with self.graph.as_default(): self.global_step = variables.get_or_create_global_step() self.train_op = state_ops.assign_add(self.global_step, 1) def tearDown(self): super(ProfilerHookTest, self).tearDown() shutil.rmtree(self.output_dir, ignore_errors=True) def _count_timeline_files(self): return len(gfile.Glob(self.filepattern)) def test_raise_in_both_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.ProfilerHook(save_secs=10, save_steps=20) def test_raise_in_none_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.ProfilerHook(save_secs=None, save_steps=None) def test_save_secs_saves_in_first_step(self): with self.graph.as_default(): hook = basic_session_run_hooks.ProfilerHook( save_secs=2, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: sess.run(self.train_op) self.assertEqual(1, self._count_timeline_files()) @test.mock.patch.object(time, 'time') def test_save_secs_saves_periodically(self, mock_time): # Pick a fixed start time. current_time = 1484863632.320497 with self.graph.as_default(): mock_time.return_value = current_time hook = basic_session_run_hooks.ProfilerHook( save_secs=2, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: sess.run(self.train_op) # Saved. self.assertEqual(1, self._count_timeline_files()) sess.run(self.train_op) # Not saved. self.assertEqual(1, self._count_timeline_files()) # Simulate 2.5 seconds of sleep. mock_time.return_value = current_time + 2.5 sess.run(self.train_op) # Saved. # Pretend some small amount of time has passed. mock_time.return_value = current_time + 0.1 sess.run(self.train_op) # Not saved. # Edge test just before we should save the timeline. mock_time.return_value = current_time + 1.9 sess.run(self.train_op) # Not saved. self.assertEqual(2, self._count_timeline_files()) mock_time.return_value = current_time + 4.5 sess.run(self.train_op) # Saved. 
self.assertEqual(3, self._count_timeline_files()) def test_save_steps_saves_in_first_step(self): with self.graph.as_default(): hook = basic_session_run_hooks.ProfilerHook( save_secs=2, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: sess.run(self.train_op) # Saved. sess.run(self.train_op) # Not saved. self.assertEqual(1, self._count_timeline_files()) def test_save_steps_saves_periodically(self): with self.graph.as_default(): hook = basic_session_run_hooks.ProfilerHook( save_steps=2, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: self.assertEqual(0, self._count_timeline_files()) sess.run(self.train_op) # Saved. self.assertEqual(1, self._count_timeline_files()) sess.run(self.train_op) # Not saved. self.assertEqual(1, self._count_timeline_files()) sess.run(self.train_op) # Saved. self.assertEqual(2, self._count_timeline_files()) sess.run(self.train_op) # Not saved. self.assertEqual(2, self._count_timeline_files()) sess.run(self.train_op) # Saved. self.assertEqual(3, self._count_timeline_files()) def test_run_metadata_saves_in_first_step(self): writer_cache.FileWriterCache.clear() fake_summary_writer.FakeSummaryWriter.install() fake_writer = writer_cache.FileWriterCache.get(self.output_dir) with self.graph.as_default(): hook = basic_session_run_hooks.ProfilerHook( save_secs=2, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: sess.run(self.train_op) # Saved. self.assertEqual( list(fake_writer._added_run_metadata.keys()), ['step_1']) fake_summary_writer.FakeSummaryWriter.uninstall() if __name__ == '__main__': test.main()
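# Illustrative sketch (not part of the original test file; the helper name
# below is hypothetical): the steps asserted above (1, 9, 17, 25 for
# save_steps=8 over 30 steps) follow a "save on the first step, then every
# N steps" cadence. A minimal pure-Python model of that trigger logic:
def _should_trigger(step, save_steps):
    # 1-based step counter; step 1 triggers, then steps 1 + k*save_steps.
    return (step - 1) % save_steps == 0

assert [s for s in range(1, 31) if _should_trigger(s, 8)] == [1, 9, 17, 25]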
apache-2.0
-8,755,424,843,415,826,000
37.075521
80
0.626599
false
babbage/zulip
zerver/management/commands/rename_stream.py
115
1189
from __future__ import absolute_import, print_function

from django.core.management.base import BaseCommand

from zerver.lib.actions import do_rename_stream
from zerver.models import Realm, get_realm

import sys

class Command(BaseCommand):
    help = """Change the stream name for a realm."""

    def add_arguments(self, parser):
        parser.add_argument('domain', metavar='<domain>', type=str,
                            help="domain to operate on")
        parser.add_argument('old_name', metavar='<old name>', type=str,
                            help='name of stream to be renamed')
        parser.add_argument('new_name', metavar='<new name>', type=str,
                            help='new name to rename the stream to')

    def handle(self, *args, **options):
        domain = options['domain']
        old_name = options['old_name']
        new_name = options['new_name']
        encoding = sys.getfilesystemencoding()

        try:
            realm = get_realm(domain)
        except Realm.DoesNotExist:
            print("Unknown domain %s" % (domain,))
            sys.exit(1)

        do_rename_stream(realm, old_name.decode(encoding),
                         new_name.decode(encoding))
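# Illustrative usage (not part of the original file; the domain and stream
# names are hypothetical): like any Django management command, this is run
# through manage.py, with positionals matching add_arguments() above:
#
#     python manage.py rename_stream example.com "old stream" "new stream"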
apache-2.0
-3,184,361,532,363,996,700
33.970588
71
0.591253
false
elkingtonmcb/pattern
pattern/server/cherrypy/cherrypy/lib/caching.py
37
17046
""" CherryPy implements a simple caching system as a pluggable Tool. This tool tries to be an (in-process) HTTP/1.1-compliant cache. It's not quite there yet, but it's probably good enough for most sites. In general, GET responses are cached (along with selecting headers) and, if another request arrives for the same resource, the caching Tool will return 304 Not Modified if possible, or serve the cached response otherwise. It also sets request.cached to True if serving a cached representation, and sets request.cacheable to False (so it doesn't get cached again). If POST, PUT, or DELETE requests are made for a cached resource, they invalidate (delete) any cached response. Usage ===== Configuration file example:: [/] tools.caching.on = True tools.caching.delay = 3600 You may use a class other than the default :class:`MemoryCache<cherrypy.lib.caching.MemoryCache>` by supplying the config entry ``cache_class``; supply the full dotted name of the replacement class as the config value. It must implement the basic methods ``get``, ``put``, ``delete``, and ``clear``. You may set any attribute, including overriding methods, on the cache instance by providing them in config. The above sets the :attr:`delay<cherrypy.lib.caching.MemoryCache.delay>` attribute, for example. """ import datetime import sys import threading import time import cherrypy from cherrypy.lib import cptools, httputil from cherrypy._cpcompat import copyitems, ntob, set_daemon, sorted, Event class Cache(object): """Base class for Cache implementations.""" def get(self): """Return the current variant if in the cache, else None.""" raise NotImplemented def put(self, obj, size): """Store the current variant in the cache.""" raise NotImplemented def delete(self): """Remove ALL cached variants of the current resource.""" raise NotImplemented def clear(self): """Reset the cache to its initial, empty state.""" raise NotImplemented # ------------------------------- Memory Cache ------------------------------- # class AntiStampedeCache(dict): """A storage system for cached items which reduces stampede collisions.""" def wait(self, key, timeout=5, debug=False): """Return the cached value for the given key, or None. If timeout is not None, and the value is already being calculated by another thread, wait until the given timeout has elapsed. If the value is available before the timeout expires, it is returned. If not, None is returned, and a sentinel placed in the cache to signal other threads to wait. If timeout is None, no waiting is performed nor sentinels used. """ value = self.get(key) if isinstance(value, Event): if timeout is None: # Ignore the other thread and recalc it ourselves. if debug: cherrypy.log('No timeout', 'TOOLS.CACHING') return None # Wait until it's done or times out. if debug: cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING') value.wait(timeout) if value.result is not None: # The other thread finished its calculation. Use it. if debug: cherrypy.log('Result!', 'TOOLS.CACHING') return value.result # Timed out. Stick an Event in the slot so other threads wait # on this one to finish calculating the value. if debug: cherrypy.log('Timed out', 'TOOLS.CACHING') e = threading.Event() e.result = None dict.__setitem__(self, key, e) return None elif value is None: # Stick an Event in the slot so other threads wait # on this one to finish calculating the value. 
if debug: cherrypy.log('Timed out', 'TOOLS.CACHING') e = threading.Event() e.result = None dict.__setitem__(self, key, e) return value def __setitem__(self, key, value): """Set the cached value for the given key.""" existing = self.get(key) dict.__setitem__(self, key, value) if isinstance(existing, Event): # Set Event.result so other threads waiting on it have # immediate access without needing to poll the cache again. existing.result = value existing.set() class MemoryCache(Cache): """An in-memory cache for varying response content. Each key in self.store is a URI, and each value is an AntiStampedeCache. The response for any given URI may vary based on the values of "selecting request headers"; that is, those named in the Vary response header. We assume the list of header names to be constant for each URI throughout the lifetime of the application, and store that list in ``self.store[uri].selecting_headers``. The items contained in ``self.store[uri]`` have keys which are tuples of request header values (in the same order as the names in its selecting_headers), and values which are the actual responses. """ maxobjects = 1000 """The maximum number of cached objects; defaults to 1000.""" maxobj_size = 100000 """The maximum size of each cached object in bytes; defaults to 100 KB.""" maxsize = 10000000 """The maximum size of the entire cache in bytes; defaults to 10 MB.""" delay = 600 """Seconds until the cached content expires; defaults to 600 (10 minutes).""" antistampede_timeout = 5 """Seconds to wait for other threads to release a cache lock.""" expire_freq = 0.1 """Seconds to sleep between cache expiration sweeps.""" debug = False def __init__(self): self.clear() # Run self.expire_cache in a separate daemon thread. t = threading.Thread(target=self.expire_cache, name='expire_cache') self.expiration_thread = t set_daemon(t, True) t.start() def clear(self): """Reset the cache to its initial, empty state.""" self.store = {} self.expirations = {} self.tot_puts = 0 self.tot_gets = 0 self.tot_hist = 0 self.tot_expires = 0 self.tot_non_modified = 0 self.cursize = 0 def expire_cache(self): """Continuously examine cached objects, expiring stale ones. This function is designed to be run in its own daemon thread, referenced at ``self.expiration_thread``. """ # It's possible that "time" will be set to None # arbitrarily, so we check "while time" to avoid exceptions. # See tickets #99 and #180 for more information. 
while time: now = time.time() # Must make a copy of expirations so it doesn't change size # during iteration for expiration_time, objects in copyitems(self.expirations): if expiration_time <= now: for obj_size, uri, sel_header_values in objects: try: del self.store[uri][tuple(sel_header_values)] self.tot_expires += 1 self.cursize -= obj_size except KeyError: # the key may have been deleted elsewhere pass del self.expirations[expiration_time] time.sleep(self.expire_freq) def get(self): """Return the current variant if in the cache, else None.""" request = cherrypy.serving.request self.tot_gets += 1 uri = cherrypy.url(qs=request.query_string) uricache = self.store.get(uri) if uricache is None: return None header_values = [request.headers.get(h, '') for h in uricache.selecting_headers] variant = uricache.wait(key=tuple(sorted(header_values)), timeout=self.antistampede_timeout, debug=self.debug) if variant is not None: self.tot_hist += 1 return variant def put(self, variant, size): """Store the current variant in the cache.""" request = cherrypy.serving.request response = cherrypy.serving.response uri = cherrypy.url(qs=request.query_string) uricache = self.store.get(uri) if uricache is None: uricache = AntiStampedeCache() uricache.selecting_headers = [ e.value for e in response.headers.elements('Vary')] self.store[uri] = uricache if len(self.store) < self.maxobjects: total_size = self.cursize + size # checks if there's space for the object if (size < self.maxobj_size and total_size < self.maxsize): # add to the expirations list expiration_time = response.time + self.delay bucket = self.expirations.setdefault(expiration_time, []) bucket.append((size, uri, uricache.selecting_headers)) # add to the cache header_values = [request.headers.get(h, '') for h in uricache.selecting_headers] uricache[tuple(sorted(header_values))] = variant self.tot_puts += 1 self.cursize = total_size def delete(self): """Remove ALL cached variants of the current resource.""" uri = cherrypy.url(qs=cherrypy.serving.request.query_string) self.store.pop(uri, None) def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs): """Try to obtain cached output. If fresh enough, raise HTTPError(304). If POST, PUT, or DELETE: * invalidates (deletes) any cached response for this resource * sets request.cached = False * sets request.cacheable = False else if a cached copy exists: * sets request.cached = True * sets request.cacheable = False * sets response.headers to the cached values * checks the cached Last-Modified response header against the current If-(Un)Modified-Since request headers; raises 304 if necessary. * sets response.status and response.body to the cached values * returns True otherwise: * sets request.cached = False * sets request.cacheable = True * returns False """ request = cherrypy.serving.request response = cherrypy.serving.response if not hasattr(cherrypy, "_cache"): # Make a process-wide Cache object. cherrypy._cache = kwargs.pop("cache_class", MemoryCache)() # Take all remaining kwargs and set them on the Cache object. for k, v in kwargs.items(): setattr(cherrypy._cache, k, v) cherrypy._cache.debug = debug # POST, PUT, DELETE should invalidate (delete) the cached copy. # See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10. 
if request.method in invalid_methods: if debug: cherrypy.log('request.method %r in invalid_methods %r' % (request.method, invalid_methods), 'TOOLS.CACHING') cherrypy._cache.delete() request.cached = False request.cacheable = False return False if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]: request.cached = False request.cacheable = True return False cache_data = cherrypy._cache.get() request.cached = bool(cache_data) request.cacheable = not request.cached if request.cached: # Serve the cached copy. max_age = cherrypy._cache.delay for v in [e.value for e in request.headers.elements('Cache-Control')]: atoms = v.split('=', 1) directive = atoms.pop(0) if directive == 'max-age': if len(atoms) != 1 or not atoms[0].isdigit(): raise cherrypy.HTTPError(400, "Invalid Cache-Control header") max_age = int(atoms[0]) break elif directive == 'no-cache': if debug: cherrypy.log('Ignoring cache due to Cache-Control: no-cache', 'TOOLS.CACHING') request.cached = False request.cacheable = True return False if debug: cherrypy.log('Reading response from cache', 'TOOLS.CACHING') s, h, b, create_time = cache_data age = int(response.time - create_time) if (age > max_age): if debug: cherrypy.log('Ignoring cache due to age > %d' % max_age, 'TOOLS.CACHING') request.cached = False request.cacheable = True return False # Copy the response headers. See http://www.cherrypy.org/ticket/721. response.headers = rh = httputil.HeaderMap() for k in h: dict.__setitem__(rh, k, dict.__getitem__(h, k)) # Add the required Age header response.headers["Age"] = str(age) try: # Note that validate_since depends on a Last-Modified header; # this was put into the cached copy, and should have been # resurrected just above (response.headers = cache_data[1]). cptools.validate_since() except cherrypy.HTTPRedirect: x = sys.exc_info()[1] if x.status == 304: cherrypy._cache.tot_non_modified += 1 raise # serve it & get out from the request response.status = s response.body = b else: if debug: cherrypy.log('request is not cached', 'TOOLS.CACHING') return request.cached def tee_output(): """Tee response output to cache storage. Internal.""" # Used by CachingTool by attaching to request.hooks request = cherrypy.serving.request if 'no-store' in request.headers.values('Cache-Control'): return def tee(body): """Tee response.body into a list.""" if ('no-cache' in response.headers.values('Pragma') or 'no-store' in response.headers.values('Cache-Control')): for chunk in body: yield chunk return output = [] for chunk in body: output.append(chunk) yield chunk # save the cache data body = ntob('').join(output) cherrypy._cache.put((response.status, response.headers or {}, body, response.time), len(body)) response = cherrypy.serving.response response.body = tee(response.body) def expires(secs=0, force=False, debug=False): """Tool for influencing cache mechanisms using the 'Expires' header. secs Must be either an int or a datetime.timedelta, and indicates the number of seconds between response.time and when the response should expire. The 'Expires' header will be set to response.time + secs. If secs is zero, the 'Expires' header is set one year in the past, and the following "cache prevention" headers are also set: * Pragma: no-cache * Cache-Control': no-cache, must-revalidate force If False, the following headers are checked: * Etag * Last-Modified * Age * Expires If any are already present, none of the above response headers are set. 
""" response = cherrypy.serving.response headers = response.headers cacheable = False if not force: # some header names that indicate that the response can be cached for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'): if indicator in headers: cacheable = True break if not cacheable and not force: if debug: cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES') else: if debug: cherrypy.log('request is cacheable', 'TOOLS.EXPIRES') if isinstance(secs, datetime.timedelta): secs = (86400 * secs.days) + secs.seconds if secs == 0: if force or ("Pragma" not in headers): headers["Pragma"] = "no-cache" if cherrypy.serving.request.protocol >= (1, 1): if force or "Cache-Control" not in headers: headers["Cache-Control"] = "no-cache, must-revalidate" # Set an explicit Expires date in the past. expiry = httputil.HTTPDate(1169942400.0) else: expiry = httputil.HTTPDate(response.time + secs) if force or "Expires" not in headers: headers["Expires"] = expiry
bsd-3-clause
4,876,507,412,008,284,000
35.658065
83
0.594509
false
wbyne/QGIS
tests/src/python/test_qgsrelationmanager.py
5
4936
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsRelationManager. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Nyall Dawson' __date__ = '17/05/2016' __copyright__ = 'Copyright 2016, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis # NOQA from qgis.core import (QgsVectorLayer, QgsRelation, QgsRelationManager, QgsMapLayerRegistry ) from qgis.testing import start_app, unittest start_app() def createReferencingLayer(): layer = QgsVectorLayer("Point?field=fldtxt:string&field=foreignkey:integer", "referencinglayer", "memory") return layer def createReferencedLayer(): layer = QgsVectorLayer( "Point?field=x:string&field=y:integer&field=z:integer", "referencedlayer", "memory") return layer class TestQgsRelationManager(unittest.TestCase): def setUp(self): self.referencedLayer = createReferencedLayer() self.referencingLayer = createReferencingLayer() QgsMapLayerRegistry.instance().addMapLayers([self.referencedLayer, self.referencingLayer]) def tearDown(self): QgsMapLayerRegistry.instance().removeAllMapLayers() def createRelation(self): rel = QgsRelation() rel.setReferencingLayer(self.referencingLayer.id()) rel.setReferencedLayer(self.referencedLayer.id()) rel.addFieldPair('foreignkey', 'y') return rel def test_addRelation(self): """ test adding relations to a manager """ manager = QgsRelationManager() relations = manager.relations() self.assertEqual(len(relations), 0) rel = self.createRelation() rel.setRelationId('rel1') rel.setRelationName('Relation Number One') assert rel.isValid() manager.addRelation(rel) relations = manager.relations() self.assertEqual(len(relations), 1) self.assertEqual(relations['rel1'].id(), 'rel1') rel = self.createRelation() rel.setRelationId('rel2') rel.setRelationName('Relation Number Two') assert rel.isValid() manager.addRelation(rel) relations = manager.relations() self.assertEqual(len(relations), 2) ids = [r.id() for r in list(relations.values())] self.assertEqual(set(ids), set(['rel1', 'rel2'])) def test_relationById(self): """ test retrieving relation by id""" manager = QgsRelationManager() rel = manager.relation('does not exist') self.assertFalse(rel.isValid()) # add two relations rel = self.createRelation() rel.setRelationId('rel1') rel.setRelationName('Relation Number One') assert rel.isValid() manager.addRelation(rel) rel = self.createRelation() rel.setRelationId('rel2') rel.setRelationName('Relation Number Two') assert rel.isValid() manager.addRelation(rel) rel = manager.relation('does not exist') self.assertFalse(rel.isValid()) rel = manager.relation('rel1') self.assertEqual(rel.id(), 'rel1') rel = manager.relation('rel2') self.assertEqual(rel.id(), 'rel2') def test_relationByName(self): """ test retrieving relations by name""" manager = QgsRelationManager() rels = manager.relationsByName('does not exist') self.assertEqual(rels, []) # add some relations rel = self.createRelation() rel.setRelationId('rel1') rel.setRelationName('my relation') assert rel.isValid() manager.addRelation(rel) rel = self.createRelation() rel.setRelationId('rel2') rel.setRelationName('dupe name') assert rel.isValid() manager.addRelation(rel) rel = self.createRelation() rel.setRelationId('rel3') rel.setRelationName('dupe name') assert rel.isValid() manager.addRelation(rel) rels = 
manager.relationsByName('does not exist') self.assertEqual(rels, []) rels = manager.relationsByName('my relation') ids = [r.id() for r in rels] self.assertEqual(set(ids), set(['rel1'])) # case insensitive rels = manager.relationsByName('My RelAtion') ids = [r.id() for r in rels] self.assertEqual(set(ids), set(['rel1'])) # multiple results rels = manager.relationsByName('dupe name') ids = [r.id() for r in rels] self.assertEqual(set(ids), set(['rel2', 'rel3'])) if __name__ == '__main__': unittest.main()
gpl-2.0
1,281,265,277,639,509,800
30.240506
98
0.622366
false
googleapis/googleapis-gen
google/cloud/dialogflow/cx/v3/dialogflow-cx-v3-py/google/cloud/dialogflowcx_v3/services/security_settings_service/transports/__init__.py
2
1275
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import SecuritySettingsServiceTransport
from .grpc import SecuritySettingsServiceGrpcTransport
from .grpc_asyncio import SecuritySettingsServiceGrpcAsyncIOTransport

# Compile a registry of transports.
_transport_registry = OrderedDict()  # type: Dict[str, Type[SecuritySettingsServiceTransport]]
_transport_registry['grpc'] = SecuritySettingsServiceGrpcTransport
_transport_registry['grpc_asyncio'] = SecuritySettingsServiceGrpcAsyncIOTransport

__all__ = (
    'SecuritySettingsServiceTransport',
    'SecuritySettingsServiceGrpcTransport',
    'SecuritySettingsServiceGrpcAsyncIOTransport',
)
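# Illustrative sketch (not part of the original file): client code typically
# resolves a transport class from this registry by its string key, e.g.
#
#     transport_cls = _transport_registry['grpc']
#     # transport = transport_cls(...)  # constructor arguments omitted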
apache-2.0
-3,903,571,294,625,227,300
37.636364
94
0.791373
false
camptocamp/QGIS
python/plugins/processing/taudem/peukerdouglas.py
1
4055
# -*- coding: utf-8 -*-

"""
***************************************************************************
    peukerdouglas.py
    ---------------------
    Date                 : October 2012
    Copyright            : (C) 2012 by Alexander Bruy
    Email                : alexander dot bruy at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

import os

from PyQt4.QtGui import *

from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import *
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterNumber import ParameterNumber
from processing.outputs.OutputRaster import OutputRaster
from processing.taudem.TauDEMUtils import TauDEMUtils


class PeukerDouglas(GeoAlgorithm):

    ELEVATION_GRID = "ELEVATION_GRID"
    CENTER_WEIGHT = "CENTER_WEIGHT"
    SIDE_WEIGHT = "SIDE_WEIGHT"
    DIAGONAL_WEIGHT = "DIAGONAL_WEIGHT"
    STREAM_SOURCE_GRID = "STREAM_SOURCE_GRID"

    def getIcon(self):
        return QIcon(os.path.dirname(__file__) + "/../images/taudem.png")

    def defineCharacteristics(self):
        self.name = "Peuker Douglas"
        self.cmdName = "peukerdouglas"
        self.group = "Stream Network Analysis tools"

        self.addParameter(ParameterRaster(self.ELEVATION_GRID,
                          "Elevation Grid", False))
        self.addParameter(ParameterNumber(self.CENTER_WEIGHT,
                          "Center Smoothing Weight", 0, None, 0.4))
        self.addParameter(ParameterNumber(self.SIDE_WEIGHT,
                          "Side Smoothing Weight", 0, None, 0.1))
        self.addParameter(ParameterNumber(self.DIAGONAL_WEIGHT,
                          "Diagonal Smoothing Weight", 0, None, 0.05))
        self.addOutput(OutputRaster(self.STREAM_SOURCE_GRID,
                       "Stream Source Grid"))

    def processAlgorithm(self, progress):
        commands = []
        commands.append(os.path.join(TauDEMUtils.mpiexecPath(), "mpiexec"))

        processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
        if processNum <= 0:
            raise GeoAlgorithmExecutionException("Wrong number of MPI processes used.\nPlease set correct number before running TauDEM algorithms.")

        commands.append("-n")
        commands.append(str(processNum))
        commands.append(os.path.join(TauDEMUtils.taudemPath(), self.cmdName))
        commands.append("-fel")
        commands.append(self.getParameterValue(self.ELEVATION_GRID))
        commands.append("-par")
        commands.append(str(self.getParameterValue(self.CENTER_WEIGHT)))
        commands.append(str(self.getParameterValue(self.SIDE_WEIGHT)))
        commands.append(str(self.getParameterValue(self.DIAGONAL_WEIGHT)))
        commands.append("-ss")
        commands.append(self.getOutputValue(self.STREAM_SOURCE_GRID))

        loglines = []
        loglines.append("TauDEM execution command")
        for line in commands:
            loglines.append(line)
        ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)

        TauDEMUtils.executeTauDEM(commands, progress)

    #def helpFile(self):
    #    return os.path.join(os.path.dirname(__file__), "help", self.cmdName + ".html")
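# Illustrative sketch (not part of the original file; file names are
# hypothetical): processAlgorithm() above assembles a command line of roughly
# this shape before passing it to TauDEMUtils.executeTauDEM():
#
#     mpiexec -n <processNum> peukerdouglas -fel elevation.tif \
#         -par <center> <side> <diagonal> -ss stream_source.tif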
gpl-2.0
-1,432,047,378,496,479,200
41.239583
146
0.62762
false
youprofit/servo
tests/wpt/web-platform-tests/2dcontext/tools/gentest.py
132
29897
# Copyright (c) 2010 Philip Taylor # Released under the BSD license and W3C Test Suite License: see LICENSE.txt # Current code status: # # This was originally written for use at # http://philip.html5.org/tests/canvas/suite/tests/ # # It has been adapted for use with the Web Platform Test Suite suite at # https://github.com/w3c/web-platform-tests/ # # The W3C version excludes a number of features (multiple versions of each test # case of varying verbosity, Mozilla mochitests, semi-automated test harness) # to focus on simply providing reviewable test cases. It also expects a different # directory structure. # This code attempts to support both versions, but the non-W3C version hasn't # been tested recently and is probably broken. # To update or add test cases: # # * Modify the tests*.yaml files. # 'name' is an arbitrary hierarchical name to help categorise tests. # 'desc' is a rough description of what behaviour the test aims to test. # 'testing' is a list of references to spec.yaml, to show which spec sentences # this test case is primarily testing. # 'code' is JavaScript code to execute, with some special commands starting with '@' # 'expected' is what the final canvas output should be: a string 'green' or 'clear' # (100x50 images in both cases), or a string 'size 100 50' (or any other size) # followed by Python code using Pycairo to generate the image. # # * Run "python gentest.py". # This requires a few Python modules which might not be ubiquitous. # It has only been tested on Linux. # It will usually emit some warnings, which ideally should be fixed but can # generally be safely ignored. # # * Test the tests, add new ones to Git, remove deleted ones from Git, etc. import re import codecs import time import os import shutil import sys import xml.dom.minidom from xml.dom.minidom import Node import cairo try: import syck as yaml # compatible and lots faster except ImportError: import yaml # Default mode is for the W3C test suite; the --standalone option # generates various extra files that aren't needed there W3CMODE = True if '--standalone' in sys.argv: W3CMODE = False TESTOUTPUTDIR = '../../2dcontext' IMAGEOUTPUTDIR = '../../2dcontext' MISCOUTPUTDIR = './output' SPECOUTPUTDIR = '../../annotated-spec' SPECOUTPUTPATH = '../annotated-spec' # relative to TESTOUTPUTDIR def simpleEscapeJS(str): return str.replace('\\', '\\\\').replace('"', '\\"') def escapeJS(str): str = simpleEscapeJS(str) str = re.sub(r'\[(\w+)\]', r'[\\""+(\1)+"\\"]', str) # kind of an ugly hack, for nicer failure-message output return str def escapeHTML(str): return str.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;') def expand_nonfinite(method, argstr, tail): """ >>> print expand_nonfinite('f', '<0 a>, <0 b>', ';') f(a, 0); f(0, b); f(a, b); >>> print expand_nonfinite('f', '<0 a>, <0 b c>, <0 d>', ';') f(a, 0, 0); f(0, b, 0); f(0, c, 0); f(0, 0, d); f(a, b, 0); f(a, b, d); f(a, 0, d); f(0, b, d); """ # argstr is "<valid-1 invalid1-1 invalid2-1 ...>, ..." (where usually # 'invalid' is Infinity/-Infinity/NaN) args = [] for arg in argstr.split(', '): a = re.match('<(.*)>', arg).group(1) args.append(a.split(' ')) calls = [] # Start with the valid argument list call = [ args[j][0] for j in range(len(args)) ] # For each argument alone, try setting it to all its invalid values: for i in range(len(args)): for a in args[i][1:]: c2 = call[:] c2[i] = a calls.append(c2) # For all combinations of >= 2 arguments, try setting them to their # first invalid values. 
(Don't do all invalid values, because the # number of combinations explodes.) def f(c, start, depth): for i in range(start, len(args)): if len(args[i]) > 1: a = args[i][1] c2 = c[:] c2[i] = a if depth > 0: calls.append(c2) f(c2, i+1, depth+1) f(call, 0, 0) return '\n'.join('%s(%s)%s' % (method, ', '.join(c), tail) for c in calls) # Run with --test argument to run unit tests if len(sys.argv) > 1 and sys.argv[1] == '--test': import doctest doctest.testmod() sys.exit() templates = yaml.load(open('templates.yaml').read()) name_mapping = yaml.load(open('name2dir.yaml').read()) spec_assertions = [] for s in yaml.load(open('spec.yaml').read())['assertions']: if 'meta' in s: eval(compile(s['meta'], '<meta spec assertion>', 'exec'), {}, {'assertions':spec_assertions}) else: spec_assertions.append(s) tests = [] for t in sum([ yaml.load(open(f).read()) for f in ['tests.yaml', 'tests2d.yaml', 'tests2dtext.yaml']], []): if 'DISABLED' in t: continue if 'meta' in t: eval(compile(t['meta'], '<meta test>', 'exec'), {}, {'tests':tests}) else: tests.append(t) category_names = [] category_contents_direct = {} category_contents_all = {} spec_ids = {} for t in spec_assertions: spec_ids[t['id']] = True spec_refs = {} def backref_html(name): backrefs = [] c = '' for p in name.split('.')[:-1]: c += '.'+p backrefs.append('<a href="index%s.html">%s</a>.' % (c, p)) backrefs.append(name.split('.')[-1]) return ''.join(backrefs) def make_flat_image(filename, w, h, r,g,b,a): if os.path.exists('%s/%s' % (IMAGEOUTPUTDIR, filename)): return filename surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h) cr = cairo.Context(surface) cr.set_source_rgba(r, g, b, a) cr.rectangle(0, 0, w, h) cr.fill() surface.write_to_png('%s/%s' % (IMAGEOUTPUTDIR, filename)) return filename # Ensure the test output directories exist testdirs = [TESTOUTPUTDIR, IMAGEOUTPUTDIR, MISCOUTPUTDIR] if not W3CMODE: testdirs.append('%s/mochitests' % MISCOUTPUTDIR) else: for map_dir in set(name_mapping.values()): testdirs.append("%s/%s" % (TESTOUTPUTDIR, map_dir)) for d in testdirs: try: os.mkdir(d) except: pass # ignore if it already exists mochitests = [] used_images = {} def expand_test_code(code): code = re.sub(r'@nonfinite ([^(]+)\(([^)]+)\)(.*)', lambda m: expand_nonfinite(m.group(1), m.group(2), m.group(3)), code) # must come before '@assert throws' code = re.sub(r'@assert pixel (\d+,\d+) == (\d+,\d+,\d+,\d+);', r'_assertPixel(canvas, \1, \2, "\1", "\2");', code) code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+);', r'_assertPixelApprox(canvas, \1, \2, "\1", "\2", 2);', code) code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+) \+/- (\d+);', r'_assertPixelApprox(canvas, \1, \2, "\1", "\2", \3);', code) code = re.sub(r'@assert throws (\S+_ERR) (.*);', r'assert_throws("\1", function() { \2; });', code) code = re.sub(r'@assert throws (\S+Error) (.*);', r'assert_throws(new \1(), function() { \2; });', code) code = re.sub(r'@assert throws (.*);', r'assert_throws(null, function() { \1; });', code) code = re.sub(r'@assert (.*) === (.*);', lambda m: '_assertSame(%s, %s, "%s", "%s");' % (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2))) , code) code = re.sub(r'@assert (.*) !== (.*);', lambda m: '_assertDifferent(%s, %s, "%s", "%s");' % (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2))) , code) code = re.sub(r'@assert (.*) =~ (.*);', lambda m: 'assert_regexp_match(%s, %s);' % (m.group(1), m.group(2)) , code) code = re.sub(r'@assert (.*);', lambda m: '_assert(%s, "%s");' % (m.group(1), 
escapeJS(m.group(1))) , code) code = re.sub(r' @moz-todo', '', code) code = re.sub(r'@moz-UniversalBrowserRead;', "" , code) assert('@' not in code) return code def expand_mochitest_code(code): code = re.sub(r'@nonfinite ([^(]+)\(([^)]+)\)(.*)', lambda m: expand_nonfinite(m.group(1), m.group(2), m.group(3)), code) code = re.sub(r'@assert pixel (\d+,\d+) == (\d+,\d+,\d+,\d+);', r'isPixel(ctx, \1, \2, "\1", "\2", 0);', code) code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+);', r'isPixel(ctx, \1, \2, "\1", "\2", 2);', code) code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+) \+/- (\d+);', r'isPixel(ctx, \1, \2, "\1", "\2", \3);', code) code = re.sub(r'@assert throws (\S+_ERR) (.*);', lambda m: 'var _thrown = undefined; try {\n %s;\n} catch (e) { _thrown = e }; ok(_thrown && _thrown.code == DOMException.%s, "should throw %s");' % (m.group(2), m.group(1), m.group(1)) , code) code = re.sub(r'@assert throws (\S+Error) (.*);', lambda m: 'var _thrown = undefined; try {\n %s;\n} catch (e) { _thrown = e }; ok(_thrown && (_thrown instanceof %s), "should throw %s");' % (m.group(2), m.group(1), m.group(1)) , code) code = re.sub(r'@assert throws (.*);', lambda m: 'try { var _thrown = false;\n %s;\n} catch (e) { _thrown = true; } finally { ok(_thrown, "should throw exception"); }' % (m.group(1)) , code) code = re.sub(r'@assert (.*) =~ (.*);', lambda m: 'ok(%s.match(%s), "%s.match(%s)");' % (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2))) , code) code = re.sub(r'@assert (.*);', lambda m: 'ok(%s, "%s");' % (m.group(1), escapeJS(m.group(1))) , code) code = re.sub(r'((?:^|\n|;)\s*)ok(.*;) @moz-todo', lambda m: '%stodo%s' % (m.group(1), m.group(2)) , code) code = re.sub(r'((?:^|\n|;)\s*)(is.*;) @moz-todo', lambda m: '%stodo_%s' % (m.group(1), m.group(2)) , code) code = re.sub(r'@moz-UniversalBrowserRead;', "netscape.security.PrivilegeManager.enablePrivilege('UniversalBrowserRead');" , code) code = code.replace('../images/', 'image_') assert '@' not in code, '@ not in code:\n%s' % code return code used_tests = {} for i in range(len(tests)): test = tests[i] name = test['name'] print "\r(%s)" % name, " "*32, "\t", if name in used_tests: print "Test %s is defined twice" % name used_tests[name] = 1 mapped_name = None for mn in sorted(name_mapping.keys(), key=len, reverse=True): if name.startswith(mn): mapped_name = "%s/%s" % (name_mapping[mn], name) break if not mapped_name: print "LIKELY ERROR: %s has no defined target directory mapping" % name mapped_name = name if 'manual' in test: mapped_name += "-manual" cat_total = '' for cat_part in [''] + name.split('.')[:-1]: cat_total += cat_part+'.' 
if not cat_total in category_names: category_names.append(cat_total) category_contents_all.setdefault(cat_total, []).append(name) category_contents_direct.setdefault(cat_total, []).append(name) for ref in test.get('testing', []): if ref not in spec_ids: print "Test %s uses nonexistent spec point %s" % (name, ref) spec_refs.setdefault(ref, []).append(name) #if not (len(test.get('testing', [])) or 'mozilla' in test): if not test.get('testing', []): print "Test %s doesn't refer to any spec points" % name if test.get('expected', '') == 'green' and re.search(r'@assert pixel .* 0,0,0,0;', test['code']): print "Probable incorrect pixel test in %s" % name code = expand_test_code(test['code']) mochitest = not (W3CMODE or 'manual' in test or 'disabled' in test.get('mozilla', {})) if mochitest: mochi_code = expand_mochitest_code(test['code']) mochi_name = name if 'mozilla' in test: if 'throws' in test['mozilla']: mochi_code = templates['mochitest.exception'] % mochi_code if 'bug' in test['mozilla']: mochi_name = "%s - bug %s" % (name, test['mozilla']['bug']) if 'desc' in test: mochi_desc = '<!-- Testing: %s -->\n' % test['desc'] else: mochi_desc = '' if 'deferTest' in mochi_code: mochi_setup = '' mochi_footer = '' else: mochi_setup = '' mochi_footer = 'SimpleTest.finish();\n' for f in ['isPixel', 'todo_isPixel', 'deferTest', 'wrapFunction']: if f in mochi_code: mochi_setup += templates['mochitest.%s' % f] else: if not W3CMODE: print "Skipping mochitest for %s" % name mochi_name = '' mochi_desc = '' mochi_code = '' mochi_setup = '' mochi_footer = '' expectation_html = '' if 'expected' in test and test['expected'] is not None: expected = test['expected'] expected_img = None if expected == 'green': expected_img = make_flat_image('green-100x50.png', 100, 50, 0,1,0,1) if W3CMODE: expected_img = "/images/" + expected_img elif expected == 'clear': expected_img = make_flat_image('clear-100x50.png', 100, 50, 0,0,0,0) if W3CMODE: expected_img = "/images/" + expected_img else: if ';' in expected: print "Found semicolon in %s" % name expected = re.sub(r'^size (\d+) (\d+)', r'surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, \1, \2)\ncr = cairo.Context(surface)', expected) if mapped_name.endswith("-manual"): png_name = mapped_name[:-len("-manual")] else: png_name = mapped_name expected += "\nsurface.write_to_png('%s/%s.png')\n" % (IMAGEOUTPUTDIR, png_name) eval(compile(expected, '<test %s>' % test['name'], 'exec'), {}, {'cairo':cairo}) expected_img = "%s.png" % name if expected_img: expectation_html = ('<p class="output expectedtext">Expected output:' + '<p><img src="%s" class="output expected" id="expected" alt="">' % (expected_img)) canvas = test.get('canvas', 'width="100" height="50"') prev = tests[i-1]['name'] if i != 0 else 'index' next = tests[i+1]['name'] if i != len(tests)-1 else 'index' name_wrapped = name.replace('.', '.&#8203;') # (see https://bugzilla.mozilla.org/show_bug.cgi?id=376188) refs = ''.join('<li><a href="%s/canvas.html#testrefs.%s">%s</a>\n' % (SPECOUTPUTPATH, n,n) for n in test.get('testing', [])) if not W3CMODE and 'mozilla' in test and 'bug' in test['mozilla']: refs += '<li><a href="https://bugzilla.mozilla.org/show_bug.cgi?id=%d">Bugzilla</a>' % test['mozilla']['bug'] notes = '<p class="notes">%s' % test['notes'] if 'notes' in test else '' images = '' for i in test.get('images', []): id = i.split('/')[-1] if '/' not in i: used_images[i] = 1 i = '../images/%s' % i images += '<img src="%s" id="%s" class="resource">\n' % (i,id) mochi_images = images.replace('../images/', 'image_') if 
W3CMODE: images = images.replace("../images/", "/images/") fonts = '' fonthack = '' for i in test.get('fonts', []): fonts += '@font-face {\n font-family: %s;\n src: url("/fonts/%s.ttf");\n}\n' % (i, i) # Browsers require the font to actually be used in the page if test.get('fonthack', 1): fonthack += '<span style="font-family: %s; position: absolute; visibility: hidden">A</span>\n' % i if fonts: fonts = '<style>\n%s</style>\n' % fonts fallback = test.get('fallback', '<p class="fallback">FAIL (fallback content)</p>') desc = test.get('desc', '') escaped_desc = simpleEscapeJS(desc) template_params = { 'name':name, 'name_wrapped':name_wrapped, 'backrefs':backref_html(name), 'mapped_name':mapped_name, 'desc':desc, 'escaped_desc':escaped_desc, 'prev':prev, 'next':next, 'refs':refs, 'notes':notes, 'images':images, 'fonts':fonts, 'fonthack':fonthack, 'canvas':canvas, 'expected':expectation_html, 'code':code, 'mochi_name':mochi_name, 'mochi_desc':mochi_desc, 'mochi_code':mochi_code, 'mochi_setup':mochi_setup, 'mochi_footer':mochi_footer, 'mochi_images':mochi_images, 'fallback':fallback } if W3CMODE: f = codecs.open('%s/%s.html' % (TESTOUTPUTDIR, mapped_name), 'w', 'utf-8') f.write(templates['w3c'] % template_params) else: f = codecs.open('%s/%s.html' % (TESTOUTPUTDIR, name), 'w', 'utf-8') f.write(templates['standalone'] % template_params) f = codecs.open('%s/framed.%s.html' % (TESTOUTPUTDIR, name), 'w', 'utf-8') f.write(templates['framed'] % template_params) f = codecs.open('%s/minimal.%s.html' % (TESTOUTPUTDIR, name), 'w', 'utf-8') f.write(templates['minimal'] % template_params) if mochitest: mochitests.append(name) f = codecs.open('%s/mochitests/test_%s.html' % (MISCOUTPUTDIR, name), 'w', 'utf-8') f.write(templates['mochitest'] % template_params) def write_mochitest_makefile(): f = open('%s/mochitests/Makefile.in' % MISCOUTPUTDIR, 'w') f.write(templates['mochitest.Makefile']) files = ['test_%s.html' % n for n in mochitests] + ['image_%s' % n for n in used_images] chunksize = 100 chunks = [] for i in range(0, len(files), chunksize): chunk = files[i:i+chunksize] name = '_TEST_FILES_%d' % (i / chunksize) chunks.append(name) f.write('%s = \\\n' % name) for file in chunk: f.write('\t%s \\\n' % file) f.write('\t$(NULL)\n\n') f.write('# split up into groups to work around command-line length limits\n') for name in chunks: f.write('libs:: $(%s)\n\t$(INSTALL) $(foreach f,$^,"$f") $(DEPTH)/_tests/testing/mochitest/tests/$(relativesrcdir)\n\n' % name) if not W3CMODE: for i in used_images: shutil.copyfile("../../images/%s" % i, "%s/mochitests/image_%s" % (MISCOUTPUTDIR, i)) write_mochitest_makefile() print def write_index(): f = open('%s/index.html' % TESTOUTPUTDIR, 'w') f.write(templates['index.w3c' if W3CMODE else 'index'] % { 'updated':time.strftime('%Y-%m-%d', time.gmtime()) }) f.write('\n<ul class="testlist">\n') depth = 1 for category in category_names: name = category[1:-1] or '' count = len(category_contents_all[category]) new_depth = category.count('.') while new_depth < depth: f.write(' '*(depth-1) + '</ul>\n'); depth -= 1 f.write(' '*depth + templates['index.w3c.category.item' if W3CMODE else 'index.category.item'] % (name or 'all', name, count, '' if count==1 else 's')) while new_depth+1 > depth: f.write(' '*depth + '<ul>\n'); depth += 1 for item in category_contents_direct.get(category, []): f.write(' '*depth + '<li><a href="%s.html">%s</a>\n' % (item, item) ) while 0 < depth: f.write(' '*(depth-1) + '</ul>\n'); depth -= 1 def write_category_indexes(): for category in category_names: name = 
(category[1:-1] or 'all') f = open('%s/index.%s.html' % (TESTOUTPUTDIR, name), 'w') f.write(templates['index.w3c.frame' if W3CMODE else 'index.frame'] % { 'backrefs':backref_html(name), 'category':name }) for item in category_contents_all[category]: f.write(templates['index.w3c.frame.item' if W3CMODE else 'index.frame.item'] % item) def write_reportgen(): f = open('%s/reportgen.html' % MISCOUTPUTDIR, 'w') items_text = ',\n'.join(('"%s"' % item) for item in category_contents_all['.']) f.write(templates['reportgen'] % {'items':items_text }) def write_results(): results = {} uas = [] uastrings = {} for item in category_contents_all['.']: results[item] = {} f = open('%s/results.html' % MISCOUTPUTDIR, 'w') f.write(templates['results']) if not os.path.exists('results.yaml'): print "Can't find results.yaml" else: for resultset in yaml.load(open('results.yaml').read()): #title = "%s (%s)" % (resultset['ua'], resultset['time']) title = resultset['name'] #assert title not in uas # don't allow repetitions if title not in uas: uas.append(title) uastrings[title] = resultset['ua'] else: assert uastrings[title] == resultset['ua'] for r in resultset['results']: if r['id'] not in results: print 'Skipping results for removed test %s' % r['id'] continue results[r['id']][title] = ( r['status'].lower(), re.sub(r'%(..)', lambda m: chr(int(m.group(1), 16)), re.sub(r'%u(....)', lambda m: unichr(int(m.group(1), 16)), r['notes'])).encode('utf8') ) passes = {} for ua in uas: f.write('<th title="%s">%s\n' % (uastrings[ua], ua)) passes[ua] = 0 for id in category_contents_all['.']: f.write('<tr><td><a href="#%s" id="%s">#</a> <a href="%s.html">%s</a>\n' % (id, id, id, id)) for ua in uas: status, details = results[id].get(ua, ('', '')) f.write('<td class="r %s"><ul class="d">%s</ul>\n' % (status, details)) if status == 'pass': passes[ua] += 1 f.write('<tr><th>Passes\n') for ua in uas: f.write('<td>%.1f%%\n' % ((100.0 * passes[ua]) / len(category_contents_all['.']))) f.write('<tr><td>\n') for ua in uas: f.write('<td>%s\n' % ua) f.write('</table>\n') def getNodeText(node): t, offsets = '', [] # Skip over any previous annotations we added if node.nodeType == node.ELEMENT_NODE and 'testrefs' in node.getAttribute('class').split(' '): return t, offsets if node.nodeType == node.TEXT_NODE: val = node.nodeValue val = val.replace(unichr(0xa0), ' ') # replace &nbsp;s t += val offsets += [ (node, len(node.nodeValue)) ] for n in node.childNodes: child_t, child_offsets = getNodeText(n) t += child_t offsets += child_offsets return t, offsets def htmlSerializer(element): element.normalize() rv = [] specialtext = ['style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript'] empty = ['area', 'base', 'basefont', 'bgsound', 'br', 'col', 'embed', 'frame', 'hr', 'img', 'input', 'link', 'meta', 'param', 'spacer', 'wbr'] def serializeElement(element): if element.nodeType == Node.DOCUMENT_TYPE_NODE: rv.append("<!DOCTYPE %s>" % element.name) elif element.nodeType == Node.DOCUMENT_NODE: for child in element.childNodes: serializeElement(child) elif element.nodeType == Node.COMMENT_NODE: rv.append("<!--%s-->" % element.nodeValue) elif element.nodeType == Node.TEXT_NODE: unescaped = False n = element.parentNode while n is not None: if n.nodeName in specialtext: unescaped = True break n = n.parentNode if unescaped: rv.append(element.nodeValue) else: rv.append(escapeHTML(element.nodeValue)) else: rv.append("<%s" % element.nodeName) if element.hasAttributes(): for name, value in element.attributes.items(): rv.append(' %s="%s"' % (name, 
escapeHTML(value))) rv.append(">") if element.nodeName not in empty: for child in element.childNodes: serializeElement(child) rv.append("</%s>" % element.nodeName) serializeElement(element) return '<!DOCTYPE html>\n' + ''.join(rv) def write_annotated_spec(): # Load the stripped-down XHTMLised copy of the spec doc = xml.dom.minidom.parse(open('current-work-canvas.xhtml', 'r')) # Insert our new stylesheet n = doc.getElementsByTagName('head')[0].appendChild(doc.createElement('link')) n.setAttribute('rel', 'stylesheet') n.setAttribute('href', '../common/canvas-spec.css' if W3CMODE else '../spectest.css') n.setAttribute('type', 'text/css') spec_assertion_patterns = [] for a in spec_assertions: # Warn about problems if a['id'] not in spec_refs: print "Unused spec statement %s" % a['id'] pattern_text = a['text'] if 'keyword' in a: # Explicit keyword override keyword = a['keyword'] else: # Extract the marked keywords, and remove the markers keyword = 'none' for kw in ['must', 'should', 'required']: if ('*%s*' % kw) in pattern_text: keyword = kw pattern_text = pattern_text.replace('*%s*' % kw, kw) break # Make sure there wasn't >1 keyword for kw in ['must', 'should', 'required']: assert('*%s*' % kw not in pattern_text) # Convert the special pattern format into regexp syntax pattern_text = (pattern_text. # Escape relevant characters replace('*', r'\*'). replace('+', r'\+'). replace('.', r'\.'). replace('(', r'\('). replace(')', r'\)'). replace('[', r'\['). replace(']', r'\]'). # Convert special sequences back into unescaped regexp code replace(' ', r'\s+'). replace(r'<\.\.\.>', r'.+'). replace('<^>', r'()'). replace('<eol>', r'\s*?\n') ) pattern = re.compile(pattern_text, re.S) spec_assertion_patterns.append( (a['id'], pattern, keyword, a.get('previously', None)) ) matched_assertions = {} def process_element(e): if e.nodeType == e.ELEMENT_NODE and (e.getAttribute('class') == 'impl' or e.hasAttribute('data-component')): for c in e.childNodes: process_element(c) return t, offsets = getNodeText(e) for id, pattern, keyword, previously in spec_assertion_patterns: m = pattern.search(t) if m: # When the pattern-match isn't enough to uniquely identify a sentence, # allow explicit back-references to earlier paragraphs if previously: if len(previously) >= 3: n, text, exp = previously else: n, text = previously exp = True node = e while n and node.previousSibling: node = node.previousSibling n -= 1 if (text not in getNodeText(node)[0]) == exp: continue # discard this match if id in matched_assertions: print "Spec statement %s matches multiple places" % id matched_assertions[id] = True if m.lastindex != 1: print "Spec statement %s has incorrect number of match groups" % id end = m.end(1) end_node = None for end_node, o in offsets: if end < o: break end -= o assert(end_node) n1 = doc.createElement('span') n1.setAttribute('class', 'testrefs kw-%s' % keyword) n1.setAttribute('id', 'testrefs.%s' % id) n1.appendChild(doc.createTextNode(' ')) n = n1.appendChild(doc.createElement('a')) n.setAttribute('href', '#testrefs.%s' % id) n.setAttribute('title', id) n.appendChild(doc.createTextNode('#')) n1.appendChild(doc.createTextNode(' ')) for test_id in spec_refs.get(id, []): n = n1.appendChild(doc.createElement('a')) n.setAttribute('href', '../canvas/%s.html' % test_id) n.appendChild(doc.createTextNode(test_id)) n1.appendChild(doc.createTextNode(' ')) n0 = doc.createTextNode(end_node.nodeValue[:end]) n2 = doc.createTextNode(end_node.nodeValue[end:]) p = end_node.parentNode p.replaceChild(n2, end_node) p.insertBefore(n1, 
n2) p.insertBefore(n0, n1) t, offsets = getNodeText(e) for e in doc.getElementsByTagName('body')[0].childNodes: process_element(e) for s in spec_assertions: if s['id'] not in matched_assertions: print "Annotation incomplete: Unmatched spec statement %s" % s['id'] # Convert from XHTML back to HTML doc.documentElement.removeAttribute('xmlns') doc.documentElement.setAttribute('lang', doc.documentElement.getAttribute('xml:lang')) head = doc.documentElement.getElementsByTagName('head')[0] head.insertBefore(doc.createElement('meta'), head.firstChild).setAttribute('charset', 'UTF-8') f = codecs.open('%s/canvas.html' % SPECOUTPUTDIR, 'w', 'utf-8') f.write(htmlSerializer(doc)) if not W3CMODE: write_index() write_category_indexes() write_reportgen() write_results() write_annotated_spec()
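# Illustrative sketch (not part of the original script): expand_test_code()
# above rewrites the '@assert' mini-DSL into harness helper calls; for
# example, the input line
#
#     @assert pixel 50,25 == 0,255,0,255;
#
# is rewritten to
#
#     _assertPixel(canvas, 50,25, 0,255,0,255, "50,25", "0,255,0,255");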
mpl-2.0
-4,035,800,425,690,256,400
37.182631
161
0.542396
false
shaftoe/home-assistant
homeassistant/components/sensor/eddystone_temperature.py
4
5839
""" Read temperature information from Eddystone beacons. Your beacons must be configured to transmit UID (for identification) and TLM (for temperature) frames. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.eddystone_temperature/ """ import logging import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_NAME, TEMP_CELSIUS, STATE_UNKNOWN, EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START) REQUIREMENTS = ['beacontools[scan]==1.0.1'] _LOGGER = logging.getLogger(__name__) CONF_BEACONS = 'beacons' CONF_BT_DEVICE_ID = 'bt_device_id' CONF_INSTANCE = 'instance' CONF_NAMESPACE = 'namespace' BEACON_SCHEMA = vol.Schema({ vol.Required(CONF_NAMESPACE): cv.string, vol.Required(CONF_INSTANCE): cv.string, vol.Optional(CONF_NAME): cv.string }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_BT_DEVICE_ID, default=0): cv.positive_int, vol.Required(CONF_BEACONS): vol.Schema({cv.string: BEACON_SCHEMA}), }) # pylint: disable=unused-argument def setup_platform(hass, config, add_devices, discovery_info=None): """Validate configuration, create devices and start monitoring thread.""" bt_device_id = config.get("bt_device_id") beacons = config.get("beacons") devices = [] for dev_name, properties in beacons.items(): namespace = get_from_conf(properties, "namespace", 20) instance = get_from_conf(properties, "instance", 12) name = properties.get(CONF_NAME, dev_name) if instance is None or namespace is None: _LOGGER.error("Skipping %s", dev_name) continue else: devices.append(EddystoneTemp(name, namespace, instance)) if devices: mon = Monitor(hass, devices, bt_device_id) def monitor_stop(_service_or_event): """Stop the monitor thread.""" _LOGGER.info("Stopping scanner for Eddystone beacons") mon.stop() def monitor_start(_service_or_event): """Start the monitor thread.""" _LOGGER.info("Starting scanner for Eddystone beacons") mon.start() add_devices(devices) mon.start() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop) hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start) else: _LOGGER.warning("No devices were added") def get_from_conf(config, config_key, length): """Retrieve value from config and validate length.""" string = config.get(config_key) if len(string) != length: _LOGGER.error("Error in config parameter %s: Must be exactly %d " "bytes. 
Device will not be added", config_key, length/2) return None else: return string class EddystoneTemp(Entity): """Representation of a temperature sensor.""" def __init__(self, name, namespace, instance): """Initialize a sensor.""" self._name = name self.namespace = namespace self.instance = instance self.bt_addr = None self.temperature = STATE_UNKNOWN @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the device.""" return self.temperature @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return TEMP_CELSIUS @property def should_poll(self): """Return the polling state.""" return False class Monitor(object): """Continously scan for BLE advertisements.""" def __init__(self, hass, devices, bt_device_id): """Construct interface object.""" self.hass = hass # List of beacons to monitor self.devices = devices # Number of the bt device (hciX) self.bt_device_id = bt_device_id def callback(bt_addr, _, packet, additional_info): """Handle new packets.""" self.process_packet( additional_info['namespace'], additional_info['instance'], packet.temperature) # pylint: disable=import-error from beacontools import ( BeaconScanner, EddystoneFilter, EddystoneTLMFrame) device_filters = [EddystoneFilter(d.namespace, d.instance) for d in devices] self.scanner = BeaconScanner( callback, bt_device_id, device_filters, EddystoneTLMFrame) self.scanning = False def start(self): """Continously scan for BLE advertisements.""" if not self.scanning: self.scanner.start() self.scanning = True else: _LOGGER.debug( "start() called, but scanner is already running") def process_packet(self, namespace, instance, temperature): """Assign temperature to device.""" _LOGGER.debug("Received temperature for <%s,%s>: %d", namespace, instance, temperature) for dev in self.devices: if dev.namespace == namespace and dev.instance == instance: if dev.temperature != temperature: dev.temperature = temperature dev.schedule_update_ha_state() def stop(self): """Signal runner to stop and join thread.""" if self.scanning: _LOGGER.debug("Stopping...") self.scanner.stop() _LOGGER.debug("Stopped") self.scanning = False else: _LOGGER.debug( "stop() called but scanner was not running")
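# Illustrative configuration sketch (not part of the original file; names and
# identifiers are hypothetical): the PLATFORM_SCHEMA above implies an entry of
# this shape in configuration.yaml (namespace: 20 hex chars, instance: 12):
#
#     sensor:
#       - platform: eddystone_temperature
#         bt_device_id: 0
#         beacons:
#           living_room:
#             namespace: "112233445566778899aa"
#             instance: "000000000001"
#             name: "Living Room"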
apache-2.0
-8,497,068,863,128,658,000
31.082418
78
0.624422
false
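The platform above identifies each beacon by a 20-hex-character Eddystone namespace and a 12-hex-character instance, and refuses entries of the wrong length. A minimal sketch of that validation logic, using a plain dict in place of the Home Assistant config object (the beacon values here are made up):

# Hypothetical beacon entry; 20 hex chars = 10-byte namespace,
# 12 hex chars = 6-byte instance, as the platform expects.
beacon = {'namespace': '112233445566778899aa', 'instance': 'aabbccddeeff'}

def validate(config, key, length):
    # Mirrors get_from_conf(): reject values of the wrong length.
    value = config.get(key)
    return value if value is not None and len(value) == length else None

assert validate(beacon, 'namespace', 20) == '112233445566778899aa'
assert validate(beacon, 'instance', 12) == 'aabbccddeeff'
assert validate(beacon, 'namespace', 12) is None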
orcasgit/py-wsse
doc/conf.py
1
9315
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # py-wsse documentation build configuration file, created by # sphinx-quickstart on Mon May 25 12:37:44 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. #templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'py-wsse' copyright = '2015, Carl Meyer' author = 'Carl Meyer' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'py-wssedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
#'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, 'py-wsse.tex', 'py-wsse Documentation', 'Carl Meyer', 'manual', ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( master_doc, 'py-wsse', 'py-wsse Documentation', [author], 1, ) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, 'py-wsse', 'py-wsse Documentation', author, 'py-wsse', 'One line description of project.', 'Miscellaneous', ), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
bsd-3-clause
-4,973,598,448,278,609,000
30.363636
79
0.691573
false
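The conf.py above leaves `extensions` empty, so these docs are pure reStructuredText. If the project later wanted API reference pages pulled from docstrings, the usual change would look like the sketch below; sphinx.ext.autodoc and sphinx.ext.viewcode ship with Sphinx itself, but whether py-wsse wants them enabled is an assumption here.

# Hypothetical extension list for the conf.py above.
extensions = [
    'sphinx.ext.autodoc',   # pull API documentation from docstrings
    'sphinx.ext.viewcode',  # link rendered docs to highlighted source
]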
myint-archive/bpython
bpython/pager.py
1
2528
# The MIT License # # Copyright (c) 2009-2011 Andreas Stuehrk # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import absolute_import import curses import errno import os import pydoc import subprocess import sys try: unicode except NameError: unicode = str def get_pager_command(): command = os.environ.get('PAGER', 'less -r').split() return command def page_internal(data): """A more than dumb pager function.""" if hasattr(pydoc, 'ttypager'): pydoc.ttypager(data) else: sys.stdout.write(data) def page(data, use_internal=False): command = get_pager_command() if not command or use_internal: page_internal(data) else: curses.endwin() try: popen = subprocess.Popen(command, stdin=subprocess.PIPE) if isinstance(data, unicode): data = data.encode(sys.__stdout__.encoding, 'replace') popen.stdin.write(data) popen.stdin.close() except OSError as e: if e.errno == errno.ENOENT: # pager command not found, fall back to internal pager page_internal(data) return except IOError as e: if e.errno != errno.EPIPE: raise while True: try: popen.wait() except OSError as e: if e.errno != errno.EINTR: raise else: break curses.doupdate()
mit
-8,004,496,379,323,881,000
30.6
79
0.651108
false
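A small usage sketch for the module above, assuming bpython is installed and importable. get_pager_command() simply whitespace-splits $PAGER into an argv list, and page() can be forced onto the internal pydoc pager, which avoids the curses.endwin() call that is only valid inside bpython's initialized curses UI:

import os
from bpython import pager  # assumes bpython is on sys.path

os.environ['PAGER'] = 'more'
assert pager.get_pager_command() == ['more']

# Force the internal pager so this runs outside bpython's curses UI.
pager.page(u'some long output\n' * 100, use_internal=True)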
marcore/edx-platform
lms/djangoapps/courseware/features/events.py
177
2247
# pylint: disable=missing-docstring from lettuce import step from lettuce import world from lettuce import before from pymongo import MongoClient from nose.tools import assert_equals from nose.tools import assert_in REQUIRED_EVENT_FIELDS = [ 'agent', 'event', 'event_source', 'event_type', 'host', 'ip', 'page', 'time', 'username' ] @before.all def connect_to_mongodb(): world.mongo_client = MongoClient() world.event_collection = world.mongo_client['track']['events'] @before.each_scenario def reset_captured_events(_scenario): world.event_collection.drop() @before.outline def reset_between_outline_scenarios(_scenario, order, outline, reasons_to_fail): world.event_collection.drop() @step(r'[aA]n? course url "(.*)" event is emitted$') def course_url_event_is_emitted(_step, url_regex): event_type = url_regex.format(world.scenario_dict['COURSE'].id) n_events_are_emitted(_step, 1, event_type, "server") @step(r'([aA]n?|\d+) "(.*)" (server|browser) events? is emitted$') def n_events_are_emitted(_step, count, event_type, event_source): # Ensure all events are written out to mongo before querying. world.mongo_client.fsync() # Note that splinter makes 2 requests when you call browser.visit('/foo') # the first just checks to see if the server responds with a status # code of 200, the next actually uses the browser to submit the request. # We filter out events associated with the status code checks by ignoring # events that come directly from splinter. criteria = { 'event_type': event_type, 'event_source': event_source, 'agent': { '$ne': 'python/splinter' } } cursor = world.event_collection.find(criteria) try: number_events = int(count) except ValueError: number_events = 1 assert_equals(cursor.count(), number_events) event = cursor.next() expected_field_values = { "username": world.scenario_dict['USER'].username, "event_type": event_type, } for key, value in expected_field_values.iteritems(): assert_equals(event[key], value) for field in REQUIRED_EVENT_FIELDS: assert_in(field, event)
agpl-3.0
-2,871,520,452,668,130,300
26.072289
80
0.663996
false
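The steps above boil down to a MongoDB query over the tracking-log collection. A standalone sketch of that query, runnable against a local MongoDB; the event type is a made-up example, and the agent filter drops splinter's status-code probes exactly as in the step definition:

from pymongo import MongoClient

events = MongoClient()['track']['events']
criteria = {
    'event_type': '/courses/edX/Demo/about',  # hypothetical event type
    'event_source': 'server',
    'agent': {'$ne': 'python/splinter'},      # skip splinter's probe requests
}
print(events.find(criteria).count())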
Kaisuke5/chainer
cupy/reduction.py
7
19153
import collections import string import numpy import six import cupy from cupy import carray from cupy import elementwise from cupy import util six_range = six.moves.range six_zip = six.moves.zip _broadcast = elementwise._broadcast _check_args = elementwise._check_args _decide_params_type = elementwise._decide_params_type _get_kernel_params = elementwise._get_kernel_params _get_args_info = elementwise._get_args_info _get_out_args = elementwise._get_out_args _get_out_args_with_params = elementwise._get_out_args_with_params _get_param_info = elementwise._get_param_info _get_typename = elementwise._get_typename _guess_routine = elementwise._guess_routine _reduce_dims = elementwise._reduce_dims def _get_simple_reduction_kernel( name, block_size, reduce_type, params, identity, pre_map_expr, reduce_expr, post_map_expr, type_preamble, input_expr, output_expr, preamble, options): if identity is None: identity = '' module_code = string.Template(''' ${type_preamble} ${preamble} #define REDUCE(a, b) (${reduce_expr}) #define POST_MAP(a) (${post_map_expr}) typedef ${reduce_type} _type_reduce; extern "C" __global__ void ${name}(${params}) { if (_out_clp2_size > 256) { CUPY_FOR(_i, _out_ind.size()) { _type_reduce _s = _type_reduce(${identity}); for (int _j = _i, _J = 0; _j < _in_ind.size(); _j += _out_ind.size(), _J++) { _in_ind.set(_j); ${input_expr} _type_reduce _a = ${pre_map_expr}; _s = REDUCE(_s, _a); } _out_ind.set(_i); ${output_expr} POST_MAP(_s); } } else { extern __shared__ _type_reduce _sdata_raw[]; _type_reduce *_sdata = _sdata_raw; int _tid = threadIdx.x; _sdata[_tid] = _type_reduce(${identity}); unsigned int _i = _tid % _out_clp2_size; if (_i >= _out_ind.size()) return; _type_reduce _s = _type_reduce(${identity}); int _J_offset = _tid / _out_clp2_size; int _j_offset = _J_offset * _out_ind.size(); int _J_stride = ${block_size} / _out_clp2_size; int _j_stride = _J_stride * _out_ind.size(); for (int _j = _i + _j_offset, _J = _J_offset; _j < _in_ind.size(); _j += _j_stride, _J += _J_stride) { _in_ind.set(_j); ${input_expr} _type_reduce _a = ${pre_map_expr}; _s = REDUCE(_s, _a); } _sdata[_tid] = _s; __syncthreads(); if (_tid >= 256) return; _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 256]); __syncthreads(); if (_out_clp2_size <= 128) { _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 128]); __syncthreads(); if (_out_clp2_size <= 64) { _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 64]); __syncthreads(); if (_out_clp2_size <= 32) { _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 32]); if (_out_clp2_size <= 16) { _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 16]); if (_out_clp2_size <= 8) { _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 8]); if (_out_clp2_size <= 4) { _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 4]); if (_out_clp2_size <= 2) { _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 2]); if (_out_clp2_size <= 1) { _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 1]); } } } } } } } } _s = _sdata[_tid]; if (_tid >= _out_ind.size()) return; _out_ind.set(_i); ${output_expr} POST_MAP(_s); } }''').substitute( name=name, block_size=block_size, reduce_type=reduce_type, params=params, identity=identity, reduce_expr=reduce_expr, pre_map_expr=pre_map_expr, post_map_expr=post_map_expr, type_preamble=type_preamble, input_expr=input_expr, output_expr=output_expr, preamble=preamble) module = carray.compile_with_cache(module_code, options) return module.get_function(name) def _get_axis(axis, ndim): if axis is None: axis = tuple(six_range(ndim)) elif isinstance(axis, 
collections.Sequence): axis = tuple(axis) else: axis = axis, for dim in axis: if dim < -ndim or dim >= ndim: raise ValueError('Axis overrun') axis = tuple(sorted([dim % ndim for dim in axis])) raxis = tuple([dim for dim in six_range(ndim) if dim not in axis]) return axis, raxis def _get_out_shape(shape, axis, raxis, keepdims): if keepdims: out_shape = list(shape) for i in axis: out_shape[i] = 1 return tuple(out_shape) return tuple([shape[i] for i in raxis]) def _get_trans_args(args, trans, shape, params=None): if trans == tuple(six_range(len(shape))): return args, shape if params is not None and any(p.raw for p in params): raise NotImplementedError('Illegal conditions') args = [cupy.transpose(a, trans) if isinstance(a, cupy.ndarray) else a for a in args] shape = tuple([shape[i] for i in trans]) return args, shape def _get_inout_args(in_args, out_args, in_indexer, out_indexer, out_clp2_size, params, reduce_dims): if reduce_dims: in_args, in_shape = _reduce_dims( in_args, params, in_indexer.shape) out_args, out_shape = _reduce_dims( out_args, params[len(in_args):], out_indexer.shape) in_indexer.shape = in_shape out_indexer.shape = out_shape args = in_args + out_args + [in_indexer, out_indexer, numpy.int32(out_clp2_size)] return args @util.memoize(for_each_device=True) def _get_simple_reduction_function( routine, params, args_info, in_arg_dtype, out_arg_dtype, out_types, name, block_size, identity, input_expr, output_expr, _preamble, options): reduce_type = routine[3] if reduce_type is None: reduce_type = _get_typename(out_types[0]) t = (_get_typename(in_arg_dtype), _get_typename(out_arg_dtype)) type_preamble = 'typedef %s type_in0_raw; typedef %s type_out0_raw;' % t params = _get_kernel_params(params, args_info) return _get_simple_reduction_kernel( name, block_size, reduce_type, params, identity, routine[0], routine[1], routine[2], type_preamble, input_expr, output_expr, _preamble, options) class simple_reduction_function(object): def __init__(self, name, ops, identity, preamble): self.name = name self._ops = ops self.identity = identity self._preamble = preamble self.nin = 1 self.nout = 1 in_params = _get_param_info('T in0', True) out_params = _get_param_info('T out0', False) self._params = ( in_params + out_params + _get_param_info( 'CIndexer _in_ind, CIndexer _out_ind', False) + _get_param_info('int32 _out_clp2_size', True)) self._input_expr = 'const type_in0_raw in0 = _raw_in0[_in_ind.get()];' self._output_expr = 'type_out0_raw &out0 = _raw_out0[_out_ind.get()];' self._routine_cache = {} def __call__(self, a, axis=None, dtype=None, out=None, keepdims=False): if not isinstance(a, cupy.ndarray): raise TypeError('Input type must be cupy.ndarray') if self.identity is None: assert a.size != 0 if dtype is not None: dtype = numpy.dtype(dtype).type in_args = [a] if out is None: _check_args((a,)) out_args = [] else: _check_args((a, out)) out_args = [out] in_types, out_types, routine = _guess_routine( self.name, self._routine_cache, self._ops, in_args, dtype) axis, raxis = _get_axis(axis, a.ndim) out_shape = _get_out_shape(a.shape, axis, raxis, keepdims) out_args = _get_out_args(out_args, out_types, out_shape) in_args, in_shape = _get_trans_args( in_args, axis + raxis, in_args[0].shape) in_indexer = carray.Indexer(in_shape) out_indexer = carray.Indexer(out_shape) out_clp2_size = 2 ** int.bit_length(int(out_indexer.size - 1)) inout_args = _get_inout_args( in_args, out_args, in_indexer, out_indexer, out_clp2_size, self._params, True) args_info = _get_args_info(inout_args) block_size = 512 kern = 
_get_simple_reduction_function(
            routine, self._params, args_info,
            in_args[0].dtype.type, out_args[0].dtype.type, out_types,
            self.name, block_size, self.identity,
            self._input_expr, self._output_expr, self._preamble, ())

        shared_mem = 32 * block_size
        if out_clp2_size > 256:
            shared_mem = 0
        # TODO(okuta) set actual size
        kern.linear_launch(max(out_indexer.size, block_size),
                           inout_args, shared_mem, block_size)

        if len(out_args) == 1:
            return out_args[0]
        return tuple(out_args)


@util.memoize(for_each_device=True)
def _get_reduction_kernel(
        params, args_info, types,
        name, block_size, reduce_type, identity, map_expr, reduce_expr,
        post_map_expr, preamble, options):
    kernel_params = _get_kernel_params(params, args_info)
    arrays = [p for p, a in six_zip(params, args_info)
              if not p.raw and a[0] is cupy.ndarray]
    type_preamble = '\n'.join(
        'typedef %s %s;' % (_get_typename(v), k) for k, v in types)
    input_expr = '\n'.join(
        ['const {0} {1} = _raw_{1}[_j];'.format(p.ctype, p.name)
         for p in arrays if p.is_const])
    output_expr = '\n'.join(
        ['{0} &{1} = _raw_{1}[_i];'.format(p.ctype, p.name)
         for p in arrays if not p.is_const])
    return _get_simple_reduction_kernel(
        name, block_size, reduce_type, kernel_params, identity,
        map_expr, reduce_expr, post_map_expr,
        type_preamble, input_expr, output_expr, preamble, options)


class ReductionKernel(object):

    """User-defined reduction kernel.

    This class can be used to define a reduction kernel with or without
    broadcasting.

    The kernel is compiled at an invocation of the
    :meth:`~ReductionKernel.__call__` method, which is cached for each device.
    The compiled binary is also cached into a file under the
    ``$HOME/.cupy/kernel_cache/`` directory with a hashed file name. The
    cached binary is reused by other processes.

    Args:
        in_params (str): Input argument list.
        out_params (str): Output argument list.
        map_expr (str): Mapping expression for input values.
        reduce_expr (str): Reduction expression.
        post_map_expr (str): Mapping expression for reduced values.
        identity (str): Identity value for starting the reduction.
        name (str): Name of the kernel function. It should be set for
            readability of the performance profiling.
        reduce_type (str): Type of values to be used for reduction. This type
            is used to store the special variables ``a``.
        reduce_dims (bool): If True, input arrays are reshaped without copy to
            smaller dimensions for efficiency.
        preamble (str): Fragment of the CUDA-C/C++ code that is inserted at
            the top of the cu file.
        options (tuple of str): Additional compilation options.

    """

    def __init__(self, in_params, out_params, map_expr, reduce_expr,
                 post_map_expr, identity, name='reduce_kernel',
                 reduce_type=None, reduce_dims=True, preamble='', options=()):
        self.in_params = _get_param_info(in_params, True)
        self.out_params = _get_param_info(out_params, False)
        self.nin = len(self.in_params)
        self.nout = len(self.out_params)
        self.nargs = self.nin + self.nout
        self.params = (
            self.in_params + self.out_params +
            _get_param_info('CIndexer _in_ind, CIndexer _out_ind', False) +
            _get_param_info('int32 _out_clp2_size', True))
        self.identity = identity
        self.reduce_expr = reduce_expr
        self.map_expr = map_expr
        self.name = name
        self.options = options
        self.reduce_dims = reduce_dims
        self.post_map_expr = post_map_expr
        if reduce_type is None:
            self.reduce_type = self.out_params[0].ctype
        else:
            self.reduce_type = reduce_type
        self.preamble = preamble

    def __call__(self, *args, **kwargs):
        """Compiles and invokes the reduction kernel.

        The compilation runs only if the kernel is not cached.
Note that the kernels with different argument dtypes, ndims, or axis are not compatible. It means that single ReductionKernel object may be compiled into multiple kernel binaries. Args: args: Arguments of the kernel. Returns: Arrays are returned according to the ``out_params`` argument of the ``__init__`` method. """ out = kwargs.pop('out', None) axis = kwargs.pop('axis', None) keepdims = kwargs.pop('keepdims', False) if kwargs: raise TypeError('Wrong arguments %s' % kwargs) n_args = len(args) if n_args != self.nin and n_args != self.nargs: raise TypeError('Wrong number of arguments for %s' % self.name) out_args = list(args[self.nin:]) if out is not None: if self.nout != 1: raise NotImplementedError('') if len(out_args) != 0: raise ValueError("cannot specify 'out' as both " "a positional and keyword argument") out_args = [out] in_args, broad_shape = _broadcast(args, self.in_params, False) _check_args(in_args + out_args) if self.identity is None: assert 0 in broad_shape cp_array = cupy.ndarray in_ndarray_types = tuple( [a.dtype.type if isinstance(a, cp_array) else None for a in in_args]) out_ndarray_types = tuple( [a.dtype.type if isinstance(a, cp_array) else None for a in out_args]) in_types, out_types, types = _decide_params_type( self.in_params, self.out_params, in_ndarray_types, out_ndarray_types) axis, raxis = _get_axis(axis, len(broad_shape)) out_shape = _get_out_shape(broad_shape, axis, raxis, keepdims) in_args = [x if isinstance(x, cp_array) else t(x) for x, t in six_zip(in_args, in_types)] in_args, in_shape = _get_trans_args( in_args, axis + raxis, broad_shape, self.in_params) out_args = _get_out_args_with_params( out_args, out_types, out_shape, self.out_params) in_indexer = carray.Indexer(in_shape) out_indexer = carray.Indexer(out_shape) out_clp2_size = 2 ** int.bit_length(int(out_indexer.size - 1)) inout_args = _get_inout_args( in_args, out_args, in_indexer, out_indexer, out_clp2_size, self.params, self.reduce_dims) args_info = _get_args_info(inout_args) block_size = 512 kern = _get_reduction_kernel( self.params, args_info, types, self.name, block_size, self.reduce_type, self.identity, self.map_expr, self.reduce_expr, self.post_map_expr, self.preamble, self.options) shared_mem = 32 * block_size if out_clp2_size > 256: shared_mem = 0 # TODO(okuta) set actual size kern.linear_launch(max(out_indexer.size, block_size), inout_args, shared_mem, block_size) return out_args[0] def create_reduction_func(name, ops, routine=None, identity=None, preamble=''): _ops = [] for t in ops: if not isinstance(t, tuple): typ = t rt = routine else: typ, rt = t rt = tuple(i or j for i, j in six_zip(rt, routine)) types = typ.split('->') if len(types) == 1: in_types = out_types = tuple(types) else: in_types, out_types = map(tuple, types) in_types = tuple([numpy.dtype(t).type for t in in_types]) out_types = tuple([numpy.dtype(t).type for t in out_types]) _ops.append((in_types, out_types, rt)) return simple_reduction_function(name, _ops, identity, preamble) _min_max_preamble = ''' struct min_max_st{ type_in0_raw value; int index; __device__ min_max_st() : index(-1) { } __device__ min_max_st(type_in0_raw v) : value(v), index(0) { } __device__ min_max_st(type_in0_raw v, int i) : value(v), index(i) { } }; __device__ min_max_st my_min(const min_max_st& a, const min_max_st& b) { if (a.index == -1) return b; if (b.index == -1) return a; return min_max_st(min(a.value, b.value)); } __device__ min_max_st my_max(const min_max_st& a, const min_max_st& b) { if (a.index == -1) return b; if (b.index == -1) return a; 
return min_max_st(max(a.value, b.value)); } __device__ min_max_st my_argmin(const min_max_st& a, const min_max_st& b) { if (a.index == -1) return b; if (b.index == -1) return a; return (a.value <= b.value) ? a : b; } __device__ min_max_st my_argmax(const min_max_st& a, const min_max_st& b) { if (a.index == -1) return b; if (b.index == -1) return a; return (a.value >= b.value) ? a : b; }''' amin = create_reduction_func( 'cupy_min', ('?->?', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d'), ('min_max_st(in0)', 'my_min(a, b)', 'out0 = a.value', 'min_max_st'), None, _min_max_preamble) amax = create_reduction_func( 'cupy_max', ('?->?', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d'), ('min_max_st(in0)', 'my_max(a, b)', 'out0 = a.value', 'min_max_st'), None, _min_max_preamble) argmin = create_reduction_func( 'cupy_argmin', ('?->l', 'B->l', 'h->l', 'H->l', 'i->l', 'I->l', 'l->l', 'L->l', 'q->l', 'Q->l', 'e->l', 'f->l', 'd->l'), ('min_max_st(in0, _J)', 'my_argmin(a, b)', 'out0 = a.index', 'min_max_st'), None, _min_max_preamble) argmax = create_reduction_func( 'cupy_argmax', ('?->l', 'B->l', 'h->l', 'H->l', 'i->l', 'I->l', 'l->l', 'L->l', 'q->l', 'Q->l', 'e->l', 'f->l', 'd->l'), ('min_max_st(in0, _J)', 'my_argmax(a, b)', 'out0 = a.index', 'min_max_st'), None, _min_max_preamble)
mit
-3,813,787,232,887,063,600
35.761996
79
0.551715
false
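The ReductionKernel docstring above maps directly onto usage. A standard example, assuming the class is re-exported as cupy.ReductionKernel: an L2 norm, where map_expr squares each input element, reduce_expr sums pairwise, and post_map_expr takes the square root of the reduced value:

import cupy

l2norm = cupy.ReductionKernel(
    'T x',          # in_params
    'T y',          # out_params
    'x * x',        # map_expr: applied to each input element
    'a + b',        # reduce_expr: pairwise reduction
    'y = sqrt(a)',  # post_map_expr: applied to the reduced value
    '0',            # identity
    'l2norm')       # kernel name

x = cupy.arange(10, dtype=cupy.float32).reshape(2, 5)
print(l2norm(x, axis=1))  # one norm per row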
our-city-app/oca-backend
src/solutions/common/bizz/payment.py
1
5372
# -*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.7@@ from google.appengine.ext import db from rogerthat.bizz.communities.communities import get_community from rogerthat.bizz.payment import get_api_module from rogerthat.consts import DEBUG from rogerthat.dal.profile import get_service_profile from rogerthat.models import ServiceIdentity from rogerthat.rpc import users from rogerthat.service.api.payments import list_providers, put_provider from rogerthat.to.payment import ServicePaymentProviderTO, PAYMENT_SETTINGS_MAPPING, ServicePaymentProviderFeeTO from solutions.common.bizz import broadcast_updates_pending from solutions.common.dal import get_solution_settings, get_solution_identity_settings, \ get_solution_settings_or_identity_settings from solutions.common.to.payments import TransactionDetailsTO from solutions.common.utils import is_default_service_identity def is_in_test_mode(service_user, service_identity): # type: (users.User, unicode) -> bool if is_default_service_identity(service_identity): sln_i_settings = get_solution_settings(service_user) else: sln_i_settings = get_solution_identity_settings(service_user, service_identity) return sln_i_settings.payment_test_mode def save_payment_settings(service_user, service_identity, optional): def trans(): sln_settings = get_solution_settings(service_user) sln_settings.updates_pending = True if is_default_service_identity(service_identity): sln_i_settings = sln_settings else: sln_settings.put() sln_i_settings = get_solution_identity_settings(service_user, service_identity) sln_i_settings.payment_optional = optional sln_i_settings.put() return sln_settings, sln_i_settings sln_settings, sln_i_settings = db.run_in_transaction(trans) broadcast_updates_pending(sln_settings) return sln_i_settings def save_provider(service_user, service_identity, provider_id, data): # type: (users.User, unicode, unicode, ServicePaymentProviderTO) -> ServicePaymentProviderTO sln_settings = get_solution_settings(service_user) sln_i_settings = get_solution_settings_or_identity_settings(sln_settings, service_identity) sln_settings.updates_pending = True provider = put_provider(provider_id, data.settings.to_dict(), service_identity, sln_i_settings.payment_test_mode, data.enabled, data.fee) sln_i_settings.payment_enabled = any(p.is_enabled for p in get_providers_settings(service_user, service_identity)) db.put([sln_i_settings, sln_settings]) broadcast_updates_pending(sln_settings) return provider def get_providers_settings(service_user, service_identity): # type: (users.User, unicode) -> list[ServicePaymentProviderTO] service_identity = service_identity or ServiceIdentity.DEFAULT test_mode = is_in_test_mode(service_user, service_identity) results = [] visible_providers = get_visible_payment_providers(service_user, service_identity, test_mode) with users.set_user(service_user): providers = {provider.provider_id: provider for provider in list_providers(service_identity, test_mode)} for provider_id in 
visible_providers: provider = providers.get(provider_id) if provider: results.append(provider) else: provider = ServicePaymentProviderTO( provider_id=provider_id, fee=ServicePaymentProviderFeeTO( amount=ServicePaymentProviderFeeTO.amount.default, # @UndefinedVariable precision=ServicePaymentProviderFeeTO.precision.default, # @UndefinedVariable currency=None ), enabled=False) provider.settings = PAYMENT_SETTINGS_MAPPING[provider_id]() results.append(provider) return results def get_visible_payment_providers(service_user, service_identity, test_mode): community = get_community(get_service_profile(service_user).community_id) providers = [] if DEBUG or community.demo: return ['payconiq'] if community.country == 'BE': providers.append('payconiq') return providers def get_transaction_details(payment_provider, transaction_id, service_user, service_identity, app_user): # type: (unicode, unicode, users.User, unicode, users.User) -> TransactionDetailsTO mod = get_api_module(payment_provider) if payment_provider == 'payconiq': transaction = mod.get_public_transaction(transaction_id) return TransactionDetailsTO.from_dict(transaction) else: raise Exception('Unknown payment provider %s' % payment_provider)
apache-2.0
-6,421,945,510,801,639,000
43.766667
118
0.711467
false
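The provider-visibility rule in get_visible_payment_providers() is small enough to restate without the App Engine models. A sketch of the same decision, with the community object replaced by plain arguments (the field names come from the code above):

def visible_providers(country, demo=False, debug=False):
    # Demo communities and local debug builds always see payconiq;
    # otherwise only Belgian communities do.
    if debug or demo:
        return ['payconiq']
    return ['payconiq'] if country == 'BE' else []

assert visible_providers('BE') == ['payconiq']
assert visible_providers('NL') == []
assert visible_providers('NL', demo=True) == ['payconiq']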
tyagi-prashant/letsencrypt
letsencrypt-compatibility-test/letsencrypt_compatibility_test/configurators/apache/common.py
26
10466
"""Provides a common base for Apache proxies""" import re import os import subprocess import mock import zope.interface from letsencrypt import configuration from letsencrypt import errors as le_errors from letsencrypt_apache import configurator from letsencrypt_compatibility_test import errors from letsencrypt_compatibility_test import interfaces from letsencrypt_compatibility_test import util from letsencrypt_compatibility_test.configurators import common as configurators_common APACHE_VERSION_REGEX = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE) APACHE_COMMANDS = ["apachectl", "a2enmod", "a2dismod"] class Proxy(configurators_common.Proxy): # pylint: disable=too-many-instance-attributes """A common base for Apache test configurators""" zope.interface.implements(interfaces.IConfiguratorProxy) def __init__(self, args): """Initializes the plugin with the given command line args""" super(Proxy, self).__init__(args) self.le_config.apache_le_vhost_ext = "-le-ssl.conf" self._setup_mock() self.modules = self.server_root = self.test_conf = self.version = None self._apache_configurator = self._all_names = self._test_names = None def _setup_mock(self): """Replaces specific modules with mock.MagicMock""" mock_subprocess = mock.MagicMock() mock_subprocess.check_call = self.check_call mock_subprocess.Popen = self.popen mock.patch( "letsencrypt_apache.configurator.subprocess", mock_subprocess).start() mock.patch( "letsencrypt_apache.parser.subprocess", mock_subprocess).start() mock.patch( "letsencrypt.le_util.subprocess", mock_subprocess).start() mock.patch( "letsencrypt_apache.configurator.le_util.exe_exists", _is_apache_command).start() patch = mock.patch( "letsencrypt_apache.configurator.display_ops.select_vhost") mock_display = patch.start() mock_display.side_effect = le_errors.PluginError( "Unable to determine vhost") def check_call(self, command, *args, **kwargs): """If command is an Apache command, command is executed in the running docker image. Otherwise, subprocess.check_call is used. """ if _is_apache_command(command): command = _modify_command(command) return super(Proxy, self).check_call(command, *args, **kwargs) else: return subprocess.check_call(command, *args, **kwargs) def popen(self, command, *args, **kwargs): """If command is an Apache command, command is executed in the running docker image. Otherwise, subprocess.Popen is used. 
""" if _is_apache_command(command): command = _modify_command(command) return super(Proxy, self).popen(command, *args, **kwargs) else: return subprocess.Popen(command, *args, **kwargs) def __getattr__(self, name): """Wraps the Apache Configurator methods""" method = getattr(self._apache_configurator, name, None) if callable(method): return method else: raise AttributeError() def load_config(self): """Loads the next configuration for the plugin to test""" if hasattr(self.le_config, "apache_init_script"): try: self.check_call([self.le_config.apache_init_script, "stop"]) except errors.Error: raise errors.Error( "Failed to stop previous apache config from running") config = super(Proxy, self).load_config() self.modules = _get_modules(config) self.version = _get_version(config) self._all_names, self._test_names = _get_names(config) server_root = _get_server_root(config) with open(os.path.join(config, "config_file")) as f: config_file = os.path.join(server_root, f.readline().rstrip()) self.test_conf = _create_test_conf(server_root, config_file) self.preprocess_config(server_root) self._prepare_configurator(server_root, config_file) try: self.check_call("apachectl -d {0} -f {1} -k start".format( server_root, config_file)) except errors.Error: raise errors.Error( "Apache failed to load {0} before tests started".format( config)) return config def preprocess_config(self, server_root): # pylint: disable=anomalous-backslash-in-string, no-self-use """Prepares the configuration for use in the Docker""" find = subprocess.Popen( ["find", server_root, "-type", "f"], stdout=subprocess.PIPE) subprocess.check_call([ "xargs", "sed", "-e", "s/DocumentRoot.*/DocumentRoot " "\/usr\/local\/apache2\/htdocs/I", "-e", "s/SSLPassPhraseDialog.*/SSLPassPhraseDialog builtin/I", "-e", "s/TypesConfig.*/TypesConfig " "\/usr\/local\/apache2\/conf\/mime.types/I", "-e", "s/LoadModule/#LoadModule/I", "-e", "s/SSLCertificateFile.*/SSLCertificateFile " "\/usr\/local\/apache2\/conf\/empty_cert.pem/I", "-e", "s/SSLCertificateKeyFile.*/SSLCertificateKeyFile " "\/usr\/local\/apache2\/conf\/rsa1024_key2.pem/I", "-i"], stdin=find.stdout) def _prepare_configurator(self, server_root, config_file): """Prepares the Apache plugin for testing""" self.le_config.apache_server_root = server_root self.le_config.apache_ctl = "apachectl -d {0} -f {1}".format( server_root, config_file) self.le_config.apache_enmod = "a2enmod.sh {0}".format(server_root) self.le_config.apache_dismod = "a2dismod.sh {0}".format(server_root) self.le_config.apache_init_script = self.le_config.apache_ctl + " -k" self._apache_configurator = configurator.ApacheConfigurator( config=configuration.NamespaceConfig(self.le_config), name="apache") self._apache_configurator.prepare() def cleanup_from_tests(self): """Performs any necessary cleanup from running plugin tests""" super(Proxy, self).cleanup_from_tests() mock.patch.stopall() def get_all_names_answer(self): """Returns the set of domain names that the plugin should find""" if self._all_names: return self._all_names else: raise errors.Error("No configuration file loaded") def get_testable_domain_names(self): """Returns the set of domain names that can be tested against""" if self._test_names: return self._test_names else: return {"example.com"} def deploy_cert(self, domain, cert_path, key_path, chain_path=None): """Installs cert""" cert_path, key_path, chain_path = self.copy_certs_and_keys( cert_path, key_path, chain_path) self._apache_configurator.deploy_cert( domain, cert_path, key_path, chain_path) def 
_is_apache_command(command):
    """Returns true if command is an Apache command"""
    if isinstance(command, list):
        command = command[0]

    for apache_command in APACHE_COMMANDS:
        if command.startswith(apache_command):
            return True

    return False


def _modify_command(command):
    """Modifies command so configtest works inside the docker image"""
    if isinstance(command, list):
        for i in xrange(len(command)):
            if command[i] == "configtest":
                command[i] = "-t"
    else:
        command = command.replace("configtest", "-t")

    return command


def _create_test_conf(server_root, apache_config):
    """Creates a test config file and adds it to the Apache config"""
    test_conf = os.path.join(server_root, "test.conf")
    open(test_conf, "w").close()
    subprocess.check_call(
        ["sed", "-i", "1iInclude test.conf", apache_config])

    return test_conf


def _get_server_root(config):
    """Returns the server root directory in config"""
    subdirs = [
        name for name in os.listdir(config)
        if os.path.isdir(os.path.join(config, name))]

    if len(subdirs) != 1:
        raise errors.Error(
            "Malformed configuration directory {0}".format(config))

    return os.path.join(config, subdirs[0].rstrip())


def _get_names(config):
    """Returns all and testable domain names in config"""
    all_names = set()
    non_ip_names = set()
    with open(os.path.join(config, "vhosts")) as f:
        for line in f:
            # If parsing a specific vhost
            if line[0].isspace():
                words = line.split()
                if words[0] == "alias":
                    all_names.add(words[1])
                    non_ip_names.add(words[1])
                # If for port 80 and not IP vhost
                elif words[1] == "80" and not util.IP_REGEX.match(words[3]):
                    all_names.add(words[3])
                    non_ip_names.add(words[3])
            elif "NameVirtualHost" not in line:
                words = line.split()
                if (words[0].endswith("*") or words[0].endswith("80") and
                        not util.IP_REGEX.match(words[1]) and
                        words[1].find(".") != -1):
                    all_names.add(words[1])

    return all_names, non_ip_names


def _get_modules(config):
    """Returns the list of modules found in module_list"""
    modules = []
    with open(os.path.join(config, "modules")) as f:
        for line in f:
            # Modules list is indented, everything else is headers/footers
            if line[0].isspace():
                words = line.split()
                # Modules redundantly end in "_module" which we can discard
                modules.append(words[0][:-7])

    return modules


def _get_version(config):
    """Return version of Apache Server.

    Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7)).
    Code taken from the Apache plugin.

    """
    with open(os.path.join(config, "version")) as f:
        # Should be on first line of input
        matches = APACHE_VERSION_REGEX.findall(f.readline())

    if len(matches) != 1:
        raise errors.Error("Unable to find Apache version")

    return tuple([int(i) for i in matches[0].split(".")])
apache-2.0
7,075,958,859,080,747,000
35.852113
87
0.604624
false
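The version handling in _get_version() above reduces to one regex plus a tuple conversion. A quick standalone check of that behavior (the banner string is a typical apachectl first line, invented here):

import re

APACHE_VERSION_REGEX = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)

banner = "Server version: Apache/2.4.7 (Ubuntu)"  # hypothetical banner
matches = APACHE_VERSION_REGEX.findall(banner)
assert tuple(int(i) for i in matches[0].split(".")) == (2, 4, 7)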
mensler/ansible
lib/ansible/plugins/action/junos_config.py
19
4243
#
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import time
import glob

from ansible.plugins.action.junos import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.module_utils._text import to_native
from ansible.utils.vars import merge_hash

PRIVATE_KEYS_RE = re.compile('__.+__')


class ActionModule(_ActionModule):

    def run(self, tmp=None, task_vars=None):

        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                return dict(failed=True, msg=exc.message)

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])

            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters
        for key in result.keys():
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        open(filename, 'w').write(to_native(contents, encoding='latin1'))
        return filename

    def _handle_template(self):
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
gpl-3.0
-6,914,351,979,465,944,000
36.548673
85
0.618195
false
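_write_backup() above produces one backup file per inventory host under <playbook dir>/backup, deleting the host's older backups first. A sketch of just the naming scheme (the host name is hypothetical):

import time

host = 'junos-router-1'  # hypothetical inventory hostname
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
print('backup/%s_config.%s' % (host, tstamp))
# e.g. backup/junos-router-1_config.2017-06-01@12:00:00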
karlinjf/ChromiumXRefs
lib/chromium_code_search.py
1
7067
# Copyright 2017 Josh Karlin. All rights reserved.
# Use of this source code is governed by the Apache license found in the LICENSE
# file.

import argparse
import datetime
import getopt
import json
import sys
import tempfile
import threading
import time
import urllib.error
import urllib.request
import urllib.parse

gFileCache = None;

# A key/value store that stores objects to disk in temporary objects
# for 30 minutes.
class FileCache:
  def __init__(self):
    self.store = {}
    threading.Timer(15 * 60, self.gc).start();

  def put(self, url, data):
    f = tempfile.TemporaryFile();
    f.write(data);
    self.store[url] = (f, datetime.datetime.now());

  def get(self, url):
    if not url in self.store:
      return ''
    (f, timestamp) = self.store[url]
    f.seek(0);
    return f.read();

  def gc(self):
    threading.Timer(15 * 60, self.gc).start();
    expired = datetime.datetime.now() - datetime.timedelta(minutes=30);
    remove = []
    for url, (f, timestamp) in self.store.items():
      if timestamp < expired:
        remove.append(url)
    for url in remove:
      self.store.pop(url);

def cacheResponses(should_cache):
  global gFileCache
  if not should_cache:
    gFileCache = None;
    return
  if gFileCache:
    return
  gFileCache = FileCache();

# Retrieve the url by first trying to cache and falling back to the network.
def retrieve(url):
  global gFileCache

  if gFileCache:
    cached_response = gFileCache.get(url);
    if (cached_response):
      return cached_response.decode('utf8');

  response = None
  try:
    if len(url) > 1500:
      short_url = url.split('?')[0]
      data = url.split('?')[1]
      response = urllib.request.urlopen(short_url, data=data.encode('utf-8'), timeout=3)
    else:
      response = urllib.request.urlopen(url, timeout=3)
  except OSError:
    # Covers urllib.error.URLError and socket timeouts.
    return ''
  result = response.read()

  if gFileCache:
    gFileCache.put(url, result);

  return result.decode('utf8');

def getSignatureFor(src_file, method):
  url = ('https://cs.chromium.org/codesearch/json'
         '?annotation_request=b'
         '&file_spec=b'
         '&package_name=chromium'
         '&name={file_name}'
         '&file_spec=e'
         '&type=b'
         '&id=1'
         '&type=e'
         '&label='
         '&follow_branches=false'
         '&annotation_request=e')
  url = url.format(file_name=urllib.parse.quote(src_file, safe=''))

  result = retrieve(url);
  if not result:
    return ''

  result = json.loads(result)['annotation_response'][0]

  for snippet in result.get('annotation', []):
    if not 'type' in snippet:
      continue
    if 'xref_signature' in snippet:
      signature = snippet['xref_signature']['signature']
      if '%s(' % method in signature:
        return signature
    elif 'internal_link' in snippet:
      signature = snippet['internal_link']['signature']
      if '::%s' % method in signature or 'class-%s' % method in signature:
        return signature
  return ''

def getCallGraphFor(signature):
  url = ('https://cs.chromium.org/codesearch/json'
         '?call_graph_request=b'
         '&signature={signature}'
         '&file_spec=b'
         '&package_name=chromium'
         '&name=.'
'&file_spec=e' '&max_num_results=500' '&call_graph_request=e') url = url.format(signature=urllib.parse.quote(signature, safe='')) result = retrieve(url); if not result: return {} result = json.loads(result)['call_graph_response'][0]; node = result['node']; callers = []; last_signature = '' if not 'children' in node: return callers for child in node['children']: if child['signature'] == last_signature: continue if not 'snippet_file_path' in child: continue caller = {} caller['filename'] = child['snippet_file_path']; caller['line'] = child['call_site_range']['start_line'] caller['col'] = child['call_site_range']['start_column'] caller['text'] = child['snippet']['text']['text'] caller['calling_method'] = child['identifier'] caller['calling_signature'] = child['signature'] last_signature = child['signature'] caller['display_name'] = child['display_name'] callers.append(caller) return callers def getRefForMatch(filename, match): ref = {'filename': filename, 'line': match['line_number'], 'signature': match['signature']} if 'line_text' in match: ref['line_text'] = match['line_text'] return ref; def getXrefsFor(signature): url = ('https://cs.chromium.org/codesearch/json' '?xref_search_request=b' '&query={signature}' '&file_spec=b' '&name=.' '&package_name=chromium' '&file_spec=e' '&max_num_results=500' '&xref_search_request=e') url = url.format(signature=urllib.parse.quote(signature, safe='')) result = retrieve(url); if not result: return {} result = json.loads(result)['xref_search_response'][0] status = result['status'] if not 'search_result' in result: return {} search_results = result['search_result'] xrefs = {} for file_result in search_results: filename = file_result['file']['name'] for match in file_result['match']: if match['type'] == 'HAS_DEFINITION': xrefs['definition'] = getRefForMatch(filename, match); elif match['type'] == 'HAS_DECLARATION': xrefs['declaration'] = getRefForMatch(filename, match); elif match['type'] == 'OVERRIDDEN_BY': xrefs.setdefault('overrides', []); xrefs['overrides'].append(getRefForMatch(filename, match)); elif match['type'] == 'REFERENCED_AT': xrefs.setdefault('references', []); xrefs['references'].append(getRefForMatch(filename, match)); return xrefs def logAndExit(msg): print(msg); sys.exit(2); if __name__ == "__main__": parser = argparse.ArgumentParser(description='Searches Chromium Code Search for X-Refs.') parser.add_argument('-p', '--path', help='The path to this file starting with src/') parser.add_argument('-w', '--word', help='The word to search for in the file denoted by the path argument. You must also specify -p') parser.add_argument('-s', '--signature', help='A signature provided from a previous search. No -p or -w arguments required.') args = parser.parse_args() signature = args.signature; results = {} if not signature: if bool(args.path) ^ bool(args.word): print("Both path and word must be supplied if one is supplied"); sys.exit(2); signature = getSignatureFor(args.path, args.word); results['signature'] = signature if not signature: logAndExit("Could not find signature for %s" % (args.word)) results['xrefs'] = getXrefsFor(signature); results['callers'] = getCallGraphFor(signature); print(json.dumps(results))
apache-2.0
-5,727,343,849,390,877,000
28.944915
119
0.606622
false
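Besides the argparse entry point, the module above can be driven programmatically. A sketch, assuming the file is importable as chromium_code_search and that the cs.chromium.org JSON backend is reachable (results depend on the live index, and the file/method pair is a hypothetical target):

import chromium_code_search as cs  # assumes lib/ is on sys.path

cs.cacheResponses(True)  # cache responses in temp files for 30 minutes
sig = cs.getSignatureFor('src/base/logging.h', 'LogMessage')  # hypothetical target
if sig:
    print(sorted(cs.getXrefsFor(sig).keys()))  # e.g. ['declaration', 'definition', ...]
    print(len(cs.getCallGraphFor(sig)))        # number of call sites found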
jeromesaiz/autoXLIFF
addXLIFF.py
1
7701
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

__description__ = 'A companion tool to autoXLIFF.py. Helps batch-add translation strings to existing XLIFF documents (useful to add arbitrary strings like those that appear in files other than twig templates, since those are not picked up automatically by autoXLIFF). Example: form labels in controllers.'
__author__ = 'Jerome Saiz (https://twitter.com/jeromesaiz)'
__version__ = '0.0.2a'
__date__ = '2016/05/12'

# Coloring definition
INFO = '\033[94m'
OK = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'

import os
import sys
import re
import argparse

try:
    from lxml import etree
except ImportError:
    print FAIL+'FATAL : '+ENDC+' This program requires the lxml module. Please '+INFO+'pip install lxml'+ENDC
    sys.exit(1)

# Define command-line arguments
# @input : VOID
# @return : argparse.Namespace
def get_args():
    parser = argparse.ArgumentParser(description='A companion tool to autoXLIFF.py. Helps batch-add translation strings to existing XLIFF files.')
    parser.add_argument("app_path",
                        help="Absolute path to your web application root directory",
                        type=str,
                        )
    parser.add_argument("source",
                        help="Absolute path to source file storing the translation tokens to add",
                        type=str,
                        )
    parser.add_argument("locfile",
                        help="Locale file to be edited (if no extension provided will search for .xlf and .xliff files as well)",
                        type=str,
                        )
    parser.add_argument("--locdir",
                        help="Relative path to your localization files within the web application directory structure (default to locales/)",
                        type=str,
                        )
    parser.add_argument("--dry",
                        help="Dry run mode : do not commit any change to files, only output the modified XLIFF to screen",
                        action="store_true",
                        )
    return parser.parse_args()

# Process command-line arguments to validate path, determine full paths, etc...
# @input argparse.Namespace
# @return LIST (STR path to localization file, FILE (descriptor for language file opened from disk)
def get_setup(args):
    if not os.path.exists(args.app_path):
        print FAIL+'FATAL : '+ENDC+args.app_path+' does not seem to be workable. Try another directory.'
        sys.exit(1)

    # determine the localization files directory
    if args.locdir:
        locpath = os.path.join(args.app_path,args.locdir)
    else:
        locpath = os.path.join(args.app_path,'locales')

    # determine locfile full path. We also look for .xlf or .xliff extensions if not provided
    locfile = os.path.normpath(os.path.join(locpath,args.locfile))
    try:
        f = open(locfile, "r+")
    except IOError:
        try:
            f = open(locfile+'.xlf', "r+")
            locfile = locfile+'.xlf'
        except IOError:
            try:
                f = open(locfile+'.xliff', "r+")
                locfile = locfile+'.xliff'
            except IOError:
                # localization file does not exist. It will need to be created first with autoXLIFF
                print FAIL+'FATAL : '+ENDC+locfile+' does not seem to exist. Try running autoXLIFF first.'
                sys.exit(1)

    # check for source file
    if not os.path.isfile(args.source):
        print FAIL+'FATAL : '+ENDC+'Source file '+args.source+' does not seem to exist. Try another path or create it first.'
        sys.exit(1)

    return locfile,f

def get_source(sourcefile):
    tokens = [line.rstrip('\n') for line in open(sourcefile,'r')]
    tokens = filter(None, tokens) # remove empty lines
    if not tokens:
        print FAIL+'FATAL : '+ENDC+'Source file '+sourcefile+' is empty. Please add translation tokens.'
sys.exit(1) return tokens # Loads a XLIFF structure from file and does a quick validation # @intput STRING (absolute path to XLIFF language file to load) # @return False if file is no XML or not XLIFF # @return object (etree Element) as XML root if content is valid XLIFF # @return STRING as XLIFF namespace of document or None if importation triggered an error def load_xliff(xml_content): try: xml = etree.parse(xml_content) root = xml.getroot() try: ns = re.search(r'({.*})xliff', root.tag).group(1) # extract existing namespace, if any except AttributeError: ns = '' except: print 'Error : XML import failed (from file '+xml_content+')' print 'Message : ', except_class,except_message,except_tb = sys.exc_info() print except_message return False,None return root,ns # Gets a list of all trans-units already defined within the loaded XML object # @input object (Element) representing the XLIFF root # @input STRING as the XLIFF namespace # @return SET (list of existing trans-units) def get_trans_units(root,ns): trans=set() for elem in root.iter(tag=ns+'trans-unit'): trans.add(elem.attrib.get('id')) print 'Found a total of '+str(len(trans))+' trans keywords already defined in your XLIFF file' return trans # Remove existing transunits from source file # @input SET (list of existing trans-units within the XLIFF language file) # @input LIST (list of new trans-unit keywords to add, coming from source file) # @return SET (new list of trans-unit keywords without doubles) def prune(trans,keywords): doubles = trans & set(keywords) for elem in doubles: keywords.remove(elem) return set(keywords) # Adds trans keywords to XML structure from source file and writes it back # @input object (Element) representing XLIFF root # @input STRING as the XLIFF namespace # @input SET (list of existing trans-units within the XLIFF language file) # @input SET (list of new trans-unit keywords to add) # @input STRING (path to XLIFF file) # @input FILE (file descriptor to XLIFF file) # @input argparse.Namespace # @return VOID def update_locfile(root,ns,trans,keywords,locfile,f,args): print "\nOperations :" # check if file need to be updated if len(keywords) == 0: print 'Nothing to update\n' if f: f.close() return # Add the new trans-units for elem in root.iter(tag=ns+'body'): #ugly hack to skip over other elements between root and body body = elem # Create the new trans-unit elements for transunit in keywords: tu = etree.SubElement(body,'trans-unit', id=transunit) so = etree.SubElement(tu,'source') ta = etree.SubElement(tu,'target') so.text = transunit print '\t'+OK+'adding\t\t'+ENDC, transunit # Attach our root to a new XML structure xml = etree.ElementTree(root) # And write it back ! # ... to stdout if in dry run mode if args.dry: print '\nDumping file (dry run mode) :\n-------- XML file dump --------' xml.write(sys.stdout, encoding="utf-8", xml_declaration=True) print '\n-------------------------------\n' # ... or to disk else: # create file for writing if it does not exist, or clear existing file from existing content if None == f: f = open(locfile,'w') else: os.ftruncate(f.fileno(), 0) os.lseek(f.fileno(), 0, os.SEEK_SET) # And write it try: xml.write(f, encoding="utf-8", xml_declaration=True) f.close() print INFO+'\nDone !'+ENDC+' File saved. Now go translate those strings !\n' except IOError: print FAIL+'FATAL : '+ENDC+' could not write back to file '+locfile # Let's go ! 
def Main(): args = get_args() setup = get_setup(args) locfile = setup[0] f = setup[1] # file descriptor of language file # load XLIFF structure from the existing language file root,ns = load_xliff(locfile) # get existing trans-units from XLIFF object trans = get_trans_units(root,ns) # get new trans-units to add from source file keywords = get_source(args.source) # remove trans-units that already exist from the source keywords keywords = prune(trans,keywords) # Update language file update_locfile(root,ns,trans,keywords,locfile,f,args) # Execute if __name__ == '__main__': Main()
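A minimal invocation sketch for this companion tool (the script file name and all paths below are hypothetical; the positional arguments are app_path, source and locfile, as declared in get_args):

# tokens.txt holds one translation key per line; 'messages.fr' resolves to messages.fr.xlf under <app_path>/locales/
# python batch_xliff.py /var/www/myapp /var/www/myapp/tokens.txt messages.fr --dry   (preview only)
# python batch_xliff.py /var/www/myapp /var/www/myapp/tokens.txt messages.fr         (write changes to disk)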
mit
-6,682,406,918,590,872,000
34.652778
304
0.692248
false
coreos/depot_tools
third_party/boto/pyami/copybot.py
102
4273
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import boto from boto.pyami.scriptbase import ScriptBase import os, StringIO class CopyBot(ScriptBase): def __init__(self): ScriptBase.__init__(self) self.wdir = boto.config.get('Pyami', 'working_dir') self.log_file = '%s.log' % self.instance_id self.log_path = os.path.join(self.wdir, self.log_file) boto.set_file_logger(self.name, self.log_path) self.src_name = boto.config.get(self.name, 'src_bucket') self.dst_name = boto.config.get(self.name, 'dst_bucket') self.replace = boto.config.getbool(self.name, 'replace_dst', True) s3 = boto.connect_s3() self.src = s3.lookup(self.src_name) if not self.src: boto.log.error('Source bucket does not exist: %s' % self.src_name) dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None) if dest_access_key: dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None) s3 = boto.connect_s3(dest_access_key, dest_secret_key) self.dst = s3.lookup(self.dst_name) if not self.dst: self.dst = s3.create_bucket(self.dst_name) def copy_bucket_acl(self): if boto.config.getbool(self.name, 'copy_acls', True): acl = self.src.get_xml_acl() self.dst.set_xml_acl(acl) def copy_key_acl(self, src, dst): if boto.config.getbool(self.name, 'copy_acls', True): acl = src.get_xml_acl() dst.set_xml_acl(acl) def copy_keys(self): boto.log.info('src=%s' % self.src.name) boto.log.info('dst=%s' % self.dst.name) try: for key in self.src: if not self.replace: exists = self.dst.lookup(key.name) if exists: boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name)) continue boto.log.info('copying %d bytes from key=%s' % (key.size, key.name)) prefix, base = os.path.split(key.name) path = os.path.join(self.wdir, base) key.get_contents_to_filename(path) new_key = self.dst.new_key(key.name) new_key.set_contents_from_filename(path) self.copy_key_acl(key, new_key) os.unlink(path) except: boto.log.exception('Error copying key: %s' % key.name) def copy_log(self): key = self.dst.new_key(self.log_file) key.set_contents_from_filename(self.log_path) def main(self): fp = StringIO.StringIO() boto.config.dump_safe(fp) self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue()) if self.src and self.dst: self.copy_keys() if self.dst: self.copy_log() self.notify('%s (%s) Stopping' % (self.name, self.instance_id), 'Copy Operation Complete') if boto.config.getbool(self.name, 'exit_on_completion', True): ec2 = boto.connect_ec2()
ec2.terminate_instances([self.instance_id])
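CopyBot is driven entirely by the boto/pyami configuration rather than by command-line flags. A sketch of the options it reads (the section name must match the script name pyami runs it under, shown here as CopyBot; bucket names are illustrative, and the dest_aws_* keys are only needed when the destination bucket lives under different credentials):

[Pyami]
working_dir = /mnt/pyami

[CopyBot]
src_bucket = my-source-bucket
dst_bucket = my-destination-bucket
replace_dst = True
copy_acls = True
exit_on_completion = True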
bsd-3-clause
-5,595,883,459,057,661,000
43.051546
106
0.613386
false
pdebuyl/numpy
numpy/polynomial/tests/test_polynomial.py
10
20035
"""Tests for polynomial module. """ from functools import reduce import numpy as np import numpy.polynomial.polynomial as poly from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, assert_warns, assert_array_equal, assert_raises_regex) def trim(x): return poly.polytrim(x, tol=1e-6) T0 = [1] T1 = [0, 1] T2 = [-1, 0, 2] T3 = [0, -3, 0, 4] T4 = [1, 0, -8, 0, 8] T5 = [0, 5, 0, -20, 0, 16] T6 = [-1, 0, 18, 0, -48, 0, 32] T7 = [0, -7, 0, 56, 0, -112, 0, 64] T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] class TestConstants: def test_polydomain(self): assert_equal(poly.polydomain, [-1, 1]) def test_polyzero(self): assert_equal(poly.polyzero, [0]) def test_polyone(self): assert_equal(poly.polyone, [1]) def test_polyx(self): assert_equal(poly.polyx, [0, 1]) class TestArithmetic: def test_polyadd(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = poly.polyadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polysub(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = poly.polysub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polymulx(self): assert_equal(poly.polymulx([0]), [0]) assert_equal(poly.polymulx([1]), [0, 1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i + 1) + [1] assert_equal(poly.polymulx(ser), tgt) def test_polymul(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" tgt = np.zeros(i + j + 1) tgt[i + j] += 1 res = poly.polymul([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polydiv(self): # check zero division assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) # check scalar division quo, rem = poly.polydiv([2], [2]) assert_equal((quo, rem), (1, 0)) quo, rem = poly.polydiv([2, 2], [2]) assert_equal((quo, rem), ((1, 1), 0)) # check rest. 
for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" ci = [0]*i + [1, 2] cj = [0]*j + [1, 2] tgt = poly.polyadd(ci, cj) quo, rem = poly.polydiv(tgt, ci) res = poly.polyadd(poly.polymul(quo, ci), rem) assert_equal(res, tgt, err_msg=msg) def test_polypow(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) tgt = reduce(poly.polymul, [c]*j, np.array([1])) res = poly.polypow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) class TestEvaluation: # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([1., 2., 3.]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 y = poly.polyval(x, [1., 2., 3.]) def test_polyval(self): #check empty input assert_equal(poly.polyval([], [1]).size, 0) #check normal input) x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(5): tgt = y[i] res = poly.polyval(x, [0]*i + [1]) assert_almost_equal(res, tgt) tgt = x*(x**2 - 1) res = poly.polyval(x, [0, -1, 0, 1]) assert_almost_equal(res, tgt) #check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(poly.polyval(x, [1]).shape, dims) assert_equal(poly.polyval(x, [1, 0]).shape, dims) assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) #check masked arrays are processed correctly mask = [False, True, False] mx = np.ma.array([1, 2, 3], mask=mask) res = np.polyval([7, 5, 3], mx) assert_array_equal(res.mask, mask) #check subtypes of ndarray are preserved class C(np.ndarray): pass cx = np.array([1, 2, 3]).view(C) assert_equal(type(np.polyval([2, 3, 4], cx)), C) def test_polyvalfromroots(self): # check exception for broadcasting x values over root array with # too few dimensions assert_raises(ValueError, poly.polyvalfromroots, [1], [1], tensor=False) # check empty input assert_equal(poly.polyvalfromroots([], [1]).size, 0) assert_(poly.polyvalfromroots([], [1]).shape == (0,)) # check empty input + multidimensional roots assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) # check scalar input assert_equal(poly.polyvalfromroots(1, 1), 0) assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) # check normal input) x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(1, 5): tgt = y[i] res = poly.polyvalfromroots(x, [0]*i) assert_almost_equal(res, tgt) tgt = x*(x - 1)*(x + 1) res = poly.polyvalfromroots(x, [-1, 0, 1]) assert_almost_equal(res, tgt) # check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) # check compatibility with factorization ptest = [15, 2, -16, -2, 1] r = poly.polyroots(ptest) x = np.linspace(-1, 1) assert_almost_equal(poly.polyval(x, ptest), poly.polyvalfromroots(x, r)) # check multidimensional arrays of roots and values # check tensor=False rshape = (3, 5) x = np.arange(-3, 2) r = np.random.randint(-5, 5, size=rshape) res = poly.polyvalfromroots(x, r, tensor=False) tgt = np.empty(r.shape[1:]) for ii in range(tgt.size): tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) assert_equal(res, tgt) # check tensor=True x = np.vstack([x, 2*x]) res = poly.polyvalfromroots(x, r, tensor=True) tgt = np.empty(r.shape[1:] + x.shape) for ii in range(r.shape[1]): for jj in range(x.shape[0]): tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) 
assert_equal(res, tgt) def test_polyval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval2d, x1, x2[:2], self.c2d) #test values tgt = y1*y2 res = poly.polyval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = poly.polyval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) def test_polyval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval3d, x1, x2, x3[:2], self.c3d) #test values tgt = y1*y2*y3 res = poly.polyval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j->ij', y1, y2) res = poly.polygrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = poly.polygrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) def test_polygrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = poly.polygrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = poly.polygrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) class TestIntegral: def test_polyint(self): # check exceptions assert_raises(TypeError, poly.polyint, [0], .5) assert_raises(ValueError, poly.polyint, [0], -1) assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) assert_raises(ValueError, poly.polyint, [0], scl=[0]) assert_raises(TypeError, poly.polyint, [0], axis=.5) with assert_warns(DeprecationWarning): poly.polyint([1, 1], 1.) 
# test integration of zero polynomial for i in range(2, 5): k = [0]*(i - 2) + [1] res = poly.polyint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [1/scl] res = poly.polyint(pol, m=1, k=[i]) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 pol = [0]*i + [1] res = poly.polyint(pol, m=1, k=[i], lbnd=-1) assert_almost_equal(poly.polyval(-1, res), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [2/scl] res = poly.polyint(pol, m=1, k=[i], scl=2) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1) res = poly.polyint(pol, m=j) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with defined k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k]) res = poly.polyint(pol, m=j, k=list(range(j))) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with scaling for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k], scl=2) res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) assert_almost_equal(trim(res), trim(tgt)) def test_polyint_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T res = poly.polyint(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([poly.polyint(c) for c in c2d]) res = poly.polyint(c2d, axis=1) assert_almost_equal(res, tgt) tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) res = poly.polyint(c2d, k=3, axis=1) assert_almost_equal(res, tgt) class TestDerivative: def test_polyder(self): # check exceptions assert_raises(TypeError, poly.polyder, [0], .5) assert_raises(ValueError, poly.polyder, [0], -1) # check that zeroth derivative does nothing for i in range(5): tgt = [0]*i + [1] res = poly.polyder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = poly.polyder(poly.polyint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) def test_polyder_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T res = poly.polyder(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([poly.polyder(c) for c in c2d]) res = poly.polyder(c2d, axis=1) assert_almost_equal(res, tgt) class TestVander: # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 def test_polyvander(self): # check for 1d x x = np.arange(3) v = poly.polyvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., 
i], poly.polyval(x, coef)) # check for 2d x x = np.array([[1, 2], [3, 4], [5, 6]]) v = poly.polyvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], poly.polyval(x, coef)) def test_polyvander2d(self): # also tests polyval2d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3)) van = poly.polyvander2d(x1, x2, [1, 2]) tgt = poly.polyval2d(x1, x2, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = poly.polyvander2d([x1], [x2], [1, 2]) assert_(van.shape == (1, 5, 6)) def test_polyvander3d(self): # also tests polyval3d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3, 4)) van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) tgt = poly.polyval3d(x1, x2, x3, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) class TestCompanion: def test_raises(self): assert_raises(ValueError, poly.polycompanion, []) assert_raises(ValueError, poly.polycompanion, [1]) def test_dimensions(self): for i in range(1, 5): coef = [0]*i + [1] assert_(poly.polycompanion(coef).shape == (i, i)) def test_linear_root(self): assert_(poly.polycompanion([1, 2])[0, 0] == -.5) class TestMisc: def test_polyfromroots(self): res = poly.polyfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) tgt = Tlist[i] res = poly.polyfromroots(roots)*2**(i-1) assert_almost_equal(trim(res), trim(tgt)) def test_polyroots(self): assert_almost_equal(poly.polyroots([1]), []) assert_almost_equal(poly.polyroots([1, 2]), [-.5]) for i in range(2, 5): tgt = np.linspace(-1, 1, i) res = poly.polyroots(poly.polyfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) def test_polyfit(self): def f(x): return x*(x - 1)*(x - 2) def f2(x): return x**4 + x**2 + 1 # Test exceptions assert_raises(ValueError, poly.polyfit, [1], [1], -1) assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) assert_raises(TypeError, poly.polyfit, [], [1], 0) assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) assert_raises(TypeError, poly.polyfit, [1], [1], []) # Test fit x = np.linspace(0, 2) y = f(x) # coef3 = poly.polyfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(poly.polyval(x, coef3), y) coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) assert_equal(len(coef3), 4) assert_almost_equal(poly.polyval(x, coef3), y) # coef4 = poly.polyfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(poly.polyval(x, coef4), y) coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4]) assert_equal(len(coef4), 5) assert_almost_equal(poly.polyval(x, coef4), y) # coef2d = poly.polyfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3]) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() w[1::2] = 1 yw[0::2] = 0 wcoef3 = poly.polyfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef3, coef3) # wcoef2d = poly.polyfit(x, 
np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(poly.polyfit(x, x, 1), [0, 1]) assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1]) # test fitting only even polynomials x = np.linspace(-1, 1) y = f2(x) coef1 = poly.polyfit(x, y, 4) assert_almost_equal(poly.polyval(x, coef1), y) coef2 = poly.polyfit(x, y, [0, 2, 4]) assert_almost_equal(poly.polyval(x, coef2), y) assert_almost_equal(coef1, coef2) def test_polytrim(self): coef = [2, -1, 1, 0] # Test exceptions assert_raises(ValueError, poly.polytrim, coef, -1) # Test results assert_equal(poly.polytrim(coef), coef[:-1]) assert_equal(poly.polytrim(coef, 1), coef[:-3]) assert_equal(poly.polytrim(coef, 2), [0]) def test_polyline(self): assert_equal(poly.polyline(3, 4), [3, 4])
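For orientation, the fit/eval round trip these tests exercise can be reproduced standalone with the public API (self-contained; nothing here is fixture code):

import numpy as np
import numpy.polynomial.polynomial as poly

x = np.linspace(0, 2, 50)
y = x * (x - 1) * (x - 2)      # the cubic from test_polyfit's f(x)
coef = poly.polyfit(x, y, 3)   # least-squares fit of degree 3
assert np.allclose(poly.polyval(x, coef), y)   # evaluation recovers the samples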
bsd-3-clause
-8,480,984,396,433,499,000
32.785835
78
0.507811
false
orion1024/Sick-Beard
sickbeard/nzbSplitter.py
35
5802
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib2 import xml.etree.cElementTree as etree import xml.etree import re from name_parser.parser import NameParser, InvalidNameException from sickbeard import logger, classes, helpers from sickbeard.common import Quality def getSeasonNZBs(name, urlData, season): try: showXML = etree.ElementTree(etree.XML(urlData)) except SyntaxError: logger.log(u"Unable to parse the XML of "+name+", not splitting it", logger.DEBUG) return ({},'') filename = name.replace(".nzb", "") nzbElement = showXML.getroot() regex = '([\w\._\ ]+)[\. ]S%02d[\. ]([\w\._\-\ ]+)[\- ]([\w_\-\ ]+?)' % season sceneNameMatch = re.search(regex, filename, re.I) if sceneNameMatch: showName, qualitySection, groupName = sceneNameMatch.groups() #@UnusedVariable else: logger.log(u"Unable to parse "+name+" into a scene name. If it's a valid one log a bug.", logger.ERROR) return ({},'') regex = '(' + re.escape(showName) + '\.S%02d(?:[E0-9]+)\.[\w\._]+\-\w+' % season + ')' regex = regex.replace(' ', '.') epFiles = {} xmlns = None for curFile in nzbElement.getchildren(): xmlnsMatch = re.match("\{(http:\/\/[A-Za-z0-9_\.\/]+\/nzb)\}file", curFile.tag) if not xmlnsMatch: continue else: xmlns = xmlnsMatch.group(1) match = re.search(regex, curFile.get("subject"), re.I) if not match: #print curFile.get("subject"), "doesn't match", regex continue curEp = match.group(1) if curEp not in epFiles: epFiles[curEp] = [curFile] else: epFiles[curEp].append(curFile) return (epFiles, xmlns) def createNZBString(fileElements, xmlns): rootElement = etree.Element("nzb") if xmlns: rootElement.set("xmlns", xmlns) for curFile in fileElements: rootElement.append(stripNS(curFile, xmlns)) return xml.etree.ElementTree.tostring(rootElement, 'utf-8') def saveNZB(nzbName, nzbString): nzb_fh = open(nzbName+".nzb", 'w') nzb_fh.write(nzbString) nzb_fh.close() def stripNS(element, ns): element.tag = element.tag.replace("{"+ns+"}", "") for curChild in element.getchildren(): stripNS(curChild, ns) return element def splitResult(result): urlData = helpers.getURL(result.url) if urlData is None: logger.log(u"Unable to load url "+result.url+", can't download season NZB", logger.ERROR) return False # parse the season ep name try: np = NameParser(False) parse_result = np.parse(result.name) except InvalidNameException: logger.log(u"Unable to parse the filename "+result.name+" into a valid episode", logger.WARNING) return False # bust it up season = parse_result.season_number if parse_result.season_number != None else 1 separateNZBs, xmlns = getSeasonNZBs(result.name, urlData, season) resultList = [] for newNZB in separateNZBs: logger.log(u"Split out "+newNZB+" from "+result.name, logger.DEBUG) # parse the name try: np = NameParser(False) parse_result = np.parse(newNZB) except InvalidNameException: logger.log(u"Unable to parse the filename 
"+newNZB+" into a valid episode", logger.WARNING) return False # make sure the result is sane if (parse_result.season_number != None and parse_result.season_number != season) or (parse_result.season_number == None and season != 1): logger.log(u"Found "+newNZB+" inside "+result.name+" but it doesn't seem to belong to the same season, ignoring it", logger.WARNING) continue elif len(parse_result.episode_numbers) == 0: logger.log(u"Found "+newNZB+" inside "+result.name+" but it doesn't seem to be a valid episode NZB, ignoring it", logger.WARNING) continue wantEp = True for epNo in parse_result.episode_numbers: if not result.extraInfo[0].wantEpisode(season, epNo, result.quality): logger.log(u"Ignoring result "+newNZB+" because we don't want an episode that is "+Quality.qualityStrings[result.quality], logger.DEBUG) wantEp = False break if not wantEp: continue # get all the associated episode objects epObjList = [] for curEp in parse_result.episode_numbers: epObjList.append(result.extraInfo[0].getEpisode(season, curEp)) # make a result curResult = classes.NZBDataSearchResult(epObjList) curResult.name = newNZB curResult.provider = result.provider curResult.quality = result.quality curResult.extraInfo = [createNZBString(separateNZBs[newNZB], xmlns)] resultList.append(curResult) return resultList
gpl-3.0
-2,497,992,401,074,617,000
32.754491
152
0.621855
false
raonyguimaraes/mendelmd
individuals/urls.py
1
1500
from django.conf.urls import * from individuals.views import IndividualDeleteView, GroupDeleteView from django.contrib.admin.views.decorators import staff_member_required from . import views urlpatterns = [ url(r'^create/$', views.create, name='individual_create'), url(r'^edit/(?P<individual_id>[0-9]+)/$', views.edit, name='individual_edit'), url(r'^view/(?P<individual_id>\d+)/$', views.view, name='individual_view'), url(r'^browse/(?P<individual_id>\d+)/$', views.browse, name='individual_browse'), url(r'^delete/(?P<pk>\d+)$', staff_member_required(IndividualDeleteView.as_view()), {}, 'individual_delete'), url(r'^annotate/(?P<individual_id>\d+)/$', views.annotate, name='individual_annotate'), url(r'^populate/(?P<individual_id>\d+)/$', views.populate, name='individual_populate'), url(r'^populate_mongo/(?P<individual_id>\d+)/$', views.populate_mongo, name='individual_populate_mongo'), url(r'^$', views.list, name='individuals_list'), url(r'^download/(?P<individual_id>\d+)/$', views.download, name='individual_download'), url(r'^download_annotated/(?P<individual_id>\d+)/$', views.download_annotated, name='individual_download_annotated'), url(r'^create_group/$', views.create_group, name='create_group'), url(r'^view_group/(?P<group_id>\d+)/$', views.view_group, name='view_group'), url(r'^delete_group/(?P<pk>\d+)$', GroupDeleteView.as_view(), {}, 'group_delete'), url(r'^comparison/$', views.comparison, name='comparison'), ]
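Because every pattern is named, the rest of the application can build these URLs symbolically; a small sketch (the /individuals/ prefix assumes this URLconf is included under that path in the project ROOT_URLCONF):

from django.core.urlresolvers import reverse  # django.urls in newer Django
reverse('individual_view', args=[42])   # -> '/individuals/view/42/'
reverse('comparison')                   # -> '/individuals/comparison/'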
bsd-3-clause
104,670,721,587,466,820
59.04
121
0.674
false
ksophocleous/grpc
src/python/grpcio/grpc/framework/interfaces/links/utilities.py
29
1810
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Utilities provided as part of the links interface.""" from grpc.framework.interfaces.links import links class _NullLink(links.Link): """A do-nothing links.Link.""" def accept_ticket(self, ticket): pass def join_link(self, link): pass NULL_LINK = _NullLink()
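NULL_LINK serves as an inert default peer when wiring link chains; a hedged sketch (some_link stands for any concrete links.Link implementation):

from grpc.framework.interfaces.links import utilities
some_link.join_link(utilities.NULL_LINK)  # tickets forwarded downstream are silently discarded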
bsd-3-clause
7,096,764,880,075,014,000
40.136364
72
0.767956
false
vmax-feihu/hue
desktop/core/ext-py/Django-1.6.10/django/utils/termcolors.py
117
6948
""" termcolors.py """ from django.utils import six color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white') foreground = dict([(color_names[x], '3%s' % x) for x in range(8)]) background = dict([(color_names[x], '4%s' % x) for x in range(8)]) RESET = '0' opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'} def colorize(text='', opts=(), **kwargs): """ Returns your text, enclosed in ANSI graphics codes. Depends on the keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list. Returns the RESET code if no parameters are given. Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold' 'underscore' 'blink' 'reverse' 'conceal' 'noreset' - string will not be auto-terminated with the RESET code Examples: colorize('hello', fg='red', bg='blue', opts=('blink',)) colorize() colorize('goodbye', opts=('underscore',)) print(colorize('first line', fg='red', opts=('noreset',))) print('this should be red too') print(colorize('and so should this')) print('this should not be red') """ code_list = [] if text == '' and len(opts) == 1 and opts[0] == 'reset': return '\x1b[%sm' % RESET for k, v in six.iteritems(kwargs): if k == 'fg': code_list.append(foreground[v]) elif k == 'bg': code_list.append(background[v]) for o in opts: if o in opt_dict: code_list.append(opt_dict[o]) if 'noreset' not in opts: text = '%s\x1b[%sm' % (text or '', RESET) return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '') def make_style(opts=(), **kwargs): """ Returns a function with default parameters for colorize() Example: bold_red = make_style(opts=('bold',), fg='red') print(bold_red('hello')) KEYWORD = make_style(fg='yellow') COMMENT = make_style(fg='blue', opts=('bold',)) """ return lambda text: colorize(text, opts, **kwargs) NOCOLOR_PALETTE = 'nocolor' DARK_PALETTE = 'dark' LIGHT_PALETTE = 'light' PALETTES = { NOCOLOR_PALETTE: { 'ERROR': {}, 'NOTICE': {}, 'SQL_FIELD': {}, 'SQL_COLTYPE': {}, 'SQL_KEYWORD': {}, 'SQL_TABLE': {}, 'HTTP_INFO': {}, 'HTTP_SUCCESS': {}, 'HTTP_REDIRECT': {}, 'HTTP_NOT_MODIFIED': {}, 'HTTP_BAD_REQUEST': {}, 'HTTP_NOT_FOUND': {}, 'HTTP_SERVER_ERROR': {}, }, DARK_PALETTE: { 'ERROR': { 'fg': 'red', 'opts': ('bold',) }, 'NOTICE': { 'fg': 'red' }, 'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) }, 'SQL_COLTYPE': { 'fg': 'green' }, 'SQL_KEYWORD': { 'fg': 'yellow' }, 'SQL_TABLE': { 'opts': ('bold',) }, 'HTTP_INFO': { 'opts': ('bold',) }, 'HTTP_SUCCESS': { }, 'HTTP_REDIRECT': { 'fg': 'green' }, 'HTTP_NOT_MODIFIED': { 'fg': 'cyan' }, 'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) }, 'HTTP_NOT_FOUND': { 'fg': 'yellow' }, 'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) }, }, LIGHT_PALETTE: { 'ERROR': { 'fg': 'red', 'opts': ('bold',) }, 'NOTICE': { 'fg': 'red' }, 'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) }, 'SQL_COLTYPE': { 'fg': 'green' }, 'SQL_KEYWORD': { 'fg': 'blue' }, 'SQL_TABLE': { 'opts': ('bold',) }, 'HTTP_INFO': { 'opts': ('bold',) }, 'HTTP_SUCCESS': { }, 'HTTP_REDIRECT': { 'fg': 'green', 'opts': ('bold',) }, 'HTTP_NOT_MODIFIED': { 'fg': 'green' }, 'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) }, 'HTTP_NOT_FOUND': { 'fg': 'red' }, 'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) }, } } DEFAULT_PALETTE = DARK_PALETTE def parse_color_setting(config_string): """Parse a DJANGO_COLORS environment variable to produce the system palette The general form of a pallete definition is: 
"palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option" where: palette is a named palette; one of 'light', 'dark', or 'nocolor'. role is a named style used by Django fg is a background color. bg is a background color. option is a display options. Specifying a named palette is the same as manually specifying the individual definitions for each role. Any individual definitions following the pallete definition will augment the base palette definition. Valid roles: 'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table', 'http_info', 'http_success', 'http_redirect', 'http_bad_request', 'http_not_found', 'http_server_error' Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold', 'underscore', 'blink', 'reverse', 'conceal' """ if not config_string: return PALETTES[DEFAULT_PALETTE] # Split the color configuration into parts parts = config_string.lower().split(';') palette = PALETTES[NOCOLOR_PALETTE].copy() for part in parts: if part in PALETTES: # A default palette has been specified palette.update(PALETTES[part]) elif '=' in part: # Process a palette defining string definition = {} # Break the definition into the role, # plus the list of specific instructions. # The role must be in upper case role, instructions = part.split('=') role = role.upper() styles = instructions.split(',') styles.reverse() # The first instruction can contain a slash # to break apart fg/bg. colors = styles.pop().split('/') colors.reverse() fg = colors.pop() if fg in color_names: definition['fg'] = fg if colors and colors[-1] in color_names: definition['bg'] = colors[-1] # All remaining instructions are options opts = tuple(s for s in styles if s in opt_dict.keys()) if opts: definition['opts'] = opts # The nocolor palette has all available roles. # Use that palette as the basis for determining # if the role is valid. if role in PALETTES[NOCOLOR_PALETTE] and definition: palette[role] = definition # If there are no colors specified, return the empty palette. if palette == PALETTES[NOCOLOR_PALETTE]: return None return palette
apache-2.0
-1,743,803,650,243,122,000
33.74
89
0.525187
false
kaplun/invenio
modules/miscutil/lib/dateutils.py
11
18834
# -*- coding: utf-8 -*- ## ## Some functions about dates ## ## This file is part of Invenio. ## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ API for date conversion and date related GUI creation. Lexicon datetext: textual format => 'YEAR-MONTH-DAY HOUR:MINUTE:SECOND' e.g. '2005-11-16 15:11:44' default value: '0000-00-00 00:00:00' datestruct: tuple format => see http://docs.python.org/lib/module-time.html (YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, WEEKDAY, YEARDAY, DAYLIGHT) e.g. (2005, 11, 16, 15, 11, 44, 2, 320, 0) default value: (0, 0, 0, 0, 0, 0, 0, 0, 0) dategui: textual format for output => 'DAY MONTH YEAR, HOUR:MINUTE' e.g. '16 nov 2005, 15:11' default value: _("N/A") """ __revision__ = "$Id$" import re import time from datetime import date as real_date, \ datetime as real_datetime, \ time as real_time, \ timedelta from invenio.config import CFG_SITE_LANG from invenio.messages import gettext_set_language try: from mx.DateTime import Parser CFG_HAS_EGENIX_DATETIME = True except ImportError: CFG_HAS_EGENIX_DATETIME = False datetext_default = '0000-00-00 00:00:00' datestruct_default = (0, 0, 0, 0, 0, 0, 0, 0, 0) datetext_format = "%Y-%m-%d %H:%M:%S" class date(real_date): def strftime(self, fmt): return strftime(fmt, self) class datetime(real_datetime): def strftime(self, fmt): return strftime(fmt, self) def __add__(self, other): d = real_datetime.combine(self, self.timetz()) d += other return self.combine(d, d.timetz()) def date(self): return date(self.year, self.month, self.day) @staticmethod def strptime(date_string, format): return datetime(*(time.strptime(date_string, format)[0:6])) def convert_datetext_to_dategui(datetext, ln=CFG_SITE_LANG, secs=False): """ Convert: '2005-11-16 15:11:57' => '16 nov 2005, 15:11' Or optionally with seconds: '2005-11-16 15:11:57' => '16 nov 2005, 15:11:57' Month is internationalized """ try: datestruct = convert_datetext_to_datestruct(datetext) if datestruct == datestruct_default: raise ValueError month = get_i18n_month_name(datestruct[1], ln=ln) if secs: output_format = "%d " + month + " %Y, %H:%M:%S" else: output_format = "%d " + month + " %Y, %H:%M" return strftime(output_format, datestruct) except: _ = gettext_set_language(ln) return _("N/A") def convert_datetext_to_datestruct(datetext): """ Convert: '2005-11-16 15:11:57' => (2005, 11, 16, 15, 11, 44, 2, 320, 0) """ try: return time.strptime(datetext, datetext_format) except: return datestruct_default def convert_datestruct_to_dategui(datestruct, ln=CFG_SITE_LANG): """ Convert: (2005, 11, 16, 15, 11, 44, 2, 320, 0) => '16 nov 2005, 15:11' Month is internationalized """ try: if datestruct[0] and datestruct[1] and datestruct[2]: month = get_i18n_month_name(datestruct[1], ln=ln) output_format = "%d " + month + " %Y, %H:%M" return strftime(output_format, datestruct) else: raise 
ValueError except: _ = gettext_set_language(ln) return _("N/A") def convert_datestruct_to_datetext(datestruct): """ Convert: (2005, 11, 16, 15, 11, 44, 2, 320, 0) => '2005-11-16 15:11:57' """ try: return strftime(datetext_format, datestruct) except: return datetext_default def convert_datecvs_to_datestruct(datecvs): """ Convert CVS $Date$ and $Id$ formats into datestruct. Useful for later conversion of Last updated timestamps in the page footers. Example: '$Date$' => (2006, 09, 20, 19, 27, 11, 0, 0) """ try: if datecvs.startswith("$Id"): date_time = ' '.join(datecvs.split(" ")[3:5]) return time.strptime(date_time, '%Y/%m/%d %H:%M:%S') else: # here we have to use '$' + 'Date...' here, otherwise the CVS # commit would erase this time format to put commit date: return time.strptime(datecvs, '$' + 'Date: %Y/%m/%d %H:%M:%S $') except ValueError: return datestruct_default def get_datetext(year, month, day): """ year=2005, month=11, day=16 => '2005-11-16 00:00:00' """ input_format = "%Y-%m-%d" try: datestruct = time.strptime("%i-%i-%i"% (year, month, day), input_format) return strftime(datetext_format, datestruct) except: return datetext_default def get_datestruct(year, month, day): """ year=2005, month=11, day=16 => (2005, 11, 16, 0, 0, 0, 2, 320, -1) """ input_format = "%Y-%m-%d" try: return time.strptime("%i-%i-%i"% (year, month, day), input_format) except (ValueError, TypeError): return datestruct_default def get_i18n_day_name(day_nb, display='short', ln=CFG_SITE_LANG): """ get the string representation of a weekday, internationalized @param day_nb: number of weekday UNIX like. => 0=Sunday @param ln: language for output @return: the string representation of the day """ _ = gettext_set_language(ln) if display == 'short': days = {0: _("Sun"), 1: _("Mon"), 2: _("Tue"), 3: _("Wed"), 4: _("Thu"), 5: _("Fri"), 6: _("Sat")} else: days = {0: _("Sunday"), 1: _("Monday"), 2: _("Tuesday"), 3: _("Wednesday"), 4: _("Thursday"), 5: _("Friday"), 6: _("Saturday")} return days[day_nb] def get_i18n_month_name(month_nb, display='short', ln=CFG_SITE_LANG): """ get a non-numeric representation of a month, internationalized. @param month_nb: number of month, (1 based!) =>1=jan,..,12=dec @param ln: language for output @return: the string representation of month """ _ = gettext_set_language(ln) if display == 'short': months = {0: _("Month"), 1: _("Jan"), 2: _("Feb"), 3: _("Mar"), 4: _("Apr"), 5: _("May"), 6: _("Jun"), 7: _("Jul"), 8: _("Aug"), 9: _("Sep"), 10: _("Oct"), 11: _("Nov"), 12: _("Dec")} else: months = {0: _("Month"), 1: _("January"), 2: _("February"), 3: _("March"), 4: _("April"), 5: _("May "), # trailing space distinguishes short/long form 6: _("June"), 7: _("July"), 8: _("August"), 9: _("September"), 10: _("October"), 11: _("November"), 12: _("December")} return months[month_nb].strip() def create_day_selectbox(name, selected_day=0, ln=CFG_SITE_LANG): """ Creates an HTML menu for day selection. (0..31 values). @param name: name of the control (i.e. name of the var you'll get) @param selected_day: preselect a day. Use 0 for the label 'Day' @param ln: language of the menu @return: html as string """ _ = gettext_set_language(ln) out = "<select name=\"%s\">\n"% name for i in range(0, 32): out += " <option value=\"%i\""% i if (i == selected_day): out += " selected=\"selected\"" if (i == 0): out += ">%s</option>\n"% _("Day") else: out += ">%i</option>\n"% i out += "</select>\n" return out def create_month_selectbox(name, selected_month=0, ln=CFG_SITE_LANG): """ Creates an HTML menu for month selection.
Value of selected field is numeric @param name: name of the control (your form will be sent with name=value...) @param selected_month: preselect a month. use 0 for the Label 'Month' @param ln: language of the menu @return: html as string """ out = "<select name=\"%s\">\n"% name for i in range(0, 13): out += "<option value=\"%i\""% i if (i == selected_month): out += " selected=\"selected\"" out += ">%s</option>\n"% get_i18n_month_name(i, ln) out += "</select>\n" return out def create_year_inputbox(name, value=0): """ Creates an HTML field (simple input) for year selection. @param name: name of the control (i.e. name of the variable you'll get) @param value: prefilled value (int) @return: html as string """ out = "<input type=\"text\" name=\"%s\" value=\"%i\" maxlength=\"4\" size=\"4\"/>\n"% (name, value) return out def create_year_selectbox(name, from_year=-1, length=10, selected_year=0, ln=CFG_SITE_LANG): """ Creates an HTML menu (dropdownbox) for year selection. @param name: name of control( i.e. name of the variable you'll get) @param from_year: year on which to begin. if <0 assume it is current year @param length: number of items in menu @param selected_year: initial selected year (if in range), else: label is selected @param ln: language @return: html as string """ _ = gettext_set_language(ln) if from_year < 0: from_year = time.localtime()[0] out = "<select name=\"%s\">\n"% name out += ' <option value="0"' if selected_year == 0: out += ' selected="selected"' out += ">%s</option>\n"% _("Year") for i in range(from_year, from_year + length): out += "<option value=\"%i\""% i if (i == selected_year): out += " selected=\"selected\"" out += ">%i</option>\n"% i out += "</select>\n" return out _RE_RUNTIMELIMIT_FULL = re.compile(r"(?:(?P<weekday_begin>[a-z]+)(?:-(?P<weekday_end>[a-z]+))?)?\s*((?P<hour_begin>\d\d?(:\d\d?)?)(-(?P<hour_end>\d\d?(:\d\d?)?))?)?", re.I) _RE_RUNTIMELIMIT_HOUR = re.compile(r'(?P<hours>\d\d?)(:(?P<minutes>\d\d?))?') def parse_runtime_limit(value, now=None): """ Parsing CLI option for runtime limit, supplied as VALUE. Value could be something like: Sunday 23:00-05:00, the format being [Wee[kday]] [hh[:mm][-hh[:mm]]]. The function will return two valid time ranges. The first could be in the past, containing the present or in the future. The second is always in the future. """ def extract_time(value): value = _RE_RUNTIMELIMIT_HOUR.search(value).groupdict() return timedelta(hours=int(value['hours']), minutes=int(value['minutes'])) def extract_weekday(value): key = value[:3].lower() try: return { 'mon' : 0, 'tue' : 1, 'wed' : 2, 'thu' : 3, 'fri' : 4, 'sat' : 5, 'sun' : 6, }[key] except KeyError: raise ValueError("%s is not a good weekday name." % value) if now is None: now = datetime.now() today = now.date() g = _RE_RUNTIMELIMIT_FULL.search(value) if not g: raise ValueError('"%s" does not seem to be correct format for parse_runtime_limit() [Wee[kday]] [hh[:mm][-hh[:mm]]]).' % value) pieces = g.groupdict() if pieces['weekday_begin'] is None: # No weekday specified. 
So either today or tomorrow first_occasion_day = timedelta(days=0) next_occasion_delta = timedelta(days=1) else: # If given 'Mon' then we transform it to 'Mon-Mon' if pieces['weekday_end'] is None: pieces['weekday_end'] = pieces['weekday_begin'] # Day range weekday_begin = extract_weekday(pieces['weekday_begin']) weekday_end = extract_weekday(pieces['weekday_end']) if weekday_begin <= today.weekday() <= weekday_end: first_occasion_day = timedelta(days=0) else: days = (weekday_begin - today.weekday()) % 7 first_occasion_day = timedelta(days=days) weekday = (now + first_occasion_day).weekday() if weekday < weekday_end: # Fits in the same week next_occasion_delta = timedelta(days=1) else: # The week after days = weekday_begin - weekday + 7 next_occasion_delta = timedelta(days=days) if pieces['hour_begin'] is None: pieces['hour_begin'] = '00:00' if pieces['hour_end'] is None: pieces['hour_end'] = '00:00' beginning_time = extract_time(pieces['hour_begin']) ending_time = extract_time(pieces['hour_end']) if not ending_time: ending_time = beginning_time + timedelta(days=1) elif beginning_time and ending_time and beginning_time > ending_time: ending_time += timedelta(days=1) start_time = real_datetime.combine(today, real_time(hour=0, minute=0)) current_range = ( start_time + first_occasion_day + beginning_time, start_time + first_occasion_day + ending_time ) if now > current_range[1]: current_range = tuple(t + next_occasion_delta for t in current_range) future_range = ( current_range[0] + next_occasion_delta, current_range[1] + next_occasion_delta ) return current_range, future_range def guess_datetime(datetime_string): """ Try to guess the datetime contained in a string of unknown format. @param datetime_string: the datetime representation. @type datetime_string: string @return: the guessed time. @rtype: L{time.struct_time} @raises ValueError: in case it's not possible to guess the time. """ if CFG_HAS_EGENIX_DATETIME: try: return Parser.DateTimeFromString(datetime_string).timetuple() except ValueError: pass else: for format in (None, '%x %X', '%X %x', '%Y-%m-%dT%H:%M:%SZ'): try: if format is None: # fall back to the platform default format accepted by time.strptime return time.strptime(datetime_string) return time.strptime(datetime_string, format) except ValueError: pass raise ValueError("It is not possible to guess the datetime format of %s" % datetime_string) def get_time_estimator(total): """ Given a total amount of items to compute, return a function that, if called every time an item is computed (or every step items are computed) will give a time estimation for how long it will take to compute the whole set of items. The function will return two values: the first is the number of seconds that are still needed to compute the whole set, the second value is the time in the future when the operation is expected to end. """ t1 = time.time() count = [0] def estimate_needed_time(step=1): count[0] += step t2 = time.time() t3 = 1.0 * (t2 - t1) / count[0] * (total - count[0]) return t3, t3 + t1 return estimate_needed_time # This library does not support strftime's "%s" or "%y" format strings. # Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])") def _findall(text, substr): # Also finds overlaps sites = [] i = 0 while 1: j = text.find(substr, i) if j == -1: break sites.append(j) i=j+1 return sites def strftime(fmt, dt): if not isinstance(dt, real_date): dt = datetime(dt[0], dt[1], dt[2], dt[3], dt[4], dt[5]) if dt.year >= 1900: return time.strftime(fmt, dt.timetuple()) illegal_formatting = _illegal_formatting.search(fmt) if illegal_formatting: raise TypeError("strftime of dates before 1900 does not handle" + illegal_formatting.group(0)) year = dt.year # For every non-leap year century, advance by # 6 years to get into the 28-year repeat cycle delta = 2000 - year off = 6 * (delta // 100 + delta // 400) year = year + off # Move to around the year 2000 year = year + ((2000 - year) // 28) * 28 timetuple = dt.timetuple() s1 = time.strftime(fmt, (year,) + timetuple[1:]) sites1 = _findall(s1, str(year)) s2 = time.strftime(fmt, (year+28,) + timetuple[1:]) sites2 = _findall(s2, str(year+28)) sites = [] for site in sites1: if site in sites2: sites.append(site) s = s1 syear = "%04d" % (dt.year,) for site in sites: s = s[:site] + syear + s[site+4:] return s def get_dst(date_obj): """Determine if dst is locally enabled at this time""" dst = 0 if date_obj.year >= 1900: tmp_date = time.mktime(date_obj.timetuple()) # DST is 1 so reduce time with 1 hour. dst = time.localtime(tmp_date)[-1] return dst def utc_to_localtime(date_str, fmt="%Y-%m-%d %H:%M:%S", input_fmt="%Y-%m-%dT%H:%M:%SZ"): """ Convert UTC to localtime Reference: - (1) http://www.openarchives.org/OAI/openarchivesprotocol.html#Dates - (2) http://www.w3.org/TR/NOTE-datetime This function works only with dates complying with the "Complete date plus hours, minutes and seconds" profile of ISO 8601 defined by (2), and linked from (1). Eg: 1994-11-05T13:15:30Z """ date_struct = datetime.strptime(date_str, input_fmt) date_struct += timedelta(hours=get_dst(date_struct)) date_struct -= timedelta(seconds=time.timezone) return strftime(fmt, date_struct) def localtime_to_utc(date_str, fmt="%Y-%m-%dT%H:%M:%SZ", input_fmt="%Y-%m-%d %H:%M:%S"): """Convert localtime to UTC""" date_struct = datetime.strptime(date_str, input_fmt) date_struct -= timedelta(hours=get_dst(date_struct)) date_struct += timedelta(seconds=time.timezone) return strftime(fmt, date_struct) def strptime(date_string, fmt): return real_datetime(*(time.strptime(date_string, fmt)[:6]))
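Two of the trickier helpers in action (the import path follows this module's location in Invenio; pinning 'now' makes the example deterministic):

from invenio.dateutils import parse_runtime_limit, strftime, datetime

current, future = parse_runtime_limit('Sunday 23:00-05:00', now=datetime(2013, 6, 1, 12, 0))
# current may lie in the past or contain 'now'; future is always the next occurrence
print(strftime('%Y-%m-%d', datetime(1850, 7, 4)))  # safe below 1900, unlike time.strftime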
gpl-2.0
1,494,649,386,233,748,200
32.99639
172
0.569449
false
quantifiedcode-bot/blitzdb
blitzdb/tests/test_querying.py
2
10823
from __future__ import absolute_import from .fixtures import * from blitzdb.tests.helpers.movie_data import Actor, Director, Movie import blitzdb def test_basic_delete(backend, small_test_data): backend.filter(Actor, {}).delete() backend.commit() assert len(backend.filter(Actor, {})) == 0 def test_basic_storage(backend, small_test_data): (movies, actors, directors) = small_test_data assert len(backend.filter(Movie, {})) == len(movies) assert len(backend.filter(Actor, {})) == len(actors) #removed this functionality since it was misleading... @pytest.mark.skipif(True, reason='Removed functionality') def test_keys_with_dots(backend): actor = Actor({'some.key.with.nasty.dots': [{'some.more.nasty.dots': 100}], 'pk': 'test'}) backend.save(actor) backend.commit() assert actor == backend.get(Actor, {'pk': 'test'}) def test_delete(backend): actor = Actor({'foo' : 'bar'}) backend.save(actor) backend.commit() assert actor.foo == 'bar' assert backend.get(Actor,{'pk' : actor.pk}).foo == 'bar' del actor.foo with pytest.raises(AttributeError): actor.foo with pytest.raises(KeyError): actor['foo'] backend.save(actor) backend.commit() with pytest.raises(AttributeError): backend.get(Actor,{'pk' : actor.pk}).foo def test_negative_indexing(backend, small_test_data): (movies, actors, directors) = small_test_data actors = backend.filter(Actor, {}) assert actors[-1] == actors[len(actors) - 1] assert actors[-10:-1] == actors[len(actors) - 10:len(actors) - 1] assert actors[-len(actors):-1] == actors[0:len(actors) - 1] # To do: Make step tests for file backend (MongoDB does not support this) # assert actors[-10:-1:2] == actors[len(actors)-10:len(actors)-1:2] def test_missing_keys_in_slice(backend, small_test_data): (movies, actors, directors) = small_test_data actors = backend.filter(Actor, {}) assert actors[:] == actors assert actors[1:] == actors[1:len(actors)] assert actors[:len(actors)] == actors[0:len(actors)] def test_query_set(backend): actors = [Actor({'foo': 'bar', 'value': 10}), Actor({'foo': 'baz', 'value': 10}), Actor({'foo': 'baz', 'value': 11}), Actor({'foo': 'bar', 'value': 11}) ] for actor in actors: backend.save(actor) backend.commit() queryset = backend.filter(Actor, {'foo': 'bar','value' : 10}) assert queryset.next() == actors[0] def test_and_queries(backend): backend.save(Actor({'foo': 'bar', 'value': 10})) backend.save(Actor({'foo': 'baz', 'value': 10})) backend.save(Actor({'foo': 'baz', 'value': 11})) backend.save(Actor({'foo': 'bar', 'value': 11})) backend.commit() assert len(backend.filter(Actor, {'foo': 'bar'})) == 2 assert len(backend.filter(Actor, {'value': 10})) == 2 assert len(backend.filter(Actor, {'foo': 'bar', 'value': 10})) == 1 assert len(backend.filter(Actor, {'foo': 'baz', 'value': 10})) == 1 assert len(backend.filter(Actor, {'foo': 'bar', 'value': 11})) == 1 assert len(backend.filter(Actor, {'foo': 'baz', 'value': 11})) == 1 def test_composite_queries(backend): backend.filter(Actor, {}).delete() backend.save(Actor({'values': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})) backend.save(Actor({'values': [7, 6, 5, 4, 3, 2, 1]})) backend.save(Actor({'values': [1, 2, 3, 4]})) backend.save(Actor({'values': [1, 2, 3, 4, {'foo': 'bar'}]})) backend.save(Actor({'values': 'foobar'})) backend.commit() for f in (lambda: True, lambda: backend.create_index(Actor, 'values')): assert len(backend.filter(Actor, {})) == 5 assert len(backend.filter(Actor, {'values': [1, 2, 3, 4]})) == 1 assert len(backend.filter(Actor, {'values': [1, 2, 3, 4, {'foo': 'bar'}]})) == 1 assert len(backend.filter(Actor, {'values': [1, 
2, 3, {'foo': 'bar'}, 4]})) == 0 assert len(backend.filter(Actor, {'values': [1, 2, 3, 4, 5]})) == 0 assert len(backend.filter(Actor, {'values': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]})) == 0 assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1]}})) == 4 assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1, {'foo': 'bar'}]}})) == 1 assert len(backend.filter(Actor, {'values': {'$all': [{'foo': 'bar'}]}})) == 1 assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1, 14]}})) == 0 assert len(backend.filter(Actor, {'values': {'$all': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]}})) == 1 assert len(backend.filter(Actor, {'values': {'$in': [[1, 2, 3, 4], [7, 6, 5, 4, 3, 2, 1], [1, 2, 3, 5], 'foobar']}})) == 3 def test_operators(backend): backend.filter(Actor, {}).delete() marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924}) leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974}) david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952}) charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889}) backend.save(marlon_brando) backend.save(leonardo_di_caprio) backend.save(david_hasselhoff) backend.save(charlie_chaplin) backend.commit() assert len(backend.filter(Actor, {})) == 4 for op, results in (('$gt', [david_hasselhoff]), ('$gte', [david_hasselhoff]), ('$lt', [charlie_chaplin]), ('$lte', [charlie_chaplin])): query = { '$and': [ {'gross_income_m': {op: 1.0}}, {'is_funny': True} ] } assert len(backend.filter(Actor, query)) == len(results) assert results in backend.filter(Actor, query) for op, results in (('$gt', [david_hasselhoff, charlie_chaplin, marlon_brando]), ('$gte', [marlon_brando, david_hasselhoff, charlie_chaplin]), ('$lt', [charlie_chaplin]), ('$lte', [charlie_chaplin])): query = { '$and': [ {'$or': [ {'gross_income_m': {op: 1.0}}, {'birth_year': {'$lt': 1900}}, ]}, {'$or': [ {'is_funny': True}, {'name': 'Marlon Brando'}, ]}, ] } assert len(backend.filter(Actor, query)) == len(results) assert results in backend.filter(Actor, query) assert len(backend.filter(Actor, {'name': {'$ne': 'David Hasselhoff'}})) == 3 assert len(backend.filter(Actor, {'name': 'David Hasselhoff'})) == 1 assert len(backend.filter(Actor, {'name': {'$not': {'$in': ['David Hasselhoff', 'Marlon Brando', 'Charlie Chaplin']}}})) == 1 assert len(backend.filter(Actor, {'name': {'$in': ['Marlon Brando', 'Leonardo di Caprio']}})) == 2 def test_regex_operator(backend, small_test_data): backend.filter(Actor, {}).delete() marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924}) marlon_wayans = Actor({'name': 'Marlon Wayans'}) backend.save(marlon_brando) backend.save(marlon_wayans) backend.commit() assert backend.get(Actor, {'name': {'$regex': r'^Marlon\s+(?!Wayans)[\w]+$'}}) == marlon_brando assert len(backend.filter(Actor, {'name': {'$regex': r'^Marlon\s+.*$'}})) == 2 assert len(backend.filter(Actor, {'name': {'$regex': r'^.*\s+Brando$'}})) == 1 def test_list_query(backend, small_test_data): (movies, actors, directors) = small_test_data movie = None i = 0 while not movie or len(movie.cast) < 4: movie = movies[i] i += 1 actor = movie.cast[0]['actor'] other_movie = movies[i % len(movies)] while other_movie in 
def test_list_query(backend, small_test_data):
    (movies, actors, directors) = small_test_data

    movie = None
    i = 0
    while not movie or len(movie.cast) < 4:
        movie = movies[i]
        i += 1

    actor = movie.cast[0]['actor']
    other_movie = movies[i % len(movies)]
    while other_movie in actor.movies:
        other_movie = movies[i % len(movies)]
        i += 1

    assert actor in backend.filter(Actor, {'movies': movie})
    assert actor not in backend.filter(Actor, {'movies': other_movie})


def test_list_query_multiple_items(backend, small_test_data):
    (movies, actors, directors) = small_test_data

    actor = None
    i = 0
    while not actor or len(actor.movies) < 2:
        actor = actors[i]
        i += 1

    assert actor in backend.filter(Actor, {'movies': actor.movies})


def test_indexed_delete(backend, small_test_data):
    all_movies = backend.filter(Movie, {})
    for movie in all_movies:
        backend.filter(Actor, {'movies': movie}).delete()
    backend.commit()

    for actor in backend.filter(Actor, {}):
        assert actor.movies == []


def test_non_indexed_delete(backend, small_test_data):
    (movies, actors, directors) = small_test_data

    for movie in movies:
        backend.filter(Director, {'movies': {'$all': [movie]}}).delete()
    backend.commit()

    for director in backend.filter(Director, {}):
        assert director.movies == []


def test_positional_query(backend, small_test_data):
    """We test a search query which explicitly references a given list item in an object."""
    (movies, actors, directors) = small_test_data

    movie = None
    i = 0
    while not movie or len(movie.cast) < 3:
        if len(movies[i].cast):
            movie = movies[i]
            actor = movie.cast[0]['actor']
            index = actor.movies.index(movie)
        i += 1

    assert actor in backend.filter(Actor, {'movies.%d' % index: movie})


def test_default_backend(backend, small_test_data):
    movies = backend.filter(Movie, {})
    old_len = len(movies)
    movie = movies[0]
    movie.delete()
    backend.commit()

    with pytest.raises(Movie.DoesNotExist):
        backend.get(Movie, {'pk': movie.pk})

    assert old_len == len(backend.filter(Movie, {})) + 1


def test_index_reloading(backend, small_test_data):
    (movies, actors, directors) = small_test_data

    backend.filter(Actor, {'movies': movies[0]}).delete()
    backend.commit()

    assert list(backend.filter(Actor, {'movies': movies[0]})) == []


def test_query_function(backend):
    if isinstance(backend, blitzdb.backends.mongo.Backend):
        pytest.skip('Query by function is not supported for MongoDB')

    Movie({'name': 'The Godfather', 'year': 1972}).save(backend)
    Movie({'name': 'Goodfellas', 'year': 1990}).save(backend)
    Movie({'name': 'Star Wars', 'year': 1977}).save(backend)
    backend.commit()

    movies = backend.filter(Movie, {
        'year': lambda year: year >= 1970 and year <= 1979,
    })
    assert sorted([m.name for m in movies]) == ['Star Wars', 'The Godfather']
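# Illustrative sketch, not part of the original suite: as test_query_function
# shows, the file backend accepts a callable in place of a query value; the
# callable appears to receive the stored attribute and match on a truthy
# return. The helper name is an assumption for illustration only.
def _example_query_by_function(backend):
    return backend.filter(Movie, {'year': lambda year: 1970 <= year <= 1979})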
mit
314,545,444,638,545,340
31.02071
204
0.586159
false
google/objax
objax/jaxboard.py
1
4637
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import enum
import os
from time import time
from typing import Union, Callable, Tuple, ByteString

import numpy as np
from tensorboard.compat.proto import event_pb2
from tensorboard.compat.proto import summary_pb2
from tensorboard.summary.writer.event_file_writer import EventFileWriter
from tensorboard.util.tensor_util import make_tensor_proto

from objax import util


class Reducer(enum.Enum):
    """Reduces tensor batch into a single tensor."""
    FIRST = lambda x: x[0]
    LAST = lambda x: x[-1]
    MEAN = lambda x: np.mean(x)


class DelayedScalar:
    def __init__(self, reduce: Union[Callable, Reducer]):
        self.values = []
        self.reduce = reduce

    def __call__(self):
        return self.reduce(self.values)


class Image:
    def __init__(self, shape: Tuple[int, int, int], png: ByteString):
        self.shape = shape
        self.png = png


class Text:
    def __init__(self, text: str):
        self.text = text


class Summary(dict):
    """Writes entries to `Summary` protocol buffer."""

    def image(self, tag: str, image: np.ndarray):
        """Adds image to the summary. Float image in [-1, 1] in CHW format expected."""
        self[tag] = Image(image.shape, util.image.to_png(image))

    def scalar(self, tag: str, value: float, reduce: Union[Callable, Reducer] = Reducer.MEAN):
        """Adds scalar to the summary."""
        if tag not in self:
            self[tag] = DelayedScalar(reduce)
        self[tag].values.append(value)

    def text(self, tag: str, text: str):
        """Adds text to the summary."""
        self[tag] = Text(text)

    def __call__(self):
        entries = []
        for tag, value in self.items():
            if isinstance(value, DelayedScalar):
                entries.append(summary_pb2.Summary.Value(tag=tag, simple_value=value()))
            elif isinstance(value, Image):
                image_summary = summary_pb2.Summary.Image(encoded_image_string=value.png,
                                                          colorspace=value.shape[0],
                                                          height=value.shape[1],
                                                          width=value.shape[2])
                entries.append(summary_pb2.Summary.Value(tag=tag, image=image_summary))
            elif isinstance(value, Text):
                metadata = summary_pb2.SummaryMetadata(
                    plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name='text'))
                entries.append(summary_pb2.Summary.Value(
                    tag=tag,
                    metadata=metadata,
                    tensor=make_tensor_proto(values=value.text.encode('utf-8'), shape=(1,))))
            else:
                raise NotImplementedError(tag, value)
        return summary_pb2.Summary(value=entries)
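# Illustrative sketch, not part of the original module: accumulate a few
# scalar values under one tag, add a text entry, and materialize the protocol
# buffer. The helper name and literal values are assumptions for illustration.
def _example_summary_usage():
    summary = Summary()
    for loss in (0.9, 0.7, 0.5):
        summary.scalar('losses/train', loss)  # reduced with Reducer.MEAN by default
    summary.text('notes', 'lr=0.1, batch=256')
    return summary()  # -> summary_pb2.Summary with two values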
""" if not os.path.isdir(logdir): os.makedirs(logdir, exist_ok=True) self.writer = EventFileWriter(logdir, queue_size, write_interval) def write(self, summary: Summary, step: int): """Adds on event to the event file.""" self.writer.add_event(event_pb2.Event(step=step, summary=summary(), wall_time=time())) def close(self): """Flushes the event file to disk and close the file.""" self.writer.close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close()
apache-2.0
5,695,499,043,143,684,000
36.395161
116
0.605564
false